/*-
 * Copyright (c) 1999, 2000 Matthew R. Green
 * Copyright (c) 2001 - 2003 by Thomas Moestl <tmm@FreeBSD.org>
 * Copyright (c) 2009 by Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: NetBSD: psycho.c,v 1.39 2001/10/07 20:30:41 eeh Exp
 *	from: FreeBSD: psycho.c 183152 2008-09-18 19:45:22Z marius
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/sparc64/pci/fire.c 219785 2011-03-19 20:36:05Z marius $");

/*
 * Driver for `Fire' JBus to PCI Express and `Oberon' Uranus to PCI Express
 * bridges
 */

#include "opt_fire.h"
#include "opt_ofw_pci.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/timetc.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_pci.h>
#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/bus_common.h>
#include <machine/bus_private.h>
#include <machine/fsr.h>
#include <machine/iommureg.h>
#include <machine/iommuvar.h>
#include <machine/pmap.h>
#include <machine/resource.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <sparc64/pci/ofw_pci.h>
#include <sparc64/pci/firereg.h>
#include <sparc64/pci/firevar.h>

#include "pcib_if.h"

struct fire_msiqarg;

static bus_space_tag_t fire_alloc_bus_tag(struct fire_softc *sc, int type);
static const struct fire_desc *fire_get_desc(device_t dev);
static void fire_dmamap_sync(bus_dma_tag_t dt __unused, bus_dmamap_t map,
    bus_dmasync_op_t op);
static int fire_get_intrmap(struct fire_softc *sc, u_int ino,
    bus_addr_t *intrmapptr, bus_addr_t *intrclrptr);
static void fire_intr_assign(void *arg);
static void fire_intr_clear(void *arg);
static void fire_intr_disable(void *arg);
static void fire_intr_enable(void *arg);
static int fire_intr_register(struct fire_softc *sc, u_int ino);
static inline void fire_msiq_common(struct intr_vector *iv,
    struct fire_msiqarg *fmqa);
static void fire_msiq_filter(void *cookie);
static void fire_msiq_handler(void *cookie);
static void fire_set_intr(struct fire_softc *sc, u_int index, u_int ino,
    driver_filter_t handler, void *arg);
static timecounter_get_t fire_get_timecount;

/* Interrupt handlers */
static driver_filter_t fire_dmc_pec;
static driver_filter_t fire_pcie;
static driver_filter_t fire_xcb;

/*
 * Methods
 */
static bus_activate_resource_t fire_activate_resource;
static pcib_alloc_msi_t fire_alloc_msi;
static pcib_alloc_msix_t fire_alloc_msix;
static bus_alloc_resource_t fire_alloc_resource;
static device_attach_t fire_attach;
static bus_deactivate_resource_t fire_deactivate_resource;
static bus_get_dma_tag_t fire_get_dma_tag;
static ofw_bus_get_node_t fire_get_node;
static pcib_map_msi_t fire_map_msi;
static pcib_maxslots_t fire_maxslots;
static device_probe_t fire_probe;
static pcib_read_config_t fire_read_config;
static bus_read_ivar_t fire_read_ivar;
static pcib_release_msi_t fire_release_msi;
static pcib_release_msix_t fire_release_msix;
static bus_release_resource_t fire_release_resource;
static pcib_route_interrupt_t fire_route_interrupt;
static bus_setup_intr_t fire_setup_intr;
static bus_teardown_intr_t fire_teardown_intr;
static pcib_write_config_t fire_write_config;

static device_method_t fire_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		fire_probe),
	DEVMETHOD(device_attach,	fire_attach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_read_ivar,	fire_read_ivar),
	DEVMETHOD(bus_setup_intr,	fire_setup_intr),
	DEVMETHOD(bus_teardown_intr,	fire_teardown_intr),
	DEVMETHOD(bus_alloc_resource,	fire_alloc_resource),
	DEVMETHOD(bus_activate_resource,	fire_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	fire_deactivate_resource),
	DEVMETHOD(bus_release_resource,	fire_release_resource),
	DEVMETHOD(bus_get_dma_tag,	fire_get_dma_tag),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,	fire_maxslots),
	DEVMETHOD(pcib_read_config,	fire_read_config),
	DEVMETHOD(pcib_write_config,	fire_write_config),
	DEVMETHOD(pcib_route_interrupt,	fire_route_interrupt),
	DEVMETHOD(pcib_alloc_msi,	fire_alloc_msi),
	DEVMETHOD(pcib_release_msi,	fire_release_msi),
	DEVMETHOD(pcib_alloc_msix,	fire_alloc_msix),
	DEVMETHOD(pcib_release_msix,	fire_release_msix),
	DEVMETHOD(pcib_map_msi,		fire_map_msi),

	/* ofw_bus interface */
	DEVMETHOD(ofw_bus_get_node,	fire_get_node),

	KOBJMETHOD_END
};

static devclass_t fire_devclass;

DEFINE_CLASS_0(pcib, fire_driver, fire_methods, sizeof(struct fire_softc));
EARLY_DRIVER_MODULE(fire, nexus, fire_driver, fire_devclass, 0, 0,
    BUS_PASS_BUS);
MODULE_DEPEND(fire, nexus, 1, 1, 1);

static const struct intr_controller fire_ic = {
	fire_intr_enable,
	fire_intr_disable,
	fire_intr_assign,
	fire_intr_clear
};

struct fire_icarg {
	struct fire_softc	*fica_sc;
	bus_addr_t		fica_map;
	bus_addr_t		fica_clr;
};

static const struct intr_controller fire_msiqc_filter = {
	fire_intr_enable,
	fire_intr_disable,
	fire_intr_assign,
	NULL
};

struct fire_msiqarg {
	struct fire_icarg	fmqa_fica;
	struct mtx		fmqa_mtx;
	struct fo_msiq_record	*fmqa_base;
	uint64_t		fmqa_head;
	uint64_t		fmqa_tail;
	uint32_t		fmqa_msiq;
	uint32_t		fmqa_msi;
};

#define	FIRE_PERF_CNT_QLTY	100

#define	FIRE_SPC_BARRIER(spc, sc, offs, len, flags)			\
	bus_barrier((sc)->sc_mem_res[(spc)], (offs), (len), (flags))
#define	FIRE_SPC_READ_8(spc, sc, offs)					\
	bus_read_8((sc)->sc_mem_res[(spc)], (offs))
#define	FIRE_SPC_WRITE_8(spc, sc, offs, v)				\
	bus_write_8((sc)->sc_mem_res[(spc)], (offs), (v))

#ifndef FIRE_DEBUG
#define	FIRE_SPC_SET(spc, sc, offs, reg, v)				\
	FIRE_SPC_WRITE_8((spc), (sc), (offs), (v))
#else
#define	FIRE_SPC_SET(spc, sc, offs, reg, v) do {			\
	device_printf((sc)->sc_dev, reg " 0x%016llx -> 0x%016llx\n",	\
	    (unsigned long long)FIRE_SPC_READ_8((spc), (sc), (offs)),	\
	    (unsigned long long)(v));					\
	FIRE_SPC_WRITE_8((spc), (sc), (offs), (v));			\
	} while (0)
#endif

#define	FIRE_PCI_BARRIER(sc, offs, len, flags)				\
	FIRE_SPC_BARRIER(FIRE_PCI, (sc), (offs), len, flags)
#define	FIRE_PCI_READ_8(sc, offs)					\
	FIRE_SPC_READ_8(FIRE_PCI, (sc), (offs))
#define	FIRE_PCI_WRITE_8(sc, offs, v)					\
	FIRE_SPC_WRITE_8(FIRE_PCI, (sc), (offs), (v))
#define	FIRE_CTRL_BARRIER(sc, offs, len, flags)				\
	FIRE_SPC_BARRIER(FIRE_CTRL, (sc), (offs), len, flags)
#define	FIRE_CTRL_READ_8(sc, offs)					\
	FIRE_SPC_READ_8(FIRE_CTRL, (sc), (offs))
#define	FIRE_CTRL_WRITE_8(sc, offs, v)					\
	FIRE_SPC_WRITE_8(FIRE_CTRL, (sc), (offs), (v))

#define	FIRE_PCI_SET(sc, offs, v)					\
	FIRE_SPC_SET(FIRE_PCI, (sc), (offs), # offs, (v))
#define	FIRE_CTRL_SET(sc, offs, v)					\
	FIRE_SPC_SET(FIRE_CTRL, (sc), (offs), # offs, (v))

struct fire_desc {
	const char	*fd_string;
	int		fd_mode;
	const char	*fd_name;
};

static const struct fire_desc fire_compats[] = {
	{ "pciex108e,80f0",	FIRE_MODE_FIRE,		"Fire" },
#if 0
	{ "pciex108e,80f8",	FIRE_MODE_OBERON,	"Oberon" },
#endif
	{ NULL,			0,			NULL }
};

static const struct fire_desc *
fire_get_desc(device_t dev)
{
	const struct fire_desc *desc;
	const char *compat;

	compat = ofw_bus_get_compat(dev);
	if (compat == NULL)
		return (NULL);
	for (desc = fire_compats; desc->fd_string != NULL; desc++)
		if (strcmp(desc->fd_string, compat) == 0)
			return (desc);
	return (NULL);
}

static int
fire_probe(device_t dev)
{
	const char *dtype;

	dtype = ofw_bus_get_type(dev);
	if (dtype != NULL && strcmp(dtype, OFW_TYPE_PCIE) == 0 &&
	    fire_get_desc(dev) != NULL) {
		device_set_desc(dev, "Sun Host-PCIe bridge");
		return (BUS_PROBE_GENERIC);
	}
	return (ENXIO);
}

static int
fire_attach(device_t dev)
{
	struct fire_softc *sc;
	const struct fire_desc *desc;
	struct ofw_pci_msi_ranges msi_ranges;
	struct ofw_pci_msi_addr_ranges msi_addr_ranges;
	struct ofw_pci_msi_eq_to_devino msi_eq_to_devino;
	struct fire_msiqarg *fmqa;
	struct timecounter *tc;
	struct ofw_pci_ranges *range;
	uint64_t ino_bitmap, val;
	phandle_t node;
	uint32_t prop, prop_array[2];
	int i, j, mode;
	u_int lw;
	uint16_t mps;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);
	desc = fire_get_desc(dev);
	mode = desc->fd_mode;

	sc->sc_dev = dev;
	sc->sc_node = node;
	sc->sc_mode = mode;
	sc->sc_flags = 0;

	mtx_init(&sc->sc_msi_mtx, "msi_mtx", NULL, MTX_DEF);
	mtx_init(&sc->sc_pcib_mtx, "pcib_mtx", NULL, MTX_SPIN);

	/*
	 * Fire and Oberon have two register banks:
	 * (0) per-PBM PCI Express configuration and status registers
	 * (1) (shared) Fire/Oberon controller configuration and status
	 *     registers
	 */
	for (i = 0; i < FIRE_NREG; i++) {
		j = i;
		sc->sc_mem_res[i] = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &j, RF_ACTIVE);
		if (sc->sc_mem_res[i] == NULL)
			panic("%s: could not allocate register bank %d",
			    __func__, i);
	}

	if (OF_getprop(node, "portid", &sc->sc_ign, sizeof(sc->sc_ign)) == -1)
		panic("%s: could not determine IGN", __func__);
	if (OF_getprop(node, "module-revision#", &prop, sizeof(prop)) == -1)
		panic("%s: could not determine module-revision", __func__);

	device_printf(dev, "%s, module-revision %d, IGN %#x\n",
	    desc->fd_name, prop, sc->sc_ign);

	/*
	 * Hunt through all the interrupt mapping regs and register
	 * the interrupt controller for our interrupt vectors.  We do
	 * this early in order to be able to catch stray interrupts.
	 */
	i = OF_getprop(node, "ino-bitmap", (void *)prop_array,
	    sizeof(prop_array));
	if (i == -1)
		panic("%s: could not get ino-bitmap", __func__);
	ino_bitmap = ((uint64_t)prop_array[1] << 32) | prop_array[0];
	for (i = 0; i <= FO_MAX_INO; i++) {
		if ((ino_bitmap & (1ULL << i)) == 0)
			continue;
		j = fire_intr_register(sc, i);
		if (j != 0)
			device_printf(dev, "could not register interrupt "
			    "controller for INO %d (%d)\n", i, j);
	}

	/* JBC/UBC module initialization */
	FIRE_CTRL_SET(sc, FO_XBC_ERR_LOG_EN, ~0ULL);
	FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL);
	/* not enabled by OpenSolaris */
	FIRE_CTRL_SET(sc, FO_XBC_INT_EN, ~0ULL);
	if (sc->sc_mode == FIRE_MODE_FIRE) {
		FIRE_CTRL_SET(sc, FIRE_JBUS_PAR_CTRL,
		    FIRE_JBUS_PAR_CTRL_P_EN);
		FIRE_CTRL_SET(sc, FIRE_JBC_FATAL_RST_EN,
		    ((1ULL << FIRE_JBC_FATAL_RST_EN_SPARE_P_INT_SHFT) &
		    FIRE_JBC_FATAL_RST_EN_SPARE_P_INT_MASK) |
		    FIRE_JBC_FATAL_RST_EN_MB_PEA_P_INT |
		    FIRE_JBC_FATAL_RST_EN_CPE_P_INT |
		    FIRE_JBC_FATAL_RST_EN_APE_P_INT |
		    FIRE_JBC_FATAL_RST_EN_PIO_CPE_INT |
		    FIRE_JBC_FATAL_RST_EN_JTCEEW_P_INT |
		    FIRE_JBC_FATAL_RST_EN_JTCEEI_P_INT |
		    FIRE_JBC_FATAL_RST_EN_JTCEER_P_INT);
		FIRE_CTRL_SET(sc, FIRE_JBC_CORE_BLOCK_INT_EN, ~0ULL);
	}

	/* TLU initialization */
	FIRE_PCI_SET(sc, FO_PCI_TLU_OEVENT_STAT_CLR,
	    FO_PCI_TLU_OEVENT_S_MASK | FO_PCI_TLU_OEVENT_P_MASK);
	/* not enabled by OpenSolaris */
	FIRE_PCI_SET(sc, FO_PCI_TLU_OEVENT_INT_EN,
	    FO_PCI_TLU_OEVENT_S_MASK | FO_PCI_TLU_OEVENT_P_MASK);
	FIRE_PCI_SET(sc, FO_PCI_TLU_UERR_STAT_CLR,
	    FO_PCI_TLU_UERR_INT_S_MASK | FO_PCI_TLU_UERR_INT_P_MASK);
	/* not enabled by OpenSolaris */
	FIRE_PCI_SET(sc, FO_PCI_TLU_UERR_INT_EN,
	    FO_PCI_TLU_UERR_INT_S_MASK | FO_PCI_TLU_UERR_INT_P_MASK);
	FIRE_PCI_SET(sc, FO_PCI_TLU_CERR_STAT_CLR,
	    FO_PCI_TLU_CERR_INT_S_MASK | FO_PCI_TLU_CERR_INT_P_MASK);
	/* not enabled by OpenSolaris */
	FIRE_PCI_SET(sc, FO_PCI_TLU_CERR_INT_EN,
	    FO_PCI_TLU_CERR_INT_S_MASK | FO_PCI_TLU_CERR_INT_P_MASK);
	val = FIRE_PCI_READ_8(sc, FO_PCI_TLU_CTRL) |
	    ((FO_PCI_TLU_CTRL_L0S_TIM_DFLT << FO_PCI_TLU_CTRL_L0S_TIM_SHFT) &
	    FO_PCI_TLU_CTRL_L0S_TIM_MASK) |
	    ((FO_PCI_TLU_CTRL_CFG_DFLT << FO_PCI_TLU_CTRL_CFG_SHFT) &
	    FO_PCI_TLU_CTRL_CFG_MASK);
	if (sc->sc_mode == FIRE_MODE_OBERON)
		val &= ~FO_PCI_TLU_CTRL_NWPR_EN;
	val |= FO_PCI_TLU_CTRL_CFG_REMAIN_DETECT_QUIET;
	FIRE_PCI_SET(sc, FO_PCI_TLU_CTRL, val);
	FIRE_PCI_SET(sc, FO_PCI_TLU_DEV_CTRL, 0);
	FIRE_PCI_SET(sc, FO_PCI_TLU_LNK_CTRL, FO_PCI_TLU_LNK_CTRL_CLK);

	/* DLU/LPU initialization */
	if (sc->sc_mode == FIRE_MODE_OBERON)
		FIRE_PCI_SET(sc, FO_PCI_LPU_INT_MASK, 0);
	else
		FIRE_PCI_SET(sc, FO_PCI_LPU_RST, 0);
	FIRE_PCI_SET(sc, FO_PCI_LPU_LNK_LYR_CFG,
	    FO_PCI_LPU_LNK_LYR_CFG_VC0_EN);
	FIRE_PCI_SET(sc, FO_PCI_LPU_FLW_CTRL_UPDT_CTRL,
	    FO_PCI_LPU_FLW_CTRL_UPDT_CTRL_FC0_NP_EN |
	    FO_PCI_LPU_FLW_CTRL_UPDT_CTRL_FC0_P_EN);
	if (sc->sc_mode == FIRE_MODE_OBERON)
		FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RPLY_TMR_THRS,
		    (OBERON_PCI_LPU_TXLNK_RPLY_TMR_THRS_DFLT <<
		    FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_SHFT) &
		    FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_MASK);
	else {
		switch ((FIRE_PCI_READ_8(sc, FO_PCI_TLU_LNK_STAT) &
		    FO_PCI_TLU_LNK_STAT_WDTH_MASK) >>
		    FO_PCI_TLU_LNK_STAT_WDTH_SHFT) {
		case 1:
			lw = 0;
			break;
		case 4:
			lw = 1;
			break;
		case 8:
			lw = 2;
			break;
		case 16:
			lw = 3;
			break;
		default:
			lw = 0;
		}
		mps = (FIRE_PCI_READ_8(sc, FO_PCI_TLU_CTRL) &
		    FO_PCI_TLU_CTRL_CFG_MASK) >> FO_PCI_TLU_CTRL_CFG_SHFT;
		i = sizeof(fire_freq_nak_tmr_thrs) /
		    sizeof(*fire_freq_nak_tmr_thrs);
		if (mps >= i)
			mps = i - 1;
		FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS,
		    (fire_freq_nak_tmr_thrs[mps][lw] <<
		    FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS_SHFT) &
		    FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS_MASK);
		FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RPLY_TMR_THRS,
		    (fire_rply_tmr_thrs[mps][lw] <<
		    FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_SHFT) &
		    FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_MASK);
		FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RTR_FIFO_PTR,
		    ((FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_DFLT <<
		    FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_SHFT) &
		    FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_MASK) |
		    ((FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_DFLT <<
		    FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_SHFT) &
		    FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_MASK));
		FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG2,
		    (FO_PCI_LPU_LTSSM_CFG2_12_TO_DFLT <<
		    FO_PCI_LPU_LTSSM_CFG2_12_TO_SHFT) &
		    FO_PCI_LPU_LTSSM_CFG2_12_TO_MASK);
		FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG3,
		    (FO_PCI_LPU_LTSSM_CFG3_2_TO_DFLT <<
		    FO_PCI_LPU_LTSSM_CFG3_2_TO_SHFT) &
		    FO_PCI_LPU_LTSSM_CFG3_2_TO_MASK);
		FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG4,
		    ((FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_DFLT <<
		    FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_SHFT) &
		    FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_MASK) |
		    ((FO_PCI_LPU_LTSSM_CFG4_N_FTS_DFLT <<
		    FO_PCI_LPU_LTSSM_CFG4_N_FTS_SHFT) &
		    FO_PCI_LPU_LTSSM_CFG4_N_FTS_MASK));
		FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG5, 0);
	}

	/* ILU initialization */
	FIRE_PCI_SET(sc, FO_PCI_ILU_ERR_STAT_CLR, ~0ULL);
	/* not enabled by OpenSolaris */
	FIRE_PCI_SET(sc, FO_PCI_ILU_INT_EN, ~0ULL);

	/* IMU initialization */
	FIRE_PCI_SET(sc, FO_PCI_IMU_ERR_STAT_CLR, ~0ULL);
	FIRE_PCI_SET(sc, FO_PCI_IMU_INT_EN,
	    FIRE_PCI_READ_8(sc, FO_PCI_IMU_INT_EN) &
	    ~(FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_S |
	    FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_S |
	    FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_S |
	    FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_P |
	    FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_P |
	    FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_P));

	/* MMU initialization */
	FIRE_PCI_SET(sc, FO_PCI_MMU_ERR_STAT_CLR,
	    FO_PCI_MMU_ERR_INT_S_MASK | FO_PCI_MMU_ERR_INT_P_MASK);
	/* not enabled by OpenSolaris */
	FIRE_PCI_SET(sc, FO_PCI_MMU_INT_EN,
	    FO_PCI_MMU_ERR_INT_S_MASK | FO_PCI_MMU_ERR_INT_P_MASK);

	/* DMC initialization */
	FIRE_PCI_SET(sc, FO_PCI_DMC_CORE_BLOCK_INT_EN, ~0ULL);
	FIRE_PCI_SET(sc, FO_PCI_DMC_DBG_SEL_PORTA, 0);
	FIRE_PCI_SET(sc, FO_PCI_DMC_DBG_SEL_PORTB, 0);

	/* PEC initialization */
	FIRE_PCI_SET(sc, FO_PCI_PEC_CORE_BLOCK_INT_EN, ~0ULL);

	/* Establish handlers for interesting interrupts. */
	if ((ino_bitmap & (1ULL << FO_DMC_PEC_INO)) != 0)
		fire_set_intr(sc, 1, FO_DMC_PEC_INO, fire_dmc_pec, sc);
	if ((ino_bitmap & (1ULL << FO_XCB_INO)) != 0)
		fire_set_intr(sc, 0, FO_XCB_INO, fire_xcb, sc);

	/* MSI/MSI-X support */
	if (OF_getprop(node, "#msi", &sc->sc_msi_count,
	    sizeof(sc->sc_msi_count)) == -1)
		panic("%s: could not determine MSI count", __func__);
	if (OF_getprop(node, "msi-ranges", &msi_ranges,
	    sizeof(msi_ranges)) == -1)
		sc->sc_msi_first = 0;
	else
		sc->sc_msi_first = msi_ranges.first;
	if (OF_getprop(node, "msi-data-mask", &sc->sc_msi_data_mask,
	    sizeof(sc->sc_msi_data_mask)) == -1)
		panic("%s: could not determine MSI data mask", __func__);
	if (OF_getprop(node, "msix-data-width", &sc->sc_msix_data_width,
	    sizeof(sc->sc_msix_data_width)) > 0)
		sc->sc_flags |= FIRE_MSIX;
	if (OF_getprop(node, "msi-address-ranges", &msi_addr_ranges,
	    sizeof(msi_addr_ranges)) == -1)
		panic("%s: could not determine MSI address ranges", __func__);
	sc->sc_msi_addr32 = OFW_PCI_MSI_ADDR_RANGE_32(&msi_addr_ranges);
	sc->sc_msi_addr64 = OFW_PCI_MSI_ADDR_RANGE_64(&msi_addr_ranges);
	if (OF_getprop(node, "#msi-eqs", &sc->sc_msiq_count,
	    sizeof(sc->sc_msiq_count)) == -1)
		panic("%s: could not determine MSI event queue count",
		    __func__);
	if (OF_getprop(node, "msi-eq-size", &sc->sc_msiq_size,
	    sizeof(sc->sc_msiq_size)) == -1)
		panic("%s: could not determine MSI event queue size",
		    __func__);
	if (OF_getprop(node, "msi-eq-to-devino", &msi_eq_to_devino,
	    sizeof(msi_eq_to_devino)) == -1 &&
	    OF_getprop(node, "msi-eq-devino", &msi_eq_to_devino,
	    sizeof(msi_eq_to_devino)) == -1) {
		sc->sc_msiq_first = 0;
		sc->sc_msiq_ino_first = FO_EQ_FIRST_INO;
	} else {
		sc->sc_msiq_first = msi_eq_to_devino.eq_first;
		sc->sc_msiq_ino_first = msi_eq_to_devino.devino_first;
	}
	if (sc->sc_msiq_ino_first < FO_EQ_FIRST_INO ||
	    sc->sc_msiq_ino_first + sc->sc_msiq_count - 1 > FO_EQ_LAST_INO)
		panic("%s: event queues exceed INO range", __func__);
	sc->sc_msi_bitmap = malloc(roundup2(sc->sc_msi_count, NBBY) / NBBY,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_msi_bitmap == NULL)
		panic("%s: could not malloc MSI bitmap", __func__);
	sc->sc_msi_msiq_table = malloc(sc->sc_msi_count *
	    sizeof(*sc->sc_msi_msiq_table), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_msi_msiq_table == NULL)
		panic("%s: could not malloc MSI-MSI event queue table",
		    __func__);
	sc->sc_msiq_bitmap = malloc(roundup2(sc->sc_msiq_count, NBBY) / NBBY,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_msiq_bitmap == NULL)
		panic("%s: could not malloc MSI event queue bitmap", __func__);
	j = FO_EQ_RECORD_SIZE * FO_EQ_NRECORDS * sc->sc_msiq_count;
	sc->sc_msiq = contigmalloc(j, M_DEVBUF, M_NOWAIT, 0, ~0UL,
	    FO_EQ_ALIGNMENT, 0);
	if (sc->sc_msiq == NULL)
		panic("%s: could not contigmalloc MSI event queue", __func__);
	memset(sc->sc_msiq, 0, j);
	FIRE_PCI_SET(sc, FO_PCI_EQ_BASE_ADDR, FO_PCI_EQ_BASE_ADDR_BYPASS |
	    (pmap_kextract((vm_offset_t)sc->sc_msiq) &
	    FO_PCI_EQ_BASE_ADDR_MASK));
	for (i = 0; i < sc->sc_msi_count; i++) {
		j = (i + sc->sc_msi_first) << 3;
		FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + j,
		    FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + j) &
		    ~FO_PCI_MSI_MAP_V);
	}
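	/*
	 * Set up the event queues: queues whose INO was marked unusable
	 * in the ino-bitmap are reserved in the allocation bitmap so they
	 * are never handed out; for every queue the corresponding
	 * interrupt controller argument is then wired up, pending errors
	 * are cleared and the head and tail pointers are reset.
	 */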
	for (i = 0; i < sc->sc_msiq_count; i++) {
		j = i + sc->sc_msiq_ino_first;
		if ((ino_bitmap & (1ULL << j)) == 0) {
			mtx_lock(&sc->sc_msi_mtx);
			setbit(sc->sc_msiq_bitmap, i);
			mtx_unlock(&sc->sc_msi_mtx);
		}
		fmqa = intr_vectors[INTMAP_VEC(sc->sc_ign, j)].iv_icarg;
		mtx_init(&fmqa->fmqa_mtx, "msiq_mtx", NULL, MTX_SPIN);
		fmqa->fmqa_base =
		    (struct fo_msiq_record *)((caddr_t)sc->sc_msiq +
		    (FO_EQ_RECORD_SIZE * FO_EQ_NRECORDS * i));
		j = i + sc->sc_msiq_first;
		fmqa->fmqa_msiq = j;
		j <<= 3;
		fmqa->fmqa_head = FO_PCI_EQ_HD_BASE + j;
		fmqa->fmqa_tail = FO_PCI_EQ_TL_BASE + j;
		FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + j,
		    FO_PCI_EQ_CTRL_CLR_COVERR | FO_PCI_EQ_CTRL_CLR_E2I |
		    FO_PCI_EQ_CTRL_CLR_DIS);
		FIRE_PCI_WRITE_8(sc, fmqa->fmqa_tail,
		    (0 << FO_PCI_EQ_TL_SHFT) & FO_PCI_EQ_TL_MASK);
		FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head,
		    (0 << FO_PCI_EQ_HD_SHFT) & FO_PCI_EQ_HD_MASK);
	}
	FIRE_PCI_SET(sc, FO_PCI_MSI_32_BIT_ADDR, sc->sc_msi_addr32 &
	    FO_PCI_MSI_32_BIT_ADDR_MASK);
	FIRE_PCI_SET(sc, FO_PCI_MSI_64_BIT_ADDR, sc->sc_msi_addr64 &
	    FO_PCI_MSI_64_BIT_ADDR_MASK);

	/*
	 * Establish a handler for interesting PCIe messages and disable
	 * uninteresting ones.
	 */
	mtx_lock(&sc->sc_msi_mtx);
	for (i = 0; i < sc->sc_msiq_count; i++) {
		if (isclr(sc->sc_msiq_bitmap, i) != 0) {
			j = i;
			break;
		}
	}
	if (i == sc->sc_msiq_count) {
		mtx_unlock(&sc->sc_msi_mtx);
		panic("%s: no spare event queue for PCIe messages", __func__);
	}
	setbit(sc->sc_msiq_bitmap, j);
	mtx_unlock(&sc->sc_msi_mtx);
	i = INTMAP_VEC(sc->sc_ign, j + sc->sc_msiq_ino_first);
	if (bus_set_resource(dev, SYS_RES_IRQ, 2, i, 1) != 0)
		panic("%s: failed to add interrupt for PCIe messages",
		    __func__);
	fire_set_intr(sc, 2, INTINO(i), fire_pcie, intr_vectors[i].iv_icarg);
	j += sc->sc_msiq_first;
	/*
	 * "Please note that setting the EQNUM field to a value larger than
	 * 35 will yield unpredictable results."
	 */
	if (j > 35)
		panic("%s: invalid queue for PCIe messages (%d)",
		    __func__, j);
	FIRE_PCI_SET(sc, FO_PCI_ERR_COR, FO_PCI_ERR_PME_V |
	    ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK));
	FIRE_PCI_SET(sc, FO_PCI_ERR_NONFATAL, FO_PCI_ERR_PME_V |
	    ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK));
	FIRE_PCI_SET(sc, FO_PCI_ERR_FATAL, FO_PCI_ERR_PME_V |
	    ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK));
	FIRE_PCI_SET(sc, FO_PCI_PM_PME, 0);
	FIRE_PCI_SET(sc, FO_PCI_PME_TO_ACK, 0);
	FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_SET_BASE + (j << 3),
	    FO_PCI_EQ_CTRL_SET_EN);

#define	TC_COUNTER_MAX_MASK	0xffffffff

	/*
	 * Setup JBC/UBC performance counter 0 in bus cycle counting
	 * mode as timecounter.  Unfortunately, at least with Fire all
	 * JBus-driven performance counters just don't advance in bus
	 * cycle counting mode.
	 */
	if (device_get_unit(dev) == 0) {
		FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT0, 0);
		FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT1, 0);
		FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT_SEL,
		    (FO_XBC_PRF_CNT_NONE << FO_XBC_PRF_CNT_CNT1_SHFT) |
		    (FO_XBC_PRF_CNT_XB_CLK << FO_XBC_PRF_CNT_CNT0_SHFT));
#ifdef FIRE_DEBUG
		device_printf(dev, "FO_XBC_PRF_CNT0 0x%016llx\n",
		    (long long unsigned)FIRE_CTRL_READ_8(sc,
		    FO_XBC_PRF_CNT0));
		device_printf(dev, "FO_XBC_PRF_CNT0 0x%016llx\n",
		    (long long unsigned)FIRE_CTRL_READ_8(sc,
		    FO_XBC_PRF_CNT0));
#endif
		tc = malloc(sizeof(*tc), M_DEVBUF, M_NOWAIT | M_ZERO);
		if (tc == NULL)
			panic("%s: could not malloc timecounter", __func__);
		tc->tc_get_timecount = fire_get_timecount;
		tc->tc_poll_pps = NULL;
		tc->tc_counter_mask = TC_COUNTER_MAX_MASK;
		if (OF_getprop(OF_peer(0), "clock-frequency", &prop,
		    sizeof(prop)) == -1)
			panic("%s: could not determine clock frequency",
			    __func__);
		tc->tc_frequency = prop;
		tc->tc_name = strdup(device_get_nameunit(dev), M_DEVBUF);
		tc->tc_quality = -FIRE_PERF_CNT_QLTY;
		tc->tc_priv = sc;
		tc_init(tc);
	}

	/*
	 * Set up the IOMMU.  Both Fire and Oberon have one per PBM, but
	 * neither has a streaming buffer.
	 */
	memcpy(&sc->sc_dma_methods, &iommu_dma_methods,
	    sizeof(sc->sc_dma_methods));
	sc->sc_is.is_flags = IOMMU_FIRE | IOMMU_PRESERVE_PROM;
	if (sc->sc_mode == FIRE_MODE_OBERON) {
		sc->sc_is.is_flags |= IOMMU_FLUSH_CACHE;
		sc->sc_is.is_pmaxaddr = IOMMU_MAXADDR(OBERON_IOMMU_BITS);
	} else {
		sc->sc_dma_methods.dm_dmamap_sync = fire_dmamap_sync;
		sc->sc_is.is_pmaxaddr = IOMMU_MAXADDR(FIRE_IOMMU_BITS);
	}
	sc->sc_is.is_sb[0] = sc->sc_is.is_sb[1] = 0;
	/* Punch in our copies. */
	sc->sc_is.is_bustag = rman_get_bustag(sc->sc_mem_res[FIRE_PCI]);
	sc->sc_is.is_bushandle = rman_get_bushandle(sc->sc_mem_res[FIRE_PCI]);
	sc->sc_is.is_iommu = FO_PCI_MMU;
	val = FIRE_PCI_READ_8(sc, FO_PCI_MMU + IMR_CTL);
	iommu_init(device_get_nameunit(sc->sc_dev), &sc->sc_is, 7, -1, 0);
#ifdef FIRE_DEBUG
	device_printf(dev, "FO_PCI_MMU + IMR_CTL 0x%016llx -> 0x%016llx\n",
	    (long long unsigned)val, (long long unsigned)sc->sc_is.is_cr);
#endif

	/* Initialize memory and I/O rmans. */
	sc->sc_pci_io_rman.rm_type = RMAN_ARRAY;
	sc->sc_pci_io_rman.rm_descr = "Fire PCI I/O Ports";
	if (rman_init(&sc->sc_pci_io_rman) != 0 ||
	    rman_manage_region(&sc->sc_pci_io_rman, 0, FO_IO_SIZE) != 0)
		panic("%s: failed to set up I/O rman", __func__);
	sc->sc_pci_mem_rman.rm_type = RMAN_ARRAY;
	sc->sc_pci_mem_rman.rm_descr = "Fire PCI Memory";
	if (rman_init(&sc->sc_pci_mem_rman) != 0 ||
	    rman_manage_region(&sc->sc_pci_mem_rman, 0, FO_MEM_SIZE) != 0)
		panic("%s: failed to set up memory rman", __func__);

	i = OF_getprop_alloc(node, "ranges", sizeof(*range), (void **)&range);
	/*
	 * Make sure that the expected ranges are present.  The
	 * OFW_PCI_CS_MEM64 one is not currently used though.
	 */
	if (i != FIRE_NRANGE)
		panic("%s: unsupported number of ranges", __func__);
	/*
	 * Find the addresses of the various bus spaces.
	 * There should not be multiple ones of one kind.
	 * The physical start addresses of the ranges are the configuration,
	 * memory and I/O handles.
	 */
	for (i = 0; i < FIRE_NRANGE; i++) {
		j = OFW_PCI_RANGE_CS(&range[i]);
		if (sc->sc_pci_bh[j] != 0)
			panic("%s: duplicate range for space %d",
			    __func__, j);
		sc->sc_pci_bh[j] = OFW_PCI_RANGE_PHYS(&range[i]);
	}
	free(range, M_OFWPROP);

	/* Allocate our tags. */
	sc->sc_pci_memt = fire_alloc_bus_tag(sc, PCI_MEMORY_BUS_SPACE);
	sc->sc_pci_iot = fire_alloc_bus_tag(sc, PCI_IO_BUS_SPACE);
	sc->sc_pci_cfgt = fire_alloc_bus_tag(sc, PCI_CONFIG_BUS_SPACE);
	if (bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
	    sc->sc_is.is_pmaxaddr, ~0, NULL, NULL, sc->sc_is.is_pmaxaddr,
	    0xff, 0xffffffff, 0, NULL, NULL, &sc->sc_pci_dmat) != 0)
		panic("%s: bus_dma_tag_create failed", __func__);
	/* Customize the tag. */
	sc->sc_pci_dmat->dt_cookie = &sc->sc_is;
	sc->sc_pci_dmat->dt_mt = &sc->sc_dma_methods;

	/*
	 * Get the bus range from the firmware.
	 * NB: Neither Fire nor Oberon support PCI bus reenumeration.
	 */
	i = OF_getprop(node, "bus-range", (void *)prop_array,
	    sizeof(prop_array));
	if (i == -1)
		panic("%s: could not get bus-range", __func__);
	if (i != sizeof(prop_array))
		panic("%s: broken bus-range (%d)", __func__, i);
	sc->sc_pci_secbus = prop_array[0];
	sc->sc_pci_subbus = prop_array[1];
	if (bootverbose != 0)
		device_printf(dev, "bus range %u to %u; PCI bus %d\n",
		    sc->sc_pci_secbus, sc->sc_pci_subbus, sc->sc_pci_secbus);

	ofw_bus_setup_iinfo(node, &sc->sc_pci_iinfo, sizeof(ofw_pci_intr_t));

#define	FIRE_SYSCTL_ADD_UINT(name, arg, desc)				\
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),			\
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,	\
	    (name), CTLFLAG_RD, (arg), 0, (desc))

	FIRE_SYSCTL_ADD_UINT("ilu_err", &sc->sc_stats_ilu_err,
	    "ILU unknown errors");
	FIRE_SYSCTL_ADD_UINT("jbc_ce_async", &sc->sc_stats_jbc_ce_async,
	    "JBC correctable errors");
	FIRE_SYSCTL_ADD_UINT("jbc_unsol_int", &sc->sc_stats_jbc_unsol_int,
	    "JBC unsolicited interrupt ACK/NACK errors");
	FIRE_SYSCTL_ADD_UINT("jbc_unsol_rd", &sc->sc_stats_jbc_unsol_rd,
	    "JBC unsolicited read response errors");
	FIRE_SYSCTL_ADD_UINT("mmu_err", &sc->sc_stats_mmu_err, "MMU errors");
	FIRE_SYSCTL_ADD_UINT("tlu_ce", &sc->sc_stats_tlu_ce,
	    "DLU/TLU correctable errors");
	FIRE_SYSCTL_ADD_UINT("tlu_oe_non_fatal",
	    &sc->sc_stats_tlu_oe_non_fatal,
	    "DLU/TLU other event non-fatal errors summary");
	FIRE_SYSCTL_ADD_UINT("tlu_oe_rx_err", &sc->sc_stats_tlu_oe_rx_err,
	    "DLU/TLU receive other event errors");
	FIRE_SYSCTL_ADD_UINT("tlu_oe_tx_err", &sc->sc_stats_tlu_oe_tx_err,
	    "DLU/TLU transmit other event errors");
	FIRE_SYSCTL_ADD_UINT("ubc_dmardue", &sc->sc_stats_ubc_dmardue,
	    "UBC DMARDUE errors");

#undef FIRE_SYSCTL_ADD_UINT

	device_add_child(dev, "pci", -1);
	return (bus_generic_attach(dev));
}

static void
fire_set_intr(struct fire_softc *sc, u_int index, u_int ino,
    driver_filter_t handler, void *arg)
{
	u_long vec;
	int rid;

	rid = index;
	sc->sc_irq_res[index] = bus_alloc_resource_any(sc->sc_dev,
	    SYS_RES_IRQ, &rid, RF_ACTIVE);
	if (sc->sc_irq_res[index] == NULL ||
	    INTINO(vec = rman_get_start(sc->sc_irq_res[index])) != ino ||
	    INTIGN(vec) != sc->sc_ign ||
	    intr_vectors[vec].iv_ic != &fire_ic ||
	    bus_setup_intr(sc->sc_dev, sc->sc_irq_res[index],
	    INTR_TYPE_MISC | INTR_BRIDGE, handler, NULL, arg,
	    &sc->sc_ihand[index]) != 0)
		panic("%s: failed to set up interrupt %d", __func__, index);
}

static int
fire_intr_register(struct fire_softc *sc, u_int ino)
{
	struct fire_icarg *fica;
	bus_addr_t intrclr, intrmap;
	int error;

	if (fire_get_intrmap(sc, ino, &intrmap, &intrclr) == 0)
		return (ENXIO);
	fica = malloc((ino >= FO_EQ_FIRST_INO && ino <= FO_EQ_LAST_INO) ?
	    sizeof(struct fire_msiqarg) : sizeof(struct fire_icarg), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (fica == NULL)
		return (ENOMEM);
	fica->fica_sc = sc;
	fica->fica_map = intrmap;
	fica->fica_clr = intrclr;
	error = (intr_controller_register(INTMAP_VEC(sc->sc_ign, ino),
	    &fire_ic, fica));
	if (error != 0)
		free(fica, M_DEVBUF);
	return (error);
}

static int
fire_get_intrmap(struct fire_softc *sc, u_int ino, bus_addr_t *intrmapptr,
    bus_addr_t *intrclrptr)
{

	if (ino > FO_MAX_INO) {
		device_printf(sc->sc_dev, "out of range INO %d requested\n",
		    ino);
		return (0);
	}

	ino <<= 3;
	if (intrmapptr != NULL)
		*intrmapptr = FO_PCI_INT_MAP_BASE + ino;
	if (intrclrptr != NULL)
		*intrclrptr = FO_PCI_INT_CLR_BASE + ino;
	return (1);
}

/*
 * Interrupt handlers
 */
static int
fire_dmc_pec(void *arg)
{
	struct fire_softc *sc;
	device_t dev;
	uint64_t cestat, dmcstat, ilustat, imustat, mcstat, mmustat, mmutfar;
	uint64_t mmutfsr, oestat, pecstat, uestat, val;
	u_int fatal, oenfatal;

	fatal = 0;
	sc = arg;
	dev = sc->sc_dev;
	mtx_lock_spin(&sc->sc_pcib_mtx);
	mcstat = FIRE_PCI_READ_8(sc, FO_PCI_MULTI_CORE_ERR_STAT);
	if ((mcstat & FO_PCI_MULTI_CORE_ERR_STAT_DMC) != 0) {
		dmcstat = FIRE_PCI_READ_8(sc, FO_PCI_DMC_CORE_BLOCK_ERR_STAT);
		if ((dmcstat & FO_PCI_DMC_CORE_BLOCK_INT_EN_IMU) != 0) {
			imustat = FIRE_PCI_READ_8(sc, FO_PCI_IMU_INT_STAT);
			device_printf(dev, "IMU error %#llx\n",
			    (unsigned long long)imustat);
			if ((imustat &
			    FO_PCI_IMU_ERR_INT_EQ_NOT_EN_P) != 0) {
				fatal = 1;
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_IMU_SCS_ERR_LOG);
				device_printf(dev, "SCS error log %#llx\n",
				    (unsigned long long)val);
			}
			if ((imustat & FO_PCI_IMU_ERR_INT_EQ_OVER_P) != 0) {
				fatal = 1;
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_IMU_EQS_ERR_LOG);
				device_printf(dev, "EQS error log %#llx\n",
				    (unsigned long long)val);
			}
			if ((imustat & (FO_PCI_IMU_ERR_INT_MSI_MAL_ERR_P |
			    FO_PCI_IMU_ERR_INT_MSI_PAR_ERR_P |
			    FO_PCI_IMU_ERR_INT_PMEACK_MES_NOT_EN_P |
			    FO_PCI_IMU_ERR_INT_PMPME_MES_NOT_EN_P |
			    FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_P |
			    FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_P |
			    FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_P |
			    FO_PCI_IMU_ERR_INT_MSI_NOT_EN_P)) != 0) {
				fatal = 1;
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_IMU_RDS_ERR_LOG);
				device_printf(dev, "RDS error log %#llx\n",
				    (unsigned long long)val);
			}
		}
		if ((dmcstat & FO_PCI_DMC_CORE_BLOCK_INT_EN_MMU) != 0) {
			mmustat = FIRE_PCI_READ_8(sc, FO_PCI_MMU_INT_STAT);
			mmutfar = FIRE_PCI_READ_8(sc,
			    FO_PCI_MMU_TRANS_FAULT_ADDR);
			mmutfsr = FIRE_PCI_READ_8(sc,
			    FO_PCI_MMU_TRANS_FAULT_STAT);
			if ((mmustat & (FO_PCI_MMU_ERR_INT_TBW_DPE_P |
			    FO_PCI_MMU_ERR_INT_TBW_ERR_P |
			    FO_PCI_MMU_ERR_INT_TBW_UDE_P |
			    FO_PCI_MMU_ERR_INT_TBW_DME_P |
			    FO_PCI_MMU_ERR_INT_TTC_CAE_P |
			    FIRE_PCI_MMU_ERR_INT_TTC_DPE_P |
			    OBERON_PCI_MMU_ERR_INT_TTC_DUE_P |
			    FO_PCI_MMU_ERR_INT_TRN_ERR_P)) != 0)
				fatal = 1;
			else {
				sc->sc_stats_mmu_err++;
				FIRE_PCI_WRITE_8(sc, FO_PCI_MMU_ERR_STAT_CLR,
				    mmustat);
			}
			device_printf(dev,
			    "MMU error %#llx: TFAR %#llx TFSR %#llx\n",
			    (unsigned long long)mmustat,
			    (unsigned long long)mmutfar,
			    (unsigned long long)mmutfsr);
		}
	}
	if ((mcstat & FO_PCI_MULTI_CORE_ERR_STAT_PEC) != 0) {
		pecstat = FIRE_PCI_READ_8(sc, FO_PCI_PEC_CORE_BLOCK_INT_STAT);
		if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_UERR) != 0) {
			fatal = 1;
			uestat = FIRE_PCI_READ_8(sc,
			    FO_PCI_TLU_UERR_INT_STAT);
			device_printf(dev,
			    "DLU/TLU uncorrectable error %#llx\n",
			    (unsigned long long)uestat);
			if ((uestat & (FO_PCI_TLU_UERR_INT_UR_P |
			    OBERON_PCI_TLU_UERR_INT_POIS_P |
			    FO_PCI_TLU_UERR_INT_MFP_P |
			    FO_PCI_TLU_UERR_INT_ROF_P |
			    FO_PCI_TLU_UERR_INT_UC_P |
			    FIRE_PCI_TLU_UERR_INT_PP_P |
			    OBERON_PCI_TLU_UERR_INT_POIS_P)) != 0) {
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_TLU_RX_UERR_HDR1_LOG);
				device_printf(dev,
				    "receive header log %#llx\n",
				    (unsigned long long)val);
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_TLU_RX_UERR_HDR2_LOG);
				device_printf(dev,
				    "receive header log 2 %#llx\n",
				    (unsigned long long)val);
			}
			if ((uestat & FO_PCI_TLU_UERR_INT_CTO_P) != 0) {
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_TLU_TX_UERR_HDR1_LOG);
				device_printf(dev,
				    "transmit header log %#llx\n",
				    (unsigned long long)val);
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_TLU_TX_UERR_HDR2_LOG);
				device_printf(dev,
				    "transmit header log 2 %#llx\n",
				    (unsigned long long)val);
			}
			if ((uestat & FO_PCI_TLU_UERR_INT_DLP_P) != 0) {
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_LPU_LNK_LYR_INT_STAT);
				device_printf(dev,
				    "link layer interrupt and status %#llx\n",
				    (unsigned long long)val);
			}
			if ((uestat & FO_PCI_TLU_UERR_INT_TE_P) != 0) {
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_LPU_PHY_LYR_INT_STAT);
				device_printf(dev,
				    "phy layer interrupt and status %#llx\n",
				    (unsigned long long)val);
			}
		}
		if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_CERR) != 0) {
			sc->sc_stats_tlu_ce++;
			cestat = FIRE_PCI_READ_8(sc,
			    FO_PCI_TLU_CERR_INT_STAT);
			device_printf(dev,
			    "DLU/TLU correctable error %#llx\n",
			    (unsigned long long)cestat);
			val = FIRE_PCI_READ_8(sc,
			    FO_PCI_LPU_LNK_LYR_INT_STAT);
			device_printf(dev,
			    "link layer interrupt and status %#llx\n",
			    (unsigned long long)val);
			if ((cestat & FO_PCI_TLU_CERR_INT_RE_P) != 0) {
				FIRE_PCI_WRITE_8(sc,
				    FO_PCI_LPU_LNK_LYR_INT_STAT, val);
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_LPU_PHY_LYR_INT_STAT);
				device_printf(dev,
				    "phy layer interrupt and status %#llx\n",
				    (unsigned long long)val);
			}
			FIRE_PCI_WRITE_8(sc, FO_PCI_TLU_CERR_STAT_CLR,
			    cestat);
		}
		if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_OEVENT) != 0) {
			oenfatal = 0;
			oestat = FIRE_PCI_READ_8(sc,
			    FO_PCI_TLU_OEVENT_INT_STAT);
			device_printf(dev, "DLU/TLU other event %#llx\n",
			    (unsigned long long)oestat);
			if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
			    FO_PCI_TLU_OEVENT_MRC_P |
			    FO_PCI_TLU_OEVENT_WUC_P |
			    FO_PCI_TLU_OEVENT_RUC_P |
			    FO_PCI_TLU_OEVENT_CRS_P)) != 0) {
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_TLU_RX_OEVENT_HDR1_LOG);
				device_printf(dev,
				    "receive header log %#llx\n",
				    (unsigned long long)val);
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_TLU_RX_OEVENT_HDR2_LOG);
				device_printf(dev,
				    "receive header log 2 %#llx\n",
				    (unsigned long long)val);
				if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
				    FO_PCI_TLU_OEVENT_MRC_P |
				    FO_PCI_TLU_OEVENT_WUC_P |
				    FO_PCI_TLU_OEVENT_RUC_P)) != 0)
					fatal = 1;
				else {
					sc->sc_stats_tlu_oe_rx_err++;
					oenfatal = 1;
				}
			}
			if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
			    FO_PCI_TLU_OEVENT_CTO_P |
			    FO_PCI_TLU_OEVENT_WUC_P |
			    FO_PCI_TLU_OEVENT_RUC_P)) != 0) {
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_TLU_TX_OEVENT_HDR1_LOG);
				device_printf(dev,
				    "transmit header log %#llx\n",
				    (unsigned long long)val);
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_TLU_TX_OEVENT_HDR2_LOG);
				device_printf(dev,
				    "transmit header log 2 %#llx\n",
				    (unsigned long long)val);
				if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
				    FO_PCI_TLU_OEVENT_CTO_P |
				    FO_PCI_TLU_OEVENT_WUC_P |
				    FO_PCI_TLU_OEVENT_RUC_P)) != 0)
					fatal = 1;
				else {
					sc->sc_stats_tlu_oe_tx_err++;
					oenfatal = 1;
				}
			}
			if ((oestat & (FO_PCI_TLU_OEVENT_ERO_P |
			    FO_PCI_TLU_OEVENT_EMP_P |
			    FO_PCI_TLU_OEVENT_EPE_P |
			    FIRE_PCI_TLU_OEVENT_ERP_P |
			    OBERON_PCI_TLU_OEVENT_ERBU_P |
			    FIRE_PCI_TLU_OEVENT_EIP_P |
			    OBERON_PCI_TLU_OEVENT_EIUE_P)) != 0) {
				fatal = 1;
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_LPU_LNK_LYR_INT_STAT);
				device_printf(dev,
				    "link layer interrupt and status %#llx\n",
				    (unsigned long long)val);
			}
			if ((oestat & (FO_PCI_TLU_OEVENT_IIP_P |
			    FO_PCI_TLU_OEVENT_EDP_P |
			    FIRE_PCI_TLU_OEVENT_EHP_P |
			    OBERON_PCI_TLU_OEVENT_TLUEITMO_S |
			    FO_PCI_TLU_OEVENT_ERU_P)) != 0)
				fatal = 1;
			if ((oestat & (FO_PCI_TLU_OEVENT_NFP_P |
			    FO_PCI_TLU_OEVENT_LWC_P |
			    FO_PCI_TLU_OEVENT_LIN_P |
			    FO_PCI_TLU_OEVENT_LRS_P |
			    FO_PCI_TLU_OEVENT_LDN_P |
			    FO_PCI_TLU_OEVENT_LUP_P)) != 0)
				oenfatal = 1;
			if (oenfatal != 0) {
				sc->sc_stats_tlu_oe_non_fatal++;
				FIRE_PCI_WRITE_8(sc,
				    FO_PCI_TLU_OEVENT_STAT_CLR, oestat);
				if ((oestat & FO_PCI_TLU_OEVENT_LIN_P) != 0)
					FIRE_PCI_WRITE_8(sc,
					    FO_PCI_LPU_LNK_LYR_INT_STAT,
					    FIRE_PCI_READ_8(sc,
					    FO_PCI_LPU_LNK_LYR_INT_STAT));
			}
		}
		if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_ILU) != 0) {
			ilustat = FIRE_PCI_READ_8(sc, FO_PCI_ILU_INT_STAT);
			device_printf(dev, "ILU error %#llx\n",
			    (unsigned long long)ilustat);
			if ((ilustat & (FIRE_PCI_ILU_ERR_INT_IHB_PE_P |
			    FIRE_PCI_ILU_ERR_INT_IHB_PE_P)) != 0)
				fatal = 1;
			else {
				sc->sc_stats_ilu_err++;
				FIRE_PCI_WRITE_8(sc, FO_PCI_ILU_INT_STAT,
				    ilustat);
			}
		}
	}
	mtx_unlock_spin(&sc->sc_pcib_mtx);
	if (fatal != 0)
		panic("%s: fatal DMC/PEC error",
		    device_get_nameunit(sc->sc_dev));
	return (FILTER_HANDLED);
}

static int
fire_xcb(void *arg)
{
	struct fire_softc *sc;
	device_t dev;
	uint64_t errstat, intstat, val;
	u_int fatal;

	fatal = 0;
	sc = arg;
	dev = sc->sc_dev;
	mtx_lock_spin(&sc->sc_pcib_mtx);
	if (sc->sc_mode == FIRE_MODE_OBERON) {
		intstat = FIRE_CTRL_READ_8(sc, FO_XBC_INT_STAT);
		device_printf(dev, "UBC error: interrupt status %#llx\n",
		    (unsigned long long)intstat);
		if ((intstat & ~(OBERON_UBC_ERR_INT_DMARDUEB_P |
		    OBERON_UBC_ERR_INT_DMARDUEA_P)) != 0)
			fatal = 1;
		else
			sc->sc_stats_ubc_dmardue++;
		if (fatal != 0) {
			mtx_unlock_spin(&sc->sc_pcib_mtx);
			panic("%s: fatal UBC core block error",
			    device_get_nameunit(sc->sc_dev));
		} else {
			FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL);
			mtx_unlock_spin(&sc->sc_pcib_mtx);
		}
	} else {
		errstat = FIRE_CTRL_READ_8(sc, FIRE_JBC_CORE_BLOCK_ERR_STAT);
		if ((errstat & (FIRE_JBC_CORE_BLOCK_ERR_STAT_MERGE |
		    FIRE_JBC_CORE_BLOCK_ERR_STAT_JBCINT |
		    FIRE_JBC_CORE_BLOCK_ERR_STAT_DMCINT)) != 0) {
			intstat = FIRE_CTRL_READ_8(sc, FO_XBC_INT_STAT);
			device_printf(dev, "JBC interrupt status %#llx\n",
			    (unsigned long long)intstat);
			if ((intstat & FIRE_JBC_ERR_INT_EBUS_TO_P) != 0) {
				val = FIRE_CTRL_READ_8(sc,
				    FIRE_JBC_CSR_ERR_LOG);
				device_printf(dev, "CSR error log %#llx\n",
				    (unsigned long long)val);
			}
			if ((intstat & (FIRE_JBC_ERR_INT_UNSOL_RD_P |
			    FIRE_JBC_ERR_INT_UNSOL_INT_P)) != 0) {
				if ((intstat &
				    FIRE_JBC_ERR_INT_UNSOL_RD_P) != 0)
					sc->sc_stats_jbc_unsol_rd++;
				if ((intstat &
				    FIRE_JBC_ERR_INT_UNSOL_INT_P) != 0)
					sc->sc_stats_jbc_unsol_int++;
				val = FIRE_CTRL_READ_8(sc,
				    FIRE_DMCINT_IDC_ERR_LOG);
				device_printf(dev,
				    "DMCINT IDC error log %#llx\n",
				    (unsigned long long)val);
			}
			if ((intstat & (FIRE_JBC_ERR_INT_MB_PER_P |
			    FIRE_JBC_ERR_INT_MB_PEW_P)) != 0) {
				fatal = 1;
				val = FIRE_CTRL_READ_8(sc,
				    FIRE_MERGE_TRANS_ERR_LOG);
				device_printf(dev,
				    "merge transaction error log %#llx\n",
				    (unsigned long long)val);
			}
			if ((intstat & FIRE_JBC_ERR_INT_IJP_P) != 0) {
				fatal = 1;
				val = FIRE_CTRL_READ_8(sc,
				    FIRE_JBCINT_OTRANS_ERR_LOG);
				device_printf(dev,
				    "JBCINT out transaction error log "
				    "%#llx\n", (unsigned long long)val);
				val = FIRE_CTRL_READ_8(sc,
				    FIRE_JBCINT_OTRANS_ERR_LOG2);
				device_printf(dev,
				    "JBCINT out transaction error log 2 "
				    "%#llx\n", (unsigned long long)val);
			}
			if ((intstat & (FIRE_JBC_ERR_INT_UE_ASYN_P |
			    FIRE_JBC_ERR_INT_CE_ASYN_P |
			    FIRE_JBC_ERR_INT_JTE_P | FIRE_JBC_ERR_INT_JBE_P |
			    FIRE_JBC_ERR_INT_JUE_P |
			    FIRE_JBC_ERR_INT_ICISE_P |
			    FIRE_JBC_ERR_INT_WR_DPE_P |
			    FIRE_JBC_ERR_INT_RD_DPE_P |
			    FIRE_JBC_ERR_INT_ILL_BMW_P |
			    FIRE_JBC_ERR_INT_ILL_BMR_P |
			    FIRE_JBC_ERR_INT_BJC_P)) != 0) {
				if ((intstat & (FIRE_JBC_ERR_INT_UE_ASYN_P |
				    FIRE_JBC_ERR_INT_JTE_P |
				    FIRE_JBC_ERR_INT_JBE_P |
				    FIRE_JBC_ERR_INT_JUE_P |
				    FIRE_JBC_ERR_INT_ICISE_P |
				    FIRE_JBC_ERR_INT_WR_DPE_P |
				    FIRE_JBC_ERR_INT_RD_DPE_P |
				    FIRE_JBC_ERR_INT_ILL_BMW_P |
				    FIRE_JBC_ERR_INT_ILL_BMR_P |
				    FIRE_JBC_ERR_INT_BJC_P)) != 0)
					fatal = 1;
				else
					sc->sc_stats_jbc_ce_async++;
				val = FIRE_CTRL_READ_8(sc,
				    FIRE_JBCINT_ITRANS_ERR_LOG);
				device_printf(dev,
				    "JBCINT in transaction error log %#llx\n",
				    (unsigned long long)val);
				val = FIRE_CTRL_READ_8(sc,
				    FIRE_JBCINT_ITRANS_ERR_LOG2);
				device_printf(dev,
				    "JBCINT in transaction error log 2 "
				    "%#llx\n", (unsigned long long)val);
			}
			if ((intstat & (FIRE_JBC_ERR_INT_PIO_UNMAP_RD_P |
			    FIRE_JBC_ERR_INT_ILL_ACC_RD_P |
			    FIRE_JBC_ERR_INT_PIO_UNMAP_P |
			    FIRE_JBC_ERR_INT_PIO_DPE_P |
			    FIRE_JBC_ERR_INT_PIO_CPE_P |
			    FIRE_JBC_ERR_INT_ILL_ACC_P)) != 0) {
				fatal = 1;
				val = FIRE_CTRL_READ_8(sc,
				    FIRE_JBC_CSR_ERR_LOG);
				device_printf(dev,
				    "DMCINT ODCD error log %#llx\n",
				    (unsigned long long)val);
			}
			if ((intstat & (FIRE_JBC_ERR_INT_MB_PEA_P |
			    FIRE_JBC_ERR_INT_CPE_P | FIRE_JBC_ERR_INT_APE_P |
			    FIRE_JBC_ERR_INT_PIO_CPE_P |
			    FIRE_JBC_ERR_INT_JTCEEW_P |
			    FIRE_JBC_ERR_INT_JTCEEI_P |
			    FIRE_JBC_ERR_INT_JTCEER_P)) != 0) {
				fatal = 1;
				val = FIRE_CTRL_READ_8(sc,
				    FIRE_FATAL_ERR_LOG);
				device_printf(dev, "fatal error log %#llx\n",
				    (unsigned long long)val);
				val = FIRE_CTRL_READ_8(sc,
				    FIRE_FATAL_ERR_LOG2);
				device_printf(dev, "fatal error log 2 "
				    "%#llx\n", (unsigned long long)val);
			}
			if (fatal != 0) {
				mtx_unlock_spin(&sc->sc_pcib_mtx);
				panic("%s: fatal JBC core block error",
				    device_get_nameunit(sc->sc_dev));
			} else {
				FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL);
				mtx_unlock_spin(&sc->sc_pcib_mtx);
			}
		} else {
			mtx_unlock_spin(&sc->sc_pcib_mtx);
			panic("%s: unknown JBC core block error status %#llx",
			    device_get_nameunit(sc->sc_dev),
			    (unsigned long long)errstat);
		}
	}
	return (FILTER_HANDLED);
}

static int
fire_pcie(void *arg)
{
	struct fire_msiqarg *fmqa;
	struct fire_softc *sc;
	struct fo_msiq_record *qrec;
	device_t dev;
	uint64_t word0;
	u_int head, msg, msiq;

	fmqa = arg;
	sc = fmqa->fmqa_fica.fica_sc;
	dev = sc->sc_dev;
	msiq = fmqa->fmqa_msiq;
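	/*
	 * Drain the event queue: process all message records pending at
	 * the current head, then update the head pointer and clear a
	 * possible overflow condition.
	 */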
	mtx_lock_spin(&fmqa->fmqa_mtx);
	head = (FIRE_PCI_READ_8(sc, fmqa->fmqa_head) & FO_PCI_EQ_HD_MASK) >>
	    FO_PCI_EQ_HD_SHFT;
	qrec = &fmqa->fmqa_base[head];
	word0 = qrec->fomqr_word0;
	for (;;) {
		KASSERT((word0 & FO_MQR_WORD0_FMT_TYPE_MSG) != 0,
		    ("%s: received non-PCIe message in event queue %d "
		    "(word0 %#llx)", device_get_nameunit(dev), msiq,
		    (unsigned long long)word0));
		msg = (word0 & FO_MQR_WORD0_DATA0_MASK) >>
		    FO_MQR_WORD0_DATA0_SHFT;

#define	PCIE_MSG_CODE_ERR_COR		0x30
#define	PCIE_MSG_CODE_ERR_NONFATAL	0x31
#define	PCIE_MSG_CODE_ERR_FATAL		0x33

		if (msg == PCIE_MSG_CODE_ERR_COR)
			device_printf(dev, "correctable PCIe error\n");
		else if (msg == PCIE_MSG_CODE_ERR_NONFATAL ||
		    msg == PCIE_MSG_CODE_ERR_FATAL)
			panic("%s: %sfatal PCIe error",
			    device_get_nameunit(dev),
			    msg == PCIE_MSG_CODE_ERR_NONFATAL ? "non-" : "");
		else
			panic("%s: received unknown PCIe message %#x",
			    device_get_nameunit(dev), msg);
		qrec->fomqr_word0 &= ~FO_MQR_WORD0_FMT_TYPE_MASK;
		head = (head + 1) % sc->sc_msiq_size;
		qrec = &fmqa->fmqa_base[head];
		word0 = qrec->fomqr_word0;
		if (__predict_true((word0 & FO_MQR_WORD0_FMT_TYPE_MASK) == 0))
			break;
	}
	FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head, (head & FO_PCI_EQ_HD_MASK) <<
	    FO_PCI_EQ_HD_SHFT);
	if ((FIRE_PCI_READ_8(sc, fmqa->fmqa_tail) &
	    FO_PCI_EQ_TL_OVERR) != 0) {
		device_printf(dev, "event queue %d overflow\n", msiq);
		msiq <<= 3;
		FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq,
		    FIRE_PCI_READ_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq) |
		    FO_PCI_EQ_CTRL_CLR_COVERR);
	}
	mtx_unlock_spin(&fmqa->fmqa_mtx);
	return (FILTER_HANDLED);
}

static int
fire_maxslots(device_t dev)
{

	return (1);
}

static uint32_t
fire_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg,
    int width)
{
	struct fire_softc *sc;
	bus_space_handle_t bh;
	u_long offset = 0;
	uint32_t r, wrd;
	int i;
	uint16_t shrt;
	uint8_t byte;

	sc = device_get_softc(dev);
	if (bus < sc->sc_pci_secbus || bus > sc->sc_pci_subbus ||
	    slot > PCI_SLOTMAX || func > PCI_FUNCMAX || reg > PCIE_REGMAX)
		return (-1);

	offset = FO_CONF_OFF(bus, slot, func, reg);
	bh = sc->sc_pci_bh[OFW_PCI_CS_CONFIG];
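	/*
	 * Use the peek variants of the bus space accessors so that errors
	 * caused by probing non-existent devices are caught and reported
	 * as a failed read instead of triggering a fault.
	 */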
	switch (width) {
	case 1:
		i = bus_space_peek_1(sc->sc_pci_cfgt, bh, offset, &byte);
		r = byte;
		break;
	case 2:
		i = bus_space_peek_2(sc->sc_pci_cfgt, bh, offset, &shrt);
		r = shrt;
		break;
	case 4:
		i = bus_space_peek_4(sc->sc_pci_cfgt, bh, offset, &wrd);
		r = wrd;
		break;
	default:
		panic("%s: bad width", __func__);
		/* NOTREACHED */
	}

	if (i) {
#ifdef FIRE_DEBUG
		printf("%s: read data error reading: %d.%d.%d: 0x%x\n",
		    __func__, bus, slot, func, reg);
#endif
		r = -1;
	}
	return (r);
}

static void
fire_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg,
    uint32_t val, int width)
{
	struct fire_softc *sc;
	bus_space_handle_t bh;
	u_long offset = 0;

	sc = device_get_softc(dev);
	if (bus < sc->sc_pci_secbus || bus > sc->sc_pci_subbus ||
	    slot > PCI_SLOTMAX || func > PCI_FUNCMAX || reg > PCIE_REGMAX)
		return;

	offset = FO_CONF_OFF(bus, slot, func, reg);
	bh = sc->sc_pci_bh[OFW_PCI_CS_CONFIG];
	switch (width) {
	case 1:
		bus_space_write_1(sc->sc_pci_cfgt, bh, offset, val);
		break;
	case 2:
		bus_space_write_2(sc->sc_pci_cfgt, bh, offset, val);
		break;
	case 4:
		bus_space_write_4(sc->sc_pci_cfgt, bh, offset, val);
		break;
	default:
		panic("%s: bad width", __func__);
		/* NOTREACHED */
	}
}

static int
fire_route_interrupt(device_t bridge, device_t dev, int pin)
{
	struct fire_softc *sc;
	struct ofw_pci_register reg;
	ofw_pci_intr_t pintr, mintr;
	uint8_t maskbuf[sizeof(reg) + sizeof(pintr)];

	sc = device_get_softc(bridge);
	pintr = pin;
	if (ofw_bus_lookup_imap(ofw_bus_get_node(dev), &sc->sc_pci_iinfo,
	    &reg, sizeof(reg), &pintr, sizeof(pintr), &mintr, sizeof(mintr),
	    NULL, maskbuf) != 0)
		return (mintr);

	device_printf(bridge, "could not route pin %d for device %d.%d\n",
	    pin, pci_get_slot(dev), pci_get_function(dev));
	return (PCI_INVALID_IRQ);
}

static int
fire_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct fire_softc *sc;

	sc = device_get_softc(dev);
	switch (which) {
	case PCIB_IVAR_DOMAIN:
		*result = device_get_unit(dev);
		return (0);
	case PCIB_IVAR_BUS:
		*result = sc->sc_pci_secbus;
		return (0);
	}
	return (ENOENT);
}

#define	VIS_BLOCKSIZE	64

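/*
 * Fire has no streaming buffer to sync; instead, on BUS_DMASYNC_POSTREAD
 * a VIS block commit store to a scratch buffer followed by a Sync membar
 * apparently serves as the barrier that makes DMA-written data visible
 * to the CPU, while on BUS_DMASYNC_PREWRITE a Sync membar alone suffices
 * to order the CPU's stores ahead of the device's reads.
 */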
static void
fire_dmamap_sync(bus_dma_tag_t dt __unused, bus_dmamap_t map,
    bus_dmasync_op_t op)
{
	static u_char buf[VIS_BLOCKSIZE] __aligned(VIS_BLOCKSIZE);
	register_t reg, s;

	if ((map->dm_flags & DMF_LOADED) == 0)
		return;

	if ((op & BUS_DMASYNC_POSTREAD) != 0) {
		s = intr_disable();
		reg = rd(fprs);
		wr(fprs, reg | FPRS_FEF, 0);
		__asm __volatile("stda %%f0, [%0] %1"
		    : : "r" (buf), "n" (ASI_BLK_COMMIT_S));
		membar(Sync);
		wr(fprs, reg, 0);
		intr_restore(s);
	} else if ((op & BUS_DMASYNC_PREWRITE) != 0)
		membar(Sync);
}

static void
fire_intr_enable(void *arg)
{
	struct intr_vector *iv;
	struct fire_icarg *fica;
	struct fire_softc *sc;
	struct pcpu *pc;
	uint64_t mr;
	u_int ctrl, i;

	iv = arg;
	fica = iv->iv_icarg;
	sc = fica->fica_sc;
	mr = FO_PCI_IMAP_V;
	if (sc->sc_mode == FIRE_MODE_OBERON)
		mr |= (iv->iv_mid << OBERON_PCI_IMAP_T_DESTID_SHFT) &
		    OBERON_PCI_IMAP_T_DESTID_MASK;
	else
		mr |= (iv->iv_mid << FIRE_PCI_IMAP_T_JPID_SHFT) &
		    FIRE_PCI_IMAP_T_JPID_MASK;
	/*
	 * Given that all mondos for the same target are required to use the
	 * same interrupt controller we just use the CPU ID for indexing the
	 * latter.
	 */
	ctrl = 0;
	for (i = 0; i < mp_ncpus; ++i) {
		pc = pcpu_find(i);
		if (pc == NULL || iv->iv_mid != pc->pc_mid)
			continue;
		ctrl = pc->pc_cpuid % 4;
		break;
	}
	mr |= (1ULL << ctrl) << FO_PCI_IMAP_INT_CTRL_NUM_SHFT &
	    FO_PCI_IMAP_INT_CTRL_NUM_MASK;
	FIRE_PCI_WRITE_8(sc, fica->fica_map, mr);
}

static void
fire_intr_disable(void *arg)
{
	struct intr_vector *iv;
	struct fire_icarg *fica;
	struct fire_softc *sc;

	iv = arg;
	fica = iv->iv_icarg;
	sc = fica->fica_sc;
	FIRE_PCI_WRITE_8(sc, fica->fica_map,
	    FIRE_PCI_READ_8(sc, fica->fica_map) & ~FO_PCI_IMAP_V);
}

static void
fire_intr_assign(void *arg)
{
	struct intr_vector *iv;
	struct fire_icarg *fica;
	struct fire_softc *sc;
	uint64_t mr;

	iv = arg;
	fica = iv->iv_icarg;
	sc = fica->fica_sc;
1592	mr = FIRE_PCI_READ_8(sc, fica->fica_map);
1593	if ((mr & FO_PCI_IMAP_V) != 0) {
1594		FIRE_PCI_WRITE_8(sc, fica->fica_map, mr & ~FO_PCI_IMAP_V);
1595		FIRE_PCI_BARRIER(sc, fica->fica_map, 8,
1596		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1597	}
1598	while (FIRE_PCI_READ_8(sc, fica->fica_clr) != INTCLR_IDLE)
1599		;
1600	if ((mr & FO_PCI_IMAP_V) != 0)
1601		fire_intr_enable(arg);
1602}
1603
static void
fire_intr_clear(void *arg)
{
	struct intr_vector *iv;
	struct fire_icarg *fica;

	iv = arg;
	fica = iv->iv_icarg;
	FIRE_PCI_WRITE_8(fica->fica_sc, fica->fica_clr, INTCLR_IDLE);
}

/*
 * Given that the event queue implementation matches our current MD and MI
 * interrupt frameworks like square pegs fit into round holes, we are generous
 * and use one event queue per MSI for now, which limits us to 35 MSIs/MSI-Xs
 * per Host-PCIe-bridge (we use one event queue for the PCIe error messages).
 * This seems tolerable as long as most devices just use one MSI/MSI-X anyway.
 * Adding knowledge about MSIs/MSI-Xs to the MD interrupt code should allow us
 * to decouple the 1:1 mapping at the cost of no longer being able to bind
 * MSIs/MSI-Xs to specific CPUs, as we currently have no reliable way to
 * quiesce a device while we move its MSIs/MSI-Xs to another event queue.
 */

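/*
 * Allocate count MSIs (a power of two, at most 32): find both a run of
 * count free event queues and a count-aligned block of count free MSIs
 * and record the resulting 1:1 MSI-to-event-queue mapping in
 * sc_msi_msiq_table.
 */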
static int
fire_alloc_msi(device_t dev, device_t child, int count, int maxcount __unused,
    int *irqs)
{
	struct fire_softc *sc;
	u_int i, j, msiqrun;

	if (powerof2(count) == 0 || count > 32)
		return (EINVAL);

	sc = device_get_softc(dev);
	mtx_lock(&sc->sc_msi_mtx);
	msiqrun = 0;
	for (i = 0; i < sc->sc_msiq_count; i++) {
		for (j = i; j < i + count; j++) {
			if (isclr(sc->sc_msiq_bitmap, j) == 0)
				break;
		}
		if (j == i + count) {
			msiqrun = i;
			break;
		}
	}
	if (i == sc->sc_msiq_count) {
		mtx_unlock(&sc->sc_msi_mtx);
		return (ENXIO);
	}
	for (i = 0; i + count < sc->sc_msi_count; i += count) {
		for (j = i; j < i + count; j++)
			if (isclr(sc->sc_msi_bitmap, j) == 0)
				break;
		if (j == i + count) {
			for (j = 0; j < count; j++) {
				setbit(sc->sc_msiq_bitmap, msiqrun + j);
				setbit(sc->sc_msi_bitmap, i + j);
				sc->sc_msi_msiq_table[i + j] = msiqrun + j;
				irqs[j] = sc->sc_msi_first + i + j;
			}
			mtx_unlock(&sc->sc_msi_mtx);
			return (0);
		}
	}
	mtx_unlock(&sc->sc_msi_mtx);
	return (ENXIO);
}

static int
fire_release_msi(device_t dev, device_t child, int count, int *irqs)
{
	struct fire_softc *sc;
	u_int i;

	sc = device_get_softc(dev);
	mtx_lock(&sc->sc_msi_mtx);
	for (i = 0; i < count; i++) {
		clrbit(sc->sc_msiq_bitmap,
		    sc->sc_msi_msiq_table[irqs[i] - sc->sc_msi_first]);
		clrbit(sc->sc_msi_bitmap, irqs[i] - sc->sc_msi_first);
	}
	mtx_unlock(&sc->sc_msi_mtx);
	return (0);
}

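/*
 * Allocate a single MSI-X along with a free event queue.  MSI-X
 * numbers are handed out from the top of the range downwards,
 * presumably so they stay clear of the MSIs allocated bottom-up by
 * fire_alloc_msi().
 */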
1691fire_alloc_msix(device_t dev, device_t child, int *irq)
1692{
1693	struct fire_softc *sc;
1694	u_int i, msiq;
1695
1696	sc = device_get_softc(dev);
1697	if ((sc->sc_flags & FIRE_MSIX) == 0)
1698		return (ENXIO);
1699	mtx_lock(&sc->sc_msi_mtx);
1700	msiq = 0;
1701	for (i = 0; i < sc->sc_msiq_count; i++) {
1702		if (isclr(sc->sc_msiq_bitmap, i) != 0) {
1703			msiq = i;
1704			break;
1705		}
1706	}
1707	if (i == sc->sc_msiq_count) {
1708		mtx_unlock(&sc->sc_msi_mtx);
1709		return (ENXIO);
1710	}
1711	for (i = sc->sc_msi_count - 1; i >= 0; i--) {
1712		if (isclr(sc->sc_msi_bitmap, i) != 0) {
1713			setbit(sc->sc_msiq_bitmap, msiq);
1714			setbit(sc->sc_msi_bitmap, i);
1715			sc->sc_msi_msiq_table[i] = msiq;
1716			*irq = sc->sc_msi_first + i;
1717			mtx_unlock(&sc->sc_msi_mtx);
1718			return (0);
1719		}
1720	}
1721	mtx_unlock(&sc->sc_msi_mtx);
1722	return (ENXIO);
1723}
1724
static int
fire_release_msix(device_t dev, device_t child, int irq)
{
	struct fire_softc *sc;

	sc = device_get_softc(dev);
	if ((sc->sc_flags & FIRE_MSIX) == 0)
		return (ENXIO);
	mtx_lock(&sc->sc_msi_mtx);
	clrbit(sc->sc_msiq_bitmap,
	    sc->sc_msi_msiq_table[irq - sc->sc_msi_first]);
	clrbit(sc->sc_msi_bitmap, irq - sc->sc_msi_first);
	mtx_unlock(&sc->sc_msi_mtx);
	return (0);
}

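/*
 * Return the address/data pair a child has to program into its MSI or
 * MSI-X capability: the data simply is the MSI number, while the
 * 32-bit doorbell address is only used for plain MSIs lacking 64-bit
 * address support.
 */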
static int
fire_map_msi(device_t dev, device_t child, int irq, uint64_t *addr,
    uint32_t *data)
{
	struct fire_softc *sc;
	struct pci_devinfo *dinfo;

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);
	if (dinfo->cfg.msi.msi_alloc > 0) {
		if ((irq & ~sc->sc_msi_data_mask) != 0) {
			device_printf(dev, "invalid MSI 0x%x\n", irq);
			return (EINVAL);
		}
	} else {
		if ((sc->sc_flags & FIRE_MSIX) == 0)
			return (ENXIO);
		if (fls(irq) > sc->sc_msix_data_width) {
			device_printf(dev, "invalid MSI-X 0x%x\n", irq);
			return (EINVAL);
		}
	}
	if (dinfo->cfg.msi.msi_alloc > 0 &&
	    (dinfo->cfg.msi.msi_ctrl & PCIM_MSICTRL_64BIT) == 0)
		*addr = sc->sc_msi_addr32;
	else
		*addr = sc->sc_msi_addr64;
	*data = irq;
	return (0);
}

static void
fire_msiq_handler(void *cookie)
{
	struct intr_vector *iv;
	struct fire_msiqarg *fmqa;

	iv = cookie;
	fmqa = iv->iv_icarg;
	/*
	 * Note that since fire_intr_clear() will clear the event queue
	 * interrupt after the handler associated with the MSI [sic] has
	 * been executed, we have to protect the access to the event queue;
	 * otherwise, nested event queue interrupts cause corruption of the
	 * event queue on MP machines.  Especially when abandoning the 1:1
	 * mapping, it would obviously be better not to clear the event
	 * queue interrupt after each handler invocation but only once all
	 * outstanding MSIs have been processed.  Unfortunately, that
	 * doesn't work well and leads to interrupt storms with controllers/
	 * drivers which don't mask interrupts while the handler is
	 * executed.  Maybe delaying clearing the MSI until after the
	 * handler has been executed could be used to work around this, but
	 * that's not the intended usage and might in turn cause lost MSIs.
	 */
	mtx_lock_spin(&fmqa->fmqa_mtx);
	fire_msiq_common(iv, fmqa);
	mtx_unlock_spin(&fmqa->fmqa_mtx);
}

static void
fire_msiq_filter(void *cookie)
{
	struct intr_vector *iv;
	struct fire_msiqarg *fmqa;

	iv = cookie;
	fmqa = iv->iv_icarg;
	/*
	 * For filters, we don't use fire_intr_clear() since it would clear
	 * the event queue interrupt while we're still processing the event
	 * queue (filters and the associated post-filter handlers are
	 * executed directly), which in turn would lead to lost MSIs.  So
	 * we clear the event queue interrupt only once, after processing
	 * the entire event queue.  Given that this still guarantees that
	 * the filters are not executed concurrently and that no other CPU
	 * can clear the event queue interrupt while the event queue is
	 * still being processed, we don't even need to interlock the
	 * access to the event queue in this case.
	 */
	critical_enter();
	fire_msiq_common(iv, fmqa);
	FIRE_PCI_WRITE_8(fmqa->fmqa_fica.fica_sc, fmqa->fmqa_fica.fica_clr,
	    INTCLR_IDLE);
	critical_exit();
}

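/*
 * Drain an event queue: starting at the current head, process all
 * valid records (asserting that they are MSI/MSI-X messages matching
 * the 1:1 mapping), acknowledge each MSI, dispatch the associated
 * handlers, write back the new head and finally clear an eventual
 * overflow condition indicated by the tail register.
 */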
static inline void
fire_msiq_common(struct intr_vector *iv, struct fire_msiqarg *fmqa)
{
	struct fire_softc *sc;
	struct fo_msiq_record *qrec;
	device_t dev;
	uint64_t word0;
	u_int head, msi, msiq;

	sc = fmqa->fmqa_fica.fica_sc;
	dev = sc->sc_dev;
	msiq = fmqa->fmqa_msiq;
	head = (FIRE_PCI_READ_8(sc, fmqa->fmqa_head) & FO_PCI_EQ_HD_MASK) >>
	    FO_PCI_EQ_HD_SHFT;
	qrec = &fmqa->fmqa_base[head];
	word0 = qrec->fomqr_word0;
	for (;;) {
		if (__predict_false((word0 & FO_MQR_WORD0_FMT_TYPE_MASK) == 0))
			break;
		KASSERT((word0 & FO_MQR_WORD0_FMT_TYPE_MSI64) != 0 ||
		    (word0 & FO_MQR_WORD0_FMT_TYPE_MSI32) != 0,
		    ("%s: received non-MSI/MSI-X message in event queue %d "
		    "(word0 %#llx)", device_get_nameunit(dev), msiq,
		    (unsigned long long)word0));
		msi = (word0 & FO_MQR_WORD0_DATA0_MASK) >>
		    FO_MQR_WORD0_DATA0_SHFT;
		/*
		 * Sanity check the MSI/MSI-X as long as we use a 1:1 mapping.
		 */
		KASSERT(msi == fmqa->fmqa_msi,
		    ("%s: received non-matching MSI/MSI-X in event queue %d "
		    "(%d versus %d)", device_get_nameunit(dev), msiq, msi,
		    fmqa->fmqa_msi));
		FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_CLR_BASE + (msi << 3),
		    FO_PCI_MSI_CLR_EQWR_N);
		if (__predict_false(intr_event_handle(iv->iv_event,
		    NULL) != 0))
			printf("stray MSI/MSI-X in event queue %d\n", msiq);
		qrec->fomqr_word0 &= ~FO_MQR_WORD0_FMT_TYPE_MASK;
		head = (head + 1) % sc->sc_msiq_size;
		qrec = &fmqa->fmqa_base[head];
		word0 = qrec->fomqr_word0;
	}
	FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head, (head & FO_PCI_EQ_HD_MASK) <<
	    FO_PCI_EQ_HD_SHFT);
	if (__predict_false((FIRE_PCI_READ_8(sc, fmqa->fmqa_tail) &
	    FO_PCI_EQ_TL_OVERR) != 0)) {
		device_printf(dev, "event queue %d overflow\n", msiq);
		msiq <<= 3;
		FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq,
		    FIRE_PCI_READ_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq) |
		    FO_PCI_EQ_CTRL_CLR_COVERR);
	}
}

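/*
 * For MSIs/MSI-Xs (RID != 0), translate the MSI into the vector of its
 * event queue, register the handler with the generic bus code and then
 * substitute fire_msiq_filter()/fire_msiq_handler() as the actual
 * dispatcher, enable the event queue and map the MSI to it.  INTx
 * interrupts (RID 0) are merely sanity-checked and passed on.
 */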
static int
fire_setup_intr(device_t dev, device_t child, struct resource *ires,
    int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg,
    void **cookiep)
{
	struct fire_softc *sc;
	struct fire_msiqarg *fmqa;
	u_long vec;
	int error;
	u_int msi, msiq;

	sc = device_get_softc(dev);
	/*
	 * XXX this assumes that a device only has one INTx, while in fact
	 * Cassini+ and Saturn can use all four interrupt lines the firmware
	 * has assigned to them, but so does pci(4).
	 */
	if (rman_get_rid(ires) != 0) {
		msi = rman_get_start(ires);
		msiq = sc->sc_msi_msiq_table[msi - sc->sc_msi_first];
		vec = INTMAP_VEC(sc->sc_ign, sc->sc_msiq_ino_first + msiq);
		msiq += sc->sc_msiq_first;
		if (intr_vectors[vec].iv_ic != &fire_ic) {
			device_printf(dev,
			    "invalid interrupt controller for vector 0x%lx\n",
			    vec);
			return (EINVAL);
		}
		/*
		 * The MD interrupt code needs the vector rather than the MSI.
		 */
		rman_set_start(ires, vec);
		rman_set_end(ires, vec);
		error = bus_generic_setup_intr(dev, child, ires, flags, filt,
		    intr, arg, cookiep);
		rman_set_start(ires, msi);
		rman_set_end(ires, msi);
		if (error != 0)
			return (error);
		fmqa = intr_vectors[vec].iv_icarg;
		/*
		 * XXX inject our event queue handler.
		 */
		if (filt != NULL) {
			intr_vectors[vec].iv_func = fire_msiq_filter;
			intr_vectors[vec].iv_ic = &fire_msiqc_filter;
			/*
			 * Ensure the event queue interrupt is cleared; it
			 * might have triggered before.  Given that we supply
			 * NULL as ic_clear, inthand_add() won't do this for
			 * us.
			 */
			FIRE_PCI_WRITE_8(sc, fmqa->fmqa_fica.fica_clr,
			    INTCLR_IDLE);
		} else
			intr_vectors[vec].iv_func = fire_msiq_handler;
		/* Record the MSI/MSI-X as long as we use a 1:1 mapping. */
		fmqa->fmqa_msi = msi;
		FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_SET_BASE + (msiq << 3),
		    FO_PCI_EQ_CTRL_SET_EN);
		msi <<= 3;
		FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi,
		    (FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) &
		    ~FO_PCI_MSI_MAP_EQNUM_MASK) |
		    ((msiq << FO_PCI_MSI_MAP_EQNUM_SHFT) &
		    FO_PCI_MSI_MAP_EQNUM_MASK));
		FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_CLR_BASE + msi,
		    FO_PCI_MSI_CLR_EQWR_N);
		FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi,
		    FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) |
		    FO_PCI_MSI_MAP_V);
		return (error);
	}

	/*
	 * Make sure the vector is fully specified and we registered
	 * our interrupt controller for it.
	 */
	vec = rman_get_start(ires);
	if (INTIGN(vec) != sc->sc_ign) {
		device_printf(dev, "invalid interrupt vector 0x%lx\n", vec);
		return (EINVAL);
	}
	if (intr_vectors[vec].iv_ic != &fire_ic) {
		device_printf(dev,
		    "invalid interrupt controller for vector 0x%lx\n", vec);
		return (EINVAL);
	}
	return (bus_generic_setup_intr(dev, child, ires, flags, filt, intr,
	    arg, cookiep));
}

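/*
 * Undo fire_setup_intr(): for an MSI/MSI-X, invalidate the MSI
 * mapping, disable and reset the associated event queue and hand the
 * vector back to fire_ic before tearing down the handler.
 */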
static int
fire_teardown_intr(device_t dev, device_t child, struct resource *ires,
    void *cookie)
{
	struct fire_softc *sc;
	u_long vec;
	int error;
	u_int msi, msiq;

	sc = device_get_softc(dev);
	if (rman_get_rid(ires) != 0) {
		msi = rman_get_start(ires);
		msiq = sc->sc_msi_msiq_table[msi - sc->sc_msi_first];
		vec = INTMAP_VEC(sc->sc_ign, msiq + sc->sc_msiq_ino_first);
		msiq += sc->sc_msiq_first;
		msi <<= 3;
		FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi,
		    FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) &
		    ~FO_PCI_MSI_MAP_V);
		msiq <<= 3;
		FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq,
		    FO_PCI_EQ_CTRL_CLR_COVERR | FO_PCI_EQ_CTRL_CLR_E2I |
		    FO_PCI_EQ_CTRL_CLR_DIS);
		FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_TL_BASE + msiq,
		    (0 << FO_PCI_EQ_TL_SHFT) & FO_PCI_EQ_TL_MASK);
		FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_HD_BASE + msiq,
		    (0 << FO_PCI_EQ_HD_SHFT) & FO_PCI_EQ_HD_MASK);
		intr_vectors[vec].iv_ic = &fire_ic;
		/*
		 * The MD interrupt code needs the vector rather than the MSI.
		 */
		rman_set_start(ires, vec);
		rman_set_end(ires, vec);
		error = bus_generic_teardown_intr(dev, child, ires, cookie);
		msi >>= 3;
		rman_set_start(ires, msi);
		rman_set_end(ires, msi);
		return (error);
	}
	return (bus_generic_teardown_intr(dev, child, ires, cookie));
}

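/*
 * Allocate memory and I/O port resources from our own rmans, adjusting
 * the bus handle to point into the corresponding PCI space; interrupt
 * allocations are fully specified for RID 0 and passed up to the
 * parent.
 */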
static struct resource *
fire_alloc_resource(device_t bus, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
	struct fire_softc *sc;
	struct resource *rv;
	struct rman *rm;
	bus_space_tag_t bt;
	bus_space_handle_t bh;
	int needactivate = flags & RF_ACTIVE;

	flags &= ~RF_ACTIVE;

	sc = device_get_softc(bus);
	if (type == SYS_RES_IRQ) {
		/*
		 * XXX: Don't accept blank ranges for now, only single
		 * interrupts.  The other case should not happen with
		 * the MI PCI code...
		 * XXX: This may return a resource that is out of the
		 * range that was specified.  Is this correct...?
		 */
		if (start != end)
			panic("%s: XXX: interrupt range", __func__);
		if (*rid == 0)
			start = end = INTMAP_VEC(sc->sc_ign, end);
		return (BUS_ALLOC_RESOURCE(device_get_parent(bus), child,
		    type, rid, start, end, count, flags));
	}
	switch (type) {
	case SYS_RES_MEMORY:
		rm = &sc->sc_pci_mem_rman;
		bt = sc->sc_pci_memt;
		bh = sc->sc_pci_bh[OFW_PCI_CS_MEM32];
		break;
	case SYS_RES_IOPORT:
		rm = &sc->sc_pci_io_rman;
		bt = sc->sc_pci_iot;
		bh = sc->sc_pci_bh[OFW_PCI_CS_IO];
		break;
	default:
		return (NULL);
		/* NOTREACHED */
	}

	rv = rman_reserve_resource(rm, start, end, count, flags, child);
	if (rv == NULL)
		return (NULL);
	rman_set_rid(rv, *rid);
	bh += rman_get_start(rv);
	rman_set_bustag(rv, bt);
	rman_set_bushandle(rv, bh);

	if (needactivate) {
		if (bus_activate_resource(child, type, *rid, rv)) {
			rman_release_resource(rv);
			return (NULL);
		}
	}
	return (rv);
}

static int
fire_activate_resource(device_t bus, device_t child, int type, int rid,
    struct resource *r)
{
	void *p;
	int error;

	if (type == SYS_RES_IRQ)
		return (BUS_ACTIVATE_RESOURCE(device_get_parent(bus), child,
		    type, rid, r));
	if (type == SYS_RES_MEMORY) {
		/*
		 * Need to memory-map the device space, as some drivers
		 * depend on the virtual address being set and usable.
		 */
		error = sparc64_bus_mem_map(rman_get_bustag(r),
		    rman_get_bushandle(r), rman_get_size(r), 0, 0, &p);
		if (error != 0)
			return (error);
		rman_set_virtual(r, p);
	}
	return (rman_activate_resource(r));
}

static int
fire_deactivate_resource(device_t bus, device_t child, int type, int rid,
    struct resource *r)
{

	if (type == SYS_RES_IRQ)
		return (BUS_DEACTIVATE_RESOURCE(device_get_parent(bus), child,
		    type, rid, r));
	if (type == SYS_RES_MEMORY) {
		sparc64_bus_mem_unmap(rman_get_virtual(r), rman_get_size(r));
		rman_set_virtual(r, NULL);
	}
	return (rman_deactivate_resource(r));
}

static int
fire_release_resource(device_t bus, device_t child, int type, int rid,
    struct resource *r)
{
	int error;

	if (type == SYS_RES_IRQ)
		return (BUS_RELEASE_RESOURCE(device_get_parent(bus), child,
		    type, rid, r));
	if (rman_get_flags(r) & RF_ACTIVE) {
		error = bus_deactivate_resource(child, type, rid, r);
		if (error)
			return (error);
	}
	return (rman_release_resource(r));
}

static bus_dma_tag_t
fire_get_dma_tag(device_t bus, device_t child __unused)
{
	struct fire_softc *sc;

	sc = device_get_softc(bus);
	return (sc->sc_pci_dmat);
}

static phandle_t
fire_get_node(device_t bus, device_t child __unused)
{
	struct fire_softc *sc;

	sc = device_get_softc(bus);
	/* We only have one child, the PCI bus, which needs our own node. */
	return (sc->sc_node);
}

static bus_space_tag_t
fire_alloc_bus_tag(struct fire_softc *sc, int type)
{
	bus_space_tag_t bt;

	bt = malloc(sizeof(struct bus_space_tag), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (bt == NULL)
		panic("%s: out of memory", __func__);

	bt->bst_cookie = sc;
	bt->bst_parent = rman_get_bustag(sc->sc_mem_res[FIRE_PCI]);
	bt->bst_type = type;
	return (bt);
}

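/*
 * Timecounter read function: returns performance counter 0 of the
 * host bridge, masked to the counter width advertised when the
 * timecounter is registered.
 */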
static u_int
fire_get_timecount(struct timecounter *tc)
{
	struct fire_softc *sc;

	sc = tc->tc_priv;
	return (FIRE_CTRL_READ_8(sc, FO_XBC_PRF_CNT0) & TC_COUNTER_MAX_MASK);
}
