/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1999, 2000 Matthew R. Green
 * Copyright (c) 2001 - 2003 by Thomas Moestl <tmm@FreeBSD.org>
 * Copyright (c) 2009 by Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: NetBSD: psycho.c,v 1.39 2001/10/07 20:30:41 eeh Exp
 *	from: FreeBSD: psycho.c 183152 2008-09-18 19:45:22Z marius
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for `Fire' JBus to PCI Express and `Oberon' Uranus to PCI Express
 * bridges
 */

#include "opt_fire.h"
#include "opt_ofw_pci.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/timetc.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/bus_common.h>
#include <machine/bus_private.h>
#include <machine/iommureg.h>
#include <machine/iommuvar.h>
#include <machine/resource.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcib_private.h>

#include <sparc64/pci/ofw_pci.h>
#include <sparc64/pci/firereg.h>
#include <sparc64/pci/firevar.h>

#include "pcib_if.h"

struct fire_msiqarg;

static const struct fire_desc *fire_get_desc(device_t dev);
static void fire_dmamap_sync(bus_dma_tag_t dt __unused, bus_dmamap_t map,
    bus_dmasync_op_t op);
static int fire_get_intrmap(struct fire_softc *sc, u_int ino,
    bus_addr_t *intrmapptr, bus_addr_t *intrclrptr);
static void fire_intr_assign(void *arg);
static void fire_intr_clear(void *arg);
static void fire_intr_disable(void *arg);
static void fire_intr_enable(void *arg);
static int fire_intr_register(struct fire_softc *sc, u_int ino);
static inline void fire_msiq_common(struct intr_vector *iv,
    struct fire_msiqarg *fmqa);
static void fire_msiq_filter(void *cookie);
static void fire_msiq_handler(void *cookie);
static void fire_set_intr(struct fire_softc *sc, u_int index, u_int ino,
    driver_filter_t handler, void *arg);
static timecounter_get_t fire_get_timecount;

/* Interrupt handlers */
static driver_filter_t fire_dmc_pec;
static driver_filter_t fire_pcie;
static driver_filter_t fire_xcb;

/*
 * Methods
 */
static pcib_alloc_msi_t fire_alloc_msi;
static pcib_alloc_msix_t fire_alloc_msix;
static bus_alloc_resource_t fire_alloc_resource;
static device_attach_t fire_attach;
static pcib_map_msi_t fire_map_msi;
static pcib_maxslots_t fire_maxslots;
static device_probe_t fire_probe;
static pcib_read_config_t fire_read_config;
static pcib_release_msi_t fire_release_msi;
static pcib_release_msix_t fire_release_msix;
static pcib_route_interrupt_t fire_route_interrupt;
static bus_setup_intr_t fire_setup_intr;
static bus_teardown_intr_t fire_teardown_intr;
static pcib_write_config_t fire_write_config;

static device_method_t fire_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		fire_probe),
	DEVMETHOD(device_attach,	fire_attach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	ofw_pci_read_ivar),
	DEVMETHOD(bus_setup_intr,	fire_setup_intr),
	DEVMETHOD(bus_teardown_intr,	fire_teardown_intr),
	DEVMETHOD(bus_alloc_resource,	fire_alloc_resource),
	DEVMETHOD(bus_activate_resource, ofw_pci_activate_resource),
	DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
	DEVMETHOD(bus_adjust_resource,	ofw_pci_adjust_resource),
	DEVMETHOD(bus_release_resource,	bus_generic_release_resource),
	DEVMETHOD(bus_get_dma_tag,	ofw_pci_get_dma_tag),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,	fire_maxslots),
	DEVMETHOD(pcib_read_config,	fire_read_config),
	DEVMETHOD(pcib_write_config,	fire_write_config),
	DEVMETHOD(pcib_route_interrupt,	fire_route_interrupt),
	DEVMETHOD(pcib_alloc_msi,	fire_alloc_msi),
	DEVMETHOD(pcib_release_msi,	fire_release_msi),
	DEVMETHOD(pcib_alloc_msix,	fire_alloc_msix),
	DEVMETHOD(pcib_release_msix,	fire_release_msix),
	DEVMETHOD(pcib_map_msi,		fire_map_msi),
	DEVMETHOD(pcib_request_feature,	pcib_request_feature_allow),

	/* ofw_bus interface */
	DEVMETHOD(ofw_bus_get_node,	ofw_pci_get_node),

	DEVMETHOD_END
};

static devclass_t fire_devclass;

DEFINE_CLASS_0(pcib, fire_driver, fire_methods, sizeof(struct fire_softc));
EARLY_DRIVER_MODULE(fire, nexus, fire_driver, fire_devclass, 0, 0,
    BUS_PASS_BUS);
MODULE_DEPEND(fire, nexus, 1, 1, 1);

static const struct intr_controller fire_ic = {
	fire_intr_enable,
	fire_intr_disable,
	fire_intr_assign,
	fire_intr_clear
};

struct fire_icarg {
	struct fire_softc	*fica_sc;
	bus_addr_t		fica_map;
	bus_addr_t		fica_clr;
};

static const struct intr_controller fire_msiqc_filter = {
	fire_intr_enable,
	fire_intr_disable,
	fire_intr_assign,
	NULL
};

struct fire_msiqarg {
	struct fire_icarg	fmqa_fica;
	struct mtx		fmqa_mtx;
	struct fo_msiq_record	*fmqa_base;
	uint64_t		fmqa_head;
	uint64_t		fmqa_tail;
	uint32_t		fmqa_msiq;
	uint32_t		fmqa_msi;
};

#define	FIRE_PERF_CNT_QLTY	100

#define	FIRE_SPC_BARRIER(spc, sc, offs, len, flags)			\
	bus_barrier((sc)->sc_mem_res[(spc)], (offs), (len), (flags))
#define	FIRE_SPC_READ_8(spc, sc, offs)					\
	bus_read_8((sc)->sc_mem_res[(spc)], (offs))
#define	FIRE_SPC_WRITE_8(spc, sc, offs, v)				\
	bus_write_8((sc)->sc_mem_res[(spc)], (offs), (v))

#ifndef FIRE_DEBUG
#define	FIRE_SPC_SET(spc, sc, offs, reg, v)				\
	FIRE_SPC_WRITE_8((spc), (sc), (offs), (v))
#else
#define	FIRE_SPC_SET(spc, sc, offs, reg, v) do {			\
	device_printf((sc)->sc_dev, reg " 0x%016llx -> 0x%016llx\n",	\
	    (unsigned long long)FIRE_SPC_READ_8((spc), (sc), (offs)),	\
	    (unsigned long long)(v));					\
	FIRE_SPC_WRITE_8((spc), (sc), (offs), (v));			\
	} while (0)
#endif
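
/*
 * With FIRE_DEBUG defined, FIRE_SPC_SET() (and thus FIRE_PCI_SET() and
 * FIRE_CTRL_SET() below) logs the stringified register offset together
 * with the old and new register contents, e.g. (sketch of the output,
 * device name assumed):
 *	fire0: FO_PCI_TLU_CTRL 0x0000000000000001 -> 0x0000000000000005
 */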

#define	FIRE_PCI_BARRIER(sc, offs, len, flags)				\
	FIRE_SPC_BARRIER(FIRE_PCI, (sc), (offs), len, flags)
#define	FIRE_PCI_READ_8(sc, offs)					\
	FIRE_SPC_READ_8(FIRE_PCI, (sc), (offs))
#define	FIRE_PCI_WRITE_8(sc, offs, v)					\
	FIRE_SPC_WRITE_8(FIRE_PCI, (sc), (offs), (v))
#define	FIRE_CTRL_BARRIER(sc, offs, len, flags)				\
	FIRE_SPC_BARRIER(FIRE_CTRL, (sc), (offs), len, flags)
#define	FIRE_CTRL_READ_8(sc, offs)					\
	FIRE_SPC_READ_8(FIRE_CTRL, (sc), (offs))
#define	FIRE_CTRL_WRITE_8(sc, offs, v)					\
	FIRE_SPC_WRITE_8(FIRE_CTRL, (sc), (offs), (v))

#define	FIRE_PCI_SET(sc, offs, v)					\
	FIRE_SPC_SET(FIRE_PCI, (sc), (offs), # offs, (v))
#define	FIRE_CTRL_SET(sc, offs, v)					\
	FIRE_SPC_SET(FIRE_CTRL, (sc), (offs), # offs, (v))

struct fire_desc {
	const char	*fd_string;
	int		fd_mode;
	const char	*fd_name;
};

static const struct fire_desc fire_compats[] = {
	{ "pciex108e,80f0",	FIRE_MODE_FIRE,		"Fire" },
#if 0
	{ "pciex108e,80f8",	FIRE_MODE_OBERON,	"Oberon" },
#endif
	{ NULL,			0,			NULL }
};

static const struct fire_desc *
fire_get_desc(device_t dev)
{
	const struct fire_desc *desc;
	const char *compat;

	compat = ofw_bus_get_compat(dev);
	if (compat == NULL)
		return (NULL);
	for (desc = fire_compats; desc->fd_string != NULL; desc++)
		if (strcmp(desc->fd_string, compat) == 0)
			return (desc);
	return (NULL);
}

static int
fire_probe(device_t dev)
{
	const char *dtype;

	dtype = ofw_bus_get_type(dev);
	if (dtype != NULL && strcmp(dtype, OFW_TYPE_PCIE) == 0 &&
	    fire_get_desc(dev) != NULL) {
		device_set_desc(dev, "Sun Host-PCIe bridge");
		return (BUS_PROBE_GENERIC);
	}
	return (ENXIO);
}

static int
fire_attach(device_t dev)
{
	struct fire_softc *sc;
	const struct fire_desc *desc;
	struct ofw_pci_msi_ranges msi_ranges;
	struct ofw_pci_msi_addr_ranges msi_addr_ranges;
	struct ofw_pci_msi_eq_to_devino msi_eq_to_devino;
	struct fire_msiqarg *fmqa;
	struct timecounter *tc;
	bus_dma_tag_t dmat;
	uint64_t ino_bitmap, val;
	phandle_t node;
	uint32_t prop, prop_array[2];
	int i, j, mode;
	u_int lw;
	uint16_t mps;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);
	desc = fire_get_desc(dev);
	mode = desc->fd_mode;

	sc->sc_dev = dev;
	sc->sc_mode = mode;
	sc->sc_flags = 0;

	mtx_init(&sc->sc_msi_mtx, "msi_mtx", NULL, MTX_DEF);
	mtx_init(&sc->sc_pcib_mtx, "pcib_mtx", NULL, MTX_SPIN);

	/*
	 * Fire and Oberon have two register banks:
	 * (0) per-PBM PCI Express configuration and status registers
	 * (1) (shared) Fire/Oberon controller configuration and status
	 *     registers
	 */
	for (i = 0; i < FIRE_NREG; i++) {
		j = i;
		sc->sc_mem_res[i] = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &j, RF_ACTIVE);
		if (sc->sc_mem_res[i] == NULL)
			panic("%s: could not allocate register bank %d",
			    __func__, i);
	}

	if (OF_getprop(node, "portid", &sc->sc_ign, sizeof(sc->sc_ign)) == -1)
		panic("%s: could not determine IGN", __func__);
	if (OF_getprop(node, "module-revision#", &prop, sizeof(prop)) == -1)
		panic("%s: could not determine module-revision", __func__);

	device_printf(dev, "%s, module-revision %d, IGN %#x\n",
	    desc->fd_name, prop, sc->sc_ign);

	/*
	 * Hunt through all the interrupt mapping regs and register
	 * the interrupt controller for our interrupt vectors.  We do
	 * this early in order to be able to catch stray interrupts.
	 */
	i = OF_getprop(node, "ino-bitmap", (void *)prop_array,
	    sizeof(prop_array));
	if (i == -1)
		panic("%s: could not get ino-bitmap", __func__);
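	/*
	 * The "ino-bitmap" property consists of two 32-bit cells, the low
	 * word first.
	 */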
	ino_bitmap = ((uint64_t)prop_array[1] << 32) | prop_array[0];
	for (i = 0; i <= FO_MAX_INO; i++) {
		if ((ino_bitmap & (1ULL << i)) == 0)
			continue;
		j = fire_intr_register(sc, i);
		if (j != 0)
			device_printf(dev, "could not register interrupt "
			    "controller for INO %d (%d)\n", i, j);
	}

	/* JBC/UBC module initialization */
	FIRE_CTRL_SET(sc, FO_XBC_ERR_LOG_EN, ~0ULL);
	FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL);
	/* not enabled by OpenSolaris */
	FIRE_CTRL_SET(sc, FO_XBC_INT_EN, ~0ULL);
	if (sc->sc_mode == FIRE_MODE_FIRE) {
		FIRE_CTRL_SET(sc, FIRE_JBUS_PAR_CTRL,
		    FIRE_JBUS_PAR_CTRL_P_EN);
		FIRE_CTRL_SET(sc, FIRE_JBC_FATAL_RST_EN,
		    ((1ULL << FIRE_JBC_FATAL_RST_EN_SPARE_P_INT_SHFT) &
		    FIRE_JBC_FATAL_RST_EN_SPARE_P_INT_MASK) |
		    FIRE_JBC_FATAL_RST_EN_MB_PEA_P_INT |
		    FIRE_JBC_FATAL_RST_EN_CPE_P_INT |
		    FIRE_JBC_FATAL_RST_EN_APE_P_INT |
		    FIRE_JBC_FATAL_RST_EN_PIO_CPE_INT |
		    FIRE_JBC_FATAL_RST_EN_JTCEEW_P_INT |
		    FIRE_JBC_FATAL_RST_EN_JTCEEI_P_INT |
		    FIRE_JBC_FATAL_RST_EN_JTCEER_P_INT);
		FIRE_CTRL_SET(sc, FIRE_JBC_CORE_BLOCK_INT_EN, ~0ULL);
	}

	/* TLU initialization */
	FIRE_PCI_SET(sc, FO_PCI_TLU_OEVENT_STAT_CLR,
	    FO_PCI_TLU_OEVENT_S_MASK | FO_PCI_TLU_OEVENT_P_MASK);
	/* not enabled by OpenSolaris */
	FIRE_PCI_SET(sc, FO_PCI_TLU_OEVENT_INT_EN,
	    FO_PCI_TLU_OEVENT_S_MASK | FO_PCI_TLU_OEVENT_P_MASK);
	FIRE_PCI_SET(sc, FO_PCI_TLU_UERR_STAT_CLR,
	    FO_PCI_TLU_UERR_INT_S_MASK | FO_PCI_TLU_UERR_INT_P_MASK);
	/* not enabled by OpenSolaris */
	FIRE_PCI_SET(sc, FO_PCI_TLU_UERR_INT_EN,
	    FO_PCI_TLU_UERR_INT_S_MASK | FO_PCI_TLU_UERR_INT_P_MASK);
	FIRE_PCI_SET(sc, FO_PCI_TLU_CERR_STAT_CLR,
	    FO_PCI_TLU_CERR_INT_S_MASK | FO_PCI_TLU_CERR_INT_P_MASK);
	/* not enabled by OpenSolaris */
	FIRE_PCI_SET(sc, FO_PCI_TLU_CERR_INT_EN,
	    FO_PCI_TLU_CERR_INT_S_MASK | FO_PCI_TLU_CERR_INT_P_MASK);
	val = FIRE_PCI_READ_8(sc, FO_PCI_TLU_CTRL) |
	    ((FO_PCI_TLU_CTRL_L0S_TIM_DFLT << FO_PCI_TLU_CTRL_L0S_TIM_SHFT) &
	    FO_PCI_TLU_CTRL_L0S_TIM_MASK) |
	    ((FO_PCI_TLU_CTRL_CFG_DFLT << FO_PCI_TLU_CTRL_CFG_SHFT) &
	    FO_PCI_TLU_CTRL_CFG_MASK);
	if (sc->sc_mode == FIRE_MODE_OBERON)
		val &= ~FO_PCI_TLU_CTRL_NWPR_EN;
	val |= FO_PCI_TLU_CTRL_CFG_REMAIN_DETECT_QUIET;
	FIRE_PCI_SET(sc, FO_PCI_TLU_CTRL, val);
	FIRE_PCI_SET(sc, FO_PCI_TLU_DEV_CTRL, 0);
	FIRE_PCI_SET(sc, FO_PCI_TLU_LNK_CTRL, FO_PCI_TLU_LNK_CTRL_CLK);

	/* DLU/LPU initialization */
	if (sc->sc_mode == FIRE_MODE_OBERON)
		FIRE_PCI_SET(sc, FO_PCI_LPU_INT_MASK, 0);
	else
		FIRE_PCI_SET(sc, FO_PCI_LPU_RST, 0);
	FIRE_PCI_SET(sc, FO_PCI_LPU_LNK_LYR_CFG,
	    FO_PCI_LPU_LNK_LYR_CFG_VC0_EN);
	FIRE_PCI_SET(sc, FO_PCI_LPU_FLW_CTRL_UPDT_CTRL,
	    FO_PCI_LPU_FLW_CTRL_UPDT_CTRL_FC0_NP_EN |
	    FO_PCI_LPU_FLW_CTRL_UPDT_CTRL_FC0_P_EN);
	if (sc->sc_mode == FIRE_MODE_OBERON)
		FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RPLY_TMR_THRS,
		    (OBERON_PCI_LPU_TXLNK_RPLY_TMR_THRS_DFLT <<
		    FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_SHFT) &
		    FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_MASK);
	else {
		switch ((FIRE_PCI_READ_8(sc, FO_PCI_TLU_LNK_STAT) &
		    FO_PCI_TLU_LNK_STAT_WDTH_MASK) >>
		    FO_PCI_TLU_LNK_STAT_WDTH_SHFT) {
		case 1:
			lw = 0;
			break;
		case 4:
			lw = 1;
			break;
		case 8:
			lw = 2;
			break;
		case 16:
			lw = 3;
			break;
		default:
			lw = 0;
		}
		mps = (FIRE_PCI_READ_8(sc, FO_PCI_TLU_CTRL) &
		    FO_PCI_TLU_CTRL_CFG_MPS_MASK) >>
		    FO_PCI_TLU_CTRL_CFG_MPS_SHFT;
		i = sizeof(fire_freq_nak_tmr_thrs) /
		    sizeof(*fire_freq_nak_tmr_thrs);
		if (mps >= i)
			mps = i - 1;
		FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS,
		    (fire_freq_nak_tmr_thrs[mps][lw] <<
		    FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS_SHFT) &
		    FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS_MASK);
		FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RPLY_TMR_THRS,
		    (fire_rply_tmr_thrs[mps][lw] <<
		    FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_SHFT) &
		    FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_MASK);
		FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RTR_FIFO_PTR,
		    ((FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_DFLT <<
		    FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_SHFT) &
		    FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_MASK) |
		    ((FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_DFLT <<
		    FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_SHFT) &
		    FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_MASK));
		FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG2,
		    (FO_PCI_LPU_LTSSM_CFG2_12_TO_DFLT <<
		    FO_PCI_LPU_LTSSM_CFG2_12_TO_SHFT) &
		    FO_PCI_LPU_LTSSM_CFG2_12_TO_MASK);
		FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG3,
		    (FO_PCI_LPU_LTSSM_CFG3_2_TO_DFLT <<
		    FO_PCI_LPU_LTSSM_CFG3_2_TO_SHFT) &
		    FO_PCI_LPU_LTSSM_CFG3_2_TO_MASK);
		FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG4,
		    ((FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_DFLT <<
		    FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_SHFT) &
		    FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_MASK) |
		    ((FO_PCI_LPU_LTSSM_CFG4_N_FTS_DFLT <<
		    FO_PCI_LPU_LTSSM_CFG4_N_FTS_SHFT) &
		    FO_PCI_LPU_LTSSM_CFG4_N_FTS_MASK));
		FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG5, 0);
	}

	/* ILU initialization */
	FIRE_PCI_SET(sc, FO_PCI_ILU_ERR_STAT_CLR, ~0ULL);
	/* not enabled by OpenSolaris */
	FIRE_PCI_SET(sc, FO_PCI_ILU_INT_EN, ~0ULL);

	/* IMU initialization */
	FIRE_PCI_SET(sc, FO_PCI_IMU_ERR_STAT_CLR, ~0ULL);
	FIRE_PCI_SET(sc, FO_PCI_IMU_INT_EN,
	    FIRE_PCI_READ_8(sc, FO_PCI_IMU_INT_EN) &
	    ~(FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_S |
	    FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_S |
	    FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_S |
	    FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_P |
	    FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_P |
	    FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_P));

	/* MMU initialization */
	FIRE_PCI_SET(sc, FO_PCI_MMU_ERR_STAT_CLR,
	    FO_PCI_MMU_ERR_INT_S_MASK | FO_PCI_MMU_ERR_INT_P_MASK);
	/* not enabled by OpenSolaris */
	FIRE_PCI_SET(sc, FO_PCI_MMU_INT_EN,
	    FO_PCI_MMU_ERR_INT_S_MASK | FO_PCI_MMU_ERR_INT_P_MASK);

	/* DMC initialization */
	FIRE_PCI_SET(sc, FO_PCI_DMC_CORE_BLOCK_INT_EN, ~0ULL);
	FIRE_PCI_SET(sc, FO_PCI_DMC_DBG_SEL_PORTA, 0);
	FIRE_PCI_SET(sc, FO_PCI_DMC_DBG_SEL_PORTB, 0);

	/* PEC initialization */
	FIRE_PCI_SET(sc, FO_PCI_PEC_CORE_BLOCK_INT_EN, ~0ULL);

	/* Establish handlers for interesting interrupts. */
	if ((ino_bitmap & (1ULL << FO_DMC_PEC_INO)) != 0)
		fire_set_intr(sc, 1, FO_DMC_PEC_INO, fire_dmc_pec, sc);
	if ((ino_bitmap & (1ULL << FO_XCB_INO)) != 0)
		fire_set_intr(sc, 0, FO_XCB_INO, fire_xcb, sc);

	/* MSI/MSI-X support */
	if (OF_getprop(node, "#msi", &sc->sc_msi_count,
	    sizeof(sc->sc_msi_count)) == -1)
		panic("%s: could not determine MSI count", __func__);
	if (OF_getprop(node, "msi-ranges", &msi_ranges,
	    sizeof(msi_ranges)) == -1)
		sc->sc_msi_first = 0;
	else
		sc->sc_msi_first = msi_ranges.first;
	if (OF_getprop(node, "msi-data-mask", &sc->sc_msi_data_mask,
	    sizeof(sc->sc_msi_data_mask)) == -1)
		panic("%s: could not determine MSI data mask", __func__);
	if (OF_getprop(node, "msix-data-width", &sc->sc_msix_data_width,
	    sizeof(sc->sc_msix_data_width)) > 0)
		sc->sc_flags |= FIRE_MSIX;
	if (OF_getprop(node, "msi-address-ranges", &msi_addr_ranges,
	    sizeof(msi_addr_ranges)) == -1)
		panic("%s: could not determine MSI address ranges", __func__);
	sc->sc_msi_addr32 = OFW_PCI_MSI_ADDR_RANGE_32(&msi_addr_ranges);
	sc->sc_msi_addr64 = OFW_PCI_MSI_ADDR_RANGE_64(&msi_addr_ranges);
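	/*
	 * sc_msi_addr32/sc_msi_addr64 are the doorbell addresses that
	 * fire_map_msi() later hands out to devices.
	 */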
	if (OF_getprop(node, "#msi-eqs", &sc->sc_msiq_count,
	    sizeof(sc->sc_msiq_count)) == -1)
		panic("%s: could not determine MSI event queue count",
		    __func__);
	if (OF_getprop(node, "msi-eq-size", &sc->sc_msiq_size,
	    sizeof(sc->sc_msiq_size)) == -1)
		panic("%s: could not determine MSI event queue size",
		    __func__);
	if (OF_getprop(node, "msi-eq-to-devino", &msi_eq_to_devino,
	    sizeof(msi_eq_to_devino)) == -1 &&
	    OF_getprop(node, "msi-eq-devino", &msi_eq_to_devino,
	    sizeof(msi_eq_to_devino)) == -1) {
		sc->sc_msiq_first = 0;
		sc->sc_msiq_ino_first = FO_EQ_FIRST_INO;
	} else {
		sc->sc_msiq_first = msi_eq_to_devino.eq_first;
		sc->sc_msiq_ino_first = msi_eq_to_devino.devino_first;
	}
	if (sc->sc_msiq_ino_first < FO_EQ_FIRST_INO ||
	    sc->sc_msiq_ino_first + sc->sc_msiq_count - 1 > FO_EQ_LAST_INO)
		panic("%s: event queues exceed INO range", __func__);
	sc->sc_msi_bitmap = malloc(roundup2(sc->sc_msi_count, NBBY) / NBBY,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_msi_bitmap == NULL)
		panic("%s: could not malloc MSI bitmap", __func__);
	sc->sc_msi_msiq_table = malloc(sc->sc_msi_count *
	    sizeof(*sc->sc_msi_msiq_table), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_msi_msiq_table == NULL)
		panic("%s: could not malloc MSI-MSI event queue table",
		    __func__);
	sc->sc_msiq_bitmap = malloc(roundup2(sc->sc_msiq_count, NBBY) / NBBY,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_msiq_bitmap == NULL)
		panic("%s: could not malloc MSI event queue bitmap", __func__);
	j = FO_EQ_RECORD_SIZE * FO_EQ_NRECORDS * sc->sc_msiq_count;
	sc->sc_msiq = contigmalloc(j, M_DEVBUF, M_NOWAIT, 0, ~0UL,
	    FO_EQ_ALIGNMENT, 0);
	if (sc->sc_msiq == NULL)
		panic("%s: could not contigmalloc MSI event queue", __func__);
	memset(sc->sc_msiq, 0, j);
	FIRE_PCI_SET(sc, FO_PCI_EQ_BASE_ADDR, FO_PCI_EQ_BASE_ADDR_BYPASS |
	    (pmap_kextract((vm_offset_t)sc->sc_msiq) &
	    FO_PCI_EQ_BASE_ADDR_MASK));
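	/*
	 * Judging by the register name, the BYPASS bit indicates that the
	 * event queue base above is a physical address (hence the
	 * pmap_kextract()) to be used without IOMMU translation.
	 * Mark all MSI mappings as invalid for now; they only become
	 * valid once actually allocated.
	 */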
	for (i = 0; i < sc->sc_msi_count; i++) {
		j = (i + sc->sc_msi_first) << 3;
		FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + j,
		    FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + j) &
		    ~FO_PCI_MSI_MAP_V);
	}
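	/*
	 * For every event queue: set up its interrupt argument and ring
	 * base, clear any stale error and disable state and zero the head
	 * and tail pointers.
	 */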
	for (i = 0; i < sc->sc_msiq_count; i++) {
		j = i + sc->sc_msiq_ino_first;
		if ((ino_bitmap & (1ULL << j)) == 0) {
			mtx_lock(&sc->sc_msi_mtx);
			setbit(sc->sc_msiq_bitmap, i);
			mtx_unlock(&sc->sc_msi_mtx);
		}
		fmqa = intr_vectors[INTMAP_VEC(sc->sc_ign, j)].iv_icarg;
		mtx_init(&fmqa->fmqa_mtx, "msiq_mtx", NULL, MTX_SPIN);
		fmqa->fmqa_base =
		    (struct fo_msiq_record *)((caddr_t)sc->sc_msiq +
		    (FO_EQ_RECORD_SIZE * FO_EQ_NRECORDS * i));
		j = i + sc->sc_msiq_first;
		fmqa->fmqa_msiq = j;
		j <<= 3;
		fmqa->fmqa_head = FO_PCI_EQ_HD_BASE + j;
		fmqa->fmqa_tail = FO_PCI_EQ_TL_BASE + j;
		FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + j,
		    FO_PCI_EQ_CTRL_CLR_COVERR | FO_PCI_EQ_CTRL_CLR_E2I |
		    FO_PCI_EQ_CTRL_CLR_DIS);
		FIRE_PCI_WRITE_8(sc, fmqa->fmqa_tail,
		    (0 << FO_PCI_EQ_TL_SHFT) & FO_PCI_EQ_TL_MASK);
		FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head,
		    (0 << FO_PCI_EQ_HD_SHFT) & FO_PCI_EQ_HD_MASK);
	}
	FIRE_PCI_SET(sc, FO_PCI_MSI_32_BIT_ADDR, sc->sc_msi_addr32 &
	    FO_PCI_MSI_32_BIT_ADDR_MASK);
	FIRE_PCI_SET(sc, FO_PCI_MSI_64_BIT_ADDR, sc->sc_msi_addr64 &
	    FO_PCI_MSI_64_BIT_ADDR_MASK);

	/*
	 * Establish a handler for interesting PCIe messages and disable
	 * uninteresting ones.
	 */
	mtx_lock(&sc->sc_msi_mtx);
	for (i = 0; i < sc->sc_msiq_count; i++) {
		if (isclr(sc->sc_msiq_bitmap, i) != 0) {
			j = i;
			break;
		}
	}
	if (i == sc->sc_msiq_count) {
		mtx_unlock(&sc->sc_msi_mtx);
		panic("%s: no spare event queue for PCIe messages", __func__);
	}
	setbit(sc->sc_msiq_bitmap, j);
	mtx_unlock(&sc->sc_msi_mtx);
	i = INTMAP_VEC(sc->sc_ign, j + sc->sc_msiq_ino_first);
	if (bus_set_resource(dev, SYS_RES_IRQ, 2, i, 1) != 0)
		panic("%s: failed to add interrupt for PCIe messages",
		    __func__);
	fire_set_intr(sc, 2, INTINO(i), fire_pcie, intr_vectors[i].iv_icarg);
	j += sc->sc_msiq_first;
	/*
	 * "Please note that setting the EQNUM field to a value larger than
	 * 35 will yield unpredictable results."
	 */
	if (j > 35)
		panic("%s: invalid queue for PCIe messages (%d)",
		    __func__, j);
	FIRE_PCI_SET(sc, FO_PCI_ERR_COR, FO_PCI_ERR_PME_V |
	    ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK));
	FIRE_PCI_SET(sc, FO_PCI_ERR_NONFATAL, FO_PCI_ERR_PME_V |
	    ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK));
	FIRE_PCI_SET(sc, FO_PCI_ERR_FATAL, FO_PCI_ERR_PME_V |
	    ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK));
	FIRE_PCI_SET(sc, FO_PCI_PM_PME, 0);
	FIRE_PCI_SET(sc, FO_PCI_PME_TO_ACK, 0);
	FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_SET_BASE + (j << 3),
	    FO_PCI_EQ_CTRL_SET_EN);

#define	TC_COUNTER_MAX_MASK	0xffffffff

	/*
	 * Setup JBC/UBC performance counter 0 in bus cycle counting
	 * mode as timecounter.
	 */
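	/* The counters live in the shared JBC/UBC, so use instance 0 only. */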
	if (device_get_unit(dev) == 0) {
		FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT0, 0);
		FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT1, 0);
		FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT_SEL,
		    (FO_XBC_PRF_CNT_NONE << FO_XBC_PRF_CNT_CNT1_SHFT) |
		    (FO_XBC_PRF_CNT_XB_CLK << FO_XBC_PRF_CNT_CNT0_SHFT));
		tc = malloc(sizeof(*tc), M_DEVBUF, M_NOWAIT | M_ZERO);
		if (tc == NULL)
			panic("%s: could not malloc timecounter", __func__);
		tc->tc_get_timecount = fire_get_timecount;
		tc->tc_counter_mask = TC_COUNTER_MAX_MASK;
		if (OF_getprop(OF_peer(0), "clock-frequency", &prop,
		    sizeof(prop)) == -1)
			panic("%s: could not determine clock frequency",
			    __func__);
		tc->tc_frequency = prop;
		tc->tc_name = strdup(device_get_nameunit(dev), M_DEVBUF);
		tc->tc_priv = sc;
		/*
		 * Due to initial problems with the JBus-driven performance
		 * counters not advancing, which might be firmware-dependent,
		 * ensure that the counter actually works.
		 */
		if (fire_get_timecount(tc) - fire_get_timecount(tc) != 0)
			tc->tc_quality = FIRE_PERF_CNT_QLTY;
		else
			tc->tc_quality = -FIRE_PERF_CNT_QLTY;
		tc_init(tc);
	}

	/*
	 * Set up the IOMMU.  Both Fire and Oberon have one per PBM, but
	 * neither has a streaming buffer.
	 */
	memcpy(&sc->sc_dma_methods, &iommu_dma_methods,
	    sizeof(sc->sc_dma_methods));
	sc->sc_is.is_flags = IOMMU_FIRE | IOMMU_PRESERVE_PROM;
	if (sc->sc_mode == FIRE_MODE_OBERON) {
		sc->sc_is.is_flags |= IOMMU_FLUSH_CACHE;
		sc->sc_is.is_pmaxaddr = IOMMU_MAXADDR(OBERON_IOMMU_BITS);
	} else {
		sc->sc_dma_methods.dm_dmamap_sync = fire_dmamap_sync;
		sc->sc_is.is_pmaxaddr = IOMMU_MAXADDR(FIRE_IOMMU_BITS);
	}
	sc->sc_is.is_sb[0] = sc->sc_is.is_sb[1] = 0;
	/* Punch in our copies. */
	sc->sc_is.is_bustag = rman_get_bustag(sc->sc_mem_res[FIRE_PCI]);
	sc->sc_is.is_bushandle = rman_get_bushandle(sc->sc_mem_res[FIRE_PCI]);
	sc->sc_is.is_iommu = FO_PCI_MMU;
	val = FIRE_PCI_READ_8(sc, FO_PCI_MMU + IMR_CTL);
	iommu_init(device_get_nameunit(dev), &sc->sc_is, 7, -1, 0);
#ifdef FIRE_DEBUG
	device_printf(dev, "FO_PCI_MMU + IMR_CTL 0x%016llx -> 0x%016llx\n",
	    (long long unsigned)val, (long long unsigned)sc->sc_is.is_cr);
#endif
	/* Create our DMA tag. */
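	/*
	 * Parameters: 8-byte alignment, 4GB boundary, lowaddr and maxsize
	 * capped at the IOMMU's maximum physical address and up to 0xff
	 * segments of at most 4GB each.
	 */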
	if (bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0x100000000,
	    sc->sc_is.is_pmaxaddr, ~0, NULL, NULL, sc->sc_is.is_pmaxaddr,
	    0xff, 0xffffffff, 0, NULL, NULL, &dmat) != 0)
		panic("%s: could not create PCI DMA tag", __func__);
	dmat->dt_cookie = &sc->sc_is;
	dmat->dt_mt = &sc->sc_dma_methods;

	if (ofw_pci_attach_common(dev, dmat, FO_IO_SIZE, FO_MEM_SIZE) != 0)
		panic("%s: ofw_pci_attach_common() failed", __func__);

#define	FIRE_SYSCTL_ADD_UINT(name, arg, desc)				\
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),			\
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,	\
	    (name), CTLFLAG_RD, (arg), 0, (desc))

	FIRE_SYSCTL_ADD_UINT("ilu_err", &sc->sc_stats_ilu_err,
	    "ILU unknown errors");
	FIRE_SYSCTL_ADD_UINT("jbc_ce_async", &sc->sc_stats_jbc_ce_async,
	    "JBC correctable errors");
	FIRE_SYSCTL_ADD_UINT("jbc_unsol_int", &sc->sc_stats_jbc_unsol_int,
	    "JBC unsolicited interrupt ACK/NACK errors");
	FIRE_SYSCTL_ADD_UINT("jbc_unsol_rd", &sc->sc_stats_jbc_unsol_rd,
	    "JBC unsolicited read response errors");
	FIRE_SYSCTL_ADD_UINT("mmu_err", &sc->sc_stats_mmu_err, "MMU errors");
	FIRE_SYSCTL_ADD_UINT("tlu_ce", &sc->sc_stats_tlu_ce,
	    "DLU/TLU correctable errors");
	FIRE_SYSCTL_ADD_UINT("tlu_oe_non_fatal",
	    &sc->sc_stats_tlu_oe_non_fatal,
	    "DLU/TLU other event non-fatal errors summary");
	FIRE_SYSCTL_ADD_UINT("tlu_oe_rx_err", &sc->sc_stats_tlu_oe_rx_err,
	    "DLU/TLU receive other event errors");
	FIRE_SYSCTL_ADD_UINT("tlu_oe_tx_err", &sc->sc_stats_tlu_oe_tx_err,
	    "DLU/TLU transmit other event errors");
	FIRE_SYSCTL_ADD_UINT("ubc_dmardue", &sc->sc_stats_ubc_dmardue,
753	    "UBC DMARDUE erros");

#undef FIRE_SYSCTL_ADD_UINT

	device_add_child(dev, "pci", -1);
	return (bus_generic_attach(dev));
}

static void
fire_set_intr(struct fire_softc *sc, u_int index, u_int ino,
    driver_filter_t handler, void *arg)
{
	u_long vec;
	int rid;

	rid = index;
	sc->sc_irq_res[index] = bus_alloc_resource_any(sc->sc_dev,
	    SYS_RES_IRQ, &rid, RF_ACTIVE);
	if (sc->sc_irq_res[index] == NULL ||
	    INTINO(vec = rman_get_start(sc->sc_irq_res[index])) != ino ||
	    INTIGN(vec) != sc->sc_ign ||
	    intr_vectors[vec].iv_ic != &fire_ic ||
	    bus_setup_intr(sc->sc_dev, sc->sc_irq_res[index],
	    INTR_TYPE_MISC | INTR_BRIDGE, handler, NULL, arg,
	    &sc->sc_ihand[index]) != 0)
		panic("%s: failed to set up interrupt %d", __func__, index);
}

static int
fire_intr_register(struct fire_softc *sc, u_int ino)
{
	struct fire_icarg *fica;
	bus_addr_t intrclr, intrmap;
	int error;

	if (fire_get_intrmap(sc, ino, &intrmap, &intrclr) == 0)
		return (ENXIO);
	fica = malloc((ino >= FO_EQ_FIRST_INO && ino <= FO_EQ_LAST_INO) ?
	    sizeof(struct fire_msiqarg) : sizeof(struct fire_icarg), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (fica == NULL)
		return (ENOMEM);
	fica->fica_sc = sc;
	fica->fica_map = intrmap;
	fica->fica_clr = intrclr;
	error = (intr_controller_register(INTMAP_VEC(sc->sc_ign, ino),
	    &fire_ic, fica));
	if (error != 0)
		free(fica, M_DEVBUF);
	return (error);
}

static int
fire_get_intrmap(struct fire_softc *sc, u_int ino, bus_addr_t *intrmapptr,
    bus_addr_t *intrclrptr)
{

	if (ino > FO_MAX_INO) {
		device_printf(sc->sc_dev, "out of range INO %d requested\n",
		    ino);
		return (0);
	}

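	/* Both register arrays consist of 8-byte registers, hence INO * 8. */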
	ino <<= 3;
	if (intrmapptr != NULL)
		*intrmapptr = FO_PCI_INT_MAP_BASE + ino;
	if (intrclrptr != NULL)
		*intrclrptr = FO_PCI_INT_CLR_BASE + ino;
	return (1);
}

/*
 * Interrupt handlers
 */
static int
fire_dmc_pec(void *arg)
{
	struct fire_softc *sc;
	device_t dev;
	uint64_t cestat, dmcstat, ilustat, imustat, mcstat, mmustat, mmutfar;
	uint64_t mmutfsr, oestat, pecstat, uestat, val;
	u_int fatal, oenfatal;

	fatal = 0;
	sc = arg;
	dev = sc->sc_dev;
	mtx_lock_spin(&sc->sc_pcib_mtx);
	mcstat = FIRE_PCI_READ_8(sc, FO_PCI_MULTI_CORE_ERR_STAT);
	if ((mcstat & FO_PCI_MULTI_CORE_ERR_STAT_DMC) != 0) {
		dmcstat = FIRE_PCI_READ_8(sc, FO_PCI_DMC_CORE_BLOCK_ERR_STAT);
		if ((dmcstat & FO_PCI_DMC_CORE_BLOCK_INT_EN_IMU) != 0) {
			imustat = FIRE_PCI_READ_8(sc, FO_PCI_IMU_INT_STAT);
			device_printf(dev, "IMU error %#llx\n",
			    (unsigned long long)imustat);
			if ((imustat &
			    FO_PCI_IMU_ERR_INT_EQ_NOT_EN_P) != 0) {
				fatal = 1;
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_IMU_SCS_ERR_LOG);
				device_printf(dev, "SCS error log %#llx\n",
				    (unsigned long long)val);
			}
			if ((imustat & FO_PCI_IMU_ERR_INT_EQ_OVER_P) != 0) {
				fatal = 1;
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_IMU_EQS_ERR_LOG);
				device_printf(dev, "EQS error log %#llx\n",
				    (unsigned long long)val);
			}
			if ((imustat & (FO_PCI_IMU_ERR_INT_MSI_MAL_ERR_P |
			    FO_PCI_IMU_ERR_INT_MSI_PAR_ERR_P |
			    FO_PCI_IMU_ERR_INT_PMEACK_MES_NOT_EN_P |
			    FO_PCI_IMU_ERR_INT_PMPME_MES_NOT_EN_P |
			    FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_P |
			    FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_P |
			    FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_P |
			    FO_PCI_IMU_ERR_INT_MSI_NOT_EN_P)) != 0) {
				fatal = 1;
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_IMU_RDS_ERR_LOG);
				device_printf(dev, "RDS error log %#llx\n",
				    (unsigned long long)val);
			}
		}
		if ((dmcstat & FO_PCI_DMC_CORE_BLOCK_INT_EN_MMU) != 0) {
			mmustat = FIRE_PCI_READ_8(sc, FO_PCI_MMU_INT_STAT);
			mmutfar = FIRE_PCI_READ_8(sc,
			    FO_PCI_MMU_TRANS_FAULT_ADDR);
			mmutfsr = FIRE_PCI_READ_8(sc,
			    FO_PCI_MMU_TRANS_FAULT_STAT);
			if ((mmustat & (FO_PCI_MMU_ERR_INT_TBW_DPE_P |
			    FO_PCI_MMU_ERR_INT_TBW_ERR_P |
			    FO_PCI_MMU_ERR_INT_TBW_UDE_P |
			    FO_PCI_MMU_ERR_INT_TBW_DME_P |
			    FO_PCI_MMU_ERR_INT_TTC_CAE_P |
			    FIRE_PCI_MMU_ERR_INT_TTC_DPE_P |
			    OBERON_PCI_MMU_ERR_INT_TTC_DUE_P |
			    FO_PCI_MMU_ERR_INT_TRN_ERR_P)) != 0)
				fatal = 1;
			else {
				sc->sc_stats_mmu_err++;
				FIRE_PCI_WRITE_8(sc, FO_PCI_MMU_ERR_STAT_CLR,
				    mmustat);
			}
			device_printf(dev,
			    "MMU error %#llx: TFAR %#llx TFSR %#llx\n",
			    (unsigned long long)mmustat,
			    (unsigned long long)mmutfar,
			    (unsigned long long)mmutfsr);
		}
	}
	if ((mcstat & FO_PCI_MULTI_CORE_ERR_STAT_PEC) != 0) {
		pecstat = FIRE_PCI_READ_8(sc, FO_PCI_PEC_CORE_BLOCK_INT_STAT);
		if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_UERR) != 0) {
			fatal = 1;
			uestat = FIRE_PCI_READ_8(sc,
			    FO_PCI_TLU_UERR_INT_STAT);
			device_printf(dev,
			    "DLU/TLU uncorrectable error %#llx\n",
			    (unsigned long long)uestat);
			if ((uestat & (FO_PCI_TLU_UERR_INT_UR_P |
			    OBERON_PCI_TLU_UERR_INT_POIS_P |
			    FO_PCI_TLU_UERR_INT_MFP_P |
			    FO_PCI_TLU_UERR_INT_ROF_P |
			    FO_PCI_TLU_UERR_INT_UC_P |
			    FIRE_PCI_TLU_UERR_INT_PP_P)) != 0) {
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_TLU_RX_UERR_HDR1_LOG);
				device_printf(dev,
				    "receive header log %#llx\n",
				    (unsigned long long)val);
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_TLU_RX_UERR_HDR2_LOG);
				device_printf(dev,
				    "receive header log 2 %#llx\n",
				    (unsigned long long)val);
			}
			if ((uestat & FO_PCI_TLU_UERR_INT_CTO_P) != 0) {
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_TLU_TX_UERR_HDR1_LOG);
				device_printf(dev,
				    "transmit header log %#llx\n",
				    (unsigned long long)val);
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_TLU_TX_UERR_HDR2_LOG);
				device_printf(dev,
				    "transmit header log 2 %#llx\n",
				    (unsigned long long)val);
			}
			if ((uestat & FO_PCI_TLU_UERR_INT_DLP_P) != 0) {
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_LPU_LNK_LYR_INT_STAT);
				device_printf(dev,
				    "link layer interrupt and status %#llx\n",
				    (unsigned long long)val);
			}
			if ((uestat & FO_PCI_TLU_UERR_INT_TE_P) != 0) {
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_LPU_PHY_LYR_INT_STAT);
				device_printf(dev,
				    "phy layer interrupt and status %#llx\n",
				    (unsigned long long)val);
			}
		}
		if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_CERR) != 0) {
			sc->sc_stats_tlu_ce++;
			cestat = FIRE_PCI_READ_8(sc,
			    FO_PCI_TLU_CERR_INT_STAT);
			device_printf(dev,
			    "DLU/TLU correctable error %#llx\n",
			    (unsigned long long)cestat);
			val = FIRE_PCI_READ_8(sc,
			    FO_PCI_LPU_LNK_LYR_INT_STAT);
			device_printf(dev,
			    "link layer interrupt and status %#llx\n",
			    (unsigned long long)val);
			if ((cestat & FO_PCI_TLU_CERR_INT_RE_P) != 0) {
				FIRE_PCI_WRITE_8(sc,
				    FO_PCI_LPU_LNK_LYR_INT_STAT, val);
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_LPU_PHY_LYR_INT_STAT);
				device_printf(dev,
				    "phy layer interrupt and status %#llx\n",
				    (unsigned long long)val);
			}
			FIRE_PCI_WRITE_8(sc, FO_PCI_TLU_CERR_STAT_CLR,
			    cestat);
		}
		if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_OEVENT) != 0) {
			oenfatal = 0;
			oestat = FIRE_PCI_READ_8(sc,
			    FO_PCI_TLU_OEVENT_INT_STAT);
			device_printf(dev, "DLU/TLU other event %#llx\n",
			    (unsigned long long)oestat);
			if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
			    FO_PCI_TLU_OEVENT_MRC_P |
			    FO_PCI_TLU_OEVENT_WUC_P |
			    FO_PCI_TLU_OEVENT_RUC_P |
			    FO_PCI_TLU_OEVENT_CRS_P)) != 0) {
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_TLU_RX_OEVENT_HDR1_LOG);
				device_printf(dev,
				    "receive header log %#llx\n",
				    (unsigned long long)val);
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_TLU_RX_OEVENT_HDR2_LOG);
				device_printf(dev,
				    "receive header log 2 %#llx\n",
				    (unsigned long long)val);
				if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
				    FO_PCI_TLU_OEVENT_MRC_P |
				    FO_PCI_TLU_OEVENT_WUC_P |
				    FO_PCI_TLU_OEVENT_RUC_P)) != 0)
					fatal = 1;
				else {
					sc->sc_stats_tlu_oe_rx_err++;
					oenfatal = 1;
				}
			}
			if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
			    FO_PCI_TLU_OEVENT_CTO_P |
			    FO_PCI_TLU_OEVENT_WUC_P |
			    FO_PCI_TLU_OEVENT_RUC_P)) != 0) {
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_TLU_TX_OEVENT_HDR1_LOG);
				device_printf(dev,
				    "transmit header log %#llx\n",
				    (unsigned long long)val);
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_TLU_TX_OEVENT_HDR2_LOG);
				device_printf(dev,
				    "transmit header log 2 %#llx\n",
				    (unsigned long long)val);
				if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P |
				    FO_PCI_TLU_OEVENT_CTO_P |
				    FO_PCI_TLU_OEVENT_WUC_P |
				    FO_PCI_TLU_OEVENT_RUC_P)) != 0)
					fatal = 1;
				else {
					sc->sc_stats_tlu_oe_tx_err++;
					oenfatal = 1;
				}
			}
			if ((oestat & (FO_PCI_TLU_OEVENT_ERO_P |
			    FO_PCI_TLU_OEVENT_EMP_P |
			    FO_PCI_TLU_OEVENT_EPE_P |
			    FIRE_PCI_TLU_OEVENT_ERP_P |
			    OBERON_PCI_TLU_OEVENT_ERBU_P |
			    FIRE_PCI_TLU_OEVENT_EIP_P |
			    OBERON_PCI_TLU_OEVENT_EIUE_P)) != 0) {
				fatal = 1;
				val = FIRE_PCI_READ_8(sc,
				    FO_PCI_LPU_LNK_LYR_INT_STAT);
				device_printf(dev,
				    "link layer interrupt and status %#llx\n",
				    (unsigned long long)val);
			}
			if ((oestat & (FO_PCI_TLU_OEVENT_IIP_P |
			    FO_PCI_TLU_OEVENT_EDP_P |
			    FIRE_PCI_TLU_OEVENT_EHP_P |
			    OBERON_PCI_TLU_OEVENT_TLUEITMO_S |
			    FO_PCI_TLU_OEVENT_ERU_P)) != 0)
				fatal = 1;
			if ((oestat & (FO_PCI_TLU_OEVENT_NFP_P |
			    FO_PCI_TLU_OEVENT_LWC_P |
			    FO_PCI_TLU_OEVENT_LIN_P |
			    FO_PCI_TLU_OEVENT_LRS_P |
			    FO_PCI_TLU_OEVENT_LDN_P |
			    FO_PCI_TLU_OEVENT_LUP_P)) != 0)
				oenfatal = 1;
			if (oenfatal != 0) {
				sc->sc_stats_tlu_oe_non_fatal++;
				FIRE_PCI_WRITE_8(sc,
				    FO_PCI_TLU_OEVENT_STAT_CLR, oestat);
				if ((oestat & FO_PCI_TLU_OEVENT_LIN_P) != 0)
					FIRE_PCI_WRITE_8(sc,
					    FO_PCI_LPU_LNK_LYR_INT_STAT,
					    FIRE_PCI_READ_8(sc,
					    FO_PCI_LPU_LNK_LYR_INT_STAT));
			}
		}
		if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_ILU) != 0) {
			ilustat = FIRE_PCI_READ_8(sc, FO_PCI_ILU_INT_STAT);
			device_printf(dev, "ILU error %#llx\n",
			    (unsigned long long)ilustat);
			if ((ilustat & FIRE_PCI_ILU_ERR_INT_IHB_PE_P) != 0)
				fatal = 1;
			else {
				sc->sc_stats_ilu_err++;
				FIRE_PCI_WRITE_8(sc, FO_PCI_ILU_INT_STAT,
				    ilustat);
			}
		}
	}
	mtx_unlock_spin(&sc->sc_pcib_mtx);
	if (fatal != 0)
		panic("%s: fatal DMC/PEC error",
		    device_get_nameunit(sc->sc_dev));
	return (FILTER_HANDLED);
}

static int
fire_xcb(void *arg)
{
	struct fire_softc *sc;
	device_t dev;
	uint64_t errstat, intstat, val;
	u_int fatal;

	fatal = 0;
	sc = arg;
	dev = sc->sc_dev;
	mtx_lock_spin(&sc->sc_pcib_mtx);
	if (sc->sc_mode == FIRE_MODE_OBERON) {
		intstat = FIRE_CTRL_READ_8(sc, FO_XBC_INT_STAT);
		device_printf(dev, "UBC error: interrupt status %#llx\n",
		    (unsigned long long)intstat);
		if ((intstat & ~(OBERON_UBC_ERR_INT_DMARDUEB_P |
		    OBERON_UBC_ERR_INT_DMARDUEA_P)) != 0)
			fatal = 1;
		else
			sc->sc_stats_ubc_dmardue++;
		if (fatal != 0) {
			mtx_unlock_spin(&sc->sc_pcib_mtx);
			panic("%s: fatal UBC core block error",
			    device_get_nameunit(sc->sc_dev));
		} else {
			FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL);
			mtx_unlock_spin(&sc->sc_pcib_mtx);
		}
	} else {
		errstat = FIRE_CTRL_READ_8(sc, FIRE_JBC_CORE_BLOCK_ERR_STAT);
		if ((errstat & (FIRE_JBC_CORE_BLOCK_ERR_STAT_MERGE |
		    FIRE_JBC_CORE_BLOCK_ERR_STAT_JBCINT |
		    FIRE_JBC_CORE_BLOCK_ERR_STAT_DMCINT)) != 0) {
			intstat = FIRE_CTRL_READ_8(sc, FO_XBC_INT_STAT);
			device_printf(dev, "JBC interrupt status %#llx\n",
			    (unsigned long long)intstat);
			if ((intstat & FIRE_JBC_ERR_INT_EBUS_TO_P) != 0) {
				val = FIRE_CTRL_READ_8(sc,
				    FIRE_JBC_CSR_ERR_LOG);
				device_printf(dev, "CSR error log %#llx\n",
				    (unsigned long long)val);
			}
			if ((intstat & (FIRE_JBC_ERR_INT_UNSOL_RD_P |
			    FIRE_JBC_ERR_INT_UNSOL_INT_P)) != 0) {
				if ((intstat &
				    FIRE_JBC_ERR_INT_UNSOL_RD_P) != 0)
					sc->sc_stats_jbc_unsol_rd++;
				if ((intstat &
				    FIRE_JBC_ERR_INT_UNSOL_INT_P) != 0)
					sc->sc_stats_jbc_unsol_int++;
				val = FIRE_CTRL_READ_8(sc,
				    FIRE_DMCINT_IDC_ERR_LOG);
				device_printf(dev,
				    "DMCINT IDC error log %#llx\n",
				    (unsigned long long)val);
			}
			if ((intstat & (FIRE_JBC_ERR_INT_MB_PER_P |
			    FIRE_JBC_ERR_INT_MB_PEW_P)) != 0) {
				fatal = 1;
				val = FIRE_CTRL_READ_8(sc,
				    FIRE_MERGE_TRANS_ERR_LOG);
				device_printf(dev,
				    "merge transaction error log %#llx\n",
				    (unsigned long long)val);
			}
			if ((intstat & FIRE_JBC_ERR_INT_IJP_P) != 0) {
				fatal = 1;
				val = FIRE_CTRL_READ_8(sc,
				    FIRE_JBCINT_OTRANS_ERR_LOG);
				device_printf(dev,
				    "JBCINT out transaction error log "
				    "%#llx\n", (unsigned long long)val);
				val = FIRE_CTRL_READ_8(sc,
				    FIRE_JBCINT_OTRANS_ERR_LOG2);
				device_printf(dev,
				    "JBCINT out transaction error log 2 "
				    "%#llx\n", (unsigned long long)val);
			}
			if ((intstat & (FIRE_JBC_ERR_INT_UE_ASYN_P |
			    FIRE_JBC_ERR_INT_CE_ASYN_P |
			    FIRE_JBC_ERR_INT_JTE_P | FIRE_JBC_ERR_INT_JBE_P |
			    FIRE_JBC_ERR_INT_JUE_P |
			    FIRE_JBC_ERR_INT_ICISE_P |
			    FIRE_JBC_ERR_INT_WR_DPE_P |
			    FIRE_JBC_ERR_INT_RD_DPE_P |
			    FIRE_JBC_ERR_INT_ILL_BMW_P |
			    FIRE_JBC_ERR_INT_ILL_BMR_P |
			    FIRE_JBC_ERR_INT_BJC_P)) != 0) {
				if ((intstat & (FIRE_JBC_ERR_INT_UE_ASYN_P |
				    FIRE_JBC_ERR_INT_JTE_P |
				    FIRE_JBC_ERR_INT_JBE_P |
				    FIRE_JBC_ERR_INT_JUE_P |
				    FIRE_JBC_ERR_INT_ICISE_P |
				    FIRE_JBC_ERR_INT_WR_DPE_P |
				    FIRE_JBC_ERR_INT_RD_DPE_P |
				    FIRE_JBC_ERR_INT_ILL_BMW_P |
				    FIRE_JBC_ERR_INT_ILL_BMR_P |
				    FIRE_JBC_ERR_INT_BJC_P)) != 0)
					fatal = 1;
				else
					sc->sc_stats_jbc_ce_async++;
				val = FIRE_CTRL_READ_8(sc,
				    FIRE_JBCINT_ITRANS_ERR_LOG);
				device_printf(dev,
				    "JBCINT in transaction error log %#llx\n",
				    (unsigned long long)val);
				val = FIRE_CTRL_READ_8(sc,
				    FIRE_JBCINT_ITRANS_ERR_LOG2);
				device_printf(dev,
				    "JBCINT in transaction error log 2 "
				    "%#llx\n", (unsigned long long)val);
			}
			if ((intstat & (FIRE_JBC_ERR_INT_PIO_UNMAP_RD_P |
			    FIRE_JBC_ERR_INT_ILL_ACC_RD_P |
			    FIRE_JBC_ERR_INT_PIO_UNMAP_P |
			    FIRE_JBC_ERR_INT_PIO_DPE_P |
			    FIRE_JBC_ERR_INT_PIO_CPE_P |
			    FIRE_JBC_ERR_INT_ILL_ACC_P)) != 0) {
				fatal = 1;
				val = FIRE_CTRL_READ_8(sc,
				    FIRE_DMCINT_ODCD_ERR_LOG);
				device_printf(dev,
				    "DMCINT ODCD error log %#llx\n",
				    (unsigned long long)val);
			}
			if ((intstat & (FIRE_JBC_ERR_INT_MB_PEA_P |
			    FIRE_JBC_ERR_INT_CPE_P | FIRE_JBC_ERR_INT_APE_P |
			    FIRE_JBC_ERR_INT_PIO_CPE_P |
			    FIRE_JBC_ERR_INT_JTCEEW_P |
			    FIRE_JBC_ERR_INT_JTCEEI_P |
			    FIRE_JBC_ERR_INT_JTCEER_P)) != 0) {
				fatal = 1;
				val = FIRE_CTRL_READ_8(sc,
				    FIRE_FATAL_ERR_LOG);
				device_printf(dev, "fatal error log %#llx\n",
				    (unsigned long long)val);
				val = FIRE_CTRL_READ_8(sc,
				    FIRE_FATAL_ERR_LOG2);
				device_printf(dev, "fatal error log 2 "
				    "%#llx\n", (unsigned long long)val);
			}
			if (fatal != 0) {
				mtx_unlock_spin(&sc->sc_pcib_mtx);
				panic("%s: fatal JBC core block error",
				    device_get_nameunit(sc->sc_dev));
			} else {
				FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL);
				mtx_unlock_spin(&sc->sc_pcib_mtx);
			}
		} else {
			mtx_unlock_spin(&sc->sc_pcib_mtx);
			panic("%s: unknown JBC core block error status %#llx",
			    device_get_nameunit(sc->sc_dev),
			    (unsigned long long)errstat);
		}
	}
	return (FILTER_HANDLED);
}

static int
fire_pcie(void *arg)
{
	struct fire_msiqarg *fmqa;
	struct fire_softc *sc;
	struct fo_msiq_record *qrec;
	device_t dev;
	uint64_t word0;
	u_int head, msg, msiq;

	fmqa = arg;
	sc = fmqa->fmqa_fica.fica_sc;
	dev = sc->sc_dev;
	msiq = fmqa->fmqa_msiq;
	mtx_lock_spin(&fmqa->fmqa_mtx);
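	/*
	 * Walk the ring starting at the current head, consuming records
	 * until we reach one whose format/type field is still clear, i.e.
	 * one not yet written by the hardware, and then store the new head
	 * back.
	 */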
	head = (FIRE_PCI_READ_8(sc, fmqa->fmqa_head) & FO_PCI_EQ_HD_MASK) >>
	    FO_PCI_EQ_HD_SHFT;
	qrec = &fmqa->fmqa_base[head];
	word0 = qrec->fomqr_word0;
	for (;;) {
		KASSERT((word0 & FO_MQR_WORD0_FMT_TYPE_MSG) != 0,
		    ("%s: received non-PCIe message in event queue %d "
		    "(word0 %#llx)", device_get_nameunit(dev), msiq,
		    (unsigned long long)word0));
		msg = (word0 & FO_MQR_WORD0_DATA0_MASK) >>
		    FO_MQR_WORD0_DATA0_SHFT;

#define	PCIE_MSG_CODE_ERR_COR		0x30
#define	PCIE_MSG_CODE_ERR_NONFATAL	0x31
#define	PCIE_MSG_CODE_ERR_FATAL		0x33

		if (msg == PCIE_MSG_CODE_ERR_COR)
			device_printf(dev, "correctable PCIe error\n");
		else if (msg == PCIE_MSG_CODE_ERR_NONFATAL ||
		    msg == PCIE_MSG_CODE_ERR_FATAL)
			panic("%s: %sfatal PCIe error",
			    device_get_nameunit(dev),
			    msg == PCIE_MSG_CODE_ERR_NONFATAL ? "non-" : "");
		else
			panic("%s: received unknown PCIe message %#x",
			    device_get_nameunit(dev), msg);
		qrec->fomqr_word0 &= ~FO_MQR_WORD0_FMT_TYPE_MASK;
		head = (head + 1) % sc->sc_msiq_size;
		qrec = &fmqa->fmqa_base[head];
		word0 = qrec->fomqr_word0;
		if (__predict_true((word0 & FO_MQR_WORD0_FMT_TYPE_MASK) == 0))
			break;
	}
	FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head, (head & FO_PCI_EQ_HD_MASK) <<
	    FO_PCI_EQ_HD_SHFT);
	if ((FIRE_PCI_READ_8(sc, fmqa->fmqa_tail) &
	    FO_PCI_EQ_TL_OVERR) != 0) {
		device_printf(dev, "event queue %d overflow\n", msiq);
		msiq <<= 3;
		FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq,
		    FIRE_PCI_READ_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq) |
		    FO_PCI_EQ_CTRL_CLR_COVERR);
	}
	mtx_unlock_spin(&fmqa->fmqa_mtx);
	return (FILTER_HANDLED);
}

static int
fire_maxslots(device_t dev)
{

	return (1);
}

static uint32_t
fire_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg,
    int width)
{

	return (ofw_pci_read_config_common(dev, PCIE_REGMAX, FO_CONF_OFF(bus,
	    slot, func, reg), bus, slot, func, reg, width));
}

static void
fire_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg,
    uint32_t val, int width)
{

	ofw_pci_write_config_common(dev, PCIE_REGMAX, FO_CONF_OFF(bus, slot,
	    func, reg), bus, slot, func, reg, val, width);
}

static int
fire_route_interrupt(device_t bridge, device_t dev, int pin)
{
	ofw_pci_intr_t mintr;

	mintr = ofw_pci_route_interrupt_common(bridge, dev, pin);
	if (!PCI_INTERRUPT_VALID(mintr))
		device_printf(bridge,
		    "could not route pin %d for device %d.%d\n",
		    pin, pci_get_slot(dev), pci_get_function(dev));
	return (mintr);
}

static void
fire_dmamap_sync(bus_dma_tag_t dt __unused, bus_dmamap_t map,
    bus_dmasync_op_t op)
{

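	/*
	 * Only Fire uses this method; Oberon keeps the stock IOMMU sync
	 * method (see fire_attach()).
	 */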
	if ((map->dm_flags & DMF_LOADED) == 0)
		return;

	if ((op & BUS_DMASYNC_POSTREAD) != 0)
		ofw_pci_dmamap_sync_stst_order_common();
	else if ((op & BUS_DMASYNC_PREWRITE) != 0)
		membar(Sync);
}

static void
fire_intr_enable(void *arg)
{
	struct intr_vector *iv;
	struct fire_icarg *fica;
	struct fire_softc *sc;
	struct pcpu *pc;
	uint64_t mr;
	u_int ctrl, i;

	iv = arg;
	fica = iv->iv_icarg;
	sc = fica->fica_sc;
	mr = FO_PCI_IMAP_V;
	if (sc->sc_mode == FIRE_MODE_OBERON)
		mr |= (iv->iv_mid << OBERON_PCI_IMAP_T_DESTID_SHFT) &
		    OBERON_PCI_IMAP_T_DESTID_MASK;
	else
		mr |= (iv->iv_mid << FIRE_PCI_IMAP_T_JPID_SHFT) &
		    FIRE_PCI_IMAP_T_JPID_MASK;
	/*
	 * Given that all mondos for the same target are required to use the
	 * same interrupt controller we just use the CPU ID for indexing the
	 * latter.
	 */
	ctrl = 0;
	for (i = 0; i < mp_ncpus; ++i) {
		pc = pcpu_find(i);
		if (pc == NULL || iv->iv_mid != pc->pc_mid)
			continue;
		ctrl = pc->pc_cpuid % 4;
		break;
	}
	mr |= (1ULL << ctrl) << FO_PCI_IMAP_INT_CTRL_NUM_SHFT &
	    FO_PCI_IMAP_INT_CTRL_NUM_MASK;
	FIRE_PCI_WRITE_8(sc, fica->fica_map, mr);
}

static void
fire_intr_disable(void *arg)
{
	struct intr_vector *iv;
	struct fire_icarg *fica;
	struct fire_softc *sc;

	iv = arg;
	fica = iv->iv_icarg;
	sc = fica->fica_sc;
	FIRE_PCI_WRITE_8(sc, fica->fica_map,
	    FIRE_PCI_READ_8(sc, fica->fica_map) & ~FO_PCI_IMAP_V);
}

static void
fire_intr_assign(void *arg)
{
	struct intr_vector *iv;
	struct fire_icarg *fica;
	struct fire_softc *sc;
	uint64_t mr;

	iv = arg;
	fica = iv->iv_icarg;
	sc = fica->fica_sc;
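	/*
	 * Retargeting: momentarily invalidate the mapping, wait for any
	 * pending interrupt to drain to idle and then re-enable the mapping
	 * with the new target taken from the interrupt vector.
	 */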
	mr = FIRE_PCI_READ_8(sc, fica->fica_map);
	if ((mr & FO_PCI_IMAP_V) != 0) {
		FIRE_PCI_WRITE_8(sc, fica->fica_map, mr & ~FO_PCI_IMAP_V);
		FIRE_PCI_BARRIER(sc, fica->fica_map, 8,
		    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	}
	while (FIRE_PCI_READ_8(sc, fica->fica_clr) != INTCLR_IDLE)
		;
	if ((mr & FO_PCI_IMAP_V) != 0)
		fire_intr_enable(arg);
}

static void
fire_intr_clear(void *arg)
{
	struct intr_vector *iv;
	struct fire_icarg *fica;

	iv = arg;
	fica = iv->iv_icarg;
	FIRE_PCI_WRITE_8(fica->fica_sc, fica->fica_clr, INTCLR_IDLE);
}

/*
 * Given that the event queue implementation matches our current MD and MI
 * interrupt frameworks the way square pegs fit into round holes, we are
 * generous and use one event queue per MSI for now, which limits us to 35
 * MSIs/MSI-Xs per Host-PCIe-bridge (we use one event queue for the PCIe
 * error messages).  This seems tolerable as long as most devices just use
 * one MSI/MSI-X anyway.  Adding knowledge about MSIs/MSI-Xs to the MD
 * interrupt code should allow us to decouple the 1:1 mapping at the cost
 * of no longer being able to bind MSIs/MSI-Xs to specific CPUs as we
 * currently have no reliable way to quiesce a device while we move its
 * MSIs/MSI-Xs to another event queue.
 */

static int
fire_alloc_msi(device_t dev, device_t child, int count, int maxcount __unused,
    int *irqs)
{
	struct fire_softc *sc;
	u_int i, j, msiqrun;

	if (powerof2(count) == 0 || count > 32)
		return (EINVAL);

	sc = device_get_softc(dev);
	mtx_lock(&sc->sc_msi_mtx);
	msiqrun = 0;
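	/*
	 * First look for a run of count free event queues, then for a run
	 * of count free MSIs starting at a multiple of count, and bind the
	 * two 1:1.
	 */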
1482	for (i = 0; i < sc->sc_msiq_count; i++) {
1483		for (j = i; j < i + count; j++) {
1484			if (isclr(sc->sc_msiq_bitmap, j) == 0)
1485				break;
1486		}
1487		if (j == i + count) {
1488			msiqrun = i;
1489			break;
1490		}
1491	}
1492	if (i == sc->sc_msiq_count) {
1493		mtx_unlock(&sc->sc_msi_mtx);
1494		return (ENXIO);
1495	}
1496	for (i = 0; i + count < sc->sc_msi_count; i += count) {
1497		for (j = i; j < i + count; j++)
1498			if (isclr(sc->sc_msi_bitmap, j) == 0)
1499				break;
1500		if (j == i + count) {
1501			for (j = 0; j < count; j++) {
1502				setbit(sc->sc_msiq_bitmap, msiqrun + j);
1503				setbit(sc->sc_msi_bitmap, i + j);
1504				sc->sc_msi_msiq_table[i + j] = msiqrun + j;
1505				irqs[j] = sc->sc_msi_first + i + j;
1506			}
1507			mtx_unlock(&sc->sc_msi_mtx);
1508			return (0);
1509		}
1510	}
1511	mtx_unlock(&sc->sc_msi_mtx);
1512	return (ENXIO);
1513}
1514
1515static int
1516fire_release_msi(device_t dev, device_t child, int count, int *irqs)
1517{
1518	struct fire_softc *sc;
1519	u_int i;
1520
1521	sc = device_get_softc(dev);
1522	mtx_lock(&sc->sc_msi_mtx);
1523	for (i = 0; i < count; i++) {
1524		clrbit(sc->sc_msiq_bitmap,
1525		    sc->sc_msi_msiq_table[irqs[i] - sc->sc_msi_first]);
1526		clrbit(sc->sc_msi_bitmap, irqs[i] - sc->sc_msi_first);
1527	}
1528	mtx_unlock(&sc->sc_msi_mtx);
1529	return (0);
1530}
1531
1532static int
1533fire_alloc_msix(device_t dev, device_t child, int *irq)
1534{
1535	struct fire_softc *sc;
1536	int i, msiq;
1537
1538	sc = device_get_softc(dev);
1539	if ((sc->sc_flags & FIRE_MSIX) == 0)
1540		return (ENXIO);
1541	mtx_lock(&sc->sc_msi_mtx);
1542	msiq = 0;
1543	for (i = 0; i < sc->sc_msiq_count; i++) {
1544		if (isclr(sc->sc_msiq_bitmap, i) != 0) {
1545			msiq = i;
1546			break;
1547		}
1548	}
1549	if (i == sc->sc_msiq_count) {
1550		mtx_unlock(&sc->sc_msi_mtx);
1551		return (ENXIO);
1552	}
1553	for (i = sc->sc_msi_count - 1; i >= 0; i--) {
1554		if (isclr(sc->sc_msi_bitmap, i) != 0) {
1555			setbit(sc->sc_msiq_bitmap, msiq);
1556			setbit(sc->sc_msi_bitmap, i);
1557			sc->sc_msi_msiq_table[i] = msiq;
1558			*irq = sc->sc_msi_first + i;
1559			mtx_unlock(&sc->sc_msi_mtx);
1560			return (0);
1561		}
1562	}
1563	mtx_unlock(&sc->sc_msi_mtx);
1564	return (ENXIO);
1565}
1566
1567static int
1568fire_release_msix(device_t dev, device_t child, int irq)
1569{
1570	struct fire_softc *sc;
1571
1572	sc = device_get_softc(dev);
1573	if ((sc->sc_flags & FIRE_MSIX) == 0)
1574		return (ENXIO);
1575	mtx_lock(&sc->sc_msi_mtx);
1576	clrbit(sc->sc_msiq_bitmap,
1577	    sc->sc_msi_msiq_table[irq - sc->sc_msi_first]);
1578	clrbit(sc->sc_msi_bitmap, irq - sc->sc_msi_first);
1579	mtx_unlock(&sc->sc_msi_mtx);
1580	return (0);
1581}
1582
1583static int
1584fire_map_msi(device_t dev, device_t child, int irq, uint64_t *addr,
1585    uint32_t *data)
1586{
1587	struct fire_softc *sc;
1588	struct pci_devinfo *dinfo;
1589
1590	sc = device_get_softc(dev);
1591	dinfo = device_get_ivars(child);
1592	if (dinfo->cfg.msi.msi_alloc > 0) {
1593		if ((irq & ~sc->sc_msi_data_mask) != 0) {
1594			device_printf(dev, "invalid MSI 0x%x\n", irq);
1595			return (EINVAL);
1596		}
1597	} else {
1598		if ((sc->sc_flags & FIRE_MSIX) == 0)
1599			return (ENXIO);
1600		if (fls(irq) > sc->sc_msix_data_width) {
1601			device_printf(dev, "invalid MSI-X 0x%x\n", irq);
1602			return (EINVAL);
1603		}
1604	}
	if (dinfo->cfg.msi.msi_alloc > 0 &&
	    (dinfo->cfg.msi.msi_ctrl & PCIM_MSICTRL_64BIT) == 0)
		*addr = sc->sc_msi_addr32;
	else
		*addr = sc->sc_msi_addr64;
	*data = irq;
	return (0);
}

static void
fire_msiq_handler(void *cookie)
{
	struct intr_vector *iv;
	struct fire_msiqarg *fmqa;

	iv = cookie;
	fmqa = iv->iv_icarg;
	/*
	 * Note that, since fire_intr_clear() will clear the event queue
	 * interrupt after the handler associated with the MSI [sic] has
	 * been executed, we have to protect the access to the event queue;
	 * otherwise, nested event queue interrupts corrupt the event queue
	 * on MP machines.  Especially when abandoning the 1:1 mapping, it
	 * would obviously be better to clear the event queue interrupt only
	 * once after all outstanding MSIs have been processed, rather than
	 * after each handler invocation.  Unfortunately, that doesn't work
	 * well and leads to interrupt storms with controllers/drivers which
	 * don't mask interrupts while their handler is executed.  Delaying
	 * the clearing of the MSI until after the handler has been executed
	 * might work around this, but that's not the intended usage and
	 * might in turn cause lost MSIs.
	 */
	mtx_lock_spin(&fmqa->fmqa_mtx);
	fire_msiq_common(iv, fmqa);
	mtx_unlock_spin(&fmqa->fmqa_mtx);
}

static void
fire_msiq_filter(void *cookie)
{
	struct intr_vector *iv;
	struct fire_msiqarg *fmqa;

	iv = cookie;
	fmqa = iv->iv_icarg;
	/*
	 * For filters we don't use fire_intr_clear() since it would clear
	 * the event queue interrupt while we're still processing the event
	 * queue, as filters and their associated post-filter handlers are
	 * executed directly; that in turn would lead to lost MSIs.  So we
	 * clear the event queue interrupt only once, after the event queue
	 * has been processed.  Given that this still guarantees that the
	 * filters are not executed concurrently, and that no other CPU can
	 * clear the event queue interrupt while the event queue is still
	 * being processed, we don't even need to interlock the access to
	 * the event queue in this case.
	 */
	critical_enter();
	fire_msiq_common(iv, fmqa);
	FIRE_PCI_WRITE_8(fmqa->fmqa_fica.fica_sc, fmqa->fmqa_fica.fica_clr,
	    INTCLR_IDLE);
	critical_exit();
}

static inline void
fire_msiq_common(struct intr_vector *iv, struct fire_msiqarg *fmqa)
{
	struct fire_softc *sc;
	struct fo_msiq_record *qrec;
	device_t dev;
	uint64_t word0;
	u_int head, msi, msiq;

	sc = fmqa->fmqa_fica.fica_sc;
	dev = sc->sc_dev;
	msiq = fmqa->fmqa_msiq;
	head = (FIRE_PCI_READ_8(sc, fmqa->fmqa_head) & FO_PCI_EQ_HD_MASK) >>
	    FO_PCI_EQ_HD_SHFT;
	qrec = &fmqa->fmqa_base[head];
	word0 = qrec->fomqr_word0;
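	/*
	 * Dequeue records starting at the current head until we hit one
	 * whose format/type field is still clear, i.e. a slot the hardware
	 * hasn't filled in yet.
	 */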
	for (;;) {
		if (__predict_false((word0 & FO_MQR_WORD0_FMT_TYPE_MASK) == 0))
			break;
		KASSERT((word0 & FO_MQR_WORD0_FMT_TYPE_MSI64) != 0 ||
		    (word0 & FO_MQR_WORD0_FMT_TYPE_MSI32) != 0,
		    ("%s: received non-MSI/MSI-X message in event queue %d "
		    "(word0 %#llx)", device_get_nameunit(dev), msiq,
		    (unsigned long long)word0));
		msi = (word0 & FO_MQR_WORD0_DATA0_MASK) >>
		    FO_MQR_WORD0_DATA0_SHFT;
		/*
		 * Sanity check the MSI/MSI-X as long as we use a 1:1 mapping.
		 */
		KASSERT(msi == fmqa->fmqa_msi,
		    ("%s: received non-matching MSI/MSI-X in event queue %d "
		    "(%d versus %d)", device_get_nameunit(dev), msiq, msi,
		    fmqa->fmqa_msi));
		FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_CLR_BASE + (msi << 3),
		    FO_PCI_MSI_CLR_EQWR_N);
		if (__predict_false(intr_event_handle(iv->iv_event,
		    NULL) != 0))
			printf("stray MSI/MSI-X in event queue %d\n", msiq);
		qrec->fomqr_word0 &= ~FO_MQR_WORD0_FMT_TYPE_MASK;
		head = (head + 1) % sc->sc_msiq_size;
		qrec = &fmqa->fmqa_base[head];
		word0 = qrec->fomqr_word0;
	}
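	/*
	 * Update the head register so the hardware can reuse the consumed
	 * slots, and clear a queue overflow if one is flagged in the tail
	 * register.
	 */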
	FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head, (head & FO_PCI_EQ_HD_MASK) <<
	    FO_PCI_EQ_HD_SHFT);
	if (__predict_false((FIRE_PCI_READ_8(sc, fmqa->fmqa_tail) &
	    FO_PCI_EQ_TL_OVERR) != 0)) {
		device_printf(dev, "event queue %d overflow\n", msiq);
		msiq <<= 3;
		FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq,
		    FIRE_PCI_READ_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq) |
		    FO_PCI_EQ_CTRL_CLR_COVERR);
	}
}

static int
fire_setup_intr(device_t dev, device_t child, struct resource *ires,
    int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg,
    void **cookiep)
{
	struct fire_softc *sc;
	struct fire_msiqarg *fmqa;
	u_long vec;
	int error;
	u_int msi, msiq;

	sc = device_get_softc(dev);
	/*
	 * XXX this assumes that a device only has one INTx, while in fact
	 * Cassini+ and Saturn can use all four INTx lines the firmware has
	 * assigned to them; pci(4) makes the same assumption, though.
	 */
	if (rman_get_rid(ires) != 0) {
		msi = rman_get_start(ires);
		msiq = sc->sc_msi_msiq_table[msi - sc->sc_msi_first];
		vec = INTMAP_VEC(sc->sc_ign, sc->sc_msiq_ino_first + msiq);
		msiq += sc->sc_msiq_first;
		if (intr_vectors[vec].iv_ic != &fire_ic) {
			device_printf(dev,
			    "invalid interrupt controller for vector 0x%lx\n",
			    vec);
			return (EINVAL);
		}
		/*
		 * The MD interrupt code needs the vector rather than the MSI.
		 */
		rman_set_start(ires, vec);
		rman_set_end(ires, vec);
		error = bus_generic_setup_intr(dev, child, ires, flags, filt,
		    intr, arg, cookiep);
		rman_set_start(ires, msi);
		rman_set_end(ires, msi);
		if (error != 0)
			return (error);
		fmqa = intr_vectors[vec].iv_icarg;
		/*
		 * XXX inject our event queue handler.
		 */
		if (filt != NULL) {
			intr_vectors[vec].iv_func = fire_msiq_filter;
			intr_vectors[vec].iv_ic = &fire_msiqc_filter;
			/*
			 * Ensure the event queue interrupt is cleared; it
			 * might have triggered before.  Given that we supply
			 * NULL as ic_clear, inthand_add() won't do this for
			 * us.
			 */
			FIRE_PCI_WRITE_8(sc, fmqa->fmqa_fica.fica_clr,
			    INTCLR_IDLE);
		} else
			intr_vectors[vec].iv_func = fire_msiq_handler;
		/* Record the MSI/MSI-X as long as we use a 1:1 mapping. */
		fmqa->fmqa_msi = msi;
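		/*
		 * Enable the event queue, route the MSI to it, and only
		 * then mark the MSI mapping valid.
		 */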
		FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_SET_BASE + (msiq << 3),
		    FO_PCI_EQ_CTRL_SET_EN);
		msi <<= 3;
		FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi,
		    (FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) &
		    ~FO_PCI_MSI_MAP_EQNUM_MASK) |
		    ((msiq << FO_PCI_MSI_MAP_EQNUM_SHFT) &
		    FO_PCI_MSI_MAP_EQNUM_MASK));
		FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_CLR_BASE + msi,
		    FO_PCI_MSI_CLR_EQWR_N);
		FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi,
		    FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) |
		    FO_PCI_MSI_MAP_V);
		return (error);
	}

	/*
	 * Make sure the vector is fully specified and we registered
	 * our interrupt controller for it.
	 */
	vec = rman_get_start(ires);
	if (INTIGN(vec) != sc->sc_ign) {
		device_printf(dev, "invalid interrupt vector 0x%lx\n", vec);
		return (EINVAL);
	}
	if (intr_vectors[vec].iv_ic != &fire_ic) {
		device_printf(dev,
		    "invalid interrupt controller for vector 0x%lx\n", vec);
		return (EINVAL);
	}
	return (bus_generic_setup_intr(dev, child, ires, flags, filt, intr,
	    arg, cookiep));
}

static int
fire_teardown_intr(device_t dev, device_t child, struct resource *ires,
    void *cookie)
{
	struct fire_softc *sc;
	u_long vec;
	int error;
	u_int msi, msiq;

	sc = device_get_softc(dev);
	if (rman_get_rid(ires) != 0) {
		msi = rman_get_start(ires);
		msiq = sc->sc_msi_msiq_table[msi - sc->sc_msi_first];
		vec = INTMAP_VEC(sc->sc_ign, msiq + sc->sc_msiq_ino_first);
		msiq += sc->sc_msiq_first;
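		/*
		 * Invalidate the MSI mapping, then disable the event queue
		 * and reset its tail and head registers.
		 */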
		msi <<= 3;
		FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi,
		    FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) &
		    ~FO_PCI_MSI_MAP_V);
		msiq <<= 3;
		FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq,
		    FO_PCI_EQ_CTRL_CLR_COVERR | FO_PCI_EQ_CTRL_CLR_E2I |
		    FO_PCI_EQ_CTRL_CLR_DIS);
		FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_TL_BASE + msiq,
		    (0 << FO_PCI_EQ_TL_SHFT) & FO_PCI_EQ_TL_MASK);
		FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_HD_BASE + msiq,
		    (0 << FO_PCI_EQ_HD_SHFT) & FO_PCI_EQ_HD_MASK);
		intr_vectors[vec].iv_ic = &fire_ic;
		/*
		 * The MD interrupt code needs the vector rather than the MSI.
		 */
		rman_set_start(ires, vec);
		rman_set_end(ires, vec);
		error = bus_generic_teardown_intr(dev, child, ires, cookie);
		msi >>= 3;
		rman_set_start(ires, msi);
		rman_set_end(ires, msi);
		return (error);
	}
	return (bus_generic_teardown_intr(dev, child, ires, cookie));
}

static struct resource *
fire_alloc_resource(device_t bus, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct fire_softc *sc;

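	/*
	 * For INTx (RID 0), turn the INO the firmware supplied into the
	 * fully specified interrupt vector for this bridge's IGN.
	 */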
	if (type == SYS_RES_IRQ && *rid == 0) {
		sc = device_get_softc(bus);
		start = end = INTMAP_VEC(sc->sc_ign, end);
	}
	return (ofw_pci_alloc_resource(bus, child, type, rid, start, end,
	    count, flags));
}

static u_int
fire_get_timecount(struct timecounter *tc)
{
	struct fire_softc *sc;

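	/*
	 * Return performance counter 0, which serves as the timecounter,
	 * truncated to the timecounter's width.
	 */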
	sc = tc->tc_priv;
	return (FIRE_CTRL_READ_8(sc, FO_XBC_PRF_CNT0) & TC_COUNTER_MAX_MASK);
}
