/*-
 * Copyright (c) 1994,1995 Stefan Esser, Wolfgang StanglMeier
 * Copyright (c) 2000 Michael Smith <msmith@freebsd.org>
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/pci/pci_pci.c 360275 2020-04-24 16:40:42Z mav $");

/*
 * PCI:PCI bridge support.
 */

#include "opt_pci.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/pciio.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pcib_private.h>

#include "pcib_if.h"

static int		pcib_probe(device_t dev);
static int		pcib_suspend(device_t dev);
static int		pcib_resume(device_t dev);
static int		pcib_power_for_sleep(device_t pcib, device_t dev,
			    int *pstate);
static int		pcib_ari_get_id(device_t pcib, device_t dev,
    enum pci_id_type type, uintptr_t *id);
static uint32_t		pcib_read_config(device_t dev, u_int b, u_int s,
    u_int f, u_int reg, int width);
static void		pcib_write_config(device_t dev, u_int b, u_int s,
    u_int f, u_int reg, uint32_t val, int width);
static int		pcib_ari_maxslots(device_t dev);
static int		pcib_ari_maxfuncs(device_t dev);
static int		pcib_try_enable_ari(device_t pcib, device_t dev);
static int		pcib_ari_enabled(device_t pcib);
static void		pcib_ari_decode_rid(device_t pcib, uint16_t rid,
			    int *bus, int *slot, int *func);
#ifdef PCI_HP
static void		pcib_pcie_ab_timeout(void *arg);
static void		pcib_pcie_cc_timeout(void *arg);
static void		pcib_pcie_dll_timeout(void *arg);
#endif
static int		pcib_reset_child(device_t dev, device_t child, int flags);

static device_method_t pcib_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,		pcib_probe),
    DEVMETHOD(device_attach,		pcib_attach),
    DEVMETHOD(device_detach,		pcib_detach),
    DEVMETHOD(device_shutdown,		bus_generic_shutdown),
    DEVMETHOD(device_suspend,		pcib_suspend),
    DEVMETHOD(device_resume,		pcib_resume),

    /* Bus interface */
    DEVMETHOD(bus_child_present,	pcib_child_present),
    DEVMETHOD(bus_read_ivar,		pcib_read_ivar),
    DEVMETHOD(bus_write_ivar,		pcib_write_ivar),
    DEVMETHOD(bus_alloc_resource,	pcib_alloc_resource),
#ifdef NEW_PCIB
    DEVMETHOD(bus_adjust_resource,	pcib_adjust_resource),
    DEVMETHOD(bus_release_resource,	pcib_release_resource),
#else
    DEVMETHOD(bus_adjust_resource,	bus_generic_adjust_resource),
    DEVMETHOD(bus_release_resource,	bus_generic_release_resource),
#endif
    DEVMETHOD(bus_activate_resource,	bus_generic_activate_resource),
    DEVMETHOD(bus_deactivate_resource,	bus_generic_deactivate_resource),
    DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
    DEVMETHOD(bus_teardown_intr,	bus_generic_teardown_intr),
    DEVMETHOD(bus_reset_child,		pcib_reset_child),

    /* pcib interface */
    DEVMETHOD(pcib_maxslots,		pcib_ari_maxslots),
    DEVMETHOD(pcib_maxfuncs,		pcib_ari_maxfuncs),
    DEVMETHOD(pcib_read_config,		pcib_read_config),
    DEVMETHOD(pcib_write_config,	pcib_write_config),
    DEVMETHOD(pcib_route_interrupt,	pcib_route_interrupt),
    DEVMETHOD(pcib_alloc_msi,		pcib_alloc_msi),
    DEVMETHOD(pcib_release_msi,		pcib_release_msi),
    DEVMETHOD(pcib_alloc_msix,		pcib_alloc_msix),
    DEVMETHOD(pcib_release_msix,	pcib_release_msix),
    DEVMETHOD(pcib_map_msi,		pcib_map_msi),
    DEVMETHOD(pcib_power_for_sleep,	pcib_power_for_sleep),
    DEVMETHOD(pcib_get_id,		pcib_ari_get_id),
    DEVMETHOD(pcib_try_enable_ari,	pcib_try_enable_ari),
    DEVMETHOD(pcib_ari_enabled,		pcib_ari_enabled),
    DEVMETHOD(pcib_decode_rid,		pcib_ari_decode_rid),

    DEVMETHOD_END
};

static devclass_t pcib_devclass;

DEFINE_CLASS_0(pcib, pcib_driver, pcib_methods, sizeof(struct pcib_softc));
DRIVER_MODULE(pcib, pci, pcib_driver, pcib_devclass, NULL, NULL);

#if defined(NEW_PCIB) || defined(PCI_HP)
SYSCTL_DECL(_hw_pci);
#endif

#ifdef NEW_PCIB
static int pci_clear_pcib;
SYSCTL_INT(_hw_pci, OID_AUTO, clear_pcib, CTLFLAG_RDTUN, &pci_clear_pcib, 0,
    "Clear firmware-assigned resources for PCI-PCI bridge I/O windows.");

/*
 * Is a resource from a child device sub-allocated from one of our
 * resource managers?
 */
static int
pcib_is_resource_managed(struct pcib_softc *sc, int type, struct resource *r)
{

	switch (type) {
#ifdef PCI_RES_BUS
	case PCI_RES_BUS:
		return (rman_is_region_manager(r, &sc->bus.rman));
#endif
	case SYS_RES_IOPORT:
		return (rman_is_region_manager(r, &sc->io.rman));
	case SYS_RES_MEMORY:
		/* Prefetchable resources may live in either memory rman. */
		if (rman_get_flags(r) & RF_PREFETCHABLE &&
		    rman_is_region_manager(r, &sc->pmem.rman))
			return (1);
		return (rman_is_region_manager(r, &sc->mem.rman));
	}
	return (0);
}

static int
pcib_is_window_open(struct pcib_window *pw)
{

	return (pw->valid && pw->base < pw->limit);
}

/*
 * XXX: If RF_ACTIVE did not also imply allocating a bus space tag and
 * handle for the resource, we could pass RF_ACTIVE up to the PCI bus
 * when allocating the resource windows and rely on the PCI bus driver
 * to do this for us.
 */
static void
pcib_activate_window(struct pcib_softc *sc, int type)
{

	PCI_ENABLE_IO(device_get_parent(sc->dev), sc->dev, type);
}

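/*
 * Write the cached I/O, memory, and prefetchable memory window ranges
 * back to the bridge's configuration registers.  Only the windows
 * selected by 'mask' are updated.
 */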
static void
pcib_write_windows(struct pcib_softc *sc, int mask)
{
	device_t dev;
	uint32_t val;

	dev = sc->dev;
	if (sc->io.valid && mask & WIN_IO) {
		val = pci_read_config(dev, PCIR_IOBASEL_1, 1);
		if ((val & PCIM_BRIO_MASK) == PCIM_BRIO_32) {
			pci_write_config(dev, PCIR_IOBASEH_1,
			    sc->io.base >> 16, 2);
			pci_write_config(dev, PCIR_IOLIMITH_1,
			    sc->io.limit >> 16, 2);
		}
		pci_write_config(dev, PCIR_IOBASEL_1, sc->io.base >> 8, 1);
		pci_write_config(dev, PCIR_IOLIMITL_1, sc->io.limit >> 8, 1);
	}

	if (mask & WIN_MEM) {
		pci_write_config(dev, PCIR_MEMBASE_1, sc->mem.base >> 16, 2);
		pci_write_config(dev, PCIR_MEMLIMIT_1, sc->mem.limit >> 16, 2);
	}

	if (sc->pmem.valid && mask & WIN_PMEM) {
		val = pci_read_config(dev, PCIR_PMBASEL_1, 2);
		if ((val & PCIM_BRPM_MASK) == PCIM_BRPM_64) {
			pci_write_config(dev, PCIR_PMBASEH_1,
			    sc->pmem.base >> 32, 4);
			pci_write_config(dev, PCIR_PMLIMITH_1,
			    sc->pmem.limit >> 32, 4);
		}
		pci_write_config(dev, PCIR_PMBASEL_1, sc->pmem.base >> 16, 2);
		pci_write_config(dev, PCIR_PMLIMITL_1, sc->pmem.limit >> 16, 2);
	}
}

/*
 * This is used to reject I/O port allocations that conflict with an
 * ISA alias range.
 */
static int
pcib_is_isa_range(struct pcib_softc *sc, rman_res_t start, rman_res_t end,
    rman_res_t count)
{
	rman_res_t next_alias;

	if (!(sc->bridgectl & PCIB_BCR_ISA_ENABLE))
		return (0);

	/* Only check fixed ranges for overlap. */
	if (start + count - 1 != end)
		return (0);

	/* ISA aliases are only in the lower 64KB of I/O space. */
	if (start >= 65536)
		return (0);

	/* Check for overlap with 0x000 - 0x0ff as a special case. */
	if (start < 0x100)
		goto alias;

	/*
	 * If the start address is an alias, the range is an alias.
	 * Otherwise, compute the start of the next alias range and
	 * check if it is before the end of the candidate range.
	 */
	if ((start & 0x300) != 0)
		goto alias;
	next_alias = (start & ~0x3fful) | 0x100;
	if (next_alias <= end)
		goto alias;
	return (0);

alias:
	if (bootverbose)
		device_printf(sc->dev,
		    "I/O range %#jx-%#jx overlaps with an ISA alias\n", start,
		    end);
	return (1);
}

static void
pcib_add_window_resources(struct pcib_window *w, struct resource **res,
    int count)
{
	struct resource **newarray;
	int error, i;

	newarray = malloc(sizeof(struct resource *) * (w->count + count),
	    M_DEVBUF, M_WAITOK);
	if (w->res != NULL)
		bcopy(w->res, newarray, sizeof(struct resource *) * w->count);
	bcopy(res, newarray + w->count, sizeof(struct resource *) * count);
	free(w->res, M_DEVBUF);
	w->res = newarray;
	w->count += count;

	for (i = 0; i < count; i++) {
		error = rman_manage_region(&w->rman, rman_get_start(res[i]),
		    rman_get_end(res[i]));
		if (error)
			panic("Failed to add resource to rman");
	}
}

typedef void (nonisa_callback)(rman_res_t start, rman_res_t end, void *arg);

static void
pcib_walk_nonisa_ranges(rman_res_t start, rman_res_t end, nonisa_callback *cb,
    void *arg)
{
	rman_res_t next_end;

	/*
	 * If start is within an ISA alias range, move up to the start
	 * of the next non-alias range.  As a special case, addresses
	 * in the range 0x000 - 0x0ff should also be skipped since
	 * those are used for various system I/O devices in ISA
	 * systems.
	 */
	if (start <= 65535) {
		if (start < 0x100 || (start & 0x300) != 0) {
			start &= ~0x3ff;
			start += 0x400;
		}
	}

	/* ISA aliases are only in the lower 64KB of I/O space. */
	while (start <= MIN(end, 65535)) {
		next_end = MIN(start | 0xff, end);
		cb(start, next_end, arg);
		start += 0x400;
	}

	if (start <= end)
		cb(start, end, arg);
}

static void
count_ranges(rman_res_t start, rman_res_t end, void *arg)
{
	int *countp;

	countp = arg;
	(*countp)++;
}

struct alloc_state {
	struct resource **res;
	struct pcib_softc *sc;
	int count, error;
};

static void
alloc_ranges(rman_res_t start, rman_res_t end, void *arg)
{
	struct alloc_state *as;
	struct pcib_window *w;
	int rid;

	as = arg;
	if (as->error != 0)
		return;

	w = &as->sc->io;
	rid = w->reg;
	if (bootverbose)
		device_printf(as->sc->dev,
		    "allocating non-ISA range %#jx-%#jx\n", start, end);
	as->res[as->count] = bus_alloc_resource(as->sc->dev, SYS_RES_IOPORT,
	    &rid, start, end, end - start + 1, 0);
	if (as->res[as->count] == NULL)
		as->error = ENXIO;
	else
		as->count++;
}

static int
pcib_alloc_nonisa_ranges(struct pcib_softc *sc, rman_res_t start, rman_res_t end)
{
	struct alloc_state as;
	int i, new_count;

	/* First, see how many ranges we need. */
	new_count = 0;
	pcib_walk_nonisa_ranges(start, end, count_ranges, &new_count);

	/* Second, allocate the ranges. */
	as.res = malloc(sizeof(struct resource *) * new_count, M_DEVBUF,
	    M_WAITOK);
	as.sc = sc;
	as.count = 0;
	as.error = 0;
	pcib_walk_nonisa_ranges(start, end, alloc_ranges, &as);
	if (as.error != 0) {
		for (i = 0; i < as.count; i++)
			bus_release_resource(sc->dev, SYS_RES_IOPORT,
			    sc->io.reg, as.res[i]);
		free(as.res, M_DEVBUF);
		return (as.error);
	}
	KASSERT(as.count == new_count, ("%s: count mismatch", __func__));

	/* Third, add the ranges to the window. */
	pcib_add_window_resources(&sc->io, as.res, as.count);
	free(as.res, M_DEVBUF);
	return (0);
}

static void
pcib_alloc_window(struct pcib_softc *sc, struct pcib_window *w, int type,
    int flags, pci_addr_t max_address)
{
	struct resource *res;
	char buf[64];
	int error, rid;

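	/*
	 * Clamp the maximum address to the largest value an rman_res_t
	 * can represent before using it as the bound of the new rman.
	 */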
	if (max_address != (rman_res_t)max_address)
		max_address = ~0;
	w->rman.rm_start = 0;
	w->rman.rm_end = max_address;
	w->rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s %s window",
	    device_get_nameunit(sc->dev), w->name);
	w->rman.rm_descr = strdup(buf, M_DEVBUF);
	error = rman_init(&w->rman);
	if (error)
		panic("Failed to initialize %s %s rman",
		    device_get_nameunit(sc->dev), w->name);

	if (!pcib_is_window_open(w))
		return;

	if (w->base > max_address || w->limit > max_address) {
		device_printf(sc->dev,
		    "initial %s window has too many bits, ignoring\n", w->name);
		return;
	}
	if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE)
		(void)pcib_alloc_nonisa_ranges(sc, w->base, w->limit);
	else {
		rid = w->reg;
		res = bus_alloc_resource(sc->dev, type, &rid, w->base, w->limit,
		    w->limit - w->base + 1, flags);
		if (res != NULL)
			pcib_add_window_resources(w, &res, 1);
	}
	if (w->res == NULL) {
		device_printf(sc->dev,
		    "failed to allocate initial %s window: %#jx-%#jx\n",
		    w->name, (uintmax_t)w->base, (uintmax_t)w->limit);
		w->base = max_address;
		w->limit = 0;
		pcib_write_windows(sc, w->mask);
		return;
	}
	pcib_activate_window(sc, type);
}

/*
 * Initialize I/O windows.
 */
static void
pcib_probe_windows(struct pcib_softc *sc)
{
	pci_addr_t max;
	device_t dev;
	uint32_t val;

	dev = sc->dev;

	if (pci_clear_pcib) {
		pcib_bridge_init(dev);
	}

	/* Determine if the I/O port window is implemented. */
	val = pci_read_config(dev, PCIR_IOBASEL_1, 1);
	if (val == 0) {
		/*
		 * If 'val' is zero, then only 16-bits of I/O space
		 * are supported.
		 */
		pci_write_config(dev, PCIR_IOBASEL_1, 0xff, 1);
		if (pci_read_config(dev, PCIR_IOBASEL_1, 1) != 0) {
			sc->io.valid = 1;
			pci_write_config(dev, PCIR_IOBASEL_1, 0, 1);
		}
	} else
		sc->io.valid = 1;

	/* Read the existing I/O port window. */
	if (sc->io.valid) {
		sc->io.reg = PCIR_IOBASEL_1;
		sc->io.step = 12;
		sc->io.mask = WIN_IO;
		sc->io.name = "I/O port";
		if ((val & PCIM_BRIO_MASK) == PCIM_BRIO_32) {
			sc->io.base = PCI_PPBIOBASE(
			    pci_read_config(dev, PCIR_IOBASEH_1, 2), val);
			sc->io.limit = PCI_PPBIOLIMIT(
			    pci_read_config(dev, PCIR_IOLIMITH_1, 2),
			    pci_read_config(dev, PCIR_IOLIMITL_1, 1));
			max = 0xffffffff;
		} else {
			sc->io.base = PCI_PPBIOBASE(0, val);
			sc->io.limit = PCI_PPBIOLIMIT(0,
			    pci_read_config(dev, PCIR_IOLIMITL_1, 1));
			max = 0xffff;
		}
		pcib_alloc_window(sc, &sc->io, SYS_RES_IOPORT, 0, max);
	}

	/* Read the existing memory window. */
	sc->mem.valid = 1;
	sc->mem.reg = PCIR_MEMBASE_1;
	sc->mem.step = 20;
	sc->mem.mask = WIN_MEM;
	sc->mem.name = "memory";
	sc->mem.base = PCI_PPBMEMBASE(0,
	    pci_read_config(dev, PCIR_MEMBASE_1, 2));
	sc->mem.limit = PCI_PPBMEMLIMIT(0,
	    pci_read_config(dev, PCIR_MEMLIMIT_1, 2));
	pcib_alloc_window(sc, &sc->mem, SYS_RES_MEMORY, 0, 0xffffffff);

	/* Determine if the prefetchable memory window is implemented. */
	val = pci_read_config(dev, PCIR_PMBASEL_1, 2);
	if (val == 0) {
		/*
		 * If 'val' is zero, then only 32-bits of memory space
		 * are supported.
		 */
		pci_write_config(dev, PCIR_PMBASEL_1, 0xffff, 2);
		if (pci_read_config(dev, PCIR_PMBASEL_1, 2) != 0) {
			sc->pmem.valid = 1;
			pci_write_config(dev, PCIR_PMBASEL_1, 0, 2);
		}
	} else
		sc->pmem.valid = 1;

	/* Read the existing prefetchable memory window. */
	if (sc->pmem.valid) {
		sc->pmem.reg = PCIR_PMBASEL_1;
		sc->pmem.step = 20;
		sc->pmem.mask = WIN_PMEM;
		sc->pmem.name = "prefetch";
		if ((val & PCIM_BRPM_MASK) == PCIM_BRPM_64) {
			sc->pmem.base = PCI_PPBMEMBASE(
			    pci_read_config(dev, PCIR_PMBASEH_1, 4), val);
			sc->pmem.limit = PCI_PPBMEMLIMIT(
			    pci_read_config(dev, PCIR_PMLIMITH_1, 4),
			    pci_read_config(dev, PCIR_PMLIMITL_1, 2));
			max = 0xffffffffffffffff;
		} else {
			sc->pmem.base = PCI_PPBMEMBASE(0, val);
			sc->pmem.limit = PCI_PPBMEMLIMIT(0,
			    pci_read_config(dev, PCIR_PMLIMITL_1, 2));
			max = 0xffffffff;
		}
		pcib_alloc_window(sc, &sc->pmem, SYS_RES_MEMORY,
		    RF_PREFETCHABLE, max);
	}
}

static void
pcib_release_window(struct pcib_softc *sc, struct pcib_window *w, int type)
{
	device_t dev;
	int error, i;

	if (!w->valid)
		return;

	dev = sc->dev;
	error = rman_fini(&w->rman);
	if (error) {
		device_printf(dev, "failed to release %s rman\n", w->name);
		return;
	}
	free(__DECONST(char *, w->rman.rm_descr), M_DEVBUF);

	for (i = 0; i < w->count; i++) {
		error = bus_free_resource(dev, type, w->res[i]);
		if (error)
			device_printf(dev,
			    "failed to release %s resource: %d\n", w->name,
			    error);
	}
	free(w->res, M_DEVBUF);
}

static void
pcib_free_windows(struct pcib_softc *sc)
{

	pcib_release_window(sc, &sc->pmem, SYS_RES_MEMORY);
	pcib_release_window(sc, &sc->mem, SYS_RES_MEMORY);
	pcib_release_window(sc, &sc->io, SYS_RES_IOPORT);
}

#ifdef PCI_RES_BUS
/*
 * Allocate a suitable secondary bus for this bridge if needed and
 * initialize the resource manager for the secondary bus range.  Note
 * that the minimum count is a desired value and this may allocate a
 * smaller range.
 */
void
pcib_setup_secbus(device_t dev, struct pcib_secbus *bus, int min_count)
{
	char buf[64];
	int error, rid, sec_reg;

	switch (pci_read_config(dev, PCIR_HDRTYPE, 1) & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_BRIDGE:
		sec_reg = PCIR_SECBUS_1;
		bus->sub_reg = PCIR_SUBBUS_1;
		break;
	case PCIM_HDRTYPE_CARDBUS:
		sec_reg = PCIR_SECBUS_2;
		bus->sub_reg = PCIR_SUBBUS_2;
		break;
	default:
		panic("not a PCI bridge");
	}
	bus->sec = pci_read_config(dev, sec_reg, 1);
	bus->sub = pci_read_config(dev, bus->sub_reg, 1);
	bus->dev = dev;
	bus->rman.rm_start = 0;
	bus->rman.rm_end = PCI_BUSMAX;
	bus->rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s bus numbers", device_get_nameunit(dev));
	bus->rman.rm_descr = strdup(buf, M_DEVBUF);
	error = rman_init(&bus->rman);
	if (error)
		panic("Failed to initialize %s bus number rman",
		    device_get_nameunit(dev));

	/*
	 * Allocate a bus range.  This will return an existing bus range
	 * if one exists, or a new bus range if one does not.
	 */
	rid = 0;
	bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid,
	    min_count, 0);
	if (bus->res == NULL) {
		/*
		 * Fall back to just allocating a range of a single bus
		 * number.
		 */
		bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid,
		    1, 0);
	} else if (rman_get_size(bus->res) < min_count)
		/*
		 * Attempt to grow the existing range to satisfy the
		 * minimum desired count.
		 */
		(void)bus_adjust_resource(dev, PCI_RES_BUS, bus->res,
		    rman_get_start(bus->res), rman_get_start(bus->res) +
		    min_count - 1);

	/*
	 * Add the initial resource to the rman.
	 */
	if (bus->res != NULL) {
		error = rman_manage_region(&bus->rman, rman_get_start(bus->res),
		    rman_get_end(bus->res));
		if (error)
			panic("Failed to add resource to rman");
		bus->sec = rman_get_start(bus->res);
		bus->sub = rman_get_end(bus->res);
	}
}

void
pcib_free_secbus(device_t dev, struct pcib_secbus *bus)
{
	int error;

	error = rman_fini(&bus->rman);
	if (error) {
		device_printf(dev, "failed to release bus number rman\n");
		return;
	}
	free(__DECONST(char *, bus->rman.rm_descr), M_DEVBUF);

	error = bus_free_resource(dev, PCI_RES_BUS, bus->res);
	if (error)
		device_printf(dev,
		    "failed to release bus numbers resource: %d\n", error);
}

static struct resource *
pcib_suballoc_bus(struct pcib_secbus *bus, device_t child, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct resource *res;

	res = rman_reserve_resource(&bus->rman, start, end, count, flags,
	    child);
	if (res == NULL)
		return (NULL);

	if (bootverbose)
		device_printf(bus->dev,
		    "allocated bus range (%ju-%ju) for rid %d of %s\n",
		    rman_get_start(res), rman_get_end(res), *rid,
		    pcib_child_name(child));
	rman_set_rid(res, *rid);
	return (res);
}

/*
 * Attempt to grow the secondary bus range.  This is much simpler than
 * for I/O windows as the range can only be grown by increasing
 * subbus.
 */
static int
pcib_grow_subbus(struct pcib_secbus *bus, rman_res_t new_end)
{
	rman_res_t old_end;
	int error;

	old_end = rman_get_end(bus->res);
	KASSERT(new_end > old_end, ("attempt to shrink subbus"));
	error = bus_adjust_resource(bus->dev, PCI_RES_BUS, bus->res,
	    rman_get_start(bus->res), new_end);
	if (error)
		return (error);
	if (bootverbose)
		device_printf(bus->dev, "grew bus range to %ju-%ju\n",
		    rman_get_start(bus->res), rman_get_end(bus->res));
	error = rman_manage_region(&bus->rman, old_end + 1,
	    rman_get_end(bus->res));
	if (error)
		panic("Failed to add resource to rman");
	bus->sub = rman_get_end(bus->res);
	pci_write_config(bus->dev, bus->sub_reg, bus->sub, 1);
	return (0);
}

struct resource *
pcib_alloc_subbus(struct pcib_secbus *bus, device_t child, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct resource *res;
	rman_res_t start_free, end_free, new_end;

	/*
	 * First, see if the request can be satisfied by the existing
	 * bus range.
	 */
	res = pcib_suballoc_bus(bus, child, rid, start, end, count, flags);
	if (res != NULL)
		return (res);

	/*
	 * Figure out a range to grow the bus range.  First, find the
	 * first bus number after the last allocated bus in the rman and
	 * enforce that as a minimum starting point for the range.
	 */
	if (rman_last_free_region(&bus->rman, &start_free, &end_free) != 0 ||
	    end_free != bus->sub)
		start_free = bus->sub + 1;
	if (start_free < start)
		start_free = start;
	new_end = start_free + count - 1;

	/*
	 * See if this new range would satisfy the request if it
	 * succeeds.
	 */
	if (new_end > end)
		return (NULL);

	/* Finally, attempt to grow the existing resource. */
	if (bootverbose) {
		device_printf(bus->dev,
		    "attempting to grow bus range for %ju buses\n", count);
		printf("\tback candidate range: %ju-%ju\n", start_free,
		    new_end);
	}
	if (pcib_grow_subbus(bus, new_end) == 0)
		return (pcib_suballoc_bus(bus, child, rid, start, end, count,
		    flags));
	return (NULL);
}
#endif

#else

/*
 * Is the prefetch window open (e.g., can we allocate memory in it?)
 */
static int
pcib_is_prefetch_open(struct pcib_softc *sc)
{
	return (sc->pmembase > 0 && sc->pmembase < sc->pmemlimit);
}

/*
 * Is the nonprefetch window open (e.g., can we allocate memory in it?)
 */
static int
pcib_is_nonprefetch_open(struct pcib_softc *sc)
{
	return (sc->membase > 0 && sc->membase < sc->memlimit);
}

/*
 * Is the I/O window open (e.g., can we allocate ports in it?)
 */
static int
pcib_is_io_open(struct pcib_softc *sc)
{
	return (sc->iobase > 0 && sc->iobase < sc->iolimit);
}

/*
 * Get current I/O decode.
 */
static void
pcib_get_io_decode(struct pcib_softc *sc)
{
	device_t	dev;
	uint32_t	iolow;

	dev = sc->dev;

	iolow = pci_read_config(dev, PCIR_IOBASEL_1, 1);
	if ((iolow & PCIM_BRIO_MASK) == PCIM_BRIO_32)
		sc->iobase = PCI_PPBIOBASE(
		    pci_read_config(dev, PCIR_IOBASEH_1, 2), iolow);
	else
		sc->iobase = PCI_PPBIOBASE(0, iolow);

	iolow = pci_read_config(dev, PCIR_IOLIMITL_1, 1);
	if ((iolow & PCIM_BRIO_MASK) == PCIM_BRIO_32)
		sc->iolimit = PCI_PPBIOLIMIT(
		    pci_read_config(dev, PCIR_IOLIMITH_1, 2), iolow);
	else
		sc->iolimit = PCI_PPBIOLIMIT(0, iolow);
}

/*
 * Get current memory decode.
 */
static void
pcib_get_mem_decode(struct pcib_softc *sc)
{
	device_t	dev;
	pci_addr_t	pmemlow;

	dev = sc->dev;

	sc->membase = PCI_PPBMEMBASE(0,
	    pci_read_config(dev, PCIR_MEMBASE_1, 2));
	sc->memlimit = PCI_PPBMEMLIMIT(0,
	    pci_read_config(dev, PCIR_MEMLIMIT_1, 2));

	pmemlow = pci_read_config(dev, PCIR_PMBASEL_1, 2);
	if ((pmemlow & PCIM_BRPM_MASK) == PCIM_BRPM_64)
		sc->pmembase = PCI_PPBMEMBASE(
		    pci_read_config(dev, PCIR_PMBASEH_1, 4), pmemlow);
	else
		sc->pmembase = PCI_PPBMEMBASE(0, pmemlow);

	pmemlow = pci_read_config(dev, PCIR_PMLIMITL_1, 2);
	if ((pmemlow & PCIM_BRPM_MASK) == PCIM_BRPM_64)
		sc->pmemlimit = PCI_PPBMEMLIMIT(
		    pci_read_config(dev, PCIR_PMLIMITH_1, 4), pmemlow);
	else
		sc->pmemlimit = PCI_PPBMEMLIMIT(0, pmemlow);
}

/*
 * Restore previous I/O decode.
 */
static void
pcib_set_io_decode(struct pcib_softc *sc)
{
	device_t	dev;
	uint32_t	iohi;

	dev = sc->dev;

	iohi = sc->iobase >> 16;
	if (iohi > 0)
		pci_write_config(dev, PCIR_IOBASEH_1, iohi, 2);
	pci_write_config(dev, PCIR_IOBASEL_1, sc->iobase >> 8, 1);

	iohi = sc->iolimit >> 16;
	if (iohi > 0)
		pci_write_config(dev, PCIR_IOLIMITH_1, iohi, 2);
	pci_write_config(dev, PCIR_IOLIMITL_1, sc->iolimit >> 8, 1);
}

/*
 * Restore previous memory decode.
 */
static void
pcib_set_mem_decode(struct pcib_softc *sc)
{
	device_t	dev;
	pci_addr_t	pmemhi;

	dev = sc->dev;

	pci_write_config(dev, PCIR_MEMBASE_1, sc->membase >> 16, 2);
	pci_write_config(dev, PCIR_MEMLIMIT_1, sc->memlimit >> 16, 2);

	pmemhi = sc->pmembase >> 32;
	if (pmemhi > 0)
		pci_write_config(dev, PCIR_PMBASEH_1, pmemhi, 4);
	pci_write_config(dev, PCIR_PMBASEL_1, sc->pmembase >> 16, 2);

	pmemhi = sc->pmemlimit >> 32;
	if (pmemhi > 0)
		pci_write_config(dev, PCIR_PMLIMITH_1, pmemhi, 4);
	pci_write_config(dev, PCIR_PMLIMITL_1, sc->pmemlimit >> 16, 2);
}
#endif

#ifdef PCI_HP
/*
 * PCI-express HotPlug support.
 */
static int pci_enable_pcie_hp = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, enable_pcie_hp, CTLFLAG_RDTUN,
    &pci_enable_pcie_hp, 0,
    "Enable support for native PCI-express HotPlug.");

static void
pcib_probe_hotplug(struct pcib_softc *sc)
{
	device_t dev;
	uint32_t link_cap;
	uint16_t link_sta, slot_sta;

	if (!pci_enable_pcie_hp)
		return;

	dev = sc->dev;
	if (pci_find_cap(dev, PCIY_EXPRESS, NULL) != 0)
		return;

	if (!(pcie_read_config(dev, PCIER_FLAGS, 2) & PCIEM_FLAGS_SLOT))
		return;

	sc->pcie_slot_cap = pcie_read_config(dev, PCIER_SLOT_CAP, 4);

	if ((sc->pcie_slot_cap & PCIEM_SLOT_CAP_HPC) == 0)
		return;
	link_cap = pcie_read_config(dev, PCIER_LINK_CAP, 4);
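	/*
	 * The HotPlug handling here relies on Data Link Layer Active
	 * reporting, so ignore slots whose links do not support it.
	 */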
	if ((link_cap & PCIEM_LINK_CAP_DL_ACTIVE) == 0)
		return;

	/*
	 * Some devices report that they have an MRL when they actually
	 * do not.  Since they always report that the MRL is open, child
	 * devices would be ignored.  Try to detect these devices and
	 * ignore their claim of HotPlug support.
	 *
	 * If there is an open MRL but the Data Link Layer is active,
	 * the MRL is not real.
	 */
	if ((sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP) != 0) {
		link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2);
		slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2);
		if ((slot_sta & PCIEM_SLOT_STA_MRLSS) != 0 &&
		    (link_sta & PCIEM_LINK_STA_DL_ACTIVE) != 0) {
			return;
		}
	}

	sc->flags |= PCIB_HOTPLUG;
}

/*
 * Send a HotPlug command to the slot control register.  If this slot
 * uses command completion interrupts and a previous command is still
 * in progress, then the command is dropped.  Once the previous
 * command completes or times out, pcib_pcie_hotplug_update() will be
 * invoked to post a new command based on the slot's state at that
 * time.
 */
static void
pcib_pcie_hotplug_command(struct pcib_softc *sc, uint16_t val, uint16_t mask)
{
	device_t dev;
	uint16_t ctl, new;

	dev = sc->dev;

	if (sc->flags & PCIB_HOTPLUG_CMD_PENDING)
		return;

	ctl = pcie_read_config(dev, PCIER_SLOT_CTL, 2);
	new = (ctl & ~mask) | val;
	if (new == ctl)
		return;
	if (bootverbose)
		device_printf(dev, "HotPlug command: %04x -> %04x\n", ctl, new);
	pcie_write_config(dev, PCIER_SLOT_CTL, new, 2);
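	/*
	 * Wait for command completion only if the slot reports Command
	 * Completed events (NCCS is clear) and command completed
	 * interrupts were enabled both before and after this write.
	 */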
	if (!(sc->pcie_slot_cap & PCIEM_SLOT_CAP_NCCS) &&
	    (ctl & new) & PCIEM_SLOT_CTL_CCIE) {
		sc->flags |= PCIB_HOTPLUG_CMD_PENDING;
		if (!cold)
			callout_reset(&sc->pcie_cc_timer, hz,
			    pcib_pcie_cc_timeout, sc);
	}
}

static void
pcib_pcie_hotplug_command_completed(struct pcib_softc *sc)
{
	device_t dev;

	dev = sc->dev;

	if (bootverbose)
		device_printf(dev, "Command Completed\n");
	if (!(sc->flags & PCIB_HOTPLUG_CMD_PENDING))
		return;
	callout_stop(&sc->pcie_cc_timer);
	sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING;
	wakeup(sc);
}

/*
 * Returns true if a card is fully inserted from the user's
 * perspective.  It may not yet be ready for access, but the driver
 * can now start enabling access if necessary.
 */
static bool
pcib_hotplug_inserted(struct pcib_softc *sc)
{

	/* Pretend the card isn't present if a detach is forced. */
	if (sc->flags & PCIB_DETACHING)
		return (false);

	/* Card must be present in the slot. */
	if ((sc->pcie_slot_sta & PCIEM_SLOT_STA_PDS) == 0)
		return (false);

	/* A power fault implicitly turns off power to the slot. */
	if (sc->pcie_slot_sta & PCIEM_SLOT_STA_PFD)
		return (false);

	/* If the MRL is disengaged, the slot is powered off. */
	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP &&
	    (sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSS) != 0)
		return (false);

	return (true);
}

/*
 * Returns -1 if the card is fully inserted, powered, and ready for
 * access.  Otherwise, returns 0.
 */
static int
pcib_hotplug_present(struct pcib_softc *sc)
{

	/* Card must be inserted. */
	if (!pcib_hotplug_inserted(sc))
		return (0);

	/*
	 * Require the Electromechanical Interlock to be engaged if
	 * present.
	 */
	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_EIP &&
	    (sc->pcie_slot_sta & PCIEM_SLOT_STA_EIS) == 0)
		return (0);

	/* Require the Data Link Layer to be active. */
	if (!(sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE))
		return (0);

	return (-1);
}

static void
pcib_pcie_hotplug_update(struct pcib_softc *sc, uint16_t val, uint16_t mask,
    bool schedule_task)
{
	bool card_inserted, ei_engaged;

	/* Clear DETACHING if Presence Detect has cleared. */
	if ((sc->pcie_slot_sta & (PCIEM_SLOT_STA_PDC | PCIEM_SLOT_STA_PDS)) ==
	    PCIEM_SLOT_STA_PDC)
		sc->flags &= ~PCIB_DETACHING;

	card_inserted = pcib_hotplug_inserted(sc);

	/* Turn the power indicator on if a card is inserted. */
	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PIP) {
		mask |= PCIEM_SLOT_CTL_PIC;
		if (card_inserted)
			val |= PCIEM_SLOT_CTL_PI_ON;
		else if (sc->flags & PCIB_DETACH_PENDING)
			val |= PCIEM_SLOT_CTL_PI_BLINK;
		else
			val |= PCIEM_SLOT_CTL_PI_OFF;
	}

	/* Turn the power on via the Power Controller if a card is inserted. */
	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PCP) {
		mask |= PCIEM_SLOT_CTL_PCC;
		if (card_inserted)
			val |= PCIEM_SLOT_CTL_PC_ON;
		else
			val |= PCIEM_SLOT_CTL_PC_OFF;
	}

	/*
	 * If a card is inserted, enable the Electromechanical
	 * Interlock.  If a card is not inserted (or we are in the
	 * process of detaching), disable the Electromechanical
	 * Interlock.
	 */
	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_EIP) {
		mask |= PCIEM_SLOT_CTL_EIC;
		ei_engaged = (sc->pcie_slot_sta & PCIEM_SLOT_STA_EIS) != 0;
		if (card_inserted != ei_engaged)
			val |= PCIEM_SLOT_CTL_EIC;
	}

	/*
	 * Start a timer to see if the Data Link Layer times out.
	 * Note that we only start the timer if Presence Detect or MRL Sensor
	 * changed on this interrupt.  Stop any scheduled timer if
	 * the Data Link Layer is active.
	 */
	if (card_inserted &&
	    !(sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE) &&
	    sc->pcie_slot_sta &
	    (PCIEM_SLOT_STA_MRLSC | PCIEM_SLOT_STA_PDC)) {
		if (cold)
			device_printf(sc->dev,
			    "Data Link Layer inactive\n");
		else
			callout_reset(&sc->pcie_dll_timer, hz,
			    pcib_pcie_dll_timeout, sc);
	} else if (sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE)
		callout_stop(&sc->pcie_dll_timer);

	pcib_pcie_hotplug_command(sc, val, mask);

	/*
	 * During attach the child "pci" device is added synchronously;
	 * otherwise, the task is scheduled to manage the child
	 * device.
	 */
	if (schedule_task &&
	    (pcib_hotplug_present(sc) != 0) != (sc->child != NULL))
		taskqueue_enqueue(taskqueue_thread, &sc->pcie_hp_task);
}

static void
pcib_pcie_intr(void *arg)
{
	struct pcib_softc *sc;
	device_t dev;

	sc = arg;
	dev = sc->dev;
	sc->pcie_slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2);

	/* Clear the events just reported. */
	pcie_write_config(dev, PCIER_SLOT_STA, sc->pcie_slot_sta, 2);

	if (bootverbose)
		device_printf(dev, "HotPlug interrupt: %#x\n",
		    sc->pcie_slot_sta);

	if (sc->pcie_slot_sta & PCIEM_SLOT_STA_ABP) {
		if (sc->flags & PCIB_DETACH_PENDING) {
			device_printf(dev,
			    "Attention Button Pressed: Detach Cancelled\n");
			sc->flags &= ~PCIB_DETACH_PENDING;
			callout_stop(&sc->pcie_ab_timer);
		} else {
			device_printf(dev,
		    "Attention Button Pressed: Detaching in 5 seconds\n");
			sc->flags |= PCIB_DETACH_PENDING;
			callout_reset(&sc->pcie_ab_timer, 5 * hz,
			    pcib_pcie_ab_timeout, sc);
		}
	}
	if (sc->pcie_slot_sta & PCIEM_SLOT_STA_PFD)
		device_printf(dev, "Power Fault Detected\n");
	if (sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSC)
		device_printf(dev, "MRL Sensor Changed to %s\n",
		    sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSS ? "open" :
		    "closed");
	if (bootverbose && sc->pcie_slot_sta & PCIEM_SLOT_STA_PDC)
		device_printf(dev, "Presence Detect Changed to %s\n",
		    sc->pcie_slot_sta & PCIEM_SLOT_STA_PDS ? "card present" :
		    "empty");
	if (sc->pcie_slot_sta & PCIEM_SLOT_STA_CC)
		pcib_pcie_hotplug_command_completed(sc);
	if (sc->pcie_slot_sta & PCIEM_SLOT_STA_DLLSC) {
		sc->pcie_link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2);
		if (bootverbose)
			device_printf(dev,
			    "Data Link Layer State Changed to %s\n",
			    sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE ?
			    "active" : "inactive");
	}

	pcib_pcie_hotplug_update(sc, 0, 0, true);
}

static void
pcib_pcie_hotplug_task(void *context, int pending)
{
	struct pcib_softc *sc;
	device_t dev;

	sc = context;
	mtx_lock(&Giant);
	dev = sc->dev;
	if (pcib_hotplug_present(sc) != 0) {
		if (sc->child == NULL) {
			sc->child = device_add_child(dev, "pci", -1);
			bus_generic_attach(dev);
		}
	} else {
		if (sc->child != NULL) {
			if (device_delete_child(dev, sc->child) == 0)
				sc->child = NULL;
		}
	}
	mtx_unlock(&Giant);
}

static void
pcib_pcie_ab_timeout(void *arg)
{
	struct pcib_softc *sc;
	device_t dev;

	sc = arg;
	dev = sc->dev;
	mtx_assert(&Giant, MA_OWNED);
	if (sc->flags & PCIB_DETACH_PENDING) {
		sc->flags |= PCIB_DETACHING;
		sc->flags &= ~PCIB_DETACH_PENDING;
		pcib_pcie_hotplug_update(sc, 0, 0, true);
	}
}

static void
pcib_pcie_cc_timeout(void *arg)
{
	struct pcib_softc *sc;
	device_t dev;
	uint16_t sta;

	sc = arg;
	dev = sc->dev;
	mtx_assert(&Giant, MA_OWNED);
	sta = pcie_read_config(dev, PCIER_SLOT_STA, 2);
	if (!(sta & PCIEM_SLOT_STA_CC)) {
		device_printf(dev, "HotPlug Command Timed Out\n");
		sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING;
	} else {
		device_printf(dev,
	    "Missed HotPlug interrupt waiting for Command Completion\n");
		pcib_pcie_intr(sc);
	}
}

static void
pcib_pcie_dll_timeout(void *arg)
{
	struct pcib_softc *sc;
	device_t dev;
	uint16_t sta;

	sc = arg;
	dev = sc->dev;
	mtx_assert(&Giant, MA_OWNED);
	sta = pcie_read_config(dev, PCIER_LINK_STA, 2);
	if (!(sta & PCIEM_LINK_STA_DL_ACTIVE)) {
		device_printf(dev,
		    "Timed out waiting for Data Link Layer Active\n");
		sc->flags |= PCIB_DETACHING;
		pcib_pcie_hotplug_update(sc, 0, 0, true);
	} else if (sta != sc->pcie_link_sta) {
		device_printf(dev,
		    "Missed HotPlug interrupt waiting for DLL Active\n");
		pcib_pcie_intr(sc);
	}
}

static int
pcib_alloc_pcie_irq(struct pcib_softc *sc)
{
	device_t dev;
	int count, error, rid;

	rid = -1;
	dev = sc->dev;

	/*
	 * For simplicity, only use MSI-X if there is a single message.
	 * To support a device with multiple messages we would have to
	 * use remap intr if the MSI number is not 0.
	 */
	count = pci_msix_count(dev);
	if (count == 1) {
		error = pci_alloc_msix(dev, &count);
		if (error == 0)
			rid = 1;
	}

	if (rid < 0 && pci_msi_count(dev) > 0) {
		count = 1;
		error = pci_alloc_msi(dev, &count);
		if (error == 0)
			rid = 1;
	}

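	/* If neither MSI-X nor MSI is available, fall back to legacy INTx. */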
	if (rid < 0)
		rid = 0;

	sc->pcie_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->pcie_irq == NULL) {
		device_printf(dev,
		    "Failed to allocate interrupt for PCI-e events\n");
		if (rid > 0)
			pci_release_msi(dev);
		return (ENXIO);
	}

	error = bus_setup_intr(dev, sc->pcie_irq, INTR_TYPE_MISC,
	    NULL, pcib_pcie_intr, sc, &sc->pcie_ihand);
	if (error) {
		device_printf(dev, "Failed to setup PCI-e interrupt handler\n");
		bus_release_resource(dev, SYS_RES_IRQ, rid, sc->pcie_irq);
		if (rid > 0)
			pci_release_msi(dev);
		return (error);
	}
	return (0);
}

static int
pcib_release_pcie_irq(struct pcib_softc *sc)
{
	device_t dev;
	int error;

	dev = sc->dev;
	error = bus_teardown_intr(dev, sc->pcie_irq, sc->pcie_ihand);
	if (error)
		return (error);
	error = bus_free_resource(dev, SYS_RES_IRQ, sc->pcie_irq);
	if (error)
		return (error);
	return (pci_release_msi(dev));
}

static void
pcib_setup_hotplug(struct pcib_softc *sc)
{
	device_t dev;
	uint16_t mask, val;

	dev = sc->dev;
	callout_init(&sc->pcie_ab_timer, 0);
	callout_init(&sc->pcie_cc_timer, 0);
	callout_init(&sc->pcie_dll_timer, 0);
	TASK_INIT(&sc->pcie_hp_task, 0, pcib_pcie_hotplug_task, sc);

	/* Allocate IRQ. */
	if (pcib_alloc_pcie_irq(sc) != 0)
		return;

	sc->pcie_link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2);
	sc->pcie_slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2);

	/* Clear any events previously pending. */
	pcie_write_config(dev, PCIER_SLOT_STA, sc->pcie_slot_sta, 2);

	/* Enable HotPlug events. */
	mask = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE |
	    PCIEM_SLOT_CTL_CCIE | PCIEM_SLOT_CTL_PDCE | PCIEM_SLOT_CTL_MRLSCE |
	    PCIEM_SLOT_CTL_PFDE | PCIEM_SLOT_CTL_ABPE;
	val = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE | PCIEM_SLOT_CTL_PDCE;
	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_APB)
		val |= PCIEM_SLOT_CTL_ABPE;
	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PCP)
		val |= PCIEM_SLOT_CTL_PFDE;
	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP)
		val |= PCIEM_SLOT_CTL_MRLSCE;
	if (!(sc->pcie_slot_cap & PCIEM_SLOT_CAP_NCCS))
		val |= PCIEM_SLOT_CTL_CCIE;

	/* Turn the attention indicator off. */
	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_AIP) {
		mask |= PCIEM_SLOT_CTL_AIC;
		val |= PCIEM_SLOT_CTL_AI_OFF;
	}

	pcib_pcie_hotplug_update(sc, val, mask, false);
}

static int
pcib_detach_hotplug(struct pcib_softc *sc)
{
	uint16_t mask, val;
	int error;

	/* Disable the card in the slot and force it to detach. */
	if (sc->flags & PCIB_DETACH_PENDING) {
		sc->flags &= ~PCIB_DETACH_PENDING;
		callout_stop(&sc->pcie_ab_timer);
	}
	sc->flags |= PCIB_DETACHING;

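	/* Give any pending HotPlug command up to one second to complete. */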
	if (sc->flags & PCIB_HOTPLUG_CMD_PENDING) {
		callout_stop(&sc->pcie_cc_timer);
		tsleep(sc, 0, "hpcmd", hz);
		sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING;
	}

	/* Disable HotPlug events. */
	mask = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE |
	    PCIEM_SLOT_CTL_CCIE | PCIEM_SLOT_CTL_PDCE | PCIEM_SLOT_CTL_MRLSCE |
	    PCIEM_SLOT_CTL_PFDE | PCIEM_SLOT_CTL_ABPE;
	val = 0;

	/* Turn the attention indicator off. */
	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_AIP) {
		mask |= PCIEM_SLOT_CTL_AIC;
		val |= PCIEM_SLOT_CTL_AI_OFF;
	}

	pcib_pcie_hotplug_update(sc, val, mask, false);

	error = pcib_release_pcie_irq(sc);
	if (error)
		return (error);
	taskqueue_drain(taskqueue_thread, &sc->pcie_hp_task);
	callout_drain(&sc->pcie_ab_timer);
	callout_drain(&sc->pcie_cc_timer);
	callout_drain(&sc->pcie_dll_timer);
	return (0);
}
#endif

/*
 * Get current bridge configuration.
 */
static void
pcib_cfg_save(struct pcib_softc *sc)
{
#ifndef NEW_PCIB
	device_t	dev;
	uint16_t command;

	dev = sc->dev;

	command = pci_read_config(dev, PCIR_COMMAND, 2);
	if (command & PCIM_CMD_PORTEN)
		pcib_get_io_decode(sc);
	if (command & PCIM_CMD_MEMEN)
		pcib_get_mem_decode(sc);
#endif
}

/*
 * Restore previous bridge configuration.
 */
static void
pcib_cfg_restore(struct pcib_softc *sc)
{
	device_t	dev;
#ifndef NEW_PCIB
	uint16_t command;
#endif
	dev = sc->dev;

#ifdef NEW_PCIB
	pcib_write_windows(sc, WIN_IO | WIN_MEM | WIN_PMEM);
#else
	command = pci_read_config(dev, PCIR_COMMAND, 2);
	if (command & PCIM_CMD_PORTEN)
		pcib_set_io_decode(sc);
	if (command & PCIM_CMD_MEMEN)
		pcib_set_mem_decode(sc);
#endif
}

/*
 * Generic device interface
 */
static int
pcib_probe(device_t dev)
{
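    /*
     * Match generic PCI-PCI bridges at a low priority so that more
     * specific bridge drivers can claim the device instead.
     */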
    if ((pci_get_class(dev) == PCIC_BRIDGE) &&
	(pci_get_subclass(dev) == PCIS_BRIDGE_PCI)) {
	device_set_desc(dev, "PCI-PCI bridge");
	return(-10000);
    }
    return(ENXIO);
}

void
pcib_attach_common(device_t dev)
{
    struct pcib_softc	*sc;
    struct sysctl_ctx_list *sctx;
    struct sysctl_oid	*soid;
    int comma;

    sc = device_get_softc(dev);
    sc->dev = dev;

    /*
     * Get current bridge configuration.
     */
    sc->domain = pci_get_domain(dev);
#if !(defined(NEW_PCIB) && defined(PCI_RES_BUS))
    sc->bus.sec = pci_read_config(dev, PCIR_SECBUS_1, 1);
    sc->bus.sub = pci_read_config(dev, PCIR_SUBBUS_1, 1);
#endif
    sc->bridgectl = pci_read_config(dev, PCIR_BRIDGECTL_1, 2);
    pcib_cfg_save(sc);

    /*
     * The primary bus register should always be the bus of the
     * parent.
     */
    sc->pribus = pci_get_bus(dev);
    pci_write_config(dev, PCIR_PRIBUS_1, sc->pribus, 1);

    /*
     * Setup sysctl reporting nodes
     */
    sctx = device_get_sysctl_ctx(dev);
    soid = device_get_sysctl_tree(dev);
    SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "domain",
      CTLFLAG_RD, &sc->domain, 0, "Domain number");
    SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "pribus",
      CTLFLAG_RD, &sc->pribus, 0, "Primary bus number");
    SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "secbus",
      CTLFLAG_RD, &sc->bus.sec, 0, "Secondary bus number");
    SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "subbus",
      CTLFLAG_RD, &sc->bus.sub, 0, "Subordinate bus number");

    /*
     * Quirk handling.
     */
    switch (pci_get_devid(dev)) {
#if !(defined(NEW_PCIB) && defined(PCI_RES_BUS))
    case 0x12258086:		/* Intel 82454KX/GX (Orion) */
	{
	    uint8_t	supbus;

	    supbus = pci_read_config(dev, 0x41, 1);
	    if (supbus != 0xff) {
		sc->bus.sec = supbus + 1;
		sc->bus.sub = supbus + 1;
	    }
	    break;
	}
#endif

    /*
     * The i82380FB mobile docking controller is a PCI-PCI bridge,
     * and it is a subtractive bridge.  However, the ProgIf is wrong
     * so the normal setting of PCIB_SUBTRACTIVE bit doesn't
     * happen.  There are also Toshiba and Cavium ThunderX bridges
     * that behave this way.
     */
    case 0xa002177d:		/* Cavium ThunderX */
    case 0x124b8086:		/* Intel 82380FB Mobile */
    case 0x060513d7:		/* Toshiba ???? */
	sc->flags |= PCIB_SUBTRACTIVE;
	break;

#if !(defined(NEW_PCIB) && defined(PCI_RES_BUS))
    /* Compaq R3000 BIOS sets wrong subordinate bus number. */
    case 0x00dd10de:
	{
	    char *cp;

	    if ((cp = kern_getenv("smbios.planar.maker")) == NULL)
		break;
	    if (strncmp(cp, "Compal", 6) != 0) {
		freeenv(cp);
		break;
	    }
	    freeenv(cp);
	    if ((cp = kern_getenv("smbios.planar.product")) == NULL)
		break;
	    if (strncmp(cp, "08A0", 4) != 0) {
		freeenv(cp);
		break;
	    }
	    freeenv(cp);
	    if (sc->bus.sub < 0xa) {
		pci_write_config(dev, PCIR_SUBBUS_1, 0xa, 1);
		sc->bus.sub = pci_read_config(dev, PCIR_SUBBUS_1, 1);
	    }
	    break;
	}
#endif
    }

    if (pci_msi_device_blacklisted(dev))
	sc->flags |= PCIB_DISABLE_MSI;

    if (pci_msix_device_blacklisted(dev))
	sc->flags |= PCIB_DISABLE_MSIX;

    /*
     * Intel 815, 845 and other chipsets say they are PCI-PCI bridges,
     * but have a ProgIF of 0x80.  The 82801 family (AA, AB, BAM/CAM,
     * BA/CA/DB and E) PCI bridges are HUB-PCI bridges, in Intelese.
     * This means they act as if they were subtractively decoding
     * bridges and pass all transactions.  Mark them and real ProgIf 1
     * parts as subtractive.
     */
    if ((pci_get_devid(dev) & 0xff00ffff) == 0x24008086 ||
      pci_read_config(dev, PCIR_PROGIF, 1) == PCIP_BRIDGE_PCI_SUBTRACTIVE)
	sc->flags |= PCIB_SUBTRACTIVE;

#ifdef PCI_HP
    pcib_probe_hotplug(sc);
#endif
#ifdef NEW_PCIB
#ifdef PCI_RES_BUS
    pcib_setup_secbus(dev, &sc->bus, 1);
#endif
    pcib_probe_windows(sc);
#endif
#ifdef PCI_HP
    if (sc->flags & PCIB_HOTPLUG)
	    pcib_setup_hotplug(sc);
#endif
    if (bootverbose) {
	device_printf(dev, "  domain            %d\n", sc->domain);
	device_printf(dev, "  secondary bus     %d\n", sc->bus.sec);
	device_printf(dev, "  subordinate bus   %d\n", sc->bus.sub);
#ifdef NEW_PCIB
	if (pcib_is_window_open(&sc->io))
	    device_printf(dev, "  I/O decode        0x%jx-0x%jx\n",
	      (uintmax_t)sc->io.base, (uintmax_t)sc->io.limit);
	if (pcib_is_window_open(&sc->mem))
	    device_printf(dev, "  memory decode     0x%jx-0x%jx\n",
	      (uintmax_t)sc->mem.base, (uintmax_t)sc->mem.limit);
	if (pcib_is_window_open(&sc->pmem))
	    device_printf(dev, "  prefetched decode 0x%jx-0x%jx\n",
	      (uintmax_t)sc->pmem.base, (uintmax_t)sc->pmem.limit);
#else
	if (pcib_is_io_open(sc))
	    device_printf(dev, "  I/O decode        0x%x-0x%x\n",
	      sc->iobase, sc->iolimit);
	if (pcib_is_nonprefetch_open(sc))
	    device_printf(dev, "  memory decode     0x%jx-0x%jx\n",
	      (uintmax_t)sc->membase, (uintmax_t)sc->memlimit);
	if (pcib_is_prefetch_open(sc))
	    device_printf(dev, "  prefetched decode 0x%jx-0x%jx\n",
	      (uintmax_t)sc->pmembase, (uintmax_t)sc->pmemlimit);
#endif
	if (sc->bridgectl & (PCIB_BCR_ISA_ENABLE | PCIB_BCR_VGA_ENABLE) ||
	    sc->flags & PCIB_SUBTRACTIVE) {
		device_printf(dev, "  special decode    ");
		comma = 0;
		if (sc->bridgectl & PCIB_BCR_ISA_ENABLE) {
			printf("ISA");
			comma = 1;
		}
		if (sc->bridgectl & PCIB_BCR_VGA_ENABLE) {
			printf("%sVGA", comma ? ", " : "");
			comma = 1;
		}
		if (sc->flags & PCIB_SUBTRACTIVE)
			printf("%ssubtractive", comma ? ", " : "");
		printf("\n");
	}
    }

    /*
     * Always enable busmastering on bridges so that transactions
     * initiated on the secondary bus are passed through to the
     * primary bus.
     */
    pci_enable_busmaster(dev);
}

#ifdef PCI_HP
static int
pcib_present(struct pcib_softc *sc)
{

	if (sc->flags & PCIB_HOTPLUG)
		return (pcib_hotplug_present(sc) != 0);
	return (1);
}
#endif

int
pcib_attach_child(device_t dev)
{
	struct pcib_softc *sc;

	sc = device_get_softc(dev);
	if (sc->bus.sec == 0) {
		/* no secondary bus; we should have fixed this */
		return(0);
	}

#ifdef PCI_HP
	if (!pcib_present(sc)) {
		/* An empty HotPlug slot, so don't add a PCI bus yet. */
		return (0);
	}
#endif

	sc->child = device_add_child(dev, "pci", -1);
	return (bus_generic_attach(dev));
}

int
pcib_attach(device_t dev)
{

    pcib_attach_common(dev);
    return (pcib_attach_child(dev));
}

int
pcib_detach(device_t dev)
{
#if defined(PCI_HP) || defined(NEW_PCIB)
	struct pcib_softc *sc;
#endif
	int error;

#if defined(PCI_HP) || defined(NEW_PCIB)
	sc = device_get_softc(dev);
#endif
	error = bus_generic_detach(dev);
	if (error)
		return (error);
#ifdef PCI_HP
	if (sc->flags & PCIB_HOTPLUG) {
		error = pcib_detach_hotplug(sc);
		if (error)
			return (error);
	}
#endif
	error = device_delete_children(dev);
	if (error)
		return (error);
#ifdef NEW_PCIB
	pcib_free_windows(sc);
#ifdef PCI_RES_BUS
	pcib_free_secbus(dev, &sc->bus);
#endif
#endif
	return (0);
}

int
pcib_suspend(device_t dev)
{

	pcib_cfg_save(device_get_softc(dev));
	return (bus_generic_suspend(dev));
}

int
pcib_resume(device_t dev)
{

	pcib_cfg_restore(device_get_softc(dev));
	return (bus_generic_resume(dev));
}

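/*
 * Close all of the bridge's windows by programming each base register
 * above its corresponding limit so that no I/O or memory transactions
 * are forwarded until new windows are assigned.
 */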
1778void
1779pcib_bridge_init(device_t dev)
1780{
1781	pci_write_config(dev, PCIR_IOBASEL_1, 0xff, 1);
1782	pci_write_config(dev, PCIR_IOBASEH_1, 0xffff, 2);
1783	pci_write_config(dev, PCIR_IOLIMITL_1, 0, 1);
1784	pci_write_config(dev, PCIR_IOLIMITH_1, 0, 2);
1785	pci_write_config(dev, PCIR_MEMBASE_1, 0xffff, 2);
1786	pci_write_config(dev, PCIR_MEMLIMIT_1, 0, 2);
1787	pci_write_config(dev, PCIR_PMBASEL_1, 0xffff, 2);
1788	pci_write_config(dev, PCIR_PMBASEH_1, 0xffffffff, 4);
1789	pci_write_config(dev, PCIR_PMLIMITL_1, 0, 2);
1790	pci_write_config(dev, PCIR_PMLIMITH_1, 0, 4);
1791}
1792
1793int
1794pcib_child_present(device_t dev, device_t child)
1795{
1796#ifdef PCI_HP
1797	struct pcib_softc *sc = device_get_softc(dev);
1798	int retval;
1799
1800	retval = bus_child_present(dev);
1801	if (retval != 0 && sc->flags & PCIB_HOTPLUG)
1802		retval = pcib_hotplug_present(sc);
1803	return (retval);
1804#else
1805	return (bus_child_present(dev));
1806#endif
1807}
1808
1809int
1810pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
1811{
1812    struct pcib_softc	*sc = device_get_softc(dev);
1813
1814    switch (which) {
1815    case PCIB_IVAR_DOMAIN:
1816	*result = sc->domain;
1817	return(0);
1818    case PCIB_IVAR_BUS:
1819	*result = sc->bus.sec;
1820	return(0);
1821    }
1822    return(ENOENT);
1823}
1824
1825int
1826pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
1827{
1828
1829    switch (which) {
1830    case PCIB_IVAR_DOMAIN:
1831	return(EINVAL);
1832    case PCIB_IVAR_BUS:
1833	return(EINVAL);
1834    }
1835    return(ENOENT);
1836}
1837
1838#ifdef NEW_PCIB
1839/*
1840 * Attempt to allocate a resource from the existing resources assigned
1841 * to a window.
1842 */
1843static struct resource *
1844pcib_suballoc_resource(struct pcib_softc *sc, struct pcib_window *w,
1845    device_t child, int type, int *rid, rman_res_t start, rman_res_t end,
1846    rman_res_t count, u_int flags)
1847{
1848	struct resource *res;
1849
1850	if (!pcib_is_window_open(w))
1851		return (NULL);
1852
1853	res = rman_reserve_resource(&w->rman, start, end, count,
1854	    flags & ~RF_ACTIVE, child);
1855	if (res == NULL)
1856		return (NULL);
1857
1858	if (bootverbose)
1859		device_printf(sc->dev,
1860		    "allocated %s range (%#jx-%#jx) for rid %x of %s\n",
1861		    w->name, rman_get_start(res), rman_get_end(res), *rid,
1862		    pcib_child_name(child));
1863	rman_set_rid(res, *rid);
1864
1865	/*
1866	 * If the resource should be active, pass that request up the
1867	 * tree.  This assumes the parent drivers can handle
1868	 * activating sub-allocated resources.
1869	 */
1870	if (flags & RF_ACTIVE) {
1871		if (bus_activate_resource(child, type, *rid, res) != 0) {
1872			rman_release_resource(res);
1873			return (NULL);
1874		}
1875	}
1876
1877	return (res);
1878}
1879
1880/* Allocate a fresh resource range for an unconfigured window. */
1881static int
1882pcib_alloc_new_window(struct pcib_softc *sc, struct pcib_window *w, int type,
1883    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
1884{
1885	struct resource *res;
1886	rman_res_t base, limit, wmask;
1887	int rid;
1888
1889	/*
1890	 * If this is an I/O window on a bridge with ISA enable set
1891	 * and the start address is below 64k, then try to allocate an
1892	 * initial window 0x1000 bytes long, starting at address
1893	 * 0xf000 and walking down.  Note that if the original
1894	 * request was larger than the non-aliased range size of
1895	 * 0x100, our caller would have raised the start address
1896	 * up to 64k already.
1897	 */
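	/*
	 * With ISA enable set the bridge only forwards the first 0x100
	 * bytes of each 0x400-byte block below 64k downstream; the
	 * remaining 0x300 bytes of each block alias legacy ISA
	 * addresses, so only those 0x100-byte sub-ranges are usable
	 * within the window chosen below.
	 */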
1898	if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE &&
1899	    start < 65536) {
1900		for (base = 0xf000; (long)base >= 0; base -= 0x1000) {
1901			limit = base + 0xfff;
1902
1903			/*
1904			 * Skip ranges that wouldn't work for the
1905			 * original request.  Note that the ranges that
1906			 * actually overlap are the non-alias ranges
1907			 * within [base, limit], so this isn't quite a
1908			 * simple comparison.
1909			 */
1910			if (start + count > limit - 0x400)
1911				continue;
1912			if (base == 0) {
1913				/*
1914				 * The first open region for the window at
1915				 * 0 is 0x400-0x4ff.
1916				 */
1917				if (end - count + 1 < 0x400)
1918					continue;
1919			} else {
1920				if (end - count + 1 < base)
1921					continue;
1922			}
1923
1924			if (pcib_alloc_nonisa_ranges(sc, base, limit) == 0) {
1925				w->base = base;
1926				w->limit = limit;
1927				return (0);
1928			}
1929		}
1930		return (ENOSPC);
1931	}
1932
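	/*
	 * Otherwise round the request up to the window's granularity
	 * (2^step bytes) and ask the parent bus for a suitably aligned
	 * range, using the window's register (w->reg) as the rid.
	 */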
1933	wmask = ((rman_res_t)1 << w->step) - 1;
1934	if (RF_ALIGNMENT(flags) < w->step) {
1935		flags &= ~RF_ALIGNMENT_MASK;
1936		flags |= RF_ALIGNMENT_LOG2(w->step);
1937	}
1938	start &= ~wmask;
1939	end |= wmask;
1940	count = roundup2(count, (rman_res_t)1 << w->step);
1941	rid = w->reg;
1942	res = bus_alloc_resource(sc->dev, type, &rid, start, end, count,
1943	    flags & ~RF_ACTIVE);
1944	if (res == NULL)
1945		return (ENOSPC);
1946	pcib_add_window_resources(w, &res, 1);
1947	pcib_activate_window(sc, type);
1948	w->base = rman_get_start(res);
1949	w->limit = rman_get_end(res);
1950	return (0);
1951}
1952
1953/* Try to expand an existing window to the requested base and limit. */
1954static int
1955pcib_expand_window(struct pcib_softc *sc, struct pcib_window *w, int type,
1956    rman_res_t base, rman_res_t limit)
1957{
1958	struct resource *res;
1959	int error, i, force_64k_base;
1960
1961	KASSERT(base <= w->base && limit >= w->limit,
1962	    ("attempting to shrink window"));
1963
1964	/*
1965	 * XXX: pcib_grow_window() doesn't try to do this anyway and
1966	 * the error handling for all the edge cases would be tedious.
1967	 */
1968	KASSERT(limit == w->limit || base == w->base,
1969	    ("attempting to grow both ends of a window"));
1970
1971	/*
1972	 * Yet more special handling for requests to expand an I/O
1973	 * window behind an ISA-enabled bridge.  Since I/O windows
1974	 * have to grow in 0x1000 increments and the end of the 0xffff
1975	 * range is an alias, growing a window below 64k will always
1976	 * result in allocating new resources and never adjusting an
1977	 * existing resource.
1978	 */
1979	if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE &&
1980	    (limit <= 65535 || (base <= 65535 && base != w->base))) {
1981		KASSERT(limit == w->limit || limit <= 65535,
1982		    ("attempting to grow both ends across 64k ISA alias"));
1983
1984		if (base != w->base)
1985			error = pcib_alloc_nonisa_ranges(sc, base, w->base - 1);
1986		else
1987			error = pcib_alloc_nonisa_ranges(sc, w->limit + 1,
1988			    limit);
1989		if (error == 0) {
1990			w->base = base;
1991			w->limit = limit;
1992		}
1993		return (error);
1994	}
1995
1996	/*
1997	 * Find the existing resource to adjust.  Usually there is only one,
1998	 * but for an ISA-enabled bridge we might be growing the I/O window
1999	 * above 64k and need to find the existing resource that maps all
2000	 * of the area above 64k.
2001	 */
2002	for (i = 0; i < w->count; i++) {
2003		if (rman_get_end(w->res[i]) == w->limit)
2004			break;
2005	}
2006	KASSERT(i != w->count, ("did not find existing resource"));
2007	res = w->res[i];
2008
2009	/*
2010	 * Usually the resource we found should match the window's
2011	 * existing range.  The one exception is the ISA-enabled case
2012	 * mentioned above in which case the resource should start at
2013	 * 64k.
2014	 */
2015	if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE &&
2016	    w->base <= 65535) {
2017		KASSERT(rman_get_start(res) == 65536,
2018		    ("existing resource mismatch"));
2019		force_64k_base = 1;
2020	} else {
2021		KASSERT(w->base == rman_get_start(res),
2022		    ("existing resource mismatch"));
2023		force_64k_base = 0;
2024	}
2025
2026	error = bus_adjust_resource(sc->dev, type, res, force_64k_base ?
2027	    rman_get_start(res) : base, limit);
2028	if (error)
2029		return (error);
2030
2031	/* Add the newly allocated region to the resource manager. */
2032	if (w->base != base) {
2033		error = rman_manage_region(&w->rman, base, w->base - 1);
2034		w->base = base;
2035	} else {
2036		error = rman_manage_region(&w->rman, w->limit + 1, limit);
2037		w->limit = limit;
2038	}
2039	if (error) {
2040		if (bootverbose)
2041			device_printf(sc->dev,
2042			    "failed to expand %s resource manager\n", w->name);
2043		(void)bus_adjust_resource(sc->dev, type, res, force_64k_base ?
2044		    rman_get_start(res) : w->base, w->limit);
2045	}
2046	return (error);
2047}
2048
2049/*
2050 * Attempt to grow a window to make room for a given resource request.
2051 */
2052static int
2053pcib_grow_window(struct pcib_softc *sc, struct pcib_window *w, int type,
2054    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
2055{
2056	rman_res_t align, start_free, end_free, front, back, wmask;
2057	int error;
2058
2059	/*
2060	 * Clamp the desired resource range to the maximum address
2061	 * this window supports.  Reject impossible requests.
2062	 *
2063	 * For I/O port requests behind a bridge with the ISA enable
2064	 * bit set, force large allocations to start above 64k.
2065	 */
2066	if (!w->valid)
2067		return (EINVAL);
2068	if (sc->bridgectl & PCIB_BCR_ISA_ENABLE && count > 0x100 &&
2069	    start < 65536)
2070		start = 65536;
2071	if (end > w->rman.rm_end)
2072		end = w->rman.rm_end;
2073	if (start + count - 1 > end || start + count < start)
2074		return (EINVAL);
2075	wmask = ((rman_res_t)1 << w->step) - 1;
2076
2077	/*
2078	 * If there is no resource at all, just try to allocate enough
2079	 * aligned space for this resource.
2080	 */
2081	if (w->res == NULL) {
2082		error = pcib_alloc_new_window(sc, w, type, start, end, count,
2083		    flags);
2084		if (error) {
2085			if (bootverbose)
2086				device_printf(sc->dev,
2087		    "failed to allocate initial %s window (%#jx-%#jx,%#jx)\n",
2088				    w->name, start, end, count);
2089			return (error);
2090		}
2091		if (bootverbose)
2092			device_printf(sc->dev,
2093			    "allocated initial %s window of %#jx-%#jx\n",
2094			    w->name, (uintmax_t)w->base, (uintmax_t)w->limit);
2095		goto updatewin;
2096	}
2097
2098	/*
2099	 * See if growing the window would help.  Compute the minimum
2100	 * amount of address space needed on both the front and back
2101	 * ends of the existing window to satisfy the allocation.
2102	 *
2103	 * For each end, build a candidate region adjusting for the
2104	 * required alignment, etc.  If there is a free region at the
2105	 * edge of the window, grow from the inner edge of the free
2106	 * region.  Otherwise grow from the window boundary.
2107	 *
2108	 * Growing an I/O window below 64k for a bridge with the ISA
2109	 * enable bit doesn't require any special magic as the step
2110	 * size of an I/O window (4k) always includes multiple
2111	 * non-alias ranges when it is grown in either direction.
2112	 *
2113	 * XXX: Special case: if w->res is completely empty and the
2114	 * request size is larger than w->res, we should find the
2115	 * optimal aligned buffer containing w->res and allocate that.
2116	 */
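	/*
	 * After the two candidate computations below, 'front' and 'back'
	 * hold the number of bytes the window would have to grow at its
	 * low and high end, respectively, or 0 if growing that end
	 * cannot satisfy the request.
	 */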
2117	if (bootverbose)
2118		device_printf(sc->dev,
2119		    "attempting to grow %s window for (%#jx-%#jx,%#jx)\n",
2120		    w->name, start, end, count);
2121	align = (rman_res_t)1 << RF_ALIGNMENT(flags);
2122	if (start < w->base) {
2123		if (rman_first_free_region(&w->rman, &start_free, &end_free) !=
2124		    0 || start_free != w->base)
2125			end_free = w->base;
2126		if (end_free > end)
2127			end_free = end + 1;
2128
2129		/* Move end_free down until it is properly aligned. */
2130		end_free &= ~(align - 1);
2131		end_free--;
2132		front = end_free - (count - 1);
2133
2134		/*
2135		 * The resource would now be allocated at (front,
2136		 * end_free).  Ensure that it fits in the (start, end)
2137		 * bounds.  end_free is checked above.  If 'front' is
2138		 * ok, ensure it is properly aligned for this window.
2139		 * Also check for underflow.
2140		 */
2141		if (front >= start && front <= end_free) {
2142			if (bootverbose)
2143				printf("\tfront candidate range: %#jx-%#jx\n",
2144				    front, end_free);
2145			front &= ~wmask;
2146			front = w->base - front;
2147		} else
2148			front = 0;
2149	} else
2150		front = 0;
2151	if (end > w->limit) {
2152		if (rman_last_free_region(&w->rman, &start_free, &end_free) !=
2153		    0 || end_free != w->limit)
2154			start_free = w->limit + 1;
2155		if (start_free < start)
2156			start_free = start;
2157
2158		/* Move start_free up until it is properly aligned. */
2159		start_free = roundup2(start_free, align);
2160		back = start_free + count - 1;
2161
2162		/*
2163		 * The resource would now be allocated at (start_free,
2164		 * back).  Ensure that it fits in the (start, end)
2165		 * bounds.  start_free is checked above.  If 'back' is
2166		 * ok, ensure it is properly aligned for this window.
2167		 * Also check for overflow.
2168		 */
2169		if (back <= end && start_free <= back) {
2170			if (bootverbose)
2171				printf("\tback candidate range: %#jx-%#jx\n",
2172				    start_free, back);
2173			back |= wmask;
2174			back -= w->limit;
2175		} else
2176			back = 0;
2177	} else
2178		back = 0;
2179
2180	/*
2181	 * Try to allocate the smallest needed region first.
2182	 * If that fails, fall back to the other region.
2183	 */
2184	error = ENOSPC;
2185	while (front != 0 || back != 0) {
2186		if (front != 0 && (front <= back || back == 0)) {
2187			error = pcib_expand_window(sc, w, type, w->base - front,
2188			    w->limit);
2189			if (error == 0)
2190				break;
2191			front = 0;
2192		} else {
2193			error = pcib_expand_window(sc, w, type, w->base,
2194			    w->limit + back);
2195			if (error == 0)
2196				break;
2197			back = 0;
2198		}
2199	}
2200
2201	if (error)
2202		return (error);
2203	if (bootverbose)
2204		device_printf(sc->dev, "grew %s window to %#jx-%#jx\n",
2205		    w->name, (uintmax_t)w->base, (uintmax_t)w->limit);
2206
2207updatewin:
2208	/* Write the new window. */
2209	KASSERT((w->base & wmask) == 0, ("start address is not aligned"));
2210	KASSERT((w->limit & wmask) == wmask, ("end address is not aligned"));
2211	pcib_write_windows(sc, w->mask);
2212	return (0);
2213}
2214
2215/*
2216 * We have to trap resource allocation requests and ensure that the bridge
2217 * is set up to, or capable of, handling them.
2218 */
2219struct resource *
2220pcib_alloc_resource(device_t dev, device_t child, int type, int *rid,
2221    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
2222{
2223	struct pcib_softc *sc;
2224	struct resource *r;
2225
2226	sc = device_get_softc(dev);
2227
2228	/*
2229	 * VGA resources are decoded iff the VGA enable bit is set in
2230	 * the bridge control register.  VGA resources do not fall into
2231	 * the resource windows and are passed up to the parent.
2232	 */
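	/*
	 * (The legacy VGA ranges are I/O ports 0x3b0-0x3bb and
	 * 0x3c0-0x3df and the 0xa0000-0xbffff memory range.)
	 */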
2233	if ((type == SYS_RES_IOPORT && pci_is_vga_ioport_range(start, end)) ||
2234	    (type == SYS_RES_MEMORY && pci_is_vga_memory_range(start, end))) {
2235		if (sc->bridgectl & PCIB_BCR_VGA_ENABLE)
2236			return (bus_generic_alloc_resource(dev, child, type,
2237			    rid, start, end, count, flags));
2238		else
2239			return (NULL);
2240	}
2241
2242	switch (type) {
2243#ifdef PCI_RES_BUS
2244	case PCI_RES_BUS:
2245		return (pcib_alloc_subbus(&sc->bus, child, rid, start, end,
2246		    count, flags));
2247#endif
2248	case SYS_RES_IOPORT:
2249		if (pcib_is_isa_range(sc, start, end, count))
2250			return (NULL);
2251		r = pcib_suballoc_resource(sc, &sc->io, child, type, rid, start,
2252		    end, count, flags);
2253		if (r != NULL || (sc->flags & PCIB_SUBTRACTIVE) != 0)
2254			break;
2255		if (pcib_grow_window(sc, &sc->io, type, start, end, count,
2256		    flags) == 0)
2257			r = pcib_suballoc_resource(sc, &sc->io, child, type,
2258			    rid, start, end, count, flags);
2259		break;
2260	case SYS_RES_MEMORY:
2261		/*
2262		 * For prefetchable resources, prefer the prefetchable
2263		 * memory window, but fall back to the regular memory
2264		 * window if that fails.  Try both windows before
2265		 * attempting to grow a window in case the firmware
2266		 * has used a range in the regular memory window to
2267		 * map a prefetchable BAR.
2268		 */
2269		if (flags & RF_PREFETCHABLE) {
2270			r = pcib_suballoc_resource(sc, &sc->pmem, child, type,
2271			    rid, start, end, count, flags);
2272			if (r != NULL)
2273				break;
2274		}
2275		r = pcib_suballoc_resource(sc, &sc->mem, child, type, rid,
2276		    start, end, count, flags);
2277		if (r != NULL || (sc->flags & PCIB_SUBTRACTIVE) != 0)
2278			break;
2279		if (flags & RF_PREFETCHABLE) {
2280			if (pcib_grow_window(sc, &sc->pmem, type, start, end,
2281			    count, flags) == 0) {
2282				r = pcib_suballoc_resource(sc, &sc->pmem, child,
2283				    type, rid, start, end, count, flags);
2284				if (r != NULL)
2285					break;
2286			}
2287		}
2288		if (pcib_grow_window(sc, &sc->mem, type, start, end, count,
2289		    flags & ~RF_PREFETCHABLE) == 0)
2290			r = pcib_suballoc_resource(sc, &sc->mem, child, type,
2291			    rid, start, end, count, flags);
2292		break;
2293	default:
2294		return (bus_generic_alloc_resource(dev, child, type, rid,
2295		    start, end, count, flags));
2296	}
2297
2298	/*
2299	 * If attempts to suballocate from the window fail but this is a
2300	 * subtractive bridge, pass the request up the tree.
2301	 */
2302	if (sc->flags & PCIB_SUBTRACTIVE && r == NULL)
2303		return (bus_generic_alloc_resource(dev, child, type, rid,
2304		    start, end, count, flags));
2305	return (r);
2306}
2307
2308int
2309pcib_adjust_resource(device_t bus, device_t child, int type, struct resource *r,
2310    rman_res_t start, rman_res_t end)
2311{
2312	struct pcib_softc *sc;
2313
2314	sc = device_get_softc(bus);
2315	if (pcib_is_resource_managed(sc, type, r))
2316		return (rman_adjust_resource(r, start, end));
2317	return (bus_generic_adjust_resource(bus, child, type, r, start, end));
2318}
2319
2320int
2321pcib_release_resource(device_t dev, device_t child, int type, int rid,
2322    struct resource *r)
2323{
2324	struct pcib_softc *sc;
2325	int error;
2326
2327	sc = device_get_softc(dev);
2328	if (pcib_is_resource_managed(sc, type, r)) {
2329		if (rman_get_flags(r) & RF_ACTIVE) {
2330			error = bus_deactivate_resource(child, type, rid, r);
2331			if (error)
2332				return (error);
2333		}
2334		return (rman_release_resource(r));
2335	}
2336	return (bus_generic_release_resource(dev, child, type, rid, r));
2337}
2338#else
2339/*
2340 * We have to trap resource allocation requests and ensure that the bridge
2341 * is set up to, or capable of, handling them.
2342 */
2343struct resource *
2344pcib_alloc_resource(device_t dev, device_t child, int type, int *rid,
2345    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
2346{
2347	struct pcib_softc	*sc = device_get_softc(dev);
2348	const char *name, *suffix;
2349	int ok;
2350
2351	/*
2352	 * Fail the allocation for this range if it's not supported.
2353	 */
2354	name = device_get_nameunit(child);
2355	if (name == NULL) {
2356		name = "";
2357		suffix = "";
2358	} else
2359		suffix = " ";
2360	switch (type) {
2361	case SYS_RES_IOPORT:
2362		ok = 0;
2363		if (!pcib_is_io_open(sc))
2364			break;
2365		ok = (start >= sc->iobase && end <= sc->iolimit);
2366
2367		/*
2368		 * Make sure we allow access to VGA I/O addresses when the
2369		 * bridge has the "VGA Enable" bit set.
2370		 */
2371		if (!ok && pci_is_vga_ioport_range(start, end))
2372			ok = (sc->bridgectl & PCIB_BCR_VGA_ENABLE) ? 1 : 0;
2373
2374		if ((sc->flags & PCIB_SUBTRACTIVE) == 0) {
2375			if (!ok) {
2376				if (start < sc->iobase)
2377					start = sc->iobase;
2378				if (end > sc->iolimit)
2379					end = sc->iolimit;
2380				if (start < end)
2381					ok = 1;
2382			}
2383		} else {
2384			ok = 1;
2385#if 0
2386			/*
2387			 * If we overlap with the subtractive range, then
2388			 * pick the upper range to use.
2389			 */
2390			if (start < sc->iolimit && end > sc->iobase)
2391				start = sc->iolimit + 1;
2392#endif
2393		}
2394		if (end < start) {
2395			device_printf(dev, "ioport: end (%jx) < start (%jx)\n",
2396			    end, start);
2397			start = 0;
2398			end = 0;
2399			ok = 0;
2400		}
2401		if (!ok) {
2402			device_printf(dev, "%s%srequested unsupported I/O "
2403			    "range 0x%jx-0x%jx (decoding 0x%x-0x%x)\n",
2404			    name, suffix, start, end, sc->iobase, sc->iolimit);
2405			return (NULL);
2406		}
2407		if (bootverbose)
2408			device_printf(dev,
2409			    "%s%srequested I/O range 0x%jx-0x%jx: in range\n",
2410			    name, suffix, start, end);
2411		break;
2412
2413	case SYS_RES_MEMORY:
2414		ok = 0;
2415		if (pcib_is_nonprefetch_open(sc))
2416			ok = ok || (start >= sc->membase && end <= sc->memlimit);
2417		if (pcib_is_prefetch_open(sc))
2418			ok = ok || (start >= sc->pmembase && end <= sc->pmemlimit);
2419
2420		/*
2421		 * Make sure we allow access to VGA memory addresses when the
2422		 * bridge has the "VGA Enable" bit set.
2423		 */
2424		if (!ok && pci_is_vga_memory_range(start, end))
2425			ok = (sc->bridgectl & PCIB_BCR_VGA_ENABLE) ? 1 : 0;
2426
2427		if ((sc->flags & PCIB_SUBTRACTIVE) == 0) {
2428			if (!ok) {
2429				ok = 1;
2430				if (flags & RF_PREFETCHABLE) {
2431					if (pcib_is_prefetch_open(sc)) {
2432						if (start < sc->pmembase)
2433							start = sc->pmembase;
2434						if (end > sc->pmemlimit)
2435							end = sc->pmemlimit;
2436					} else {
2437						ok = 0;
2438					}
2439				} else {	/* non-prefetchable */
2440					if (pcib_is_nonprefetch_open(sc)) {
2441						if (start < sc->membase)
2442							start = sc->membase;
2443						if (end > sc->memlimit)
2444							end = sc->memlimit;
2445					} else {
2446						ok = 0;
2447					}
2448				}
2449			}
2450		} else if (!ok) {
2451			ok = 1;	/* subtractive bridge: always ok */
2452#if 0
2453			if (pcib_is_nonprefetch_open(sc)) {
2454				if (start < sc->memlimit && end > sc->membase)
2455					start = sc->memlimit + 1;
2456			}
2457			if (pcib_is_prefetch_open(sc)) {
2458				if (start < sc->pmemlimit && end > sc->pmembase)
2459					start = sc->pmemlimit + 1;
2460			}
2461#endif
2462		}
2463		if (end < start) {
2464			device_printf(dev, "memory: end (%jx) < start (%jx)\n",
2465			    end, start);
2466			start = 0;
2467			end = 0;
2468			ok = 0;
2469		}
2470		if (!ok && bootverbose)
2471			device_printf(dev,
2472			    "%s%srequested unsupported memory range %#jx-%#jx "
2473			    "(decoding %#jx-%#jx, %#jx-%#jx)\n",
2474			    name, suffix, start, end,
2475			    (uintmax_t)sc->membase, (uintmax_t)sc->memlimit,
2476			    (uintmax_t)sc->pmembase, (uintmax_t)sc->pmemlimit);
2477		if (!ok)
2478			return (NULL);
2479		if (bootverbose)
2480			device_printf(dev,"%s%srequested memory range "
2481			    "0x%jx-0x%jx: good\n",
2482			    name, suffix, start, end);
2483		break;
2484
2485	default:
2486		break;
2487	}
2488	/*
2489	 * Bridge is OK decoding this resource, so pass it up.
2490	 */
2491	return (bus_generic_alloc_resource(dev, child, type, rid, start, end,
2492	    count, flags));
2493}
2494#endif
2495
2496/*
2497 * If ARI is enabled on this downstream port, translate the function number
2498 * to the non-ARI slot/function.  The downstream port will convert it back in
2499 * hardware.  If ARI is not enabled, slot and func are not modified.
2500 */
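/*
 * The PCIE_ARI_SLOT() and PCIE_ARI_FUNC() macros split the 8-bit ARI
 * function number into the traditional 5-bit slot and 3-bit function
 * fields; for example, ARI function 0x25 is presented to the parent's
 * config accessors as slot 4, function 5.
 */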
2501static __inline void
2502pcib_xlate_ari(device_t pcib, int bus, int *slot, int *func)
2503{
2504	struct pcib_softc *sc;
2505	int ari_func;
2506
2507	sc = device_get_softc(pcib);
2508	ari_func = *func;
2509
2510	if (sc->flags & PCIB_ENABLE_ARI) {
2511		KASSERT(*slot == 0,
2512		    ("Non-zero slot number with ARI enabled!"));
2513		*slot = PCIE_ARI_SLOT(ari_func);
2514		*func = PCIE_ARI_FUNC(ari_func);
2515	}
2516}
2517
2519static void
2520pcib_enable_ari(struct pcib_softc *sc, uint32_t pcie_pos)
2521{
2522	uint32_t ctl2;
2523
2524	ctl2 = pci_read_config(sc->dev, pcie_pos + PCIER_DEVICE_CTL2, 4);
2525	ctl2 |= PCIEM_CTL2_ARI;
2526	pci_write_config(sc->dev, pcie_pos + PCIER_DEVICE_CTL2, ctl2, 4);
2527
2528	sc->flags |= PCIB_ENABLE_ARI;
2529}
2530
2531/*
2532 * PCIB interface.
2533 */
2534int
2535pcib_maxslots(device_t dev)
2536{
2537	return (PCI_SLOTMAX);
2538}
2539
2540static int
2541pcib_ari_maxslots(device_t dev)
2542{
2543	struct pcib_softc *sc;
2544
2545	sc = device_get_softc(dev);
2546
2547	if (sc->flags & PCIB_ENABLE_ARI)
2548		return (PCIE_ARI_SLOTMAX);
2549	else
2550		return (PCI_SLOTMAX);
2551}
2552
2553static int
2554pcib_ari_maxfuncs(device_t dev)
2555{
2556	struct pcib_softc *sc;
2557
2558	sc = device_get_softc(dev);
2559
2560	if (sc->flags & PCIB_ENABLE_ARI)
2561		return (PCIE_ARI_FUNCMAX);
2562	else
2563		return (PCI_FUNCMAX);
2564}
2565
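/*
 * Decode a PCIe routing ID (bus << 8 | slot << 3 | func) into its
 * components.  With ARI enabled the low eight bits form a single
 * function number and the device/slot number is implied to be 0.
 */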
2566static void
2567pcib_ari_decode_rid(device_t pcib, uint16_t rid, int *bus, int *slot,
2568    int *func)
2569{
2570	struct pcib_softc *sc;
2571
2572	sc = device_get_softc(pcib);
2573
2574	*bus = PCI_RID2BUS(rid);
2575	if (sc->flags & PCIB_ENABLE_ARI) {
2576		*slot = PCIE_ARI_RID2SLOT(rid);
2577		*func = PCIE_ARI_RID2FUNC(rid);
2578	} else {
2579		*slot = PCI_RID2SLOT(rid);
2580		*func = PCI_RID2FUNC(rid);
2581	}
2582}
2583
2584/*
2585 * Since we are a child of a PCI bus, that bus's parent must support the pcib interface.
2586 */
2587static uint32_t
2588pcib_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width)
2589{
2590#ifdef PCI_HP
2591	struct pcib_softc *sc;
2592
2593	sc = device_get_softc(dev);
2594	if (!pcib_present(sc)) {
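		/*
		 * No device is present below this hot-plug bridge;
		 * return the all-ones value that a failed config read
		 * would yield.
		 */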
2595		switch (width) {
2596		case 2:
2597			return (0xffff);
2598		case 1:
2599			return (0xff);
2600		default:
2601			return (0xffffffff);
2602		}
2603	}
2604#endif
2605	pcib_xlate_ari(dev, b, &s, &f);
2606	return(PCIB_READ_CONFIG(device_get_parent(device_get_parent(dev)), b, s,
2607	    f, reg, width));
2608}
2609
2610static void
2611pcib_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, uint32_t val, int width)
2612{
2613#ifdef PCI_HP
2614	struct pcib_softc *sc;
2615
2616	sc = device_get_softc(dev);
2617	if (!pcib_present(sc))
2618		return;
2619#endif
2620	pcib_xlate_ari(dev, b, &s, &f);
2621	PCIB_WRITE_CONFIG(device_get_parent(device_get_parent(dev)), b, s, f,
2622	    reg, val, width);
2623}
2624
2625/*
2626 * Route an interrupt across a PCI bridge.
2627 */
2628int
2629pcib_route_interrupt(device_t pcib, device_t dev, int pin)
2630{
2631    device_t	bus;
2632    int		parent_intpin;
2633    int		intnum;
2634
2635    /*
2636     *
2637     * The PCI standard defines a swizzle of the child-side device/intpin to
2638     * the parent-side intpin as follows.
2639     *
2640     * device = device on child bus
2641     * child_intpin = intpin on child bus slot (0-3)
2642     * parent_intpin = intpin on parent bus slot (0-3)
2643     *
2644     * parent_intpin = (device + child_intpin) % 4
2645     */
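    /*
     * For example, a device in slot 3 asserting INTB (child_intpin 1)
     * is routed to parent_intpin (3 + 1) % 4 = 0, i.e. INTA on the
     * parent bus.
     */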
2646    parent_intpin = (pci_get_slot(dev) + (pin - 1)) % 4;
2647
2648    /*
2649     * Our parent is a PCI bus.  Its parent must export the pcib interface
2650     * which includes the ability to route interrupts.
2651     */
2652    bus = device_get_parent(pcib);
2653    intnum = PCIB_ROUTE_INTERRUPT(device_get_parent(bus), pcib, parent_intpin + 1);
2654    if (PCI_INTERRUPT_VALID(intnum) && bootverbose) {
2655	device_printf(pcib, "slot %d INT%c is routed to irq %d\n",
2656	    pci_get_slot(dev), 'A' + pin - 1, intnum);
2657    }
2658    return(intnum);
2659}
2660
2661/* Pass request to alloc MSI/MSI-X messages up to the parent bridge. */
2662int
2663pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs)
2664{
2665	struct pcib_softc *sc = device_get_softc(pcib);
2666	device_t bus;
2667
2668	if (sc->flags & PCIB_DISABLE_MSI)
2669		return (ENXIO);
2670	bus = device_get_parent(pcib);
2671	return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount,
2672	    irqs));
2673}
2674
2675/* Pass request to release MSI/MSI-X messages up to the parent bridge. */
2676int
2677pcib_release_msi(device_t pcib, device_t dev, int count, int *irqs)
2678{
2679	device_t bus;
2680
2681	bus = device_get_parent(pcib);
2682	return (PCIB_RELEASE_MSI(device_get_parent(bus), dev, count, irqs));
2683}
2684
2685/* Pass request to alloc an MSI-X message up to the parent bridge. */
2686int
2687pcib_alloc_msix(device_t pcib, device_t dev, int *irq)
2688{
2689	struct pcib_softc *sc = device_get_softc(pcib);
2690	device_t bus;
2691
2692	if (sc->flags & PCIB_DISABLE_MSIX)
2693		return (ENXIO);
2694	bus = device_get_parent(pcib);
2695	return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq));
2696}
2697
2698/* Pass request to release an MSI-X message up to the parent bridge. */
2699int
2700pcib_release_msix(device_t pcib, device_t dev, int irq)
2701{
2702	device_t bus;
2703
2704	bus = device_get_parent(pcib);
2705	return (PCIB_RELEASE_MSIX(device_get_parent(bus), dev, irq));
2706}
2707
2708/* Pass request to map MSI/MSI-X message up to the parent bridge. */
2709int
2710pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr,
2711    uint32_t *data)
2712{
2713	device_t bus;
2714	int error;
2715
2716	bus = device_get_parent(pcib);
2717	error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data);
2718	if (error)
2719		return (error);
2720
2721	pci_ht_map_msi(pcib, *addr);
2722	return (0);
2723}
2724
2725/* Pass request for device power state up to parent bridge. */
2726int
2727pcib_power_for_sleep(device_t pcib, device_t dev, int *pstate)
2728{
2729	device_t bus;
2730
2731	bus = device_get_parent(pcib);
2732	return (PCIB_POWER_FOR_SLEEP(bus, dev, pstate));
2733}
2734
2735static int
2736pcib_ari_enabled(device_t pcib)
2737{
2738	struct pcib_softc *sc;
2739
2740	sc = device_get_softc(pcib);
2741
2742	return ((sc->flags & PCIB_ENABLE_ARI) != 0);
2743}
2744
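/*
 * Return the routing ID for a child device: the bus number in the high
 * byte and either the 8-bit ARI function number or the traditional
 * slot << 3 | func encoding in the low byte.  Requests for ID types
 * other than PCI_ID_RID are passed up to the parent bridge.
 */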
2745static int
2746pcib_ari_get_id(device_t pcib, device_t dev, enum pci_id_type type,
2747    uintptr_t *id)
2748{
2749	struct pcib_softc *sc;
2750	device_t bus_dev;
2751	uint8_t bus, slot, func;
2752
2753	if (type != PCI_ID_RID) {
2754		bus_dev = device_get_parent(pcib);
2755		return (PCIB_GET_ID(device_get_parent(bus_dev), dev, type, id));
2756	}
2757
2758	sc = device_get_softc(pcib);
2759
2760	if (sc->flags & PCIB_ENABLE_ARI) {
2761		bus = pci_get_bus(dev);
2762		func = pci_get_function(dev);
2763
2764		*id = (PCI_ARI_RID(bus, func));
2765	} else {
2766		bus = pci_get_bus(dev);
2767		slot = pci_get_slot(dev);
2768		func = pci_get_function(dev);
2769
2770		*id = (PCI_RID(bus, slot, func));
2771	}
2772
2773	return (0);
2774}
2775
2776/*
2777 * Check that the downstream port (pcib) and the endpoint device (dev) both
2778 * support ARI.  If so, enable it and return 0; otherwise return an error.
2779 */
2780static int
2781pcib_try_enable_ari(device_t pcib, device_t dev)
2782{
2783	struct pcib_softc *sc;
2784	int error;
2785	uint32_t cap2;
2786	int ari_cap_off;
2787	uint32_t ari_ver;
2788	uint32_t pcie_pos;
2789
2790	sc = device_get_softc(pcib);
2791
2792	/*
2793	 * ARI is controlled in a register in the PCIe capability structure.
2794	 * If the downstream port does not have the PCIe capability structure
2795	 * then it does not support ARI.
2796	 */
2797	error = pci_find_cap(pcib, PCIY_EXPRESS, &pcie_pos);
2798	if (error != 0)
2799		return (ENODEV);
2800
2801	/* Check that the PCIe port advertises ARI support. */
2802	cap2 = pci_read_config(pcib, pcie_pos + PCIER_DEVICE_CAP2, 4);
2803	if (!(cap2 & PCIEM_CAP2_ARI))
2804		return (ENODEV);
2805
2806	/*
2807	 * Check that the endpoint device advertises ARI support via the ARI
2808	 * extended capability structure.
2809	 */
2810	error = pci_find_extcap(dev, PCIZ_ARI, &ari_cap_off);
2811	if (error != 0)
2812		return (ENODEV);
2813
2814	/*
2815	 * Finally, check that the endpoint device supports the same version
2816	 * of ARI that we do.
2817	 */
2818	ari_ver = pci_read_config(dev, ari_cap_off, 4);
2819	if (PCI_EXTCAP_VER(ari_ver) != PCIB_SUPPORTED_ARI_VER) {
2820		if (bootverbose)
2821			device_printf(pcib,
2822			    "Unsupported version of ARI (%d) detected\n",
2823			    PCI_EXTCAP_VER(ari_ver));
2824
2825		return (ENXIO);
2826	}
2827
2828	pcib_enable_ari(sc, pcie_pos);
2829
2830	return (0);
2831}
2832
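/*
 * Reset the bus below this bridge by resetting the PCIe link.  This is
 * only attempted when the bridge is a PCIe root port or downstream
 * port.
 */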
2833static int
2834pcib_reset_child(device_t dev, device_t child, int flags)
2835{
2836	struct pci_devinfo *pdinfo;
2837	int error;
2838
2839	error = 0;
2840	if (dev == NULL || device_get_parent(child) != dev)
2841		goto out;
2842	error = ENXIO;
2843	if (device_get_devclass(child) != devclass_find("pci"))
2844		goto out;
2845	pdinfo = device_get_ivars(dev);
2846	if (pdinfo->cfg.pcie.pcie_location != 0 &&
2847	    (pdinfo->cfg.pcie.pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT ||
2848	    pdinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT)) {
2849		error = bus_helper_reset_prepare(child, flags);
2850		if (error == 0) {
2851			error = pcie_link_reset(dev,
2852			    pdinfo->cfg.pcie.pcie_location);
2853			/* XXXKIB call _post even if error != 0 ? */
2854			bus_helper_reset_post(child, flags);
2855		}
2856	}
2857out:
2858	return (error);
2859}
2860