/*-
 * Copyright (c) 2016 Stanislav Galabov.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/mips/mediatek/mtk_pcie.c 300149 2016-05-18 15:05:44Z andrew $");

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/endian.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/intr.h>
#include <machine/pmap.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <dev/pci/pcib_private.h>

#include <dev/fdt/fdt_common.h>
#include <dev/fdt/fdt_clock.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <mips/mediatek/mtk_pcie.h>
#include <mips/mediatek/mtk_soc.h>
#include <mips/mediatek/mtk_sysctl.h>
#include <mips/mediatek/fdt_reset.h>

#include "ofw_bus_if.h"
#include "pcib_if.h"
#include "pic_if.h"

/*
 * Note: We only support PCIe at the moment.
 * Most SoCs in the Ralink/Mediatek family that we target actually don't
 * support PCI anyway, with the notable exceptions being RT3662/RT3883, which
 * support both PCI and PCIe. If there exists a board based on one of them
 * which is of interest in the future it shouldn't be too hard to enable PCI
 * support for it.
 */

/* Chip specific function declarations */
static int  mtk_pcie_phy_init(device_t);
static int  mtk_pcie_phy_start(device_t);
static int  mtk_pcie_phy_stop(device_t);
static int  mtk_pcie_phy_mt7621_init(device_t);
static int  mtk_pcie_phy_mt7628_init(device_t);
static int  mtk_pcie_phy_mt7620_init(device_t);
static int  mtk_pcie_phy_rt3883_init(device_t);
static void mtk_pcie_phy_setup_slots(device_t);

/* Generic declarations */
struct mtx mtk_pci_mtx;
MTX_SYSINIT(mtk_pci_mtx, &mtk_pci_mtx, "MTK PCIe mutex", MTX_SPIN);

static int mtk_pci_intr(void *);

static struct mtk_pci_softc *mt_sc = NULL;

struct mtk_pci_range {
	u_long	base;
	u_long	len;
};

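/*
 * A PCI 'ranges' tuple is <3-cell child PCI address, 1-cell parent CPU
 * address, 2-cell size>; we expect at most two tuples (I/O and memory).
 */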
#define FDT_RANGES_CELLS	((1 + 2 + 3) * 2)

static void
mtk_pci_range_dump(struct mtk_pci_range *range)
{
#ifdef DEBUG
	printf("\n");
	printf("  base = 0x%08lx\n", range->base);
	printf("  len  = 0x%08lx\n", range->len);
#endif
}

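/*
 * Decode the 'ranges' property into CPU-physical base/length pairs for the
 * I/O and memory windows.
 */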
static int
mtk_pci_ranges_decode(phandle_t node, struct mtk_pci_range *io_space,
    struct mtk_pci_range *mem_space)
{
	struct mtk_pci_range *pci_space;
	pcell_t ranges[FDT_RANGES_CELLS];
	pcell_t addr_cells, size_cells, par_addr_cells;
	pcell_t *rangesptr;
	pcell_t cell0, cell1, cell2;
	int tuple_size, tuples, i, rv, len;

	/*
	 * Retrieve 'ranges' property.
	 */
	if ((fdt_addrsize_cells(node, &addr_cells, &size_cells)) != 0)
		return (EINVAL);
	if (addr_cells != 3 || size_cells != 2)
		return (ERANGE);

	par_addr_cells = fdt_parent_addr_cells(node);
	if (par_addr_cells != 1)
		return (ERANGE);

	len = OF_getproplen(node, "ranges");
	if (len > sizeof(ranges))
		return (ENOMEM);

	if (OF_getprop(node, "ranges", ranges, sizeof(ranges)) <= 0)
		return (EINVAL);

	tuple_size = sizeof(pcell_t) * (addr_cells + par_addr_cells +
	    size_cells);
	tuples = len / tuple_size;

	/*
	 * Initialize the ranges so that we don't have to worry about
	 * having them all defined in the FDT. In particular, it is
	 * perfectly fine not to want I/O space on PCI busses.
	 */
	bzero(io_space, sizeof(*io_space));
	bzero(mem_space, sizeof(*mem_space));

	rangesptr = &ranges[0];
	for (i = 0; i < tuples; i++) {
		cell0 = fdt_data_get((void *)rangesptr, 1);
		rangesptr++;
		cell1 = fdt_data_get((void *)rangesptr, 1);
		rangesptr++;
		cell2 = fdt_data_get((void *)rangesptr, 1);
		rangesptr++;

		if (cell0 & 0x02000000) {
			pci_space = mem_space;
		} else if (cell0 & 0x01000000) {
			pci_space = io_space;
		} else {
			rv = ERANGE;
			goto out;
		}

		pci_space->base = fdt_data_get((void *)rangesptr,
		    par_addr_cells);
		rangesptr += par_addr_cells;

		pci_space->len = fdt_data_get((void *)rangesptr, size_cells);
		rangesptr += size_cells;
	}

	rv = 0;
out:
	return (rv);
}

static int
mtk_pci_ranges(phandle_t node, struct mtk_pci_range *io_space,
    struct mtk_pci_range *mem_space)
{
	int err;

	if ((err = mtk_pci_ranges_decode(node, io_space, mem_space)) != 0) {
		return (err);
	}

	mtk_pci_range_dump(io_space);
	mtk_pci_range_dump(mem_space);

	return (0);
}

static struct ofw_compat_data compat_data[] = {
	{ "ralink,rt3883-pci",		MTK_SOC_RT3883 },
	{ "mediatek,mt7620-pci",	MTK_SOC_MT7620A },
	{ "mediatek,mt7628-pci",	MTK_SOC_MT7628 },
	{ "mediatek,mt7621-pci",	MTK_SOC_MT7621 },
	{ NULL,				MTK_SOC_UNKNOWN }
};

static int
mtk_pci_probe(device_t dev)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	sc->socid = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	if (sc->socid == MTK_SOC_UNKNOWN)
		return (ENXIO);

	device_set_desc(dev, "MTK PCIe Controller");

	return (0);
}

static int
mtk_pci_attach(device_t dev)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);
	struct mtk_pci_range io_space, mem_space;
	phandle_t node;
	intptr_t xref;
	int i, rid;

	sc->sc_dev = dev;
	mt_sc = sc;
	sc->addr_mask = 0xffffffff;

	/* Request our memory */
	rid = 0;
	sc->pci_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
			    RF_ACTIVE);
	if (sc->pci_res[0] == NULL) {
		device_printf(dev, "could not allocate memory resource\n");
		return (ENXIO);
	}

	/* See how many interrupts we need */
	if (sc->socid == MTK_SOC_MT7621)
		sc->sc_num_irq = 3;
	else {
		sc->sc_num_irq = 1;
		sc->pci_res[2] = sc->pci_res[3] = NULL;
		sc->pci_intrhand[1] = sc->pci_intrhand[2] = NULL;
	}

	/* Request our interrupts */
	for (i = 1; i <= sc->sc_num_irq; i++) {
		rid = i - 1;
		sc->pci_res[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
				     RF_ACTIVE);
		if (sc->pci_res[i] == NULL) {
			device_printf(dev, "could not allocate interrupt "
			    "resource %d\n", rid);
			goto cleanup_res;
		}
	}

	/* Parse our PCI 'ranges' property */
	node = ofw_bus_get_node(dev);
	xref = OF_xref_from_node(node);
	if (mtk_pci_ranges(node, &io_space, &mem_space)) {
		device_printf(dev, "could not retrieve 'ranges' data\n");
		goto cleanup_res;
	}

	/* Memory, I/O and IRQ resource limits */
	sc->sc_io_base = io_space.base;
	sc->sc_io_size = io_space.len;
	sc->sc_mem_base = mem_space.base;
	sc->sc_mem_size = mem_space.len;
	sc->sc_irq_start = MTK_PCIE0_IRQ;
	sc->sc_irq_end = MTK_PCIE2_IRQ;

	/* Init resource managers for memory, I/O and IRQ */
	sc->sc_mem_rman.rm_type = RMAN_ARRAY;
	sc->sc_mem_rman.rm_descr = "mtk pcie memory window";
	if (rman_init(&sc->sc_mem_rman) != 0 ||
	    rman_manage_region(&sc->sc_mem_rman, sc->sc_mem_base,
	    sc->sc_mem_base + sc->sc_mem_size - 1) != 0) {
		device_printf(dev, "failed to setup memory rman\n");
		goto cleanup_res;
	}

	sc->sc_io_rman.rm_type = RMAN_ARRAY;
	sc->sc_io_rman.rm_descr = "mtk pcie io window";
	if (rman_init(&sc->sc_io_rman) != 0 ||
	    rman_manage_region(&sc->sc_io_rman, sc->sc_io_base,
	    sc->sc_io_base + sc->sc_io_size - 1) != 0) {
		device_printf(dev, "failed to setup io rman\n");
		goto cleanup_res;
	}

	sc->sc_irq_rman.rm_type = RMAN_ARRAY;
	sc->sc_irq_rman.rm_descr = "mtk pcie irqs";
	if (rman_init(&sc->sc_irq_rman) != 0 ||
	    rman_manage_region(&sc->sc_irq_rman, sc->sc_irq_start,
	    sc->sc_irq_end) != 0) {
		device_printf(dev, "failed to setup irq rman\n");
		goto cleanup_res;
	}

	/* Do SoC-specific PCIe initialization */
	if (mtk_pcie_phy_init(dev)) {
		device_printf(dev, "pcie phy init failed\n");
		goto cleanup_rman;
	}

	/* Register ourselves as an interrupt controller */
	if (intr_pic_register(dev, xref) == NULL) {
		device_printf(dev, "could not register PIC\n");
		goto cleanup_rman;
	}

	/* Set up our interrupt handler */
	for (i = 1; i <= sc->sc_num_irq; i++) {
		sc->pci_intrhand[i - 1] = NULL;
		if (bus_setup_intr(dev, sc->pci_res[i], INTR_TYPE_MISC,
		    mtk_pci_intr, NULL, sc, &sc->pci_intrhand[i - 1])) {
			device_printf(dev, "could not setup intr handler %d\n",
			    i);
			goto cleanup;
		}
	}

	/* Attach our PCI child so bus enumeration can start */
	if (device_add_child(dev, "pci", -1) == NULL) {
		device_printf(dev, "could not attach pci bus\n");
		goto cleanup;
	}

	/* And finally, attach ourselves to the bus */
	if (bus_generic_attach(dev)) {
		device_printf(dev, "could not attach to bus\n");
		goto cleanup;
	}

	return (0);

cleanup:
#ifdef notyet
	intr_pic_unregister(dev, xref);
#endif
	for (i = 1; i <= sc->sc_num_irq; i++) {
		if (sc->pci_intrhand[i - 1] != NULL)
			bus_teardown_intr(dev, sc->pci_res[i],
			    sc->pci_intrhand[i - 1]);
	}
cleanup_rman:
	mtk_pcie_phy_stop(dev);
	rman_fini(&sc->sc_irq_rman);
	rman_fini(&sc->sc_io_rman);
	rman_fini(&sc->sc_mem_rman);
cleanup_res:
	mt_sc = NULL;
	if (sc->pci_res[0] != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->pci_res[0]);
	if (sc->pci_res[1] != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pci_res[1]);
	if (sc->pci_res[2] != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 1, sc->pci_res[2]);
	if (sc->pci_res[3] != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 2, sc->pci_res[3]);
	return (ENXIO);
}

static int
mtk_pci_read_ivar(device_t dev, device_t child, int which,
	uintptr_t *result)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);

	switch (which) {
	case PCIB_IVAR_DOMAIN:
		*result = device_get_unit(dev);
		return (0);
	case PCIB_IVAR_BUS:
		*result = sc->sc_busno;
		return (0);
	}

	return (ENOENT);
}

static int
mtk_pci_write_ivar(device_t dev, device_t child, int which,
	uintptr_t result)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);

	switch (which) {
	case PCIB_IVAR_BUS:
		sc->sc_busno = result;
		return (0);
	}

	return (ENOENT);
}

static struct resource *
mtk_pci_alloc_resource(device_t bus, device_t child, int type, int *rid,
	rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct mtk_pci_softc *sc = device_get_softc(bus);
	struct resource *rv;
	struct rman *rm;

	switch (type) {
	case PCI_RES_BUS:
		return pci_domain_alloc_bus(0, child, rid, start, end, count,
					    flags);
	case SYS_RES_IRQ:
		rm = &sc->sc_irq_rman;
		break;
	case SYS_RES_IOPORT:
		rm = &sc->sc_io_rman;
		break;
	case SYS_RES_MEMORY:
		rm = &sc->sc_mem_rman;
		break;
	default:
		return (NULL);
	}

	rv = rman_reserve_resource(rm, start, end, count, flags, child);

	if (rv == NULL)
		return (NULL);

	rman_set_rid(rv, *rid);

	if ((flags & RF_ACTIVE) && type != SYS_RES_IRQ) {
		if (bus_activate_resource(child, type, *rid, rv)) {
			rman_release_resource(rv);
			return (NULL);
		}
	}

	return (rv);
}

static int
mtk_pci_release_resource(device_t bus, device_t child, int type, int rid,
    struct resource *res)
{

	if (type == PCI_RES_BUS)
		return (pci_domain_release_bus(0, child, rid, res));

	return (bus_generic_release_resource(bus, child, type, rid, res));
}

static int
mtk_pci_adjust_resource(device_t bus, device_t child, int type,
    struct resource *res, rman_res_t start, rman_res_t end)
{
	struct mtk_pci_softc *sc = device_get_softc(bus);
	struct rman *rm;

	switch (type) {
	case PCI_RES_BUS:
		return pci_domain_adjust_bus(0, child, res, start, end);
	case SYS_RES_IRQ:
		rm = &sc->sc_irq_rman;
		break;
	case SYS_RES_IOPORT:
		rm = &sc->sc_io_rman;
		break;
	case SYS_RES_MEMORY:
		rm = &sc->sc_mem_rman;
		break;
	default:
		rm = NULL;
		break;
	}

	if (rm != NULL)
		return (rman_adjust_resource(res, start, end));

	return (bus_generic_adjust_resource(bus, child, type, res, start, end));
}

static inline int
mtk_idx_to_irq(int idx)
{

	return ((idx == 0) ? MTK_PCIE0_IRQ :
		(idx == 1) ? MTK_PCIE1_IRQ :
		(idx == 2) ? MTK_PCIE2_IRQ : -1);
}

static inline int
mtk_irq_to_idx(int irq)
{

	return ((irq == MTK_PCIE0_IRQ) ? 0 :
		(irq == MTK_PCIE1_IRQ) ? 1 :
		(irq == MTK_PCIE2_IRQ) ? 2 : -1);
}

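/*
 * The interrupt source cookie is the IRQ number itself, cast to a pointer;
 * mask/unmask clear or set the corresponding bit in the PCIENA interrupt
 * enable register.
 */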
static void
mtk_pci_mask_irq(void *source)
{
	MT_WRITE32(mt_sc, MTK_PCI_PCIENA,
		MT_READ32(mt_sc, MTK_PCI_PCIENA) & ~(1<<((int)source)));
}

static void
mtk_pci_unmask_irq(void *source)
{

	MT_WRITE32(mt_sc, MTK_PCI_PCIENA,
		MT_READ32(mt_sc, MTK_PCI_PCIENA) | (1<<((int)source)));
}

static int
mtk_pci_setup_intr(device_t bus, device_t child, struct resource *ires,
	int flags, driver_filter_t *filt, driver_intr_t *handler,
	void *arg, void **cookiep)
{
	struct mtk_pci_softc *sc = device_get_softc(bus);
	struct intr_event *event;
	int irq, error, irqidx;

	irq = rman_get_start(ires);

	if (irq < sc->sc_irq_start || irq > sc->sc_irq_end)
		return (EINVAL);

	irqidx = irq - sc->sc_irq_start;

	event = sc->sc_eventstab[irqidx];
	if (event == NULL) {
		error = intr_event_create(&event, (void *)irq, 0, irq,
		    mtk_pci_mask_irq, mtk_pci_unmask_irq, NULL, NULL,
		    "pci intr%d:", irq);

		if (error == 0) {
			sc->sc_eventstab[irqidx] = event;
		} else {
			return (error);
		}
	}

	intr_event_add_handler(event, device_get_nameunit(child), filt,
		handler, arg, intr_priority(flags), flags, cookiep);

	mtk_pci_unmask_irq((void*)irq);

	return (0);
}

static int
mtk_pci_teardown_intr(device_t dev, device_t child, struct resource *ires,
	void *cookie)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);
	int irq, result, irqidx;

	irq = rman_get_start(ires);
	if (irq < sc->sc_irq_start || irq > sc->sc_irq_end)
		return (EINVAL);

	irqidx = irq - sc->sc_irq_start;
	if (sc->sc_eventstab[irqidx] == NULL)
		panic("Trying to teardown unoccupied IRQ");

	mtk_pci_mask_irq((void*)irq);

	result = intr_event_remove_handler(cookie);
	if (!result)
		sc->sc_eventstab[irqidx] = NULL;

	return (result);
}

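/*
 * Build a config-space address: bit 31 enables the cycle, bits [27:24]
 * carry the extended register bits [11:8], then bus [23:16], device [15:11],
 * function [10:8] and the dword-aligned register offset in the low bits.
 */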
static inline uint32_t
mtk_pci_make_addr(int bus, int slot, int func, int reg)
{
	uint32_t addr;

	addr = ((((reg & 0xf00) >> 8) << 24) | (bus << 16) | (slot << 11) |
		(func << 8) | (reg & 0xfc) | (1 << 31));

	return (addr);
}

static int
mtk_pci_maxslots(device_t dev)
{

	return (PCI_SLOTMAX);
}

static inline int
mtk_pci_slot_has_link(device_t dev, int slot)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);

	return !!(sc->pcie_link_status & (1<<slot));
}

static uint32_t
mtk_pci_read_config(device_t dev, u_int bus, u_int slot, u_int func,
	u_int reg, int bytes)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);
	uint32_t addr = 0, data = 0;

	/* Return ~0U if slot has no link */
	if (bus == 0 && mtk_pci_slot_has_link(dev, slot) == 0) {
		return (~0U);
	}

	mtx_lock_spin(&mtk_pci_mtx);
	addr = mtk_pci_make_addr(bus, slot, func, (reg & ~3)) & sc->addr_mask;
	MT_WRITE32(sc, MTK_PCI_CFGADDR, addr);
	switch (bytes % 4) {
	case 0:
		data = MT_READ32(sc, MTK_PCI_CFGDATA);
		break;
	case 1:
		data = MT_READ8(sc, MTK_PCI_CFGDATA + (reg & 0x3));
		break;
	case 2:
		data = MT_READ16(sc, MTK_PCI_CFGDATA + (reg & 0x3));
		break;
	default:
		panic("%s(): Wrong number of bytes (%d) requested!\n",
			__FUNCTION__, bytes % 4);
	}
	mtx_unlock_spin(&mtk_pci_mtx);

	return (data);
}

static void
mtk_pci_write_config(device_t dev, u_int bus, u_int slot, u_int func,
	u_int reg, uint32_t val, int bytes)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);
	uint32_t addr = 0, data = val;

	/* Do not write if slot has no link */
	if (bus == 0 && mtk_pci_slot_has_link(dev, slot) == 0)
		return;

	mtx_lock_spin(&mtk_pci_mtx);
	addr = mtk_pci_make_addr(bus, slot, func, (reg & ~3)) & sc->addr_mask;
	MT_WRITE32(sc, MTK_PCI_CFGADDR, addr);
	switch (bytes % 4) {
	case 0:
		MT_WRITE32(sc, MTK_PCI_CFGDATA, data);
		break;
	case 1:
		MT_WRITE8(sc, MTK_PCI_CFGDATA + (reg & 0x3), data);
		break;
	case 2:
		MT_WRITE16(sc, MTK_PCI_CFGDATA + (reg & 0x3), data);
		break;
	default:
		panic("%s(): Wrong number of bytes (%d) requested!\n",
			__FUNCTION__, bytes % 4);
	}
	mtx_unlock_spin(&mtk_pci_mtx);
}

static int
mtk_pci_route_interrupt(device_t pcib, device_t device, int pin)
{
	int bus, sl, dev;

	bus = pci_get_bus(device);
	sl = pci_get_slot(device);
	dev = pci_get_device(device);

	if (bus != 0)
		panic("Unexpected bus number %d\n", bus);

	/* PCIe only */
	switch (sl) {
	case 0: return MTK_PCIE0_IRQ;
	case 1: return MTK_PCIE0_IRQ + 1;
	case 2: return MTK_PCIE0_IRQ + 2;
	default: return (-1);
	}

	return (-1);
}

static device_method_t mtk_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mtk_pci_probe),
	DEVMETHOD(device_attach,	mtk_pci_attach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	mtk_pci_read_ivar),
	DEVMETHOD(bus_write_ivar,	mtk_pci_write_ivar),
	DEVMETHOD(bus_alloc_resource,	mtk_pci_alloc_resource),
	DEVMETHOD(bus_release_resource,	mtk_pci_release_resource),
	DEVMETHOD(bus_adjust_resource,	mtk_pci_adjust_resource),
	DEVMETHOD(bus_activate_resource,   bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
	DEVMETHOD(bus_setup_intr,	mtk_pci_setup_intr),
	DEVMETHOD(bus_teardown_intr,	mtk_pci_teardown_intr),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,	mtk_pci_maxslots),
	DEVMETHOD(pcib_read_config,	mtk_pci_read_config),
	DEVMETHOD(pcib_write_config,	mtk_pci_write_config),
	DEVMETHOD(pcib_route_interrupt,	mtk_pci_route_interrupt),

	/* OFW bus interface */
	DEVMETHOD(ofw_bus_get_compat,	ofw_bus_gen_get_compat),
	DEVMETHOD(ofw_bus_get_model,	ofw_bus_gen_get_model),
	DEVMETHOD(ofw_bus_get_name,	ofw_bus_gen_get_name),
	DEVMETHOD(ofw_bus_get_node,	ofw_bus_gen_get_node),
	DEVMETHOD(ofw_bus_get_type,	ofw_bus_gen_get_type),

	DEVMETHOD_END
};

static driver_t mtk_pci_driver = {
	"pcib",
	mtk_pci_methods,
	sizeof(struct mtk_pci_softc),
};

static devclass_t mtk_pci_devclass;

DRIVER_MODULE(mtk_pci, simplebus, mtk_pci_driver, mtk_pci_devclass, 0, 0);

/* Our interrupt handler */
static int
mtk_pci_intr(void *arg)
{
	struct mtk_pci_softc *sc = arg;
	struct intr_event *event;
	uint32_t reg, irq, irqidx;

	reg = MT_READ32(sc, MTK_PCI_PCIINT);

	for (irq = sc->sc_irq_start; irq <= sc->sc_irq_end; irq++) {
		if (reg & (1u<<irq)) {
			irqidx = irq - sc->sc_irq_start;
			event = sc->sc_eventstab[irqidx];
			if (!event || TAILQ_EMPTY(&event->ie_handlers)) {
				if (irq != 0)
					printf("Stray PCI IRQ %d\n", irq);
				continue;
			}

			intr_event_handle(event, NULL);
		}
	}

	return (FILTER_HANDLED);
}

/* PCIe SoC-specific initialization */
static int
mtk_pcie_phy_init(device_t dev)
{
	struct mtk_pci_softc *sc;

	/* Get our softc */
	sc = device_get_softc(dev);

	/* We don't know how many slots we have yet */
	sc->num_slots = 0;

	/* Handle SoC specific PCIe init */
	switch (sc->socid) {
	case MTK_SOC_MT7628: /* Fallthrough */
	case MTK_SOC_MT7688:
		if (mtk_pcie_phy_mt7628_init(dev))
			return (ENXIO);
		break;
	case MTK_SOC_MT7621:
		if (mtk_pcie_phy_mt7621_init(dev))
			return (ENXIO);
		break;
	case MTK_SOC_MT7620A:
		if (mtk_pcie_phy_mt7620_init(dev))
			return (ENXIO);
		break;
	case MTK_SOC_RT3662: /* Fallthrough */
	case MTK_SOC_RT3883:
		if (mtk_pcie_phy_rt3883_init(dev))
			return (ENXIO);
		break;
	default:
		device_printf(dev, "unsupported device %x\n", sc->socid);
		return (ENXIO);
	}

	/*
	 * If we were successful so far go and set up the PCIe slots, so we
	 * may allocate mem/io/irq resources and enumerate busses later.
	 */
	mtk_pcie_phy_setup_slots(dev);

	return (0);
}

static int
mtk_pcie_phy_start(device_t dev)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);

	if (sc->socid == MTK_SOC_MT7621 &&
	    (mtk_sysctl_get(SYSCTL_REVID) & SYSCTL_REVID_MASK) !=
	    SYSCTL_MT7621_REV_E) {
		if (fdt_reset_assert_all(dev))
			return (ENXIO);
	} else {
		if (fdt_reset_deassert_all(dev))
			return (ENXIO);
	}

	if (fdt_clock_enable_all(dev))
		return (ENXIO);

	return (0);
}

static int
mtk_pcie_phy_stop(device_t dev)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);

	if (sc->socid == MTK_SOC_MT7621 &&
	    (mtk_sysctl_get(SYSCTL_REVID) & SYSCTL_REVID_MASK) !=
	    SYSCTL_MT7621_REV_E) {
		if (fdt_reset_deassert_all(dev))
			return (ENXIO);
	} else {
		if (fdt_reset_assert_all(dev))
			return (ENXIO);
	}

	if (fdt_clock_disable_all(dev))
		return (ENXIO);

	return (0);
}

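/*
 * Read-modify-write helper: replace the _n-bit wide field that starts at
 * bit _s of register _reg with the value _v.
 */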
#define mtk_pcie_phy_set(_sc, _reg, _s, _n, _v)			\
	MT_WRITE32((_sc), (_reg), ((MT_READ32((_sc), (_reg)) &	\
	    (~(((1ull << (_n)) - 1) << (_s)))) | ((_v) << (_s))))

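/*
 * Set the pipe-reset bypass bits (bits 12 and 4) in the per-port PHY
 * registers; only applied on MT7621 rev. E (see mtk_pcie_phy_mt7621_init).
 */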
static void
mtk_pcie_phy_mt7621_bypass_pipe_rst(struct mtk_pci_softc *sc, uint32_t off)
{

	mtk_pcie_phy_set(sc, off + 0x002c, 12, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x002c,  4, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x012c, 12, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x012c,  4, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x102c, 12, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x102c,  4, 1, 1);
}

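/*
 * Spread-spectrum clock setup for the PHY block at register offset 'off'.
 * The reference clock selection is read from SYSCFG bits [8:6]; the magic
 * register values below follow the vendor initialization sequence.
 */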
static void
mtk_pcie_phy_mt7621_setup_ssc(struct mtk_pci_softc *sc, uint32_t off)
{
	uint32_t xtal_sel;

	xtal_sel = mtk_sysctl_get(SYSCTL_SYSCFG) >> 6;
	xtal_sel &= 0x7;

	mtk_pcie_phy_set(sc, off + 0x400, 8, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x400, 9, 2, 0);
	mtk_pcie_phy_set(sc, off + 0x000, 4, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x100, 4, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x000, 5, 1, 0);
	mtk_pcie_phy_set(sc, off + 0x100, 5, 1, 0);

	if (xtal_sel <= 5 && xtal_sel >= 3) {
		mtk_pcie_phy_set(sc, off + 0x490,  6,  2, 1);
		mtk_pcie_phy_set(sc, off + 0x4a8,  0, 12, 0x1a);
		mtk_pcie_phy_set(sc, off + 0x4a8, 16, 12, 0x1a);
	} else {
		mtk_pcie_phy_set(sc, off + 0x490,  6,  2, 0);
		if (xtal_sel >= 6) {
			mtk_pcie_phy_set(sc, off + 0x4bc,  4,  2, 0x01);
			mtk_pcie_phy_set(sc, off + 0x49c,  0, 31, 0x18000000);
			mtk_pcie_phy_set(sc, off + 0x4a4,  0, 16, 0x18d);
			mtk_pcie_phy_set(sc, off + 0x4a8,  0, 12, 0x4a);
			mtk_pcie_phy_set(sc, off + 0x4a8, 16, 12, 0x4a);
			mtk_pcie_phy_set(sc, off + 0x4a8,  0, 12, 0x11);
			mtk_pcie_phy_set(sc, off + 0x4a8, 16, 12, 0x11);
		} else {
			mtk_pcie_phy_set(sc, off + 0x4a8,  0, 12, 0x1a);
			mtk_pcie_phy_set(sc, off + 0x4a8, 16, 12, 0x1a);
		}
	}

	mtk_pcie_phy_set(sc, off + 0x4a0,  5, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x490, 22, 2, 2);
	mtk_pcie_phy_set(sc, off + 0x490, 18, 4, 6);
	mtk_pcie_phy_set(sc, off + 0x490, 12, 4, 2);
	mtk_pcie_phy_set(sc, off + 0x490,  8, 4, 1);
	mtk_pcie_phy_set(sc, off + 0x4ac, 16, 3, 0);
	mtk_pcie_phy_set(sc, off + 0x490,  1, 3, 2);

	if (xtal_sel <= 5 && xtal_sel >= 3) {
		mtk_pcie_phy_set(sc, off + 0x414, 6, 2, 1);
		mtk_pcie_phy_set(sc, off + 0x414, 5, 1, 1);
	}

	mtk_pcie_phy_set(sc, off + 0x414, 28, 2, 1);
	mtk_pcie_phy_set(sc, off + 0x040, 17, 4, 7);
	mtk_pcie_phy_set(sc, off + 0x040, 16, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x140, 17, 4, 7);
	mtk_pcie_phy_set(sc, off + 0x140, 16, 1, 1);

	mtk_pcie_phy_set(sc, off + 0x000,  5, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x100,  5, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x000,  4, 1, 0);
	mtk_pcie_phy_set(sc, off + 0x100,  4, 1, 0);
}

/* XXX: ugly, we need to fix this at some point */
#define MT7621_GPIO_CTRL0	*((volatile uint32_t *)0xbe000600)
#define MT7621_GPIO_DATA0	*((volatile uint32_t *)0xbe000620)

#define mtk_gpio_clr_set(_reg, _clr, _set)		\
	do {						\
		(_reg) = ((_reg) & (_clr)) | (_set);	\
	} while (0)

static int
mtk_pcie_phy_mt7621_init(device_t dev)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);

	/* First off, stop the PHY */
	if (mtk_pcie_phy_stop(dev))
		return (ENXIO);

	/* PCIe resets are GPIO pins */
	mtk_sysctl_clr_set(SYSCTL_GPIOMODE, MT7621_PERST_GPIO_MODE |
	    MT7621_UARTL3_GPIO_MODE, MT7621_PERST_GPIO | MT7621_UARTL3_GPIO);

	/* Set GPIO pins as outputs */
	mtk_gpio_clr_set(MT7621_GPIO_CTRL0, 0, MT7621_PCIE_RST);

	/* Assert resets to PCIe devices */
	mtk_gpio_clr_set(MT7621_GPIO_DATA0, MT7621_PCIE_RST, 0);

	/* Give everything a chance to sink in */
	DELAY(100000);

	/* Now start the PHY again */
	if (mtk_pcie_phy_start(dev))
		return (ENXIO);

	/* Wait for things to settle */
	DELAY(100000);

	/* Only apply below to REV-E hardware */
	if ((mtk_sysctl_get(SYSCTL_REVID) & SYSCTL_REVID_MASK) ==
	    SYSCTL_MT7621_REV_E)
		mtk_pcie_phy_mt7621_bypass_pipe_rst(sc, 0x9000);

	/* Setup PCIe ports 0 and 1 */
	mtk_pcie_phy_mt7621_setup_ssc(sc, 0x9000);
	/* Setup PCIe port 2 */
	mtk_pcie_phy_mt7621_setup_ssc(sc, 0xa000);

	/* Deassert resets to PCIe devices */
	mtk_gpio_clr_set(MT7621_GPIO_DATA0, 0, MT7621_PCIE_RST);

	/* Set number of slots supported */
	sc->num_slots = 3;

	/* Give it a chance to sink in */
	DELAY(100000);

	return (0);
}

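/*
 * Per-port PHY setup for MT7628/MT7688; the register values follow the
 * vendor initialization sequence and depend on the reference crystal
 * selection read from SYSCFG bit 6.
 */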
static void
mtk_pcie_phy_mt7628_setup(struct mtk_pci_softc *sc, uint32_t off)
{
	uint32_t xtal_sel;

	xtal_sel = mtk_sysctl_get(SYSCTL_SYSCFG) >> 6;
	xtal_sel &= 0x1;

	mtk_pcie_phy_set(sc, off + 0x400,  8, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x400,  9, 2, 0);
	mtk_pcie_phy_set(sc, off + 0x000,  4, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x000,  5, 1, 0);
	mtk_pcie_phy_set(sc, off + 0x4ac, 16, 3, 3);

	if (xtal_sel == 1) {
		mtk_pcie_phy_set(sc, off + 0x4bc, 24,  8, 0x7d);
		mtk_pcie_phy_set(sc, off + 0x490, 12,  4, 0x08);
		mtk_pcie_phy_set(sc, off + 0x490,  6,  2, 0x01);
		mtk_pcie_phy_set(sc, off + 0x4c0,  0, 32, 0x1f400000);
		mtk_pcie_phy_set(sc, off + 0x4a4,  0, 16, 0x013d);
		mtk_pcie_phy_set(sc, off + 0x4a8, 16, 16, 0x74);
		mtk_pcie_phy_set(sc, off + 0x4a8,  0, 16, 0x74);
	} else {
		mtk_pcie_phy_set(sc, off + 0x4bc, 24,  8, 0x64);
		mtk_pcie_phy_set(sc, off + 0x490, 12,  4, 0x0a);
		mtk_pcie_phy_set(sc, off + 0x490,  6,  2, 0x00);
		mtk_pcie_phy_set(sc, off + 0x4c0,  0, 32, 0x19000000);
		mtk_pcie_phy_set(sc, off + 0x4a4,  0, 16, 0x018d);
		mtk_pcie_phy_set(sc, off + 0x4a8, 16, 16, 0x4a);
		mtk_pcie_phy_set(sc, off + 0x4a8,  0, 16, 0x4a);
	}

	mtk_pcie_phy_set(sc, off + 0x498, 0, 8, 5);
	mtk_pcie_phy_set(sc, off + 0x000, 5, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x000, 4, 1, 0);
}

static int
mtk_pcie_phy_mt7628_init(device_t dev)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);

	/* Set PCIe reset to normal mode */
	mtk_sysctl_clr_set(SYSCTL_GPIOMODE, MT7628_PERST_GPIO_MODE,
	    MT7628_PERST);

	/* Start the PHY */
	if (mtk_pcie_phy_start(dev))
		return (ENXIO);

	/* Give it a chance to sink in */
	DELAY(100000);

	/* Setup the PHY */
	mtk_pcie_phy_mt7628_setup(sc, 0x9000);

	/* Deassert PCIe device reset */
	MT_CLR_SET32(sc, MTK_PCI_PCICFG, MTK_PCI_RESET, 0);

	/* Set number of slots supported */
	sc->num_slots = 1;

	return (0);
}

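/* Poll the MT7620 PHY configuration register until the busy bit clears. */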
static int
mtk_pcie_phy_mt7620_wait_busy(struct mtk_pci_softc *sc)
{
	uint32_t reg_value, retry;

	reg_value = retry = 0;

	while (retry++ < MT7620_MAX_RETRIES) {
		reg_value = MT_READ32(sc, MT7620_PCIE_PHY_CFG);
		if (reg_value & PHY_BUSY)
			DELAY(100000);
		else
			break;
	}

	if (retry >= MT7620_MAX_RETRIES)
		return (ENXIO);

	return (0);
}

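/*
 * Write an 8-bit value to an 8-bit MT7620 PCIe PHY register through the
 * PHY_CFG mailbox register, waiting for the PHY to become idle before and
 * after the access.
 */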
static int
mtk_pcie_phy_mt7620_set(struct mtk_pci_softc *sc, uint32_t reg,
    uint32_t val)
{
	uint32_t reg_val;

	if (mtk_pcie_phy_mt7620_wait_busy(sc))
		return (ENXIO);

	reg_val = PHY_MODE_WRITE | ((reg & 0xff) << PHY_ADDR_OFFSET) |
	    (val & 0xff);
	MT_WRITE32(sc, MT7620_PCIE_PHY_CFG, reg_val);
	DELAY(1000);

	if (mtk_pcie_phy_mt7620_wait_busy(sc))
		return (ENXIO);

	return (0);
}

static int
mtk_pcie_phy_mt7620_init(device_t dev)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);

	/*
	 * The below sets the PCIe PHY to bypass the PCIe DLL and enables
	 * "elastic buffer control", whatever that may be...
	 */
	if (mtk_pcie_phy_mt7620_set(sc, 0x00, 0x80) ||
	    mtk_pcie_phy_mt7620_set(sc, 0x01, 0x04) ||
	    mtk_pcie_phy_mt7620_set(sc, 0x68, 0x84))
		return (ENXIO);

	/* Stop PCIe */
	if (mtk_pcie_phy_stop(dev))
		return (ENXIO);

	/* Restore PPLL to a sane state before going on */
	mtk_sysctl_clr_set(MT7620_PPLL_DRV, LC_CKDRVPD, PDRV_SW_SET);

	/* No PCIe on the MT7620N */
	if (!(mtk_sysctl_get(SYSCTL_REVID) & MT7620_PKG_BGA)) {
		device_printf(dev, "PCIe disabled for MT7620N\n");
		mtk_sysctl_clr_set(MT7620_PPLL_CFG0, 0, PPLL_SW_SET);
		mtk_sysctl_clr_set(MT7620_PPLL_CFG1, 0, PPLL_PD);
		return (ENXIO);
	}

	/* PCIe device reset pin is in normal mode */
	mtk_sysctl_clr_set(SYSCTL_GPIOMODE, MT7620_PERST_GPIO_MODE,
	    MT7620_PERST);

	/* Enable PCIe now */
	if (mtk_pcie_phy_start(dev))
		return (ENXIO);

	/* Give it a chance to sink in */
	DELAY(100000);

	/* If PLL is not locked - bail */
	if (!(mtk_sysctl_get(MT7620_PPLL_CFG1) & PPLL_LOCKED)) {
		device_printf(dev, "PPLL not locked\n");
		mtk_pcie_phy_stop(dev);
		return (ENXIO);
	}

	/* Configure PCIe PLL */
	mtk_sysctl_clr_set(MT7620_PPLL_DRV, LC_CKDRVOHZ | LC_CKDRVHZ,
	    LC_CKDRVPD | PDRV_SW_SET);

	/* and give it a chance to settle */
	DELAY(100000);

	/* Deassert PCIe device reset */
	MT_CLR_SET32(sc, MTK_PCI_PCICFG, MTK_PCI_RESET, 0);

	/* MT7620 supports one PCIe slot */
	sc->num_slots = 1;

	return (0);
}

static int
mtk_pcie_phy_rt3883_init(device_t dev)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);

	/* Enable PCI host mode and PCIe RC mode */
	mtk_sysctl_clr_set(SYSCTL_SYSCFG1, 0, RT3883_PCI_HOST_MODE |
	    RT3883_PCIE_RC_MODE);

	/* Enable PCIe PHY */
	if (mtk_pcie_phy_start(dev))
		return (ENXIO);

	/* Disable PCI, we only support PCIe for now */
	mtk_sysctl_clr_set(SYSCTL_RSTCTRL, 0, RT3883_PCI_RST);
	mtk_sysctl_clr_set(SYSCTL_CLKCFG1, RT3883_PCI_CLK, 0);

	/* Give things a chance to sink in */
	DELAY(500000);

	/* Set PCIe port number to 0 and lift PCIe reset */
	MT_WRITE32(sc, MTK_PCI_PCICFG, 0);

	/* Configure PCI Arbiter */
	MT_WRITE32(sc, MTK_PCI_ARBCTL, 0x79);

	/* We have a single PCIe slot */
	sc->num_slots = 1;

	return (0);
}

static void
mtk_pcie_phy_setup_slots(device_t dev)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);
	uint32_t bar0_val, val;
	int i;

	/* Disable all PCIe interrupts */
	MT_WRITE32(sc, MTK_PCI_PCIENA, 0);

	/* Default bar0_val is 64M, enabled */
	bar0_val = 0x03FF0001;

	/* But we override it to 2G, enabled for some SoCs */
	if (sc->socid == MTK_SOC_MT7620A || sc->socid == MTK_SOC_MT7628 ||
	    sc->socid == MTK_SOC_MT7688 || sc->socid == MTK_SOC_MT7621)
		bar0_val = 0x7FFF0001;

	/* We still don't know which slots have linked up */
	sc->pcie_link_status = 0;

	/* XXX: I am not sure if this delay is really necessary */
	DELAY(500000);

	/*
	 * See which slots have links and mark them.
	 * Set up all slots' BARs and make them look like PCIe bridges.
	 */
	for (i = 0; i < sc->num_slots; i++) {
		/* If slot has link - mark it */
		if (MT_READ32(sc, MTK_PCIE_STATUS(i)) & 1)
			sc->pcie_link_status |= (1<<i);
		else
			continue;

		/* Generic slot configuration follows */

		/* We enable BAR0 */
		MT_WRITE32(sc, MTK_PCIE_BAR0SETUP(i), bar0_val);
		/* and disable BAR1 */
		MT_WRITE32(sc, MTK_PCIE_BAR1SETUP(i), 0);
		/* Internal memory base has no offset */
		MT_WRITE32(sc, MTK_PCIE_IMBASEBAR0(i), 0);
		/* We're a PCIe bridge */
		MT_WRITE32(sc, MTK_PCIE_CLASS(i), 0x06040001);

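		/*
		 * Enable bus mastering on the bridge (PCI command register
		 * bit 2) and program vendor-specific register 0x70c with the
		 * value used by MediaTek's reference code (apparently a link
		 * training parameter).
		 */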
		val = mtk_pci_read_config(dev, 0, i, 0, 0x4, 4);
		mtk_pci_write_config(dev, 0, i, 0, 0x4, val | 0x4, 4);
		val = mtk_pci_read_config(dev, 0, i, 0, 0x70c, 4);
		val &= ~(0xff << 8);
		val |= (0x50 << 8);
		mtk_pci_write_config(dev, 0, i, 0, 0x70c, val, 4);

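		/*
		 * Disable the bridge's I/O, memory and prefetchable memory
		 * windows by programming each base above its limit, so
		 * nothing is decoded until real windows are assigned.
		 */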
		mtk_pci_write_config(dev, 0, i, 0, PCIR_IOBASEL_1, 0xff, 1);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_IOBASEH_1, 0xffff, 2);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_IOLIMITL_1, 0, 1);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_IOLIMITH_1, 0, 2);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_MEMBASE_1, 0xffff, 2);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_MEMLIMIT_1, 0, 2);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_PMBASEL_1, 0xffff, 2);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_PMBASEH_1, 0xffffffff,
		    4);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_PMLIMITL_1, 0, 2);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_PMLIMITH_1, 0, 4);
	}
}