mtk_pcie.c revision 298350
1/*-
2 * Copyright (c) 2016 Stanislav Galabov.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 */
25#include <sys/cdefs.h>
26__FBSDID("$FreeBSD: head/sys/mips/mediatek/mtk_pcie.c 298350 2016-04-20 14:47:16Z sgalabov $");
27
28#include <sys/param.h>
29#include <sys/systm.h>
30
31#include <sys/bus.h>
32#include <sys/interrupt.h>
33#include <sys/malloc.h>
34#include <sys/kernel.h>
35#include <sys/module.h>
36#include <sys/rman.h>
37#include <sys/lock.h>
38#include <sys/mutex.h>
39#include <sys/endian.h>
40
41#include <vm/vm.h>
42#include <vm/pmap.h>
43#include <vm/vm_extern.h>
44
45#include <machine/bus.h>
46#include <machine/cpu.h>
47#include <machine/intr.h>
48#include <machine/pmap.h>
49
50#include <dev/pci/pcivar.h>
51#include <dev/pci/pcireg.h>
52
53#include <dev/pci/pcib_private.h>
54
55#include <dev/fdt/fdt_common.h>
56#include <dev/fdt/fdt_clock.h>
57#include <dev/ofw/openfirm.h>
58#include <dev/ofw/ofw_bus.h>
59#include <dev/ofw/ofw_bus_subr.h>
60
61#include <mips/mediatek/mtk_pcie.h>
62#include <mips/mediatek/mtk_soc.h>
63#include <mips/mediatek/mtk_sysctl.h>
64#include <mips/mediatek/fdt_reset.h>
65
66#include "ofw_bus_if.h"
67#include "pcib_if.h"
68#include "pic_if.h"
69
70/*
71 * Note: We only support PCIe at the moment.
72 * Most SoCs in the Ralink/Mediatek family that we target actually don't
73 * support PCI anyway, with the notable exceptions being RT3662/RT3883, which
74 * support both PCI and PCIe. If there exists a board based on one of them
75 * which is of interest in the future it shouldn't be too hard to enable PCI
76 * support for it.
77 */
78
/* Chip specific function declarations */
static int  mtk_pcie_phy_init(device_t);
static int  mtk_pcie_phy_start(device_t);
static int  mtk_pcie_phy_stop(device_t);
static int  mtk_pcie_phy_mt7621_init(device_t);
static int  mtk_pcie_phy_mt7628_init(device_t);
static int  mtk_pcie_phy_mt7620_init(device_t);
static int  mtk_pcie_phy_rt3883_init(device_t);
static void mtk_pcie_phy_setup_slots(device_t);

/* Generic declarations */
/*
 * Spin mutex serializing accesses to the shared config-address/config-data
 * register pair (see mtk_pci_read_config()/mtk_pci_write_config()).
 */
struct mtx mtk_pci_mtx;
MTX_SYSINIT(mtk_pci_mtx, &mtk_pci_mtx, "MTK PCIe mutex", MTX_SPIN);

static int mtk_pci_intr(void *);

/*
 * Global pointer to the (single) controller softc; used by the IRQ
 * mask/unmask callbacks, which only receive the IRQ cookie.
 */
static struct mtk_pci_softc *mt_sc = NULL;

/* One decoded FDT "ranges" window: CPU base address and length. */
struct mtk_pci_range {
	u_long	base;
	u_long	len;
};

/* Two tuples of (1 parent addr + 2 size + 3 child addr) cells: I/O + mem. */
#define FDT_RANGES_CELLS	((1 + 2 + 3) * 2)
103
/*
 * Print a decoded PCI range for debugging; compiled out unless DEBUG
 * is defined.
 */
static void
mtk_pci_range_dump(struct mtk_pci_range *range)
{
#ifdef DEBUG
	printf("\n");
	printf("  base = 0x%08lx\n", range->base);
	printf("  len  = 0x%08lx\n", range->len);
#endif
}
113
114static int
115mtk_pci_ranges_decode(phandle_t node, struct mtk_pci_range *io_space,
116    struct mtk_pci_range *mem_space)
117{
118	struct mtk_pci_range *pci_space;
119	pcell_t ranges[FDT_RANGES_CELLS];
120	pcell_t addr_cells, size_cells, par_addr_cells;
121	pcell_t *rangesptr;
122	pcell_t cell0, cell1, cell2;
123	int tuple_size, tuples, i, rv, len;
124
125	/*
126	 * Retrieve 'ranges' property.
127	 */
128	if ((fdt_addrsize_cells(node, &addr_cells, &size_cells)) != 0)
129		return (EINVAL);
130	if (addr_cells != 3 || size_cells != 2)
131		return (ERANGE);
132
133	par_addr_cells = fdt_parent_addr_cells(node);
134	if (par_addr_cells != 1)
135		return (ERANGE);
136
137	len = OF_getproplen(node, "ranges");
138	if (len > sizeof(ranges))
139		return (ENOMEM);
140
141	if (OF_getprop(node, "ranges", ranges, sizeof(ranges)) <= 0)
142		return (EINVAL);
143
144	tuple_size = sizeof(pcell_t) * (addr_cells + par_addr_cells +
145	    size_cells);
146	tuples = len / tuple_size;
147
148	/*
149	 * Initialize the ranges so that we don't have to worry about
150	 * having them all defined in the FDT. In particular, it is
151	 * perfectly fine not to want I/O space on PCI busses.
152	 */
153	bzero(io_space, sizeof(*io_space));
154	bzero(mem_space, sizeof(*mem_space));
155
156	rangesptr = &ranges[0];
157	for (i = 0; i < tuples; i++) {
158		cell0 = fdt_data_get((void *)rangesptr, 1);
159		rangesptr++;
160		cell1 = fdt_data_get((void *)rangesptr, 1);
161		rangesptr++;
162		cell2 = fdt_data_get((void *)rangesptr, 1);
163		rangesptr++;
164
165		if (cell0 & 0x02000000) {
166			pci_space = mem_space;
167		} else if (cell0 & 0x01000000) {
168			pci_space = io_space;
169		} else {
170			rv = ERANGE;
171			goto out;
172		}
173
174		pci_space->base = fdt_data_get((void *)rangesptr,
175		    par_addr_cells);
176		rangesptr += par_addr_cells;
177
178		pci_space->len = fdt_data_get((void *)rangesptr, size_cells);
179		rangesptr += size_cells;
180	}
181
182	rv = 0;
183out:
184	return (rv);
185}
186
187static int
188mtk_pci_ranges(phandle_t node, struct mtk_pci_range *io_space,
189    struct mtk_pci_range *mem_space)
190{
191	int err;
192
193	if ((err = mtk_pci_ranges_decode(node, io_space, mem_space)) != 0) {
194		return (err);
195	}
196
197	mtk_pci_range_dump(io_space);
198	mtk_pci_range_dump(mem_space);
199
200	return (0);
201}
202
/*
 * FDT compatible strings we attach to, mapped to SoC identifiers.
 * NOTE(review): mtk_pcie_phy_init() also handles MT7628/MT7688 and
 * RT3662, but no entry here selects those ids — confirm whether
 * compatible entries are missing or those ids are set elsewhere.
 */
static struct ofw_compat_data compat_data[] = {
	{ "ralink,rt3883-pci",		MTK_SOC_RT3883 },
	{ "mediatek,mt7620-pci",	MTK_SOC_MT7620A },
	{ "mediatek,mt7621-pci",	MTK_SOC_MT7621 },
	{ NULL,				MTK_SOC_UNKNOWN }
};
209
210static int
211mtk_pci_probe(device_t dev)
212{
213	struct mtk_pci_softc *sc = device_get_softc(dev);
214
215	if (!ofw_bus_status_okay(dev))
216		return (ENXIO);
217
218	sc->socid = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
219	if (sc->socid == MTK_SOC_UNKNOWN)
220		return (ENXIO);
221
222	device_set_desc(dev, "MTK PCIe Controller");
223
224	return (0);
225}
226
/*
 * Newbus attach: allocate register memory and IRQ resources, decode the
 * FDT "ranges", set up the memory/I-O/IRQ resource managers, run the
 * SoC-specific PHY bring-up, register as a PIC, install the interrupt
 * filter(s) and finally add the "pci" child so enumeration can start.
 *
 * On failure the goto-ladder unwinds in reverse order of acquisition:
 * cleanup (intr handlers) -> cleanup_rman (PHY + rmans) -> cleanup_res
 * (bus resources).  Returns 0 or ENXIO.
 */
static int
mtk_pci_attach(device_t dev)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);
	struct mtk_pci_range io_space, mem_space;
	phandle_t node;
	intptr_t xref;
	int i, rid;

	sc->sc_dev = dev;
	mt_sc = sc;
	sc->addr_mask = 0xffffffff;

	/* Request our memory */
	rid = 0;
	sc->pci_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
			    RF_ACTIVE);
	if (sc->pci_res[0] == NULL) {
		device_printf(dev, "could not allocate memory resource\n");
		return (ENXIO);
	}

	/* See how many interrupts we need */
	if (sc->socid == MTK_SOC_MT7621)
		sc->sc_num_irq = 3;
	else {
		sc->sc_num_irq = 1;
		sc->pci_res[2] = sc->pci_res[3] = NULL;
		sc->pci_intrhand[1] = sc->pci_intrhand[2] = NULL;
	}

	/* Request our interrupts; pci_res[1..3] hold IRQ rids 0..2 */
	for (i = 1; i <= sc->sc_num_irq ; i++) {
		rid = i - 1;
		sc->pci_res[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
				     RF_ACTIVE);
		if (sc->pci_res[i] == NULL) {
			device_printf(dev, "could not allocate interrupt "
			    "resource %d\n", rid);
			goto cleanup_res;
		}
	}

	/* Parse our PCI 'ranges' property */
	node = ofw_bus_get_node(dev);
	xref = OF_xref_from_node(node);
	if (mtk_pci_ranges(node, &io_space, &mem_space)) {
		device_printf(dev, "could not retrieve 'ranges' data\n");
		goto cleanup_res;
	}

	/* Memory, I/O and IRQ resource limits */
	sc->sc_io_base = io_space.base;
	sc->sc_io_size = io_space.len;
	sc->sc_mem_base = mem_space.base;
	sc->sc_mem_size = mem_space.len;
	sc->sc_irq_start = MTK_PCIE0_IRQ;
	sc->sc_irq_end = MTK_PCIE2_IRQ;

	/* Init resource managers for memory, I/O and IRQ */
	sc->sc_mem_rman.rm_type = RMAN_ARRAY;
	sc->sc_mem_rman.rm_descr = "mtk pcie memory window";
	if (rman_init(&sc->sc_mem_rman) != 0 ||
	    rman_manage_region(&sc->sc_mem_rman, sc->sc_mem_base,
	    sc->sc_mem_base + sc->sc_mem_size - 1) != 0) {
		device_printf(dev, "failed to setup memory rman\n");
		goto cleanup_res;
	}

	sc->sc_io_rman.rm_type = RMAN_ARRAY;
	sc->sc_io_rman.rm_descr = "mtk pcie io window";
	if (rman_init(&sc->sc_io_rman) != 0 ||
	    rman_manage_region(&sc->sc_io_rman, sc->sc_io_base,
	    sc->sc_io_base + sc->sc_io_size - 1) != 0) {
		device_printf(dev, "failed to setup io rman\n");
		goto cleanup_res;
	}

	sc->sc_irq_rman.rm_type = RMAN_ARRAY;
	sc->sc_irq_rman.rm_descr = "mtk pcie irqs";
	if (rman_init(&sc->sc_irq_rman) != 0 ||
	    rman_manage_region(&sc->sc_irq_rman, sc->sc_irq_start,
	    sc->sc_irq_end) != 0) {
		device_printf(dev, "failed to setup irq rman\n");
		goto cleanup_res;
	}

	/* Do SoC-specific PCIe initialization */
	if (mtk_pcie_phy_init(dev)) {
		device_printf(dev, "pcie phy init failed\n");
		goto cleanup_rman;
	}

	/* Register ourselves as an interrupt controller */
	if (intr_pic_register(dev, xref) != 0) {
		device_printf(dev, "could not register PIC\n");
		goto cleanup_rman;
	}

	/* Set up our interrupt handler */
	for (i = 1; i <= sc->sc_num_irq; i++) {
		sc->pci_intrhand[i - 1] = NULL;
		if (bus_setup_intr(dev, sc->pci_res[i], INTR_TYPE_MISC,
		    mtk_pci_intr, NULL, sc, &sc->pci_intrhand[i - 1])) {
			device_printf(dev, "could not setup intr handler %d\n",
			    i);
			goto cleanup;
		}
	}

	/* Attach our PCI child so bus enumeration can start */
	if (device_add_child(dev, "pci", -1) == NULL) {
		device_printf(dev, "could not attach pci bus\n");
		goto cleanup;
	}

	/* And finally, attach ourselves to the bus */
	if (bus_generic_attach(dev)) {
		device_printf(dev, "could not attach to bus\n");
		goto cleanup;
	}

	return (0);

cleanup:
#ifdef notyet
	intr_pic_unregister(dev, xref);
#endif
	/* Tear down any interrupt handlers installed so far */
	for (i = 1; i <= sc->sc_num_irq; i++) {
		if (sc->pci_intrhand[i - 1] != NULL)
			bus_teardown_intr(dev, sc->pci_res[i],
			    sc->pci_intrhand[i - 1]);
	}
cleanup_rman:
	/* Stop the PHY and release the resource managers */
	mtk_pcie_phy_stop(dev);
	rman_fini(&sc->sc_irq_rman);
	rman_fini(&sc->sc_io_rman);
	rman_fini(&sc->sc_mem_rman);
cleanup_res:
	/* Release the bus resources allocated at the top */
	mt_sc = NULL;
	if (sc->pci_res[0] != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->pci_res[0]);
	if (sc->pci_res[1] != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pci_res[1]);
	if (sc->pci_res[2] != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 1, sc->pci_res[2]);
	if (sc->pci_res[3] != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 2, sc->pci_res[3]);
	return (ENXIO);
}
377
378static int
379mtk_pci_read_ivar(device_t dev, device_t child, int which,
380	uintptr_t *result)
381{
382	struct mtk_pci_softc *sc = device_get_softc(dev);
383
384	switch (which) {
385	case PCIB_IVAR_DOMAIN:
386		*result = device_get_unit(dev);
387		return (0);
388	case PCIB_IVAR_BUS:
389		*result = sc->sc_busno;
390		return (0);
391	}
392
393	return (ENOENT);
394}
395
396static int
397mtk_pci_write_ivar(device_t dev, device_t child, int which,
398	uintptr_t result)
399{
400	struct mtk_pci_softc *sc = device_get_softc(dev);
401
402	switch (which) {
403	case PCIB_IVAR_BUS:
404		sc->sc_busno = result;
405		return (0);
406	}
407
408	return (ENOENT);
409}
410
411static struct resource *
412mtk_pci_alloc_resource(device_t bus, device_t child, int type, int *rid,
413	rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
414{
415	struct mtk_pci_softc *sc = device_get_softc(bus);
416	struct resource *rv;
417	struct rman *rm;
418
419	switch (type) {
420	case PCI_RES_BUS:
421		return pci_domain_alloc_bus(0, child, rid, start, end, count,
422					    flags);
423	case SYS_RES_IRQ:
424		rm = &sc->sc_irq_rman;
425		break;
426	case SYS_RES_IOPORT:
427		rm = &sc->sc_io_rman;
428		break;
429	case SYS_RES_MEMORY:
430		rm = &sc->sc_mem_rman;
431		break;
432	default:
433		return (NULL);
434	}
435
436	rv = rman_reserve_resource(rm, start, end, count, flags, child);
437
438	if (rv == NULL)
439		return (NULL);
440
441	rman_set_rid(rv, *rid);
442
443	if ((flags & RF_ACTIVE) && type != SYS_RES_IRQ) {
444		if (bus_activate_resource(child, type, *rid, rv)) {
445			rman_release_resource(rv);
446			return (NULL);
447		}
448	}
449
450	return (rv);
451}
452
453static int
454mtk_pci_release_resource(device_t bus, device_t child, int type, int rid,
455    struct resource *res)
456{
457
458	if (type == PCI_RES_BUS)
459		return (pci_domain_release_bus(0, child, rid, res));
460
461	return (bus_generic_release_resource(bus, child, type, rid, res));
462}
463
464static int
465mtk_pci_adjust_resource(device_t bus, device_t child, int type,
466    struct resource *res, rman_res_t start, rman_res_t end)
467{
468	struct mtk_pci_softc *sc = device_get_softc(bus);
469	struct rman *rm;
470
471	switch (type) {
472	case PCI_RES_BUS:
473		return pci_domain_adjust_bus(0, child, res, start, end);
474	case SYS_RES_IRQ:
475		rm = &sc->sc_irq_rman;
476		break;
477	case SYS_RES_IOPORT:
478		rm = &sc->sc_io_rman;
479		break;
480	case SYS_RES_MEMORY:
481		rm = &sc->sc_mem_rman;
482		break;
483	default:
484		rm = NULL;
485		break;
486	}
487
488	if (rm != NULL)
489		return (rman_adjust_resource(res, start, end));
490
491	return (bus_generic_adjust_resource(bus, child, type, res, start, end));
492}
493
494static inline int
495mtk_idx_to_irq(int idx)
496{
497
498	return ((idx == 0) ? MTK_PCIE0_IRQ :
499		(idx == 1) ? MTK_PCIE1_IRQ :
500		(idx == 2) ? MTK_PCIE2_IRQ : -1);
501}
502
503static inline int
504mtk_irq_to_idx(int irq)
505{
506
507	return ((irq == MTK_PCIE0_IRQ) ? 0 :
508		(irq == MTK_PCIE1_IRQ) ? 1 :
509		(irq == MTK_PCIE2_IRQ) ? 2 : -1);
510}
511
512static void
513mtk_pci_mask_irq(void *source)
514{
515	MT_WRITE32(mt_sc, MTK_PCI_PCIENA,
516		MT_READ32(mt_sc, MTK_PCI_PCIENA) & ~(1<<((int)source)));
517}
518
519static void
520mtk_pci_unmask_irq(void *source)
521{
522
523	MT_WRITE32(mt_sc, MTK_PCI_PCIENA,
524		MT_READ32(mt_sc, MTK_PCI_PCIENA) | (1<<((int)source)));
525}
526
527static int
528mtk_pci_setup_intr(device_t bus, device_t child, struct resource *ires,
529	int flags, driver_filter_t *filt, driver_intr_t *handler,
530	void *arg, void **cookiep)
531{
532	struct mtk_pci_softc *sc = device_get_softc(bus);
533	struct intr_event *event;
534	int irq, error, irqidx;
535
536	irq = rman_get_start(ires);
537
538	if (irq < sc->sc_irq_start || irq > sc->sc_irq_end)
539		return (EINVAL);
540
541	irqidx = irq - sc->sc_irq_start;
542
543	event = sc->sc_eventstab[irqidx];
544	if (event == NULL) {
545		error = intr_event_create(&event, (void *)irq, 0, irq,
546		    mtk_pci_mask_irq, mtk_pci_unmask_irq, NULL, NULL,
547		    "pci intr%d:", irq);
548
549		if (error == 0) {
550			sc->sc_eventstab[irqidx] = event;
551		}
552		else {
553			return (error);
554		}
555	}
556
557	intr_event_add_handler(event, device_get_nameunit(child), filt,
558		handler, arg, intr_priority(flags), flags, cookiep);
559
560	mtk_pci_unmask_irq((void*)irq);
561
562	return (0);
563}
564
565static int
566mtk_pci_teardown_intr(device_t dev, device_t child, struct resource *ires,
567	void *cookie)
568{
569	struct mtk_pci_softc *sc = device_get_softc(dev);
570	int irq, result, irqidx;
571
572	irq = rman_get_start(ires);
573	if (irq < sc->sc_irq_start || irq > sc->sc_irq_end)
574		return (EINVAL);
575
576	irqidx = irq - sc->sc_irq_start;
577	if (sc->sc_eventstab[irqidx] == NULL)
578		panic("Trying to teardown unoccupied IRQ");
579
580	mtk_pci_mask_irq((void*)irq);
581
582	result = intr_event_remove_handler(cookie);
583	if (!result)
584		sc->sc_eventstab[irqidx] = NULL;
585
586
587	return (result);
588}
589
/*
 * Build the value for the config-address register: bit 31 is the
 * enable bit, bits 27:24 carry the extended register bits reg[11:8],
 * bus in 23:16, slot in 15:11, function in 10:8 and the dword-aligned
 * low register byte in 7:2.  The enable bit must be set with 1U — the
 * original (1 << 31) left-shifts into the sign bit, which is undefined
 * behavior for signed int.
 */
static inline uint32_t
mtk_pci_make_addr(int bus, int slot, int func, int reg)
{
	uint32_t addr;

	addr = ((((reg & 0xf00) >> 8) << 24) | (bus << 16) | (slot << 11) |
		(func << 8) | (reg & 0xfc) | (1U << 31));

	return (addr);
}
600
/* Report the highest slot number addressable on a bus. */
static int
mtk_pci_maxslots(device_t dev)
{

	return (PCI_SLOTMAX);
}
607
608static inline int
609mtk_pci_slot_has_link(device_t dev, int slot)
610{
611	struct mtk_pci_softc *sc = device_get_softc(dev);
612
613	return !!(sc->pcie_link_status & (1<<slot));
614}
615
/*
 * Read a 1-, 2- or 4-byte value from PCI config space via the shared
 * config-address/config-data register pair, serialized by mtk_pci_mtx.
 * 'bytes' is 1, 2 or 4; the (bytes % 4) switch maps 4 to the 32-bit
 * case.  Sub-dword accesses offset into the data register by the low
 * two register-address bits.
 */
static uint32_t
mtk_pci_read_config(device_t dev, u_int bus, u_int slot, u_int func,
	u_int reg, int bytes)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);
	uint32_t addr = 0, data = 0;

	/* Return ~0U if slot has no link */
	if (bus == 0 && mtk_pci_slot_has_link(dev, slot) == 0) {
		return (~0U);
	}

	mtx_lock_spin(&mtk_pci_mtx);
	/* Dword-align the register; addr_mask trims unsupported bits */
	addr = mtk_pci_make_addr(bus, slot, func, (reg & ~3)) & sc->addr_mask;
	MT_WRITE32(sc, MTK_PCI_CFGADDR, addr);
	switch (bytes % 4) {
	case 0:
		data = MT_READ32(sc, MTK_PCI_CFGDATA);
		break;
	case 1:
		data = MT_READ8(sc, MTK_PCI_CFGDATA + (reg & 0x3));
		break;
	case 2:
		data = MT_READ16(sc, MTK_PCI_CFGDATA + (reg & 0x3));
		break;
	default:
		panic("%s(): Wrong number of bytes (%d) requested!\n",
			__FUNCTION__, bytes % 4);
	}
	mtx_unlock_spin(&mtk_pci_mtx);

	return (data);
}
649
/*
 * Write a 1-, 2- or 4-byte value to PCI config space; mirror image of
 * mtk_pci_read_config() — same locking, address construction and
 * (bytes % 4) size mapping.  Writes to linkless slots are dropped.
 */
static void
mtk_pci_write_config(device_t dev, u_int bus, u_int slot, u_int func,
	u_int reg, uint32_t val, int bytes)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);
	uint32_t addr = 0, data = val;

	/* Do not write if slot has no link */
	if (bus == 0 && mtk_pci_slot_has_link(dev, slot) == 0)
		return;

	mtx_lock_spin(&mtk_pci_mtx);
	addr = mtk_pci_make_addr(bus, slot, func, (reg & ~3)) & sc->addr_mask;
	MT_WRITE32(sc, MTK_PCI_CFGADDR, addr);
	switch (bytes % 4) {
	case 0:
		MT_WRITE32(sc, MTK_PCI_CFGDATA, data);
		break;
	case 1:
		MT_WRITE8(sc, MTK_PCI_CFGDATA + (reg & 0x3), data);
		break;
	case 2:
		MT_WRITE16(sc, MTK_PCI_CFGDATA + (reg & 0x3), data);
		break;
	default:
		panic("%s(): Wrong number of bytes (%d) requested!\n",
			__FUNCTION__, bytes % 4);
	}
	mtx_unlock_spin(&mtk_pci_mtx);
}
680
681static int
682mtk_pci_route_interrupt(device_t pcib, device_t device, int pin)
683{
684	int bus, sl, dev;
685
686	bus = pci_get_bus(device);
687	sl = pci_get_slot(device);
688	dev = pci_get_device(device);
689
690	if (bus != 0)
691		panic("Unexpected bus number %d\n", bus);
692
693	/* PCIe only */
694	switch (sl) {
695	case 0: return MTK_PCIE0_IRQ;
696	case 1: return MTK_PCIE0_IRQ + 1;
697	case 2: return MTK_PCIE0_IRQ + 2;
698	default: return (-1);
699	}
700
701	return (-1);
702}
703
/* Method table wiring this driver into the device/bus/pcib/ofw KOBJ interfaces */
static device_method_t mtk_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mtk_pci_probe),
	DEVMETHOD(device_attach,	mtk_pci_attach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	mtk_pci_read_ivar),
	DEVMETHOD(bus_write_ivar,	mtk_pci_write_ivar),
	DEVMETHOD(bus_alloc_resource,	mtk_pci_alloc_resource),
	DEVMETHOD(bus_release_resource,	mtk_pci_release_resource),
	DEVMETHOD(bus_adjust_resource,	mtk_pci_adjust_resource),
	DEVMETHOD(bus_activate_resource,   bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
	DEVMETHOD(bus_setup_intr,	mtk_pci_setup_intr),
	DEVMETHOD(bus_teardown_intr,	mtk_pci_teardown_intr),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,	mtk_pci_maxslots),
	DEVMETHOD(pcib_read_config,	mtk_pci_read_config),
	DEVMETHOD(pcib_write_config,	mtk_pci_write_config),
	DEVMETHOD(pcib_route_interrupt,	mtk_pci_route_interrupt),

	/* OFW bus interface */
	DEVMETHOD(ofw_bus_get_compat,	ofw_bus_gen_get_compat),
	DEVMETHOD(ofw_bus_get_model,	ofw_bus_gen_get_model),
	DEVMETHOD(ofw_bus_get_name,	ofw_bus_gen_get_name),
	DEVMETHOD(ofw_bus_get_node,	ofw_bus_gen_get_node),
	DEVMETHOD(ofw_bus_get_type,	ofw_bus_gen_get_type),

	DEVMETHOD_END
};

/* "pcib" driver so the generic PCI bus code attaches beneath us */
static driver_t mtk_pci_driver = {
	"pcib",
	mtk_pci_methods,
	sizeof(struct mtk_pci_softc),
};

static devclass_t mtk_pci_devclass;

/* Attach under simplebus (the FDT root bus on these SoCs) */
DRIVER_MODULE(mtk_pci, simplebus, mtk_pci_driver, mtk_pci_devclass, 0, 0);
748
749/* Our interrupt handler */
750static int
751mtk_pci_intr(void *arg)
752{
753	struct mtk_pci_softc *sc = arg;
754	struct intr_event *event;
755	uint32_t reg, irq, irqidx;
756
757	reg = MT_READ32(sc, MTK_PCI_PCIINT);
758
759	for (irq = sc->sc_irq_start; irq <= sc->sc_irq_end; irq++) {
760		if (reg & (1u<<irq)) {
761			irqidx = irq - sc->sc_irq_start;
762			event = sc->sc_eventstab[irqidx];
763			if (!event || TAILQ_EMPTY(&event->ie_handlers)) {
764				if (irq != 0)
765					printf("Stray PCI IRQ %d\n", irq);
766				continue;
767			}
768
769			intr_event_handle(event, NULL);
770		}
771	}
772
773	return (FILTER_HANDLED);
774}
775
/* PCIe SoC-specific initialization */
/*
 * Dispatch to the per-SoC PHY bring-up routine selected by sc->socid,
 * then program the slots that trained a link.  Returns 0 or ENXIO.
 */
static int
mtk_pcie_phy_init(device_t dev)
{
	struct mtk_pci_softc *sc;

	/* Get our softc */
	sc = device_get_softc(dev);

	/* We don't know how many slots we have yet */
	sc->num_slots = 0;

	/* Handle SoC specific PCIe init */
	switch (sc->socid) {
	case MTK_SOC_MT7628: /* Fallthrough */
	case MTK_SOC_MT7688:
		if (mtk_pcie_phy_mt7628_init(dev))
			return (ENXIO);
		break;
	case MTK_SOC_MT7621:
		if (mtk_pcie_phy_mt7621_init(dev))
			return (ENXIO);
		break;
	case MTK_SOC_MT7620A:
		if (mtk_pcie_phy_mt7620_init(dev))
			return (ENXIO);
		break;
	case MTK_SOC_RT3662: /* Fallthrough */
	case MTK_SOC_RT3883:
		if (mtk_pcie_phy_rt3883_init(dev))
			return (ENXIO);
		break;
	default:
		device_printf(dev, "unsupported device %x\n", sc->socid);
		return (ENXIO);
	}

	/*
	 * If we were successful so far go and set up the PCIe slots, so we
	 * may allocate mem/io/irq resources and enumerate busses later.
	 */
	mtk_pcie_phy_setup_slots(dev);

	return (0);
}
821
822static int
823mtk_pcie_phy_start(device_t dev)
824{
825	struct mtk_pci_softc *sc = device_get_softc(dev);
826
827	if (sc->socid == MTK_SOC_MT7621 &&
828	    (mtk_sysctl_get(SYSCTL_REVID) & SYSCTL_REVID_MASK) !=
829	    SYSCTL_MT7621_REV_E) {
830		if (fdt_reset_assert_all(dev))
831			return (ENXIO);
832	} else {
833		if (fdt_reset_deassert_all(dev))
834			return (ENXIO);
835	}
836
837	if (fdt_clock_enable_all(dev))
838		return (ENXIO);
839
840	return (0);
841}
842
843static int
844mtk_pcie_phy_stop(device_t dev)
845{
846	struct mtk_pci_softc *sc = device_get_softc(dev);
847
848	if (sc->socid == MTK_SOC_MT7621 &&
849	    (mtk_sysctl_get(SYSCTL_REVID) & SYSCTL_REVID_MASK) !=
850	    SYSCTL_MT7621_REV_E) {
851		if (fdt_reset_deassert_all(dev))
852			return (ENXIO);
853	} else {
854		if (fdt_reset_assert_all(dev))
855			return (ENXIO);
856	}
857
858	if (fdt_clock_disable_all(dev))
859		return (ENXIO);
860
861	return (0);
862}
863
/*
 * Read-modify-write helper: replace the _n-bit wide field starting at
 * bit _s of register _reg with value _v.  Evaluates _sc and _reg more
 * than once, so arguments must be side-effect free.  The 1ull keeps
 * the mask computation from overflowing when _n is 32.
 */
#define mtk_pcie_phy_set(_sc, _reg, _s, _n, _v)			\
	MT_WRITE32((_sc), (_reg), ((MT_READ32((_sc), (_reg)) &	\
	    (~(((1ull << (_n)) - 1) << (_s)))) | ((_v) << (_s))))
867
/*
 * MT7621 REV-E workaround: set the pipe-reset-bypass bits in each PHY
 * instance's 0x002c/0x012c/0x102c registers (vendor magic values; no
 * public register documentation — mirrors the vendor SDK sequence).
 */
static void
mtk_pcie_phy_mt7621_bypass_pipe_rst(struct mtk_pci_softc *sc, uint32_t off)
{

	mtk_pcie_phy_set(sc, off + 0x002c, 12, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x002c,  4, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x012c, 12, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x012c,  4, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x102c, 12, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x102c,  4, 1, 1);
}
879
/*
 * Program spread-spectrum clocking and PLL parameters for the MT7621
 * PCIe PHY at register base 'off'.  The parameters depend on the
 * crystal frequency encoded in SYSCFG bits 8:6 (xtal_sel: 3..5 and >=6
 * select different divider tables).  All register offsets and field
 * values are vendor magic taken from the SDK; do not reorder.
 */
static void
mtk_pcie_phy_mt7621_setup_ssc(struct mtk_pci_softc *sc, uint32_t off)
{
	uint32_t xtal_sel;

	/* Crystal selection is in SYSCFG bits 8:6 */
	xtal_sel = mtk_sysctl_get(SYSCTL_SYSCFG) >> 6;
	xtal_sel &= 0x7;

	mtk_pcie_phy_set(sc, off + 0x400, 8, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x400, 9, 2, 0);
	mtk_pcie_phy_set(sc, off + 0x000, 4, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x100, 4, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x000, 5, 1, 0);
	mtk_pcie_phy_set(sc, off + 0x100, 5, 1, 0);

	if (xtal_sel <= 5 && xtal_sel >= 3) {
		mtk_pcie_phy_set(sc, off + 0x490,  6,  2, 1);
		mtk_pcie_phy_set(sc, off + 0x4a8,  0, 12, 0x1a);
		mtk_pcie_phy_set(sc, off + 0x4a8, 16, 12, 0x1a);
	} else {
		mtk_pcie_phy_set(sc, off + 0x490,  6,  2, 0);
		if (xtal_sel >= 6) {
			mtk_pcie_phy_set(sc, off + 0x4bc,  4,  2, 0x01);
			mtk_pcie_phy_set(sc, off + 0x49c,  0, 31, 0x18000000);
			mtk_pcie_phy_set(sc, off + 0x4a4,  0, 16, 0x18d);
			mtk_pcie_phy_set(sc, off + 0x4a8,  0, 12, 0x4a);
			mtk_pcie_phy_set(sc, off + 0x4a8, 16, 12, 0x4a);
			mtk_pcie_phy_set(sc, off + 0x4a8,  0, 12, 0x11);
			mtk_pcie_phy_set(sc, off + 0x4a8, 16, 12, 0x11);
		} else {
			mtk_pcie_phy_set(sc, off + 0x4a8,  0, 12, 0x1a);
			mtk_pcie_phy_set(sc, off + 0x4a8, 16, 12, 0x1a);
		}
	}

	mtk_pcie_phy_set(sc, off + 0x4a0,  5, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x490, 22, 2, 2);
	mtk_pcie_phy_set(sc, off + 0x490, 18, 4, 6);
	mtk_pcie_phy_set(sc, off + 0x490, 12, 4, 2);
	mtk_pcie_phy_set(sc, off + 0x490,  8, 4, 1);
	mtk_pcie_phy_set(sc, off + 0x4ac, 16, 3, 0);
	mtk_pcie_phy_set(sc, off + 0x490,  1, 3, 2);

	if (xtal_sel <= 5 && xtal_sel >= 3) {
		mtk_pcie_phy_set(sc, off + 0x414, 6, 2, 1);
		mtk_pcie_phy_set(sc, off + 0x414, 5, 1, 1);
	}

	mtk_pcie_phy_set(sc, off + 0x414, 28, 2, 1);
	mtk_pcie_phy_set(sc, off + 0x040, 17, 4, 7);
	mtk_pcie_phy_set(sc, off + 0x040, 16, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x140, 17, 4, 7);
	mtk_pcie_phy_set(sc, off + 0x140, 16, 1, 1);

	mtk_pcie_phy_set(sc, off + 0x000,  5, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x100,  5, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x000,  4, 1, 0);
	mtk_pcie_phy_set(sc, off + 0x100,  4, 1, 0);
}
939
/* XXX: ugly, we need to fix this at some point */
/*
 * Raw KSEG1 (uncached) accesses to the MT7621 GPIO control/data
 * registers; should eventually go through a proper GPIO driver.
 */
#define MT7621_GPIO_CTRL0	*((volatile uint32_t *)0xbe000600)
#define MT7621_GPIO_DATA0	*((volatile uint32_t *)0xbe000620)

/* Read-modify-write a GPIO register: clear _clr bits... then OR in _set.
 * NOTE(review): this keeps only the bits in _clr (it ANDs with _clr,
 * not ~_clr) — callers pass the mask accordingly; confirm intent. */
#define mtk_gpio_clr_set(_reg, _clr, _set)		\
	do {						\
		(_reg) = ((_reg) & (_clr)) | (_set);	\
	} while (0)
948
/*
 * MT7621 PHY bring-up: cycle the PHY through stop/start with the PCIe
 * device resets (driven via GPIO) asserted, apply the REV-E pipe-reset
 * bypass where needed, program SSC/PLL for all three ports and release
 * the device resets.  Leaves num_slots = 3.  Returns 0 or ENXIO.
 */
static int
mtk_pcie_phy_mt7621_init(device_t dev)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);

	/* First off, stop the PHY */
	if (mtk_pcie_phy_stop(dev))
		return (ENXIO);

	/* PCIe resets are GPIO pins */
	mtk_sysctl_clr_set(SYSCTL_GPIOMODE, MT7621_PERST_GPIO_MODE |
	    MT7621_UARTL3_GPIO_MODE, MT7621_PERST_GPIO | MT7621_UARTL3_GPIO);

	/* Set GPIO pins as outputs */
	mtk_gpio_clr_set(MT7621_GPIO_CTRL0, 0, MT7621_PCIE_RST);

	/* Assert resets to PCIe devices */
	mtk_gpio_clr_set(MT7621_GPIO_DATA0, MT7621_PCIE_RST, 0);

	/* Give everything a chance to sink in */
	DELAY(100000);

	/* Now start the PHY again */
	if (mtk_pcie_phy_start(dev))
		return (ENXIO);

	/* Wait for things to settle */
	DELAY(100000);

	/* Only apply below to REV-E hardware */
	if ((mtk_sysctl_get(SYSCTL_REVID) & SYSCTL_REVID_MASK) ==
	    SYSCTL_MT7621_REV_E)
		mtk_pcie_phy_mt7621_bypass_pipe_rst(sc, 0x9000);

	/* Setup PCIe ports 0 and 1 */
	mtk_pcie_phy_mt7621_setup_ssc(sc, 0x9000);
	/* Setup PCIe port 2 */
	mtk_pcie_phy_mt7621_setup_ssc(sc, 0xa000);

	/* Deassert resets to PCIe devices */
	mtk_gpio_clr_set(MT7621_GPIO_DATA0, 0, MT7621_PCIE_RST);

	/* Set number of slots supported */
	sc->num_slots = 3;

	/* Give it a chance to sink in */
	DELAY(100000);

	return (0);
}
999
/*
 * Program the MT7628/MT7688 PCIe PHY at register base 'off'.  PLL and
 * divider values are chosen by the crystal select bit (SYSCFG bit 6).
 * Register offsets/values are vendor magic from the SDK; do not
 * reorder.
 */
static void
mtk_pcie_phy_mt7628_setup(struct mtk_pci_softc *sc, uint32_t off)
{
	uint32_t xtal_sel;

	/* Single-bit crystal selection on this SoC */
	xtal_sel = mtk_sysctl_get(SYSCTL_SYSCFG) >> 6;
	xtal_sel &= 0x1;

	mtk_pcie_phy_set(sc, off + 0x400,  8, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x400,  9, 2, 0);
	mtk_pcie_phy_set(sc, off + 0x000,  4, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x000,  5, 1, 0);
	mtk_pcie_phy_set(sc, off + 0x4ac, 16, 3, 3);

	if (xtal_sel == 1) {
		mtk_pcie_phy_set(sc, off + 0x4bc, 24,  8, 0x7d);
		mtk_pcie_phy_set(sc, off + 0x490, 12,  4, 0x08);
		mtk_pcie_phy_set(sc, off + 0x490,  6,  2, 0x01);
		mtk_pcie_phy_set(sc, off + 0x4c0,  0, 32, 0x1f400000);
		mtk_pcie_phy_set(sc, off + 0x4a4,  0, 16, 0x013d);
		mtk_pcie_phy_set(sc, off + 0x4a8, 16, 16, 0x74);
		mtk_pcie_phy_set(sc, off + 0x4a8,  0, 16, 0x74);
	} else {
		mtk_pcie_phy_set(sc, off + 0x4bc, 24,  8, 0x64);
		mtk_pcie_phy_set(sc, off + 0x490, 12,  4, 0x0a);
		mtk_pcie_phy_set(sc, off + 0x490,  6,  2, 0x00);
		mtk_pcie_phy_set(sc, off + 0x4c0,  0, 32, 0x19000000);
		mtk_pcie_phy_set(sc, off + 0x4a4,  0, 16, 0x018d);
		mtk_pcie_phy_set(sc, off + 0x4a8, 16, 16, 0x4a);
		mtk_pcie_phy_set(sc, off + 0x4a8,  0, 16, 0x4a);
	}

	mtk_pcie_phy_set(sc, off + 0x498, 0, 8, 5);
	mtk_pcie_phy_set(sc, off + 0x000, 5, 1, 1);
	mtk_pcie_phy_set(sc, off + 0x000, 4, 1, 0);
}
1036
/*
 * MT7628/MT7688 PHY bring-up: route PERST to its normal (non-GPIO)
 * function, start the PHY, program it and release the device reset.
 * Leaves num_slots = 1.  Returns 0 or ENXIO.
 */
static int
mtk_pcie_phy_mt7628_init(device_t dev)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);

	/* Set PCIe reset to normal mode */
	mtk_sysctl_clr_set(SYSCTL_GPIOMODE, MT7628_PERST_GPIO_MODE,
	    MT7628_PERST);

	/* Start the PHY */
	if (mtk_pcie_phy_start(dev))
		return (ENXIO);

	/* Give it a chance to sink in */
	DELAY(100000);

	/* Setup the PHY */
	mtk_pcie_phy_mt7628_setup(sc, 0x9000);

	/* Deassert PCIe device reset */
	MT_CLR_SET32(sc, MTK_PCI_PCICFG, MTK_PCI_RESET, 0);

	/* Set number of slots supported */
	sc->num_slots = 1;

	return (0);
}
1064
1065static int
1066mtk_pcie_phy_mt7620_wait_busy(struct mtk_pci_softc *sc)
1067{
1068	uint32_t reg_value, retry;
1069
1070	reg_value = retry = 0;
1071
1072	while (retry++ < MT7620_MAX_RETRIES) {
1073		reg_value = MT_READ32(sc, MT7620_PCIE_PHY_CFG);
1074		if (reg_value & PHY_BUSY)
1075			DELAY(100000);
1076		else
1077			break;
1078	}
1079
1080	if (retry >= MT7620_MAX_RETRIES)
1081		return (ENXIO);
1082
1083	return (0);
1084}
1085
1086static int
1087mtk_pcie_phy_mt7620_set(struct mtk_pci_softc *sc, uint32_t reg,
1088    uint32_t val)
1089{
1090	uint32_t reg_val;
1091
1092	if (mtk_pcie_phy_mt7620_wait_busy(sc))
1093		return (ENXIO);
1094
1095	reg_val = PHY_MODE_WRITE | ((reg & 0xff) << PHY_ADDR_OFFSET) |
1096	    (val & 0xff);
1097	MT_WRITE32(sc, MT7620_PCIE_PHY_CFG, reg_val);
1098	DELAY(1000);
1099
1100	if (mtk_pcie_phy_mt7620_wait_busy(sc))
1101		return (ENXIO);
1102
1103	return (0);
1104}
1105
1106static int
1107mtk_pcie_phy_mt7620_init(device_t dev)
1108{
1109	struct mtk_pci_softc *sc = device_get_softc(dev);
1110
1111	/*
1112	 * The below sets the PCIe PHY to bypass the PCIe DLL and enables
1113	 * "elastic buffer control", whatever that may be...
1114	 */
1115	if (mtk_pcie_phy_mt7620_set(sc, 0x00, 0x80) ||
1116	    mtk_pcie_phy_mt7620_set(sc, 0x01, 0x04) ||
1117	    mtk_pcie_phy_mt7620_set(sc, 0x68, 0x84))
1118		return (ENXIO);
1119
1120	/* Stop PCIe */
1121	if (mtk_pcie_phy_stop(dev))
1122		return (ENXIO);
1123
1124	/* Restore PPLL to a sane state before going on */
1125	mtk_sysctl_clr_set(MT7620_PPLL_DRV, LC_CKDRVPD, PDRV_SW_SET);
1126
1127	/* No PCIe on the MT7620N */
1128	if (!(mtk_sysctl_get(SYSCTL_REVID) & MT7620_PKG_BGA)) {
1129		device_printf(dev, "PCIe disabled for MT7620N\n");
1130		mtk_sysctl_clr_set(MT7620_PPLL_CFG0, 0, PPLL_SW_SET);
1131		mtk_sysctl_clr_set(MT7620_PPLL_CFG1, 0, PPLL_PD);
1132		return (ENXIO);
1133	}
1134
1135	/* PCIe device reset pin is in normal mode */
1136	mtk_sysctl_clr_set(SYSCTL_GPIOMODE, MT7620_PERST_GPIO_MODE,
1137	    MT7620_PERST);
1138
1139	/* Enable PCIe now */
1140	if (mtk_pcie_phy_start(dev))
1141		return (ENXIO);
1142
1143	/* Give it a chance to sink in */
1144	DELAY(100000);
1145
1146	/* If PLL is not locked - bail */
1147	if (!(mtk_sysctl_get(MT7620_PPLL_CFG1) & PPLL_LOCKED)) {
1148		device_printf(dev, "no PPLL not lock\n");
1149		mtk_pcie_phy_stop(dev);
1150		return (ENXIO);
1151	}
1152
1153	/* Configure PCIe PLL */
1154	mtk_sysctl_clr_set(MT7620_PPLL_DRV, LC_CKDRVOHZ | LC_CKDRVHZ,
1155	    LC_CKDRVPD | PDRV_SW_SET);
1156
1157	/* and give it a chance to settle */
1158	DELAY(100000);
1159
1160	/* Deassert PCIe device reset */
1161	MT_CLR_SET32(sc, MTK_PCI_PCICFG, MTK_PCI_RESET, 0);
1162
1163	/* MT7620 supports one PCIe slot */
1164	sc->num_slots = 1;
1165
1166	return (0);
1167}
1168
/*
 * Initialize the PCIe controller on the RT3883.
 *
 * Puts the SoC in PCI host / PCIe root-complex mode, starts the PHY,
 * disables the legacy PCI block (only PCIe is supported by this
 * driver) and lifts the PCIe reset.  Returns 0 on success or ENXIO if
 * the PHY fails to start.  The register write ordering below follows
 * the SoC bring-up sequence and should not be rearranged.
 */
static int
mtk_pcie_phy_rt3883_init(device_t dev)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);

	/* Enable PCI host mode and PCIe RC mode */
	mtk_sysctl_clr_set(SYSCTL_SYSCFG1, 0, RT3883_PCI_HOST_MODE |
	    RT3883_PCIE_RC_MODE);

	/* Enable PCIe PHY */
	if (mtk_pcie_phy_start(dev))
		return (ENXIO);

	/* Disable PCI, we only support PCIe for now */
	/* Hold the PCI block in reset and gate its clock */
	mtk_sysctl_clr_set(SYSCTL_RSTCTRL, 0, RT3883_PCI_RST);
	mtk_sysctl_clr_set(SYSCTL_CLKCFG1, RT3883_PCI_CLK, 0);

	/* Give things a chance to sink in */
	DELAY(500000);

	/* Set PCIe port number to 0 and lift PCIe reset */
	MT_WRITE32(sc, MTK_PCI_PCICFG, 0);

	/* Configure PCI Arbiter */
	/* 0x79 is the magic arbiter setting from the vendor code —
	 * exact bit meanings undocumented here; TODO confirm against
	 * the RT3883 datasheet. */
	MT_WRITE32(sc, MTK_PCI_ARBCTL, 0x79);

	/* We have a single PCIe slot */
	sc->num_slots = 1;

	return (0);
}
1200
/*
 * Probe link state and perform generic configuration for every PCIe
 * slot the SoC supports (sc->num_slots, set by the per-SoC init
 * routine).
 *
 * For each slot with an established link this records the link in
 * sc->pcie_link_status, programs the controller's BAR windows, and
 * initializes the slot's type-1 (PCI-PCI bridge) configuration header
 * so downstream devices can be enumerated.
 */
static void
mtk_pcie_phy_setup_slots(device_t dev)
{
	struct mtk_pci_softc *sc = device_get_softc(dev);
	uint32_t bar0_val, val;
	int i;

	/* Disable all PCIe interrupts */
	MT_WRITE32(sc, MTK_PCI_PCIENA, 0);

	/* Default bar0_val is 64M, enabled */
	bar0_val = 0x03FF0001;

	/* But we override it to 2G, enabled for some SoCs */
	if (sc->socid == MTK_SOC_MT7620A || sc->socid == MTK_SOC_MT7628 ||
	    sc->socid == MTK_SOC_MT7688 || sc->socid == MTK_SOC_MT7621)
		bar0_val = 0x7FFF0001;

	/* We still don't know which slots have linked up */
	sc->pcie_link_status = 0;

	/* XXX: I am not sure if this delay is really necessary */
	DELAY(500000);

	/*
	 * See which slots have links and mark them.
	 * Set up all slots' BARs and make them look like PCIe bridges.
	 */
	for (i = 0; i < sc->num_slots; i++) {
		/* If slot has link - mark it */
		if (MT_READ32(sc, MTK_PCIE_STATUS(i)) & 1)
			sc->pcie_link_status |= (1<<i);
		else
			continue;

		/* Generic slot configuration follows */

		/* We enable BAR0 */
		MT_WRITE32(sc, MTK_PCIE_BAR0SETUP(i), bar0_val);
		/* and disable BAR1 */
		MT_WRITE32(sc, MTK_PCIE_BAR1SETUP(i), 0);
		/* Internal memory base has no offset */
		MT_WRITE32(sc, MTK_PCIE_IMBASEBAR0(i), 0);
		/* We're a PCIe bridge */
		/* 0x060400 = PCI-PCI bridge class code, revision 0x01 */
		MT_WRITE32(sc, MTK_PCIE_CLASS(i), 0x06040001);

		/* Set memory space enable (bit 2) in the command register */
		val = mtk_pci_read_config(dev, 0, i, 0, 0x4, 4);
		mtk_pci_write_config(dev, 0, i, 0, 0x4, val | 0x4, 4);
		/*
		 * Set byte [15:8] of extended config register 0x70c to
		 * 0x50 — presumably the N_FTS (fast training sequence)
		 * count; TODO confirm against the MediaTek reference code.
		 */
		val = mtk_pci_read_config(dev, 0, i, 0, 0x70c, 4);
		val &= ~(0xff << 8);
		val |= (0x50 << 8);
		mtk_pci_write_config(dev, 0, i, 0, 0x70c, val, 4);

		/*
		 * Initialize the bridge's type-1 forwarding windows:
		 * base > limit disables I/O, memory and prefetchable
		 * memory forwarding until the bus is enumerated.
		 */
		mtk_pci_write_config(dev, 0, i, 0, PCIR_IOBASEL_1, 0xff, 1);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_IOBASEH_1, 0xffff, 2);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_IOLIMITL_1, 0, 1);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_IOLIMITH_1, 0, 2);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_MEMBASE_1, 0xffff, 2);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_MEMLIMIT_1, 0, 2);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_PMBASEL_1, 0xffff, 2);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_PMBASEH_1, 0xffffffff,
		    4);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_PMLIMITL_1, 0, 2);
		mtk_pci_write_config(dev, 0, i, 0, PCIR_PMLIMITH_1, 0, 4);
	}
}
1267