1/*-
2 * Copyright (c) 2016 Stanislav Galabov.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * The pci allocator parts are based on code from sys/dev/arm/mv/:
26 *
27 * Copyright (c) 2008 MARVELL INTERNATIONAL LTD.
28 * Copyright (c) 2010 The FreeBSD Foundation
29 * Copyright (c) 2010-2012 Semihalf
30 * All rights reserved.
31 *
32 * Developed by Semihalf.
33 */
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/mips/mediatek/mtk_pcie.c 297717 2016-04-08 15:26:49Z sgalabov $");
36
37#include <sys/param.h>
38#include <sys/systm.h>
39
40#include <sys/bus.h>
41#include <sys/interrupt.h>
42#include <sys/malloc.h>
43#include <sys/kernel.h>
44#include <sys/module.h>
45#include <sys/rman.h>
46#include <sys/lock.h>
47#include <sys/mutex.h>
48#include <sys/endian.h>
49
50#include <vm/vm.h>
51#include <vm/pmap.h>
52#include <vm/vm_extern.h>
53
54#include <machine/bus.h>
55#include <machine/cpu.h>
56#include <machine/intr.h>
57#include <machine/pmap.h>
58
59#include <dev/pci/pcivar.h>
60#include <dev/pci/pcireg.h>
61
62#include <dev/pci/pcib_private.h>
63
64#include <dev/fdt/fdt_common.h>
65#include <dev/fdt/fdt_clock.h>
66#include <dev/ofw/openfirm.h>
67#include <dev/ofw/ofw_bus.h>
68#include <dev/ofw/ofw_bus_subr.h>
69
70#include <mips/mediatek/mtk_pcie.h>
71#include <mips/mediatek/mtk_soc.h>
72#include <mips/mediatek/mtk_sysctl.h>
73#include <mips/mediatek/fdt_reset.h>
74
75#include "pcib_if.h"
76#include "pic_if.h"
77
78/*
79 * Note: We only support PCIe at the moment.
80 * Most SoCs in the Ralink/Mediatek family that we target actually don't
81 * support PCI anyway, with the notable exceptions being RT3662/RT3883, which
82 * support both PCI and PCIe. If there exists a board based on one of them
83 * which is of interest in the future it shouldn't be too hard to enable PCI
84 * support for it.
85 */
86
87/* Chip specific function declarations */
88static int  mtk_pcie_phy_init(device_t);
89static int  mtk_pcie_phy_start(device_t);
90static int  mtk_pcie_phy_stop(device_t);
91static int  mtk_pcie_phy_mt7621_init(device_t);
92static int  mtk_pcie_phy_mt7628_init(device_t);
93static int  mtk_pcie_phy_mt7620_init(device_t);
94static int  mtk_pcie_phy_rt3883_init(device_t);
95static void mtk_pcie_phy_setup_slots(device_t);
96
97/* Generic declarations */
98struct mtx mtk_pci_mtx;
99MTX_SYSINIT(mtk_pci_mtx, &mtk_pci_mtx, "MTK PCIe mutex", MTX_SPIN);
100
101static int mtk_pcib_init(device_t, int, int);
102static int mtk_pci_intr(void *);
103
104static struct mtk_pci_softc *mt_sc = NULL;
105
106struct mtk_pci_range {
107	u_long	base;
108	u_long	len;
109};
110
111#define FDT_RANGES_CELLS	(3 * 2)
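/*
 * The decoder below expects a simplified, driver-specific 'ranges' layout
 * of up to two 3-cell tuples, <type base len>, where type 1 selects the
 * I/O window and type 2 the memory window.  A hypothetical FDT entry in
 * that layout (the addresses are illustrative only) might look like:
 *
 *	ranges = <0x2 0x60000000 0x10000000	// memory window
 *		  0x1 0x1e160000 0x00010000>;	// I/O window
 */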
112
113static void
114mtk_pci_range_dump(struct mtk_pci_range *range)
115{
116#ifdef DEBUG
117	printf("\n");
118	printf("  base = 0x%08lx\n", range->base);
119	printf("  len  = 0x%08lx\n", range->len);
120#endif
121}
122
123static int
124mtk_pci_ranges_decode(phandle_t node, struct mtk_pci_range *io_space,
125    struct mtk_pci_range *mem_space)
126{
127	struct mtk_pci_range *pci_space;
128	pcell_t ranges[FDT_RANGES_CELLS];
129	pcell_t *rangesptr;
130	pcell_t cell0, cell1, cell2;
131	int tuples, i, rv, len;
132
133	/*
134	 * Retrieve 'ranges' property.
135	 */
	if (!OF_hasprop(node, "ranges")) {
		printf("%s: no 'ranges' property\n", __FUNCTION__);
		return (EINVAL);
	}
140
	len = OF_getproplen(node, "ranges");
	if (len > sizeof(ranges)) {
		printf("%s: 'ranges' property is too long\n", __FUNCTION__);
		return (ENOMEM);
	}
146
	if (OF_getprop(node, "ranges", ranges, sizeof(ranges)) <= 0) {
		printf("%s: could not read 'ranges' property\n", __FUNCTION__);
		return (EINVAL);
	}
151
152	tuples = len / (sizeof(pcell_t) * 3);
153
154	/*
155	 * Initialize the ranges so that we don't have to worry about
156	 * having them all defined in the FDT. In particular, it is
157	 * perfectly fine not to want I/O space on PCI busses.
158	 */
159	bzero(io_space, sizeof(*io_space));
160	bzero(mem_space, sizeof(*mem_space));
161
162	rangesptr = &ranges[0];
163	for (i = 0; i < tuples; i++) {
164		cell0 = fdt_data_get((void *)rangesptr, 1);
165		rangesptr++;
166		cell1 = fdt_data_get((void *)rangesptr, 1);
167		rangesptr++;
168		cell2 = fdt_data_get((void *)rangesptr, 1);
169		rangesptr++;
170
171		if (cell0 == 2) {
172			pci_space = mem_space;
173		} else if (cell0 == 1) {
174			pci_space = io_space;
		} else {
			rv = ERANGE;
			printf("%s: unexpected range type %d\n",
			    __FUNCTION__, (int)cell0);
			goto out;
		}
180
181		pci_space->base = cell1;
182		pci_space->len = cell2;
183	}
184
185	rv = 0;
186out:
187	return (rv);
188}
189
190static int
191mtk_pci_ranges(phandle_t node, struct mtk_pci_range *io_space,
192    struct mtk_pci_range *mem_space)
193{
194	int err;
195
196	if ((err = mtk_pci_ranges_decode(node, io_space, mem_space)) != 0) {
197		return (err);
198	}
199
200	mtk_pci_range_dump(io_space);
201	mtk_pci_range_dump(mem_space);
202
203	return (0);
204}
205
206static struct ofw_compat_data compat_data[] = {
207	{ "ralink,rt3662-pcie",		MTK_SOC_RT3883 },
208	{ "ralink,rt3883-pcie",		MTK_SOC_RT3883 },
209	{ "ralink,mt7620a-pcie",	MTK_SOC_MT7620A },
210	{ "ralink,mt7621-pcie",		MTK_SOC_MT7621 },
211	{ "ralink,mt7628-pcie",		MTK_SOC_MT7628 },
212	{ "ralink,mt7688-pcie",		MTK_SOC_MT7628 },
213	{ NULL,				MTK_SOC_UNKNOWN }
214};
215
216static int
217mtk_pci_probe(device_t dev)
218{
219	struct mtk_pci_softc *sc = device_get_softc(dev);
220
221	if (!ofw_bus_status_okay(dev))
222		return (ENXIO);
223
224	sc->socid = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
225	if (sc->socid == MTK_SOC_UNKNOWN)
226		return (ENXIO);
227
228	device_set_desc(dev, "MTK PCIe Controller");
229
230	return (0);
231}
232
233static int
234mtk_pci_attach(device_t dev)
235{
236	struct mtk_pci_softc *sc = device_get_softc(dev);
237	struct mtk_pci_range io_space, mem_space;
238	phandle_t node;
239	intptr_t xref;
240	int i, rid;
241
242	sc->sc_dev = dev;
243	mt_sc = sc;
244	sc->addr_mask = 0xffffffff;
245
246	/* Request our memory */
247	rid = 0;
248	sc->pci_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
249			    RF_ACTIVE);
250	if (sc->pci_res[0] == NULL) {
251		device_printf(dev, "could not allocate memory resource\n");
252		return (ENXIO);
253	}
254
255	/* See how many interrupts we need */
256	if (sc->socid == MTK_SOC_MT7621)
257		sc->sc_num_irq = 3;
258	else {
259		sc->sc_num_irq = 1;
260		sc->pci_res[2] = sc->pci_res[3] = NULL;
261		sc->pci_intrhand[1] = sc->pci_intrhand[2] = NULL;
262	}
263
264	/* Request our interrupts */
265	for (i = 1; i <= sc->sc_num_irq ; i++) {
266		rid = i - 1;
267		sc->pci_res[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
268				     RF_ACTIVE);
269		if (sc->pci_res[i] == NULL) {
270			device_printf(dev, "could not allocate interrupt "
271			    "resource %d\n", rid);
272			goto cleanup_res;
273		}
274	}
275
276	/* Parse our PCI 'ranges' property */
277	node = ofw_bus_get_node(dev);
278	xref = OF_xref_from_node(node);
279	if (mtk_pci_ranges(node, &io_space, &mem_space)) {
280		device_printf(dev, "could not retrieve 'ranges' data\n");
281		goto cleanup_res;
282	}
283
284	/* Memory, I/O and IRQ resource limits */
285	sc->sc_io_base = io_space.base;
286	sc->sc_io_size = io_space.len;
287	sc->sc_mem_base = mem_space.base;
288	sc->sc_mem_size = mem_space.len;
289	sc->sc_irq_start = MTK_PCIE0_IRQ;
290	sc->sc_irq_end = MTK_PCIE2_IRQ;
291
292	/* Init resource managers for memory, I/O and IRQ */
293	sc->sc_mem_rman.rm_type = RMAN_ARRAY;
294	sc->sc_mem_rman.rm_descr = "mtk pcie memory window";
295	if (rman_init(&sc->sc_mem_rman) != 0 ||
296	    rman_manage_region(&sc->sc_mem_rman, sc->sc_mem_base,
297	    sc->sc_mem_base + sc->sc_mem_size - 1) != 0) {
298		device_printf(dev, "failed to setup memory rman\n");
299		goto cleanup_res;
300	}
301
302	sc->sc_io_rman.rm_type = RMAN_ARRAY;
303	sc->sc_io_rman.rm_descr = "mtk pcie io window";
304	if (rman_init(&sc->sc_io_rman) != 0 ||
305	    rman_manage_region(&sc->sc_io_rman, sc->sc_io_base,
306	    sc->sc_io_base + sc->sc_io_size - 1) != 0) {
307		device_printf(dev, "failed to setup io rman\n");
308		goto cleanup_res;
309	}
310
311	sc->sc_irq_rman.rm_type = RMAN_ARRAY;
312	sc->sc_irq_rman.rm_descr = "mtk pcie irqs";
313	if (rman_init(&sc->sc_irq_rman) != 0 ||
314	    rman_manage_region(&sc->sc_irq_rman, sc->sc_irq_start,
315	    sc->sc_irq_end) != 0) {
316		device_printf(dev, "failed to setup irq rman\n");
317		goto cleanup_res;
318	}
319
320	/* Do SoC-specific PCIe initialization */
321	if (mtk_pcie_phy_init(dev)) {
322		device_printf(dev, "pcie phy init failed\n");
323		goto cleanup_rman;
324	}
325
326	/* Register ourselves as an interrupt controller */
327	if (intr_pic_register(dev, xref) != 0) {
328		device_printf(dev, "could not register PIC\n");
329		goto cleanup_rman;
330	}
331
332	/* Set up our interrupt handler */
333	for (i = 1; i <= sc->sc_num_irq; i++) {
334		sc->pci_intrhand[i - 1] = NULL;
335		if (bus_setup_intr(dev, sc->pci_res[i], INTR_TYPE_MISC,
336		    mtk_pci_intr, NULL, sc, &sc->pci_intrhand[i - 1])) {
337			device_printf(dev, "could not setup intr handler %d\n",
338			    i);
339			goto cleanup;
340		}
341	}
342
343	/* Do generic PCIe initialization and resource allocation */
344	mtk_pcib_init(dev, 0, PCI_SLOTMAX);
345
346	/* Attach our PCI child so bus enumeration can start */
347	if (device_add_child(dev, "pci", -1) == NULL) {
348		device_printf(dev, "could not attach pci bus\n");
349		goto cleanup;
350	}
351
352	/* And finally, attach ourselves to the bus */
353	if (bus_generic_attach(dev)) {
354		device_printf(dev, "could not attach to bus\n");
355		goto cleanup;
356	}
357
358	return (0);
359
360cleanup:
361#ifdef notyet
362	intr_pic_unregister(dev, xref);
363#endif
364	for (i = 1; i <= sc->sc_num_irq; i++) {
365		if (sc->pci_intrhand[i - 1] != NULL)
366			bus_teardown_intr(dev, sc->pci_res[i],
367			    sc->pci_intrhand[i - 1]);
368	}
369cleanup_rman:
370	mtk_pcie_phy_stop(dev);
371	rman_fini(&sc->sc_irq_rman);
372	rman_fini(&sc->sc_io_rman);
373	rman_fini(&sc->sc_mem_rman);
374cleanup_res:
375	mt_sc = NULL;
376	if (sc->pci_res[0] != NULL)
377		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->pci_res[0]);
378	if (sc->pci_res[1] != NULL)
379		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pci_res[1]);
380	if (sc->pci_res[2] != NULL)
381		bus_release_resource(dev, SYS_RES_IRQ, 1, sc->pci_res[2]);
382	if (sc->pci_res[3] != NULL)
383		bus_release_resource(dev, SYS_RES_IRQ, 2, sc->pci_res[3]);
384	return (ENXIO);
385}
386
387static int
388mtk_pci_read_ivar(device_t dev, device_t child, int which,
389	uintptr_t *result)
390{
391	struct mtk_pci_softc *sc = device_get_softc(dev);
392
393	switch (which) {
394	case PCIB_IVAR_DOMAIN:
395		*result = device_get_unit(dev);
396		return (0);
397	case PCIB_IVAR_BUS:
398		*result = sc->sc_busno;
399		return (0);
400	}
401
402	return (ENOENT);
403}
404
405static int
406mtk_pci_write_ivar(device_t dev, device_t child, int which,
407	uintptr_t result)
408{
409	struct mtk_pci_softc *sc = device_get_softc(dev);
410
411	switch (which) {
412	case PCIB_IVAR_BUS:
413		sc->sc_busno = result;
414		return (0);
415	}
416
417	return (ENOENT);
418}
419
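/*
 * Child resource allocation: IRQ, I/O port and memory requests are served
 * from the rman instances initialized in mtk_pci_attach(); memory and I/O
 * resources are also activated here when RF_ACTIVE is requested.
 */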
420static struct resource *
421mtk_pci_alloc_resource(device_t bus, device_t child, int type, int *rid,
422	rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
423{
424	struct mtk_pci_softc *sc = device_get_softc(bus);
425	struct resource *rv;
426	struct rman *rm;
427
428	switch (type) {
429	case SYS_RES_IRQ:
430		rm = &sc->sc_irq_rman;
431		break;
432	case SYS_RES_IOPORT:
433		rm = &sc->sc_io_rman;
434		break;
435	case SYS_RES_MEMORY:
436		rm = &sc->sc_mem_rman;
437		break;
438	default:
439		return (NULL);
440	}
441
442	rv = rman_reserve_resource(rm, start, end, count, flags, child);
443
444	if (rv == NULL)
445		return (NULL);
446
447	rman_set_rid(rv, *rid);
448
449	if ((flags & RF_ACTIVE) && type != SYS_RES_IRQ) {
450		if (bus_activate_resource(child, type, *rid, rv)) {
451			rman_release_resource(rv);
452			return (NULL);
453		}
454	}
455
456	return (rv);
457}
458
459static inline int
460mtk_idx_to_irq(int idx)
461{
462
463	return ((idx == 0) ? MTK_PCIE0_IRQ :
464		(idx == 1) ? MTK_PCIE1_IRQ :
465		(idx == 2) ? MTK_PCIE2_IRQ : -1);
466}
467
468static inline int
469mtk_irq_to_idx(int irq)
470{
471
472	return ((irq == MTK_PCIE0_IRQ) ? 0 :
473		(irq == MTK_PCIE1_IRQ) ? 1 :
474		(irq == MTK_PCIE2_IRQ) ? 2 : -1);
475}
476
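/*
 * Interrupt mask/unmask helpers.  The 'source' argument is the IRQ number
 * itself, passed as a pointer by the intr_event created in
 * mtk_pci_setup_intr() below; the bit at that position in MTK_PCI_PCIENA
 * gates the interrupt.
 */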
477static void
478mtk_pci_mask_irq(void *source)
479{
480	MT_WRITE32(mt_sc, MTK_PCI_PCIENA,
481		MT_READ32(mt_sc, MTK_PCI_PCIENA) & ~(1<<((int)source)));
482}
483
484static void
485mtk_pci_unmask_irq(void *source)
486{
487
488	MT_WRITE32(mt_sc, MTK_PCI_PCIENA,
489		MT_READ32(mt_sc, MTK_PCI_PCIENA) | (1<<((int)source)));
490}
491
492static int
493mtk_pci_setup_intr(device_t bus, device_t child, struct resource *ires,
494	int flags, driver_filter_t *filt, driver_intr_t *handler,
495	void *arg, void **cookiep)
496{
497	struct mtk_pci_softc *sc = device_get_softc(bus);
498	struct intr_event *event;
499	int irq, error, irqidx;
500
501	irq = rman_get_start(ires);
502
503	if (irq < sc->sc_irq_start || irq > sc->sc_irq_end)
504		return (EINVAL);
505
506	irqidx = irq - sc->sc_irq_start;
507
508	event = sc->sc_eventstab[irqidx];
509	if (event == NULL) {
510		error = intr_event_create(&event, (void *)irq, 0, irq,
511		    mtk_pci_mask_irq, mtk_pci_unmask_irq, NULL, NULL,
512		    "pci intr%d:", irq);
513
514		if (error == 0) {
515			sc->sc_eventstab[irqidx] = event;
516		}
517		else {
518			return (error);
519		}
520	}
521
522	intr_event_add_handler(event, device_get_nameunit(child), filt,
523		handler, arg, intr_priority(flags), flags, cookiep);
524
525	mtk_pci_unmask_irq((void*)irq);
526
527	return (0);
528}
529
530static int
531mtk_pci_teardown_intr(device_t dev, device_t child, struct resource *ires,
532	void *cookie)
533{
534	struct mtk_pci_softc *sc = device_get_softc(dev);
535	int irq, result, irqidx;
536
537	irq = rman_get_start(ires);
538	if (irq < sc->sc_irq_start || irq > sc->sc_irq_end)
539		return (EINVAL);
540
541	irqidx = irq - sc->sc_irq_start;
542	if (sc->sc_eventstab[irqidx] == NULL)
543		panic("Trying to teardown unoccupied IRQ");
544
545	mtk_pci_mask_irq((void*)irq);
546
547	result = intr_event_remove_handler(cookie);
548	if (!result)
549		sc->sc_eventstab[irqidx] = NULL;
550
551
552	return (result);
553}
554
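/*
 * Compose a value for the MTK_PCI_CFGADDR register.  From the shifts below
 * the layout is: bit 31 enable, bits 27:24 extended register (reg[11:8]),
 * bits 23:16 bus, bits 15:11 slot, bits 10:8 function and bits 7:2 the
 * dword-aligned register offset.  For example, bus 1, slot 0, function 0,
 * register 0x10 encodes as 0x80010010.
 */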
555static inline uint32_t
556mtk_pci_make_addr(int bus, int slot, int func, int reg)
557{
558	uint32_t addr;
559
560	addr = ((((reg & 0xf00) >> 8) << 24) | (bus << 16) | (slot << 11) |
561		(func << 8) | (reg & 0xfc) | (1 << 31));
562
563	return (addr);
564}
565
566static int
567mtk_pci_maxslots(device_t dev)
568{
569
570	return (PCI_SLOTMAX);
571}
572
573static inline int
574mtk_pci_slot_has_link(device_t dev, int slot)
575{
576	struct mtk_pci_softc *sc = device_get_softc(dev);
577
578	return !!(sc->pcie_link_status & (1<<slot));
579}
580
581static uint32_t
582mtk_pci_read_config(device_t dev, u_int bus, u_int slot, u_int func,
583	u_int reg, int bytes)
584{
585	struct mtk_pci_softc *sc = device_get_softc(dev);
586	uint32_t addr = 0, data = 0;
587
588	/* Return ~0U if slot has no link */
589	if (bus == 0 && mtk_pci_slot_has_link(dev, slot) == 0) {
590		return (~0U);
591	}
592
593	mtx_lock_spin(&mtk_pci_mtx);
594	addr = mtk_pci_make_addr(bus, slot, func, (reg & ~3)) & sc->addr_mask;
595	MT_WRITE32(sc, MTK_PCI_CFGADDR, addr);
596	switch (bytes % 4) {
597	case 0:
598		data = MT_READ32(sc, MTK_PCI_CFGDATA);
599		break;
600	case 1:
601		data = MT_READ8(sc, MTK_PCI_CFGDATA + (reg & 0x3));
602		break;
603	case 2:
604		data = MT_READ16(sc, MTK_PCI_CFGDATA + (reg & 0x3));
605		break;
606	default:
607		panic("%s(): Wrong number of bytes (%d) requested!\n",
608			__FUNCTION__, bytes % 4);
609	}
610	mtx_unlock_spin(&mtk_pci_mtx);
611
612	return (data);
613}
614
615static void
616mtk_pci_write_config(device_t dev, u_int bus, u_int slot, u_int func,
617	u_int reg, uint32_t val, int bytes)
618{
619	struct mtk_pci_softc *sc = device_get_softc(dev);
620	uint32_t addr = 0, data = val;
621
622	/* Do not write if slot has no link */
623	if (bus == 0 && mtk_pci_slot_has_link(dev, slot) == 0)
624		return;
625
626	mtx_lock_spin(&mtk_pci_mtx);
627	addr = mtk_pci_make_addr(bus, slot, func, (reg & ~3)) & sc->addr_mask;
628	MT_WRITE32(sc, MTK_PCI_CFGADDR, addr);
629	switch (bytes % 4) {
630	case 0:
631		MT_WRITE32(sc, MTK_PCI_CFGDATA, data);
632		break;
633	case 1:
634		MT_WRITE8(sc, MTK_PCI_CFGDATA + (reg & 0x3), data);
635		break;
636	case 2:
637		MT_WRITE16(sc, MTK_PCI_CFGDATA + (reg & 0x3), data);
638		break;
639	default:
640		panic("%s(): Wrong number of bytes (%d) requested!\n",
641			__FUNCTION__, bytes % 4);
642	}
643	mtx_unlock_spin(&mtk_pci_mtx);
644}
645
646#if 0
647/* We take care of interrupt routing in the allocator code below */
648static int
649mtk_pci_route_interrupt(device_t pcib, device_t device, int pin)
650{
651	//struct mtk_pci_softc *sc = device_get_softc(pcib);
652	int bus, sl, dev;
653
654	if (1) return PCI_INVALID_IRQ;
655
656	bus = pci_get_bus(device);
657	sl = pci_get_slot(device);
658	dev = pci_get_device(device);
659
660	printf("%s: for %d:%d:%d, int = %d\n", __FUNCTION__, bus, sl, dev, pin);
661
662	if (bus != 0)
663		panic("Unexpected bus number %d\n", bus);
664
665	/* PCIe only */
666	switch (sl) {
667	case 0: return MTK_PCIE0_IRQ;
668	case 1: return MTK_PCIE0_IRQ + 1;
669	case 2: return MTK_PCIE0_IRQ + 2;
670	default: return (-1);
671	}
672
673	return (-1);
674}
675#endif
676
677static device_method_t mtk_pci_methods[] = {
678	/* Device interface */
679	DEVMETHOD(device_probe,		mtk_pci_probe),
680	DEVMETHOD(device_attach,	mtk_pci_attach),
681	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
682	DEVMETHOD(device_suspend,	bus_generic_suspend),
683	DEVMETHOD(device_resume,	bus_generic_resume),
684
685	/* Bus interface */
686	DEVMETHOD(bus_read_ivar,	mtk_pci_read_ivar),
687	DEVMETHOD(bus_write_ivar,	mtk_pci_write_ivar),
688	DEVMETHOD(bus_alloc_resource,	mtk_pci_alloc_resource),
689	DEVMETHOD(bus_release_resource,	bus_generic_release_resource),
690	DEVMETHOD(bus_activate_resource,   bus_generic_activate_resource),
691	DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
692	DEVMETHOD(bus_setup_intr,	mtk_pci_setup_intr),
693	DEVMETHOD(bus_teardown_intr,	mtk_pci_teardown_intr),
694
695	/* pcib interface */
696	DEVMETHOD(pcib_maxslots,	mtk_pci_maxslots),
697	DEVMETHOD(pcib_read_config,	mtk_pci_read_config),
698	DEVMETHOD(pcib_write_config,	mtk_pci_write_config),
699#if 0
700	DEVMETHOD(pcib_route_interrupt,	mtk_pci_route_interrupt),
701#endif
702
703	DEVMETHOD_END
704};
705
706static driver_t mtk_pci_driver = {
707	"pcib",
708	mtk_pci_methods,
709	sizeof(struct mtk_pci_softc),
710};
711
712static devclass_t mtk_pci_devclass;
713
714DRIVER_MODULE(mtk_pci, simplebus, mtk_pci_driver, mtk_pci_devclass, 0, 0);
715
716/* Resource allocation code */
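/*
 * PCI memory and I/O space is handed out from the windows described by the
 * 'ranges' property.  Each window is tracked with a simple bitmap
 * (sc_mem_map/sc_io_map) in which one bit represents one chunk of
 * PCI_MIN_MEM_ALLOC or PCI_MIN_IO_ALLOC bytes; pcib_alloc() performs a
 * first-fit search in size-sized steps, so allocations come out naturally
 * aligned to their own size.
 */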
717static inline uint32_t
718pcib_bit_get(uint32_t *map, uint32_t bit)
719{
720	uint32_t n = bit / BITS_PER_UINT32;
721
722	bit = bit % BITS_PER_UINT32;
723	return (map[n] & (1 << bit));
724}
725
726static inline void
727pcib_bit_set(uint32_t *map, uint32_t bit)
728{
729	uint32_t n = bit / BITS_PER_UINT32;
730
731	bit = bit % BITS_PER_UINT32;
732	map[n] |= (1 << bit);
733}
734
735static inline uint32_t
736pcib_map_check(uint32_t *map, uint32_t start, uint32_t bits)
737{
738	uint32_t i;
739
740	for (i = start; i < start + bits; i++)
741		if (pcib_bit_get(map, i))
742			return (0);
743
744	return (1);
745}
746
747static inline void
748pcib_map_set(uint32_t *map, uint32_t start, uint32_t bits)
749{
750	uint32_t i;
751
752	for (i = start; i < start + bits; i++)
753		pcib_bit_set(map, i);
754}
755
756static bus_addr_t
757pcib_alloc(device_t dev, uint32_t smask)
758{
759	struct mtk_pci_softc *sc = device_get_softc(dev);
760	uint32_t bits, bits_limit, i, *map, min_alloc, size;
761	bus_addr_t addr = 0;
762	bus_addr_t base;
763
764	if (smask & 1) {
765		base = sc->sc_io_base;
766		min_alloc = PCI_MIN_IO_ALLOC;
767		bits_limit = sc->sc_io_size / min_alloc;
768		map = sc->sc_io_map;
769		smask &= ~0x3;
770	} else {
771		base = sc->sc_mem_base;
772		min_alloc = PCI_MIN_MEM_ALLOC;
773		bits_limit = sc->sc_mem_size / min_alloc;
774		map = sc->sc_mem_map;
775		smask &= ~0xF;
776	}
777
778	size = ~smask + 1;
779	bits = size / min_alloc;
780
781	for (i = 0; i + bits <= bits_limit; i+= bits)
782		if (pcib_map_check(map, i, bits)) {
783			pcib_map_set(map, i, bits);
784			addr = base + (i * min_alloc);
785			return (addr);
786		}
787
788	return (addr);
789}
790
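/*
 * Size and program a single BAR using the standard probe sequence: write
 * all ones, read back the size mask and let pcib_alloc() strip the low
 * type bits and compute the size as ~mask + 1.  For instance, a memory BAR
 * that reads back as 0xfff00000 decodes to a 1MB region.  Returns the
 * number of 32-bit BAR registers consumed (2 for a 64-bit BAR), 1 if the
 * BAR reads back as zero, or -1 if the address space is exhausted.
 */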
791static int
792mtk_pcib_init_bar(device_t dev, int bus, int slot, int func, int barno)
793{
794	uint32_t addr, bar;
795	int reg, width;
796
797	reg = PCIR_BAR(barno);
798
799	mtk_pci_write_config(dev, bus, slot, func, reg, ~0, 4);
800	bar = mtk_pci_read_config(dev, bus, slot, func, reg, 4);
801	if (bar == 0)
802		return (1);
803
804	/* Calculate BAR size: 64 or 32 bit (in 32-bit units) */
805	width = ((bar & 7) == 4) ? 2 : 1;
806
807	addr = pcib_alloc(dev, bar);
808	if (!addr)
809		return (-1);
810
811	if (bootverbose)
812		printf("PCI %u:%u:%u: reg %x: smask=%08x: addr=%08x\n",
813		    bus, slot, func, reg, bar, addr);
814
815	mtk_pci_write_config(dev, bus, slot, func, reg, addr, 4);
816	if (width == 2)
817		mtk_pci_write_config(dev, bus, slot, func, reg + 4, 0, 4);
818
819	return (width);
820}
821
822static int
823mtk_pcib_init_all_bars(device_t dev, int bus, int slot, int func,
824	int hdrtype)
825{
826	int maxbar, bar, i;
827
828	maxbar = (hdrtype & PCIM_HDRTYPE) ? 0 : 6;
829	bar = 0;
830
831	while (bar < maxbar) {
832		i = mtk_pcib_init_bar(dev, bus, slot, func, bar);
833		bar += i;
834		if (i < 0) {
835			device_printf(dev, "PCI IO/Memory space exhausted\n");
836			return (ENOMEM);
837		}
838	}
839
840	return (0);
841}
842
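/*
 * Program a PCI-PCI bridge found during enumeration: point its I/O and
 * memory windows at the full ranges managed by this driver, set the
 * prefetchable base above the prefetchable limit (which effectively
 * disables that window), assign a secondary/subordinate bus number if none
 * is set yet and recurse into the new bus.
 */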
843static void
844mtk_pcib_init_bridge(device_t dev, int bus, int slot, int func)
845{
846	struct mtk_pci_softc *sc = device_get_softc(dev);
847	bus_addr_t io_base, mem_base;
848	uint32_t io_limit, mem_limit;
849	int secbus;
850
851	if (bus == 0 && !mtk_pci_slot_has_link(dev, slot)) {
852		sc->sc_cur_secbus++;
853		device_printf(dev, "Skip bus %d due to no link\n",
854		    sc->sc_cur_secbus);
855		return;
856	}
857
858	io_base = sc->sc_io_base;
859	io_limit = io_base + sc->sc_io_size - 1;
860	mem_base = sc->sc_mem_base;
861	mem_limit = mem_base + sc->sc_mem_size - 1;
862
863	mtk_pci_write_config(dev, bus, slot, func, PCIR_IOBASEL_1,
864		io_base >> 8, 1);
865	mtk_pci_write_config(dev, bus, slot, func, PCIR_IOBASEH_1,
866		io_base >> 16, 2);
867	mtk_pci_write_config(dev, bus, slot, func, PCIR_IOLIMITL_1,
868		io_limit >> 8, 1);
869	mtk_pci_write_config(dev, bus, slot, func, PCIR_IOLIMITH_1,
870		io_limit >> 16, 2);
871
872	mtk_pci_write_config(dev, bus, slot, func, PCIR_MEMBASE_1,
873		mem_base >> 16, 2);
874	mtk_pci_write_config(dev, bus, slot, func, PCIR_MEMLIMIT_1,
875		mem_limit >> 16, 2);
876
877	mtk_pci_write_config(dev, bus, slot, func, PCIR_PMBASEL_1,
878		0x10, 2);
879	mtk_pci_write_config(dev, bus, slot, func, PCIR_PMBASEH_1,
880		0x0, 4);
881	mtk_pci_write_config(dev, bus, slot, func, PCIR_PMLIMITL_1,
882		0xF, 2);
883	mtk_pci_write_config(dev, bus, slot, func, PCIR_PMLIMITH_1,
884		0x0, 4);
885
886	mtk_pci_write_config(dev, bus, slot, func, PCIR_INTLINE, 0xff, 1);
887
888	secbus = mtk_pci_read_config(dev, bus, slot, func, PCIR_SECBUS_1, 1);
889
890	if (secbus == 0) {
891		sc->sc_cur_secbus++;
892		mtk_pci_write_config(dev, bus, slot, func, PCIR_SECBUS_1,
893			sc->sc_cur_secbus, 1);
894		mtk_pci_write_config(dev, bus, slot, func, PCIR_SUBBUS_1,
895			sc->sc_cur_secbus, 1);
896		secbus = sc->sc_cur_secbus;
897	}
898
899	mtk_pcib_init(dev, secbus, PCI_SLOTMAX);
900}
901
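/*
 * Map a secondary bus number to the fixed IRQ of the PCIe root port in
 * front of it; given the sequential secondary bus assignment done in
 * mtk_pcib_init_bridge() above, bus 1 sits behind slot 0 (MTK_PCIE0_IRQ),
 * bus 2 behind slot 1 and bus 3 behind slot 2.  Only devices in slot 0 of
 * those buses are given an IRQ.
 */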
902static uint8_t
903mtk_pci_get_int(device_t dev, int bus, int slot)
904{
905
906	if (slot != 0)
907		return (PCI_INVALID_IRQ);
908
909	switch (bus) {
910	case 1:
911		return (MTK_PCIE0_IRQ);
912	case 2:
913		return (MTK_PCIE1_IRQ);
914	case 3:
915		return (MTK_PCIE2_IRQ);
916	default:
		device_printf(dev, "Bus %d out of range\n", bus);
918		return (PCI_INVALID_IRQ);
919	}
920
921	/* Unreachable */
922	return (PCI_INVALID_IRQ);
923}
924
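/*
 * Minimal bus enumeration: walk every slot/function on the given bus, size
 * and assign BARs with memory and I/O decoding disabled, then re-enable
 * decoding together with bus mastering, set the cache line size and either
 * hand the device a fixed INTLINE (endpoints) or descend into it (PCI-PCI
 * bridges).
 */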
925static int
926mtk_pcib_init(device_t dev, int bus, int maxslot)
927{
928	int slot, func, maxfunc, error;
929	uint8_t hdrtype, command, class, subclass;
930
931	for (slot = 0; slot <= maxslot; slot++) {
932		maxfunc = 0;
933		for (func = 0; func <= maxfunc; func++) {
934			hdrtype = mtk_pci_read_config(dev, bus, slot, func,
935				PCIR_HDRTYPE, 1);
936
937			if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
938				continue;
939
940			if (func == 0 && (hdrtype & PCIM_MFDEV))
941				maxfunc = PCI_FUNCMAX;
942
943			command = mtk_pci_read_config(dev, bus, slot, func,
944				PCIR_COMMAND, 1);
945			command &= ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN);
946			mtk_pci_write_config(dev, bus, slot, func,
947				PCIR_COMMAND, command, 1);
948
949			error = mtk_pcib_init_all_bars(dev, bus, slot, func,
950				hdrtype);
951
952			if (error)
953				return (error);
954
955			command |= PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN |
956				PCIM_CMD_PORTEN;
957			mtk_pci_write_config(dev, bus, slot, func,
958				PCIR_COMMAND, command, 1);
959
960			mtk_pci_write_config(dev, bus, slot, func,
961				PCIR_CACHELNSZ, 16, 1);
962
963			class = mtk_pci_read_config(dev, bus, slot, func,
964				PCIR_CLASS, 1);
965			subclass = mtk_pci_read_config(dev, bus, slot, func,
966				PCIR_SUBCLASS, 1);
967
968			if (class != PCIC_BRIDGE ||
969			    subclass != PCIS_BRIDGE_PCI) {
970				uint8_t val;
971
972				val = mtk_pci_get_int(dev, bus, slot);
973
974				mtk_pci_write_config(dev, bus, slot, func,
975				    PCIR_INTLINE, val, 1); /* XXX */
976				continue;
977			}
978
979			mtk_pcib_init_bridge(dev, bus, slot, func);
980		}
981	}
982
983	return (0);
984}
985
986/* Our interrupt handler */
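/*
 * A single filter services all PCIe interrupts: read the pending bits from
 * MTK_PCI_PCIINT and dispatch each one to the intr_event created in
 * mtk_pci_setup_intr(); the bits are indexed by the same IRQ numbers used
 * for masking above.
 */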
987static int
988mtk_pci_intr(void *arg)
989{
990	struct mtk_pci_softc *sc = arg;
991	struct intr_event *event;
992	uint32_t reg, irq, irqidx;
993
994	reg = MT_READ32(sc, MTK_PCI_PCIINT);
995
996	for (irq = sc->sc_irq_start; irq <= sc->sc_irq_end; irq++) {
997		if (reg & (1u<<irq)) {
998			irqidx = irq - sc->sc_irq_start;
999			event = sc->sc_eventstab[irqidx];
1000			if (!event || TAILQ_EMPTY(&event->ie_handlers)) {
1001				if (irq != 0)
1002					printf("Stray PCI IRQ %d\n", irq);
1003				continue;
1004			}
1005
1006			intr_event_handle(event, NULL);
1007		}
1008	}
1009
1010	return (FILTER_HANDLED);
1011}
1012
1013/* PCIe SoC-specific initialization */
1014static int
1015mtk_pcie_phy_init(device_t dev)
1016{
1017	struct mtk_pci_softc *sc;
1018
1019	/* Get our softc */
1020	sc = device_get_softc(dev);
1021
1022	/* We don't know how many slots we have yet */
1023	sc->num_slots = 0;
1024
1025	/* Handle SoC specific PCIe init */
1026	switch (sc->socid) {
1027	case MTK_SOC_MT7628: /* Fallthrough */
1028	case MTK_SOC_MT7688:
1029		if (mtk_pcie_phy_mt7628_init(dev))
1030			return (ENXIO);
1031		break;
1032	case MTK_SOC_MT7621:
1033		if (mtk_pcie_phy_mt7621_init(dev))
1034			return (ENXIO);
1035		break;
1036	case MTK_SOC_MT7620A:
1037		if (mtk_pcie_phy_mt7620_init(dev))
1038			return (ENXIO);
1039		break;
1040	case MTK_SOC_RT3662: /* Fallthrough */
1041	case MTK_SOC_RT3883:
1042		if (mtk_pcie_phy_rt3883_init(dev))
1043			return (ENXIO);
1044		break;
1045	default:
		device_printf(dev, "unsupported SoC ID 0x%x\n", sc->socid);
1047		return (ENXIO);
1048	}
1049
1050	/*
1051	 * If we were successful so far go and set up the PCIe slots, so we
1052	 * may allocate mem/io/irq resources and enumerate busses later.
1053	 */
1054	mtk_pcie_phy_setup_slots(dev);
1055
1056	return (0);
1057}
1058
1059static int
1060mtk_pcie_phy_start(device_t dev)
1061{
1062	struct mtk_pci_softc *sc = device_get_softc(dev);
1063
1064	if (sc->socid == MTK_SOC_MT7621 &&
1065	    (mtk_sysctl_get(SYSCTL_REVID) & SYSCTL_REVID_MASK) !=
1066	    SYSCTL_MT7621_REV_E) {
1067		if (fdt_reset_assert_all(dev))
1068			return (ENXIO);
1069	} else {
1070		if (fdt_reset_deassert_all(dev))
1071			return (ENXIO);
1072	}
1073
1074	if (fdt_clock_enable_all(dev))
1075		return (ENXIO);
1076
1077	return (0);
1078}
1079
1080static int
1081mtk_pcie_phy_stop(device_t dev)
1082{
1083	struct mtk_pci_softc *sc = device_get_softc(dev);
1084
1085	if (sc->socid == MTK_SOC_MT7621 &&
1086	    (mtk_sysctl_get(SYSCTL_REVID) & SYSCTL_REVID_MASK) !=
1087	    SYSCTL_MT7621_REV_E) {
1088		if (fdt_reset_deassert_all(dev))
1089			return (ENXIO);
1090	} else {
1091		if (fdt_reset_assert_all(dev))
1092			return (ENXIO);
1093	}
1094
1095	if (fdt_clock_disable_all(dev))
1096		return (ENXIO);
1097
1098	return (0);
1099}
1100
1101#define mtk_pcie_phy_set(_sc, _reg, _s, _n, _v)			\
1102	MT_WRITE32((_sc), (_reg), ((MT_READ32((_sc), (_reg)) &	\
1103	    (~(((1ull << (_n)) - 1) << (_s)))) | ((_v) << (_s))))
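/*
 * Example: mtk_pcie_phy_set(sc, off + 0x400, 8, 1, 1) performs a
 * read-modify-write that clears the 1-bit wide field starting at bit 8 and
 * then writes the value 1 into it; the (_s, _n, _v) arguments are
 * (shift, field width, value).
 */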
1104
1105static void
1106mtk_pcie_phy_mt7621_bypass_pipe_rst(struct mtk_pci_softc *sc, uint32_t off)
1107{
1108
1109	mtk_pcie_phy_set(sc, off + 0x002c, 12, 1, 1);
1110	mtk_pcie_phy_set(sc, off + 0x002c,  4, 1, 1);
1111	mtk_pcie_phy_set(sc, off + 0x012c, 12, 1, 1);
1112	mtk_pcie_phy_set(sc, off + 0x012c,  4, 1, 1);
1113	mtk_pcie_phy_set(sc, off + 0x102c, 12, 1, 1);
1114	mtk_pcie_phy_set(sc, off + 0x102c,  4, 1, 1);
1115}
1116
1117static void
1118mtk_pcie_phy_mt7621_setup_ssc(struct mtk_pci_softc *sc, uint32_t off)
1119{
1120	uint32_t xtal_sel;
1121
1122	xtal_sel = mtk_sysctl_get(SYSCTL_SYSCFG) >> 6;
1123	xtal_sel &= 0x7;
1124
1125	mtk_pcie_phy_set(sc, off + 0x400, 8, 1, 1);
1126	mtk_pcie_phy_set(sc, off + 0x400, 9, 2, 0);
1127	mtk_pcie_phy_set(sc, off + 0x000, 4, 1, 1);
1128	mtk_pcie_phy_set(sc, off + 0x100, 4, 1, 1);
1129	mtk_pcie_phy_set(sc, off + 0x000, 5, 1, 0);
1130	mtk_pcie_phy_set(sc, off + 0x100, 5, 1, 0);
1131
1132	if (xtal_sel <= 5 && xtal_sel >= 3) {
1133		mtk_pcie_phy_set(sc, off + 0x490,  6,  2, 1);
1134		mtk_pcie_phy_set(sc, off + 0x4a8,  0, 12, 0x1a);
1135		mtk_pcie_phy_set(sc, off + 0x4a8, 16, 12, 0x1a);
1136	} else {
1137		mtk_pcie_phy_set(sc, off + 0x490,  6,  2, 0);
1138		if (xtal_sel >= 6) {
1139			mtk_pcie_phy_set(sc, off + 0x4bc,  4,  2, 0x01);
1140			mtk_pcie_phy_set(sc, off + 0x49c,  0, 31, 0x18000000);
1141			mtk_pcie_phy_set(sc, off + 0x4a4,  0, 16, 0x18d);
1142			mtk_pcie_phy_set(sc, off + 0x4a8,  0, 12, 0x4a);
1143			mtk_pcie_phy_set(sc, off + 0x4a8, 16, 12, 0x4a);
1144			mtk_pcie_phy_set(sc, off + 0x4a8,  0, 12, 0x11);
1145			mtk_pcie_phy_set(sc, off + 0x4a8, 16, 12, 0x11);
1146		} else {
1147			mtk_pcie_phy_set(sc, off + 0x4a8,  0, 12, 0x1a);
1148			mtk_pcie_phy_set(sc, off + 0x4a8, 16, 12, 0x1a);
1149		}
1150	}
1151
1152	mtk_pcie_phy_set(sc, off + 0x4a0,  5, 1, 1);
1153	mtk_pcie_phy_set(sc, off + 0x490, 22, 2, 2);
1154	mtk_pcie_phy_set(sc, off + 0x490, 18, 4, 6);
1155	mtk_pcie_phy_set(sc, off + 0x490, 12, 4, 2);
1156	mtk_pcie_phy_set(sc, off + 0x490,  8, 4, 1);
1157	mtk_pcie_phy_set(sc, off + 0x4ac, 16, 3, 0);
1158	mtk_pcie_phy_set(sc, off + 0x490,  1, 3, 2);
1159
1160	if (xtal_sel <= 5 && xtal_sel >= 3) {
1161		mtk_pcie_phy_set(sc, off + 0x414, 6, 2, 1);
1162		mtk_pcie_phy_set(sc, off + 0x414, 5, 1, 1);
1163	}
1164
1165	mtk_pcie_phy_set(sc, off + 0x414, 28, 2, 1);
1166	mtk_pcie_phy_set(sc, off + 0x040, 17, 4, 7);
1167	mtk_pcie_phy_set(sc, off + 0x040, 16, 1, 1);
1168	mtk_pcie_phy_set(sc, off + 0x140, 17, 4, 7);
1169	mtk_pcie_phy_set(sc, off + 0x140, 16, 1, 1);
1170
1171	mtk_pcie_phy_set(sc, off + 0x000,  5, 1, 1);
1172	mtk_pcie_phy_set(sc, off + 0x100,  5, 1, 1);
1173	mtk_pcie_phy_set(sc, off + 0x000,  4, 1, 0);
1174	mtk_pcie_phy_set(sc, off + 0x100,  4, 1, 0);
1175}
1176
1177/* XXX: ugly, we need to fix this at some point */
1178#define MT7621_GPIO_CTRL0	*((volatile uint32_t *)0xbe000600)
1179#define MT7621_GPIO_DATA0	*((volatile uint32_t *)0xbe000620)
1180
1181#define mtk_gpio_clr_set(_reg, _clr, _set)		\
1182	do {						\
1183		(_reg) = ((_reg) & (_clr)) | (_set);	\
1184	} while (0)
1185
1186static int
1187mtk_pcie_phy_mt7621_init(device_t dev)
1188{
1189	struct mtk_pci_softc *sc = device_get_softc(dev);
1190
1191	/* First off, stop the PHY */
1192	if (mtk_pcie_phy_stop(dev))
1193		return (ENXIO);
1194
1195	/* PCIe resets are GPIO pins */
1196	mtk_sysctl_clr_set(SYSCTL_GPIOMODE, MT7621_PERST_GPIO_MODE |
1197	    MT7621_UARTL3_GPIO_MODE, MT7621_PERST_GPIO | MT7621_UARTL3_GPIO);
1198
1199	/* Set GPIO pins as outputs */
1200	mtk_gpio_clr_set(MT7621_GPIO_CTRL0, 0, MT7621_PCIE_RST);
1201
1202	/* Assert resets to PCIe devices */
1203	mtk_gpio_clr_set(MT7621_GPIO_DATA0, MT7621_PCIE_RST, 0);
1204
1205	/* Give everything a chance to sink in */
1206	DELAY(100000);
1207
1208	/* Now start the PHY again */
1209	if (mtk_pcie_phy_start(dev))
1210		return (ENXIO);
1211
1212	/* Wait for things to settle */
1213	DELAY(100000);
1214
1215	/* Only apply below to REV-E hardware */
1216	if ((mtk_sysctl_get(SYSCTL_REVID) & SYSCTL_REVID_MASK) ==
1217	    SYSCTL_MT7621_REV_E)
1218		mtk_pcie_phy_mt7621_bypass_pipe_rst(sc, 0x9000);
1219
1220	/* Setup PCIe ports 0 and 1 */
1221	mtk_pcie_phy_mt7621_setup_ssc(sc, 0x9000);
1222	/* Setup PCIe port 2 */
1223	mtk_pcie_phy_mt7621_setup_ssc(sc, 0xa000);
1224
1225	/* Deassert resets to PCIe devices */
1226	mtk_gpio_clr_set(MT7621_GPIO_DATA0, 0, MT7621_PCIE_RST);
1227
1228	/* Set number of slots supported */
1229	sc->num_slots = 3;
1230
1231	/* Give it a chance to sink in */
1232	DELAY(100000);
1233
1234	return (0);
1235}
1236
1237static void
1238mtk_pcie_phy_mt7628_setup(struct mtk_pci_softc *sc, uint32_t off)
1239{
1240	uint32_t xtal_sel;
1241
1242	xtal_sel = mtk_sysctl_get(SYSCTL_SYSCFG) >> 6;
1243	xtal_sel &= 0x1;
1244
1245	mtk_pcie_phy_set(sc, off + 0x400,  8, 1, 1);
1246	mtk_pcie_phy_set(sc, off + 0x400,  9, 2, 0);
1247	mtk_pcie_phy_set(sc, off + 0x000,  4, 1, 1);
1248	mtk_pcie_phy_set(sc, off + 0x000,  5, 1, 0);
1249	mtk_pcie_phy_set(sc, off + 0x4ac, 16, 3, 3);
1250
1251	if (xtal_sel == 1) {
1252		mtk_pcie_phy_set(sc, off + 0x4bc, 24,  8, 0x7d);
1253		mtk_pcie_phy_set(sc, off + 0x490, 12,  4, 0x08);
1254		mtk_pcie_phy_set(sc, off + 0x490,  6,  2, 0x01);
1255		mtk_pcie_phy_set(sc, off + 0x4c0,  0, 32, 0x1f400000);
1256		mtk_pcie_phy_set(sc, off + 0x4a4,  0, 16, 0x013d);
1257		mtk_pcie_phy_set(sc, off + 0x4a8, 16, 16, 0x74);
1258		mtk_pcie_phy_set(sc, off + 0x4a8,  0, 16, 0x74);
1259	} else {
1260		mtk_pcie_phy_set(sc, off + 0x4bc, 24,  8, 0x64);
1261		mtk_pcie_phy_set(sc, off + 0x490, 12,  4, 0x0a);
1262		mtk_pcie_phy_set(sc, off + 0x490,  6,  2, 0x00);
1263		mtk_pcie_phy_set(sc, off + 0x4c0,  0, 32, 0x19000000);
1264		mtk_pcie_phy_set(sc, off + 0x4a4,  0, 16, 0x018d);
1265		mtk_pcie_phy_set(sc, off + 0x4a8, 16, 16, 0x4a);
1266		mtk_pcie_phy_set(sc, off + 0x4a8,  0, 16, 0x4a);
1267	}
1268
1269	mtk_pcie_phy_set(sc, off + 0x498, 0, 8, 5);
1270	mtk_pcie_phy_set(sc, off + 0x000, 5, 1, 1);
1271	mtk_pcie_phy_set(sc, off + 0x000, 4, 1, 0);
1272}
1273
1274static int
1275mtk_pcie_phy_mt7628_init(device_t dev)
1276{
1277	struct mtk_pci_softc *sc = device_get_softc(dev);
1278
1279	/* Set PCIe reset to normal mode */
1280	mtk_sysctl_clr_set(SYSCTL_GPIOMODE, MT7628_PERST_GPIO_MODE,
1281	    MT7628_PERST);
1282
1283	/* Start the PHY */
1284	if (mtk_pcie_phy_start(dev))
1285		return (ENXIO);
1286
1287	/* Give it a chance to sink in */
1288	DELAY(100000);
1289
1290	/* Setup the PHY */
1291	mtk_pcie_phy_mt7628_setup(sc, 0x9000);
1292
1293	/* Deassert PCIe device reset */
1294	MT_CLR_SET32(sc, MTK_PCI_PCICFG, MTK_PCI_RESET, 0);
1295
1296	/* Set number of slots supported */
1297	sc->num_slots = 1;
1298
1299	return (0);
1300}
1301
1302static int
1303mtk_pcie_phy_mt7620_wait_busy(struct mtk_pci_softc *sc)
1304{
1305	uint32_t reg_value, retry;
1306
1307	reg_value = retry = 0;
1308
1309	while (retry++ < MT7620_MAX_RETRIES) {
1310		reg_value = MT_READ32(sc, MT7620_PCIE_PHY_CFG);
1311		if (reg_value & PHY_BUSY)
1312			DELAY(100000);
1313		else
1314			break;
1315	}
1316
1317	if (retry >= MT7620_MAX_RETRIES)
1318		return (ENXIO);
1319
1320	return (0);
1321}
1322
1323static int
1324mtk_pcie_phy_mt7620_set(struct mtk_pci_softc *sc, uint32_t reg,
1325    uint32_t val)
1326{
1327	uint32_t reg_val;
1328
1329	if (mtk_pcie_phy_mt7620_wait_busy(sc))
1330		return (ENXIO);
1331
1332	reg_val = PHY_MODE_WRITE | ((reg & 0xff) << PHY_ADDR_OFFSET) |
1333	    (val & 0xff);
1334	MT_WRITE32(sc, MT7620_PCIE_PHY_CFG, reg_val);
1335	DELAY(1000);
1336
1337	if (mtk_pcie_phy_mt7620_wait_busy(sc))
1338		return (ENXIO);
1339
1340	return (0);
1341}
1342
1343static int
1344mtk_pcie_phy_mt7620_init(device_t dev)
1345{
1346	struct mtk_pci_softc *sc = device_get_softc(dev);
1347
1348	/*
1349	 * The below sets the PCIe PHY to bypass the PCIe DLL and enables
1350	 * "elastic buffer control", whatever that may be...
1351	 */
1352	if (mtk_pcie_phy_mt7620_set(sc, 0x00, 0x80) ||
1353	    mtk_pcie_phy_mt7620_set(sc, 0x01, 0x04) ||
1354	    mtk_pcie_phy_mt7620_set(sc, 0x68, 0x84))
1355		return (ENXIO);
1356
1357	/* Stop PCIe */
1358	if (mtk_pcie_phy_stop(dev))
1359		return (ENXIO);
1360
1361	/* Restore PPLL to a sane state before going on */
1362	mtk_sysctl_clr_set(MT7620_PPLL_DRV, LC_CKDRVPD, PDRV_SW_SET);
1363
1364	/* No PCIe on the MT7620N */
1365	if (!(mtk_sysctl_get(SYSCTL_REVID) & MT7620_PKG_BGA)) {
1366		device_printf(dev, "PCIe disabled for MT7620N\n");
1367		mtk_sysctl_clr_set(MT7620_PPLL_CFG0, 0, PPLL_SW_SET);
1368		mtk_sysctl_clr_set(MT7620_PPLL_CFG1, 0, PPLL_PD);
1369		return (ENXIO);
1370	}
1371
1372	/* PCIe device reset pin is in normal mode */
1373	mtk_sysctl_clr_set(SYSCTL_GPIOMODE, MT7620_PERST_GPIO_MODE,
1374	    MT7620_PERST);
1375
1376	/* Enable PCIe now */
1377	if (mtk_pcie_phy_start(dev))
1378		return (ENXIO);
1379
1380	/* Give it a chance to sink in */
1381	DELAY(100000);
1382
1383	/* If PLL is not locked - bail */
1384	if (!(mtk_sysctl_get(MT7620_PPLL_CFG1) & PPLL_LOCKED)) {
		device_printf(dev, "PPLL is not locked\n");
1386		mtk_pcie_phy_stop(dev);
1387		return (ENXIO);
1388	}
1389
1390	/* Configure PCIe PLL */
1391	mtk_sysctl_clr_set(MT7620_PPLL_DRV, LC_CKDRVOHZ | LC_CKDRVHZ,
1392	    LC_CKDRVPD | PDRV_SW_SET);
1393
1394	/* and give it a chance to settle */
1395	DELAY(100000);
1396
1397	/* Deassert PCIe device reset */
1398	MT_CLR_SET32(sc, MTK_PCI_PCICFG, MTK_PCI_RESET, 0);
1399
1400	/* MT7620 supports one PCIe slot */
1401	sc->num_slots = 1;
1402
1403	return (0);
1404}
1405
1406static int
1407mtk_pcie_phy_rt3883_init(device_t dev)
1408{
1409	struct mtk_pci_softc *sc = device_get_softc(dev);
1410
1411	/* Enable PCI host mode and PCIe RC mode */
1412	mtk_sysctl_clr_set(SYSCTL_SYSCFG1, 0, RT3883_PCI_HOST_MODE |
1413	    RT3883_PCIE_RC_MODE);
1414
1415	/* Enable PCIe PHY */
1416	if (mtk_pcie_phy_start(dev))
1417		return (ENXIO);
1418
1419	/* Disable PCI, we only support PCIe for now */
1420	mtk_sysctl_clr_set(SYSCTL_RSTCTRL, 0, RT3883_PCI_RST);
1421	mtk_sysctl_clr_set(SYSCTL_CLKCFG1, RT3883_PCI_CLK, 0);
1422
1423	/* Give things a chance to sink in */
1424	DELAY(500000);
1425
1426	/* Set PCIe port number to 0 and lift PCIe reset */
1427	MT_WRITE32(sc, MTK_PCI_PCICFG, 0);
1428
1429	/* Configure PCI Arbiter */
1430	MT_WRITE32(sc, MTK_PCI_ARBCTL, 0x79);
1431
1432	/* We have a single PCIe slot */
1433	sc->num_slots = 1;
1434
1435	return (0);
1436}
1437
1438static void
1439mtk_pcie_phy_setup_slots(device_t dev)
1440{
1441	struct mtk_pci_softc *sc = device_get_softc(dev);
1442	uint32_t bar0_val, val;
1443	int i;
1444
1445	/* Disable all PCIe interrupts */
1446	MT_WRITE32(sc, MTK_PCI_PCIENA, 0);
1447
1448	/* Default bar0_val is 64M, enabled */
1449	bar0_val = 0x03FF0001;
1450
1451	/* But we override it to 2G, enabled for some SoCs */
1452	if (sc->socid == MTK_SOC_MT7620A || sc->socid == MTK_SOC_MT7628 ||
1453	    sc->socid == MTK_SOC_MT7688 || sc->socid == MTK_SOC_MT7621)
1454		bar0_val = 0x7FFF0001;
1455
1456	/* We still don't know which slots have linked up */
1457	sc->pcie_link_status = 0;
1458
1459	/* XXX: I am not sure if this delay is really necessary */
1460	DELAY(500000);
1461
1462	/*
1463	 * See which slots have links and mark them.
1464	 * Set up all slots' BARs and make them look like PCIe bridges.
1465	 */
1466	for (i = 0; i < sc->num_slots; i++) {
1467		/* If slot has link - mark it */
1468		if (MT_READ32(sc, MTK_PCIE_STATUS(i)) & 1)
1469			sc->pcie_link_status |= (1<<i);
1470
1471		/* Generic slot configuration follows */
1472
1473		/* We enable BAR0 */
1474		MT_WRITE32(sc, MTK_PCIE_BAR0SETUP(i), bar0_val);
1475		/* and disable BAR1 */
1476		MT_WRITE32(sc, MTK_PCIE_BAR1SETUP(i), 0);
1477		/* Internal memory base has no offset */
1478		MT_WRITE32(sc, MTK_PCIE_IMBASEBAR0(i), 0);
1479		/* We're a PCIe bridge */
1480		MT_WRITE32(sc, MTK_PCIE_CLASS(i), 0x06040001);
1481
		/* Enable bus mastering for the slot */
		val = mtk_pci_read_config(dev, 0, i, 0, 0x4, 4);
		mtk_pci_write_config(dev, 0, i, 0, 0x4, val | 0x4, 4);
		/* Rewrite bits 15:8 of config register 0x70c */
		val = mtk_pci_read_config(dev, 0, i, 0, 0x70c, 4);
		val &= ~(0xff << 8);
		val |= (0x50 << 8);
		mtk_pci_write_config(dev, 0, i, 0, 0x70c, val, 4);
1488	}
1489}
1490