/*
 * Copyright 2015 Andrew Turner.
 * Copyright 2016 Svatopluk Kraus
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#ifdef SMP
#include <sys/smp.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/resource.h>
#ifdef SMP
#include <machine/smp.h>
#endif

#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_bus.h>

#include "pic_if.h"

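/*
 * Register offsets within the BCM2836 local (per-core) interrupt controller
 * block, relative to the start of the memory resource described by the
 * device tree "reg" property (on the BCM2836 this block typically lives at
 * physical address 0x40000000; the exact address is taken from the DTS and
 * is not assumed here).  The timer config, mailbox config, pending and
 * mailbox set/clear registers are per-core arrays indexed by core number.
 */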
#define	BCM_LINTC_CONTROL_REG		0x00
#define	BCM_LINTC_PRESCALER_REG		0x08
#define	BCM_LINTC_GPU_ROUTING_REG	0x0c
#define	BCM_LINTC_PMU_ROUTING_SET_REG	0x10
#define	BCM_LINTC_PMU_ROUTING_CLR_REG	0x14
#define	BCM_LINTC_TIMER_CFG_REG(n)	(0x40 + (n) * 4)
#define	BCM_LINTC_MBOX_CFG_REG(n)	(0x50 + (n) * 4)
#define	BCM_LINTC_PENDING_REG(n)	(0x60 + (n) * 4)
#define	BCM_LINTC_MBOX0_SET_REG(n)	(0x80 + (n) * 16)
#define	BCM_LINTC_MBOX1_SET_REG(n)	(0x84 + (n) * 16)
#define	BCM_LINTC_MBOX2_SET_REG(n)	(0x88 + (n) * 16)
#define	BCM_LINTC_MBOX3_SET_REG(n)	(0x8C + (n) * 16)
#define	BCM_LINTC_MBOX0_CLR_REG(n)	(0xC0 + (n) * 16)
#define	BCM_LINTC_MBOX1_CLR_REG(n)	(0xC4 + (n) * 16)
#define	BCM_LINTC_MBOX2_CLR_REG(n)	(0xC8 + (n) * 16)
#define	BCM_LINTC_MBOX3_CLR_REG(n)	(0xCC + (n) * 16)

/* Prescaler Register */
#define	BCM_LINTC_PSR_19_2		0x80000000	/* 19.2 MHz */

/* GPU Interrupt Routing Register */
#define	BCM_LINTC_GIRR_IRQ_CORE(n)	(n)
#define	BCM_LINTC_GIRR_FIQ_CORE(n)	((n) << 2)

/* PMU Interrupt Routing Register */
#define	BCM_LINTC_PIRR_IRQ_EN_CORE(n)	(1 << (n))
#define	BCM_LINTC_PIRR_FIQ_EN_CORE(n)	(1 << ((n) + 4))

/* Timer Config Register */
#define	BCM_LINTC_TCR_IRQ_EN_TIMER(n)	(1 << (n))
#define	BCM_LINTC_TCR_FIQ_EN_TIMER(n)	(1 << ((n) + 4))

/* MBOX Config Register */
#define	BCM_LINTC_MCR_IRQ_EN_MBOX(n)	(1 << (n))
#define	BCM_LINTC_MCR_FIQ_EN_MBOX(n)	(1 << ((n) + 4))

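/*
 * Interrupt source numbers.  They match the bit positions in the per-core
 * pending register: bits 0-3 are the architected timer events, bits 4-7 the
 * per-core mailboxes, followed by the GPU, PMU, AXI and local timer bits.
 */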
#define	BCM_LINTC_CNTPSIRQ_IRQ		0
#define	BCM_LINTC_CNTPNSIRQ_IRQ		1
#define	BCM_LINTC_CNTHPIRQ_IRQ		2
#define	BCM_LINTC_CNTVIRQ_IRQ		3
#define	BCM_LINTC_MBOX0_IRQ		4
#define	BCM_LINTC_MBOX1_IRQ		5
#define	BCM_LINTC_MBOX2_IRQ		6
#define	BCM_LINTC_MBOX3_IRQ		7
#define	BCM_LINTC_GPU_IRQ		8
#define	BCM_LINTC_PMU_IRQ		9
#define	BCM_LINTC_AXI_IRQ		10
#define	BCM_LINTC_LTIMER_IRQ		11

#define	BCM_LINTC_NIRQS			12

#define	BCM_LINTC_TIMER0_IRQ		BCM_LINTC_CNTPSIRQ_IRQ
#define	BCM_LINTC_TIMER1_IRQ		BCM_LINTC_CNTPNSIRQ_IRQ
#define	BCM_LINTC_TIMER2_IRQ		BCM_LINTC_CNTHPIRQ_IRQ
#define	BCM_LINTC_TIMER3_IRQ		BCM_LINTC_CNTVIRQ_IRQ

#define	BCM_LINTC_TIMER0_IRQ_MASK	(1 << BCM_LINTC_TIMER0_IRQ)
#define	BCM_LINTC_TIMER1_IRQ_MASK	(1 << BCM_LINTC_TIMER1_IRQ)
#define	BCM_LINTC_TIMER2_IRQ_MASK	(1 << BCM_LINTC_TIMER2_IRQ)
#define	BCM_LINTC_TIMER3_IRQ_MASK	(1 << BCM_LINTC_TIMER3_IRQ)
#define	BCM_LINTC_MBOX0_IRQ_MASK	(1 << BCM_LINTC_MBOX0_IRQ)
#define	BCM_LINTC_GPU_IRQ_MASK		(1 << BCM_LINTC_GPU_IRQ)
#define	BCM_LINTC_PMU_IRQ_MASK		(1 << BCM_LINTC_PMU_IRQ)

#define	BCM_LINTC_UP_PENDING_MASK	\
    (BCM_LINTC_TIMER0_IRQ_MASK |	\
     BCM_LINTC_TIMER1_IRQ_MASK |	\
     BCM_LINTC_TIMER2_IRQ_MASK |	\
     BCM_LINTC_TIMER3_IRQ_MASK |	\
     BCM_LINTC_GPU_IRQ_MASK |		\
     BCM_LINTC_PMU_IRQ_MASK)

#define	BCM_LINTC_SMP_PENDING_MASK	\
    (BCM_LINTC_UP_PENDING_MASK |	\
     BCM_LINTC_MBOX0_IRQ_MASK)

#ifdef SMP
#define BCM_LINTC_PENDING_MASK		BCM_LINTC_SMP_PENDING_MASK
#else
#define BCM_LINTC_PENDING_MASK		BCM_LINTC_UP_PENDING_MASK
#endif

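/*
 * Per interrupt source state.  For the timer sources bli_mask holds the
 * per-core enable bit used in the timer config register; for the GPU source
 * bli_value holds the routing value written to the GPU routing register.
 */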
struct bcm_lintc_irqsrc {
	struct intr_irqsrc	bli_isrc;
	u_int			bli_irq;
	union {
		u_int		bli_mask;	/* for timers */
		u_int		bli_value;	/* for GPU */
	};
};

struct bcm_lintc_softc {
	device_t		bls_dev;
	struct mtx		bls_mtx;
	struct resource *	bls_mem;
	bus_space_tag_t		bls_bst;
	bus_space_handle_t	bls_bsh;
	struct bcm_lintc_irqsrc	bls_isrcs[BCM_LINTC_NIRQS];
};

static struct bcm_lintc_softc *bcm_lintc_sc;

#ifdef SMP
#define BCM_LINTC_NIPIS		32	/* only mailbox 0 is used for IPI */
CTASSERT(INTR_IPI_COUNT <= BCM_LINTC_NIPIS);
#endif

#define	BCM_LINTC_LOCK(sc)		mtx_lock_spin(&(sc)->bls_mtx)
#define	BCM_LINTC_UNLOCK(sc)		mtx_unlock_spin(&(sc)->bls_mtx)
#define	BCM_LINTC_LOCK_INIT(sc)		mtx_init(&(sc)->bls_mtx,	\
    device_get_nameunit((sc)->bls_dev), "bcm_local_intc", MTX_SPIN)
#define	BCM_LINTC_LOCK_DESTROY(sc)	mtx_destroy(&(sc)->bls_mtx)

#define	bcm_lintc_read_4(sc, reg)		\
    bus_space_read_4((sc)->bls_bst, (sc)->bls_bsh, (reg))
#define	bcm_lintc_write_4(sc, reg, val)		\
    bus_space_write_4((sc)->bls_bst, (sc)->bls_bsh, (reg), (val))

static inline void
bcm_lintc_rwreg_clr(struct bcm_lintc_softc *sc, uint32_t reg,
    uint32_t mask)
{

	bcm_lintc_write_4(sc, reg, bcm_lintc_read_4(sc, reg) & ~mask);
}

static inline void
bcm_lintc_rwreg_set(struct bcm_lintc_softc *sc, uint32_t reg,
    uint32_t mask)
{

	bcm_lintc_write_4(sc, reg, bcm_lintc_read_4(sc, reg) | mask);
}

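/*
 * The timer interrupts are per-core.  Masking and unmasking clears or sets
 * the source's enable bit in the timer config register of every core that
 * is bound to the interrupt source.
 */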
static void
bcm_lintc_timer_mask(struct bcm_lintc_softc *sc, struct bcm_lintc_irqsrc *bli)
{
	cpuset_t *cpus;
	uint32_t cpu;

	cpus = &bli->bli_isrc.isrc_cpu;

	BCM_LINTC_LOCK(sc);
	for (cpu = 0; cpu < 4; cpu++)
		if (CPU_ISSET(cpu, cpus))
			bcm_lintc_rwreg_clr(sc, BCM_LINTC_TIMER_CFG_REG(cpu),
			    bli->bli_mask);
	BCM_LINTC_UNLOCK(sc);
}

static void
bcm_lintc_timer_unmask(struct bcm_lintc_softc *sc, struct bcm_lintc_irqsrc *bli)
{
	cpuset_t *cpus;
	uint32_t cpu;

	cpus = &bli->bli_isrc.isrc_cpu;

	BCM_LINTC_LOCK(sc);
	for (cpu = 0; cpu < 4; cpu++)
		if (CPU_ISSET(cpu, cpus))
			bcm_lintc_rwreg_set(sc, BCM_LINTC_TIMER_CFG_REG(cpu),
			    bli->bli_mask);
	BCM_LINTC_UNLOCK(sc);
}

static inline void
bcm_lintc_gpu_mask(struct bcm_lintc_softc *sc, struct bcm_lintc_irqsrc *bli)
{

	/* The GPU routing register is only ever accessed by one core. */
	bcm_lintc_write_4(sc, BCM_LINTC_GPU_ROUTING_REG, 0);
}

static inline void
bcm_lintc_gpu_unmask(struct bcm_lintc_softc *sc, struct bcm_lintc_irqsrc *bli)
{

	/* The GPU routing register is only ever accessed by one core. */
	bcm_lintc_write_4(sc, BCM_LINTC_GPU_ROUTING_REG, bli->bli_value);
}

static inline void
bcm_lintc_pmu_mask(struct bcm_lintc_softc *sc, struct bcm_lintc_irqsrc *bli)
{
	cpuset_t *cpus;
	uint32_t cpu, mask;

	mask = 0;
	cpus = &bli->bli_isrc.isrc_cpu;

	BCM_LINTC_LOCK(sc);
	for (cpu = 0; cpu < 4; cpu++)
		if (CPU_ISSET(cpu, cpus))
			mask |= BCM_LINTC_PIRR_IRQ_EN_CORE(cpu);
	/* Write-clear register. */
	bcm_lintc_write_4(sc, BCM_LINTC_PMU_ROUTING_CLR_REG, mask);
	BCM_LINTC_UNLOCK(sc);
}

static inline void
bcm_lintc_pmu_unmask(struct bcm_lintc_softc *sc, struct bcm_lintc_irqsrc *bli)
{
	cpuset_t *cpus;
	uint32_t cpu, mask;

	mask = 0;
	cpus = &bli->bli_isrc.isrc_cpu;

	BCM_LINTC_LOCK(sc);
	for (cpu = 0; cpu < 4; cpu++)
		if (CPU_ISSET(cpu, cpus))
			mask |= BCM_LINTC_PIRR_IRQ_EN_CORE(cpu);
	/* Write-set register. */
	bcm_lintc_write_4(sc, BCM_LINTC_PMU_ROUTING_SET_REG, mask);
	BCM_LINTC_UNLOCK(sc);
}

static void
bcm_lintc_mask(struct bcm_lintc_softc *sc, struct bcm_lintc_irqsrc *bli)
{

	switch (bli->bli_irq) {
	case BCM_LINTC_TIMER0_IRQ:
	case BCM_LINTC_TIMER1_IRQ:
	case BCM_LINTC_TIMER2_IRQ:
	case BCM_LINTC_TIMER3_IRQ:
		bcm_lintc_timer_mask(sc, bli);
		return;
	case BCM_LINTC_MBOX0_IRQ:
	case BCM_LINTC_MBOX1_IRQ:
	case BCM_LINTC_MBOX2_IRQ:
	case BCM_LINTC_MBOX3_IRQ:
		return;
	case BCM_LINTC_GPU_IRQ:
		bcm_lintc_gpu_mask(sc, bli);
		return;
	case BCM_LINTC_PMU_IRQ:
		bcm_lintc_pmu_mask(sc, bli);
		return;
	default:
		panic("%s: not implemented for irq %u", __func__, bli->bli_irq);
	}
}

static void
bcm_lintc_unmask(struct bcm_lintc_softc *sc, struct bcm_lintc_irqsrc *bli)
{

	switch (bli->bli_irq) {
	case BCM_LINTC_TIMER0_IRQ:
	case BCM_LINTC_TIMER1_IRQ:
	case BCM_LINTC_TIMER2_IRQ:
	case BCM_LINTC_TIMER3_IRQ:
		bcm_lintc_timer_unmask(sc, bli);
		return;
	case BCM_LINTC_MBOX0_IRQ:
	case BCM_LINTC_MBOX1_IRQ:
	case BCM_LINTC_MBOX2_IRQ:
	case BCM_LINTC_MBOX3_IRQ:
		return;
	case BCM_LINTC_GPU_IRQ:
		bcm_lintc_gpu_unmask(sc, bli);
		return;
	case BCM_LINTC_PMU_IRQ:
		bcm_lintc_pmu_unmask(sc, bli);
		return;
	default:
		panic("%s: not implemented for irq %u", __func__, bli->bli_irq);
	}
}

#ifdef SMP
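/*
 * IPIs are delivered through per-core mailbox 0: each IPI number is one bit
 * in the 32-bit mailbox.  Sending sets the bit via the target core's mailbox
 * set register; the receiving core reads its clear register to learn which
 * IPIs are pending and writes the bits back to acknowledge them.
 */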
static inline void
bcm_lintc_ipi_write(struct bcm_lintc_softc *sc, cpuset_t cpus, u_int ipi)
{
	u_int cpu;
	uint32_t mask;

	mask = 1 << ipi;
	for (cpu = 0; cpu < mp_ncpus; cpu++)
		if (CPU_ISSET(cpu, &cpus))
			bcm_lintc_write_4(sc, BCM_LINTC_MBOX0_SET_REG(cpu),
			    mask);
}

static inline void
bcm_lintc_ipi_dispatch(struct bcm_lintc_softc *sc, u_int cpu,
    struct trapframe *tf)
{
	u_int ipi;
	uint32_t mask;

	mask = bcm_lintc_read_4(sc, BCM_LINTC_MBOX0_CLR_REG(cpu));
	if (mask == 0) {
		device_printf(sc->bls_dev, "Spurious IPI detected\n");
		return;
	}

	for (ipi = 0; mask != 0; mask >>= 1, ipi++) {
		if ((mask & 0x01) == 0)
			continue;
		/*
		 * Clear the IPI before dispatching it, so that a newly
		 * incoming IPI is not lost, and make sure the clear is
		 * visible before the handler runs.
		 */
		bcm_lintc_write_4(sc, BCM_LINTC_MBOX0_CLR_REG(cpu), 1 << ipi);
#if defined(__aarch64__)
		dsb(sy);
#else
		dsb();
#endif
		intr_ipi_dispatch(ipi, tf);
	}
}
#endif

static inline void
bcm_lintc_irq_dispatch(struct bcm_lintc_softc *sc, u_int irq,
    struct trapframe *tf)
{
	struct bcm_lintc_irqsrc *bli;

	bli = &sc->bls_isrcs[irq];
	if (intr_isrc_dispatch(&bli->bli_isrc, tf) != 0)
		device_printf(sc->bls_dev, "Stray irq %u detected\n", irq);
}

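/*
 * Root interrupt filter.  It is installed via intr_pic_claim_root() and runs
 * on every core: it reads that core's pending register and dispatches the
 * sources found there until no supported bits remain pending.
 */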
static int
bcm_lintc_intr(void *arg)
{
	struct bcm_lintc_softc *sc;
	u_int cpu;
	uint32_t num, reg;
	struct trapframe *tf;

	sc = arg;
	cpu = PCPU_GET(cpuid);
	tf = curthread->td_intr_frame;

	for (num = 0; ; num++) {
		reg = bcm_lintc_read_4(sc, BCM_LINTC_PENDING_REG(cpu));
		if ((reg & BCM_LINTC_PENDING_MASK) == 0)
			break;
#ifdef SMP
		if (reg & BCM_LINTC_MBOX0_IRQ_MASK)
			bcm_lintc_ipi_dispatch(sc, cpu, tf);
#endif
		if (reg & BCM_LINTC_TIMER0_IRQ_MASK)
			bcm_lintc_irq_dispatch(sc, BCM_LINTC_TIMER0_IRQ, tf);
		if (reg & BCM_LINTC_TIMER1_IRQ_MASK)
			bcm_lintc_irq_dispatch(sc, BCM_LINTC_TIMER1_IRQ, tf);
		if (reg & BCM_LINTC_TIMER2_IRQ_MASK)
			bcm_lintc_irq_dispatch(sc, BCM_LINTC_TIMER2_IRQ, tf);
		if (reg & BCM_LINTC_TIMER3_IRQ_MASK)
			bcm_lintc_irq_dispatch(sc, BCM_LINTC_TIMER3_IRQ, tf);
		if (reg & BCM_LINTC_GPU_IRQ_MASK)
			bcm_lintc_irq_dispatch(sc, BCM_LINTC_GPU_IRQ, tf);
		if (reg & BCM_LINTC_PMU_IRQ_MASK)
			bcm_lintc_irq_dispatch(sc, BCM_LINTC_PMU_IRQ, tf);

		arm_irq_memory_barrier(0); /* XXX */
	}
	reg &= ~BCM_LINTC_PENDING_MASK;
	if (reg != 0)
		device_printf(sc->bls_dev, "Unknown interrupt(s) %x\n", reg);
	else if (num == 0 && bootverbose)
		device_printf(sc->bls_dev, "Spurious interrupt detected\n");

	return (FILTER_HANDLED);
}

static void
bcm_lintc_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{

	bcm_lintc_mask(device_get_softc(dev), (struct bcm_lintc_irqsrc *)isrc);
}

static void
bcm_lintc_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct bcm_lintc_irqsrc *bli = (struct bcm_lintc_irqsrc *)isrc;

	arm_irq_memory_barrier(bli->bli_irq);
	bcm_lintc_unmask(device_get_softc(dev), bli);
}

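/*
 * The controller uses one or two FDT interrupt cells: the first cell is the
 * interrupt source number (0..BCM_LINTC_NIRQS - 1) and the optional second
 * cell is expected to carry the trigger type, which is currently not
 * interpreted (see the TODO below).
 */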
static int
bcm_lintc_map_intr(device_t dev, struct intr_map_data *data,
    struct intr_irqsrc **isrcp)
{
	struct intr_map_data_fdt *daf;
	struct bcm_lintc_softc *sc;

	if (data->type != INTR_MAP_DATA_FDT)
		return (ENOTSUP);

	daf = (struct intr_map_data_fdt *)data;
	if (daf->ncells > 2 || daf->cells[0] >= BCM_LINTC_NIRQS)
		return (EINVAL);

	/* TODO: handle IRQ type here */

	sc = device_get_softc(dev);
	*isrcp = &sc->bls_isrcs[daf->cells[0]].bli_isrc;
	return (0);
}

static void
bcm_lintc_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{
	struct bcm_lintc_irqsrc *bli = (struct bcm_lintc_irqsrc *)isrc;

	if (bli->bli_irq == BCM_LINTC_GPU_IRQ)
		bcm_lintc_gpu_mask(device_get_softc(dev), bli);
	else {
		/*
		 * An ithread handler makes little sense for a PPI unless
		 * one ithread is bound to each core for it.  Only then could
		 * the interrupt be masked on the current core alone, with
		 * the ithread bound to that core guaranteeing that it is
		 * unmasked on the same core again.
		 */
		panic("%s: handlers are not supported", __func__);
	}
}

static void
bcm_lintc_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{
	struct bcm_lintc_irqsrc *bli = (struct bcm_lintc_irqsrc *)isrc;

	if (bli->bli_irq == BCM_LINTC_GPU_IRQ)
		bcm_lintc_gpu_unmask(device_get_softc(dev), bli);
	else {
		/* See the comment in bcm_lintc_pre_ithread(). */
		panic("%s: handlers are not supported", __func__);
	}
}

static void
bcm_lintc_post_filter(device_t dev, struct intr_irqsrc *isrc)
{
}

static int
bcm_lintc_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	struct bcm_lintc_softc *sc;

	if (isrc->isrc_handlers == 0 && isrc->isrc_flags & INTR_ISRCF_PPI) {
		sc = device_get_softc(dev);
		BCM_LINTC_LOCK(sc);
		CPU_SET(PCPU_GET(cpuid), &isrc->isrc_cpu);
		BCM_LINTC_UNLOCK(sc);
	}
	return (0);
}

#ifdef SMP
static void
bcm_lintc_init_rwreg_on_ap(struct bcm_lintc_softc *sc, u_int cpu, u_int irq,
    uint32_t reg, uint32_t mask)
{

	if (intr_isrc_init_on_cpu(&sc->bls_isrcs[irq].bli_isrc, cpu))
		bcm_lintc_rwreg_set(sc, reg, mask);
}

static void
bcm_lintc_init_pmu_on_ap(struct bcm_lintc_softc *sc, u_int cpu)
{
	struct intr_irqsrc *isrc = &sc->bls_isrcs[BCM_LINTC_PMU_IRQ].bli_isrc;

	if (intr_isrc_init_on_cpu(isrc, cpu)) {
		/* Write-set register. */
		bcm_lintc_write_4(sc, BCM_LINTC_PMU_ROUTING_SET_REG,
		    BCM_LINTC_PIRR_IRQ_EN_CORE(cpu));
	}
}

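/*
 * Called on each application processor during SMP startup.  Re-enable the
 * per-core timer and PMU routing for any source that the interrupt framework
 * reports as already set up on this core.
 */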
static void
bcm_lintc_init_secondary(device_t dev)
{
	u_int cpu;
	struct bcm_lintc_softc *sc;

	cpu = PCPU_GET(cpuid);
	sc = device_get_softc(dev);

	BCM_LINTC_LOCK(sc);
	bcm_lintc_init_rwreg_on_ap(sc, cpu, BCM_LINTC_TIMER0_IRQ,
	    BCM_LINTC_TIMER_CFG_REG(cpu), BCM_LINTC_TCR_IRQ_EN_TIMER(0));
	bcm_lintc_init_rwreg_on_ap(sc, cpu, BCM_LINTC_TIMER1_IRQ,
	    BCM_LINTC_TIMER_CFG_REG(cpu), BCM_LINTC_TCR_IRQ_EN_TIMER(1));
	bcm_lintc_init_rwreg_on_ap(sc, cpu, BCM_LINTC_TIMER2_IRQ,
	    BCM_LINTC_TIMER_CFG_REG(cpu), BCM_LINTC_TCR_IRQ_EN_TIMER(2));
	bcm_lintc_init_rwreg_on_ap(sc, cpu, BCM_LINTC_TIMER3_IRQ,
	    BCM_LINTC_TIMER_CFG_REG(cpu), BCM_LINTC_TCR_IRQ_EN_TIMER(3));
	bcm_lintc_init_pmu_on_ap(sc, cpu);
	BCM_LINTC_UNLOCK(sc);
}

static void
bcm_lintc_ipi_send(device_t dev, struct intr_irqsrc *isrc, cpuset_t cpus,
    u_int ipi)
{
	struct bcm_lintc_softc *sc = device_get_softc(dev);

	KASSERT(isrc == &sc->bls_isrcs[BCM_LINTC_MBOX0_IRQ].bli_isrc,
	    ("%s: bad ISRC %p argument", __func__, isrc));
	bcm_lintc_ipi_write(sc, cpus, ipi);
}

static int
bcm_lintc_ipi_setup(device_t dev, u_int ipi, struct intr_irqsrc **isrcp)
{
	struct bcm_lintc_softc *sc = device_get_softc(dev);

	KASSERT(ipi < BCM_LINTC_NIPIS, ("%s: too high ipi %u", __func__, ipi));

	*isrcp = &sc->bls_isrcs[BCM_LINTC_MBOX0_IRQ].bli_isrc;
	return (0);
}
#endif

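/*
 * Register all interrupt sources with the interrupt framework, register this
 * device as a PIC and claim it as the root interrupt controller.
 */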
static int
bcm_lintc_pic_attach(struct bcm_lintc_softc *sc)
{
	struct bcm_lintc_irqsrc *bisrcs;
	struct intr_pic *pic;
	int error;
	u_int flags;
	uint32_t irq;
	const char *name;
	intptr_t xref;

	bisrcs = sc->bls_isrcs;
	name = device_get_nameunit(sc->bls_dev);
	for (irq = 0; irq < BCM_LINTC_NIRQS; irq++) {
		bisrcs[irq].bli_irq = irq;
		switch (irq) {
		case BCM_LINTC_TIMER0_IRQ:
			bisrcs[irq].bli_mask = BCM_LINTC_TCR_IRQ_EN_TIMER(0);
			flags = INTR_ISRCF_PPI;
			break;
		case BCM_LINTC_TIMER1_IRQ:
			bisrcs[irq].bli_mask = BCM_LINTC_TCR_IRQ_EN_TIMER(1);
			flags = INTR_ISRCF_PPI;
			break;
		case BCM_LINTC_TIMER2_IRQ:
			bisrcs[irq].bli_mask = BCM_LINTC_TCR_IRQ_EN_TIMER(2);
			flags = INTR_ISRCF_PPI;
			break;
		case BCM_LINTC_TIMER3_IRQ:
			bisrcs[irq].bli_mask = BCM_LINTC_TCR_IRQ_EN_TIMER(3);
			flags = INTR_ISRCF_PPI;
			break;
		case BCM_LINTC_MBOX0_IRQ:
		case BCM_LINTC_MBOX1_IRQ:
		case BCM_LINTC_MBOX2_IRQ:
		case BCM_LINTC_MBOX3_IRQ:
			bisrcs[irq].bli_value = 0;	/* not used */
			flags = INTR_ISRCF_IPI;
			break;
		case BCM_LINTC_GPU_IRQ:
			bisrcs[irq].bli_value = BCM_LINTC_GIRR_IRQ_CORE(0);
			flags = 0;
			break;
		case BCM_LINTC_PMU_IRQ:
			bisrcs[irq].bli_value = 0;	/* not used */
			flags = INTR_ISRCF_PPI;
			break;
		default:
			bisrcs[irq].bli_value = 0;	/* not used */
			flags = 0;
			break;
		}

		error = intr_isrc_register(&bisrcs[irq].bli_isrc, sc->bls_dev,
		    flags, "%s,%u", name, irq);
		if (error != 0)
			return (error);
	}

	xref = OF_xref_from_node(ofw_bus_get_node(sc->bls_dev));
	pic = intr_pic_register(sc->bls_dev, xref);
	if (pic == NULL)
		return (ENXIO);

	return (intr_pic_claim_root(sc->bls_dev, xref, bcm_lintc_intr, sc, 0));
}

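/*
 * The controller is matched by the "brcm,bcm2836-l1-intc" compatible string
 * together with the "interrupt-controller" property.  A device tree node for
 * it typically looks like the following sketch (the address, size and cell
 * count come from the board DTS and are shown here only for illustration):
 *
 *	local_intc: local_intc@40000000 {
 *		compatible = "brcm,bcm2836-l1-intc";
 *		reg = <0x40000000 0x100>;
 *		interrupt-controller;
 *		#interrupt-cells = <2>;
 *	};
 */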
static int
bcm_lintc_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "brcm,bcm2836-l1-intc"))
		return (ENXIO);
	if (!ofw_bus_has_prop(dev, "interrupt-controller"))
		return (ENXIO);
	device_set_desc(dev, "BCM2836 Interrupt Controller");
	return (BUS_PROBE_DEFAULT);
}

static int
bcm_lintc_attach(device_t dev)
{
	struct bcm_lintc_softc *sc;
	int cpu, rid;

	sc = device_get_softc(dev);

	sc->bls_dev = dev;
	if (bcm_lintc_sc != NULL)
		return (ENXIO);

	rid = 0;
	sc->bls_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->bls_mem == NULL) {
		device_printf(dev, "could not allocate memory resource\n");
		return (ENXIO);
	}

	sc->bls_bst = rman_get_bustag(sc->bls_mem);
	sc->bls_bsh = rman_get_bushandle(sc->bls_mem);

	bcm_lintc_write_4(sc, BCM_LINTC_CONTROL_REG, 0);
	bcm_lintc_write_4(sc, BCM_LINTC_PRESCALER_REG, BCM_LINTC_PSR_19_2);

	/* Disable all timers on all cores. */
	for (cpu = 0; cpu < 4; cpu++)
		bcm_lintc_write_4(sc, BCM_LINTC_TIMER_CFG_REG(cpu), 0);

#ifdef SMP
	/* Enable mailbox 0 on all cores; it is used for IPIs. */
	for (cpu = 0; cpu < 4; cpu++)
		bcm_lintc_write_4(sc, BCM_LINTC_MBOX_CFG_REG(cpu),
		    BCM_LINTC_MCR_IRQ_EN_MBOX(0));
#endif

	if (bcm_lintc_pic_attach(sc) != 0) {
		device_printf(dev, "could not attach PIC\n");
		return (ENXIO);
	}

	BCM_LINTC_LOCK_INIT(sc);
	bcm_lintc_sc = sc;
	return (0);
}

static device_method_t bcm_lintc_methods[] = {
	DEVMETHOD(device_probe,		bcm_lintc_probe),
	DEVMETHOD(device_attach,	bcm_lintc_attach),

	DEVMETHOD(pic_disable_intr,	bcm_lintc_disable_intr),
	DEVMETHOD(pic_enable_intr,	bcm_lintc_enable_intr),
	DEVMETHOD(pic_map_intr,		bcm_lintc_map_intr),
	DEVMETHOD(pic_post_filter,	bcm_lintc_post_filter),
	DEVMETHOD(pic_post_ithread,	bcm_lintc_post_ithread),
	DEVMETHOD(pic_pre_ithread,	bcm_lintc_pre_ithread),
	DEVMETHOD(pic_setup_intr,	bcm_lintc_setup_intr),
#ifdef SMP
	DEVMETHOD(pic_init_secondary,	bcm_lintc_init_secondary),
	DEVMETHOD(pic_ipi_send,		bcm_lintc_ipi_send),
	DEVMETHOD(pic_ipi_setup,	bcm_lintc_ipi_setup),
#endif

	DEVMETHOD_END
};

static driver_t bcm_lintc_driver = {
	"lintc",
	bcm_lintc_methods,
	sizeof(struct bcm_lintc_softc),
};

static devclass_t bcm_lintc_devclass;

EARLY_DRIVER_MODULE(lintc, simplebus, bcm_lintc_driver, bcm_lintc_devclass,
    0, 0, BUS_PASS_INTERRUPT);