// Copyright 2017 The Fuchsia Authors
// Copyright (c) 2017, Google Inc. All rights reserved.
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <arch/arch_ops.h>
#include <arch/arm64/hypervisor/gic/gicv3.h>
#include <arch/arm64/periphmap.h>
#include <assert.h>
#include <bits.h>
#include <dev/interrupt.h>
#include <dev/interrupt/arm_gic_common.h>
#include <err.h>
#include <inttypes.h>
#include <kernel/stats.h>
#include <kernel/thread.h>
#include <lib/ktrace.h>
#include <lk/init.h>
#include <pdev/driver.h>
#include <pdev/interrupt.h>
#include <string.h>
#include <trace.h>
#include <vm/vm.h>
#include <zircon/boot/driver-config.h>
#include <zircon/types.h>

#define LOCAL_TRACE 0

#include <arch/arm64.h>
#define IFRAME_PC(frame) ((frame)->elr)

// values read from zbi
static vaddr_t arm_gicv3_gic_base = 0;
static uint64_t arm_gicv3_gicd_offset = 0;
static uint64_t arm_gicv3_gicr_offset = 0;
static uint64_t arm_gicv3_gicr_stride = 0;

//
// IMX8M Errata: e11171: CA53: Cannot support single-core runtime wakeup

// According to the GIC500 specification and the Arm Trusted Firmware design, when a CPU
// core enters the deepest CPU idle state (power-down), it must disable the GIC500 CPU
// interface and set the Redistributor register to indicate that this CPU is in sleep state.

// On the NXP IMX8M, however, if the CPU core is in WFI or power-down with the CPU interface
// disabled, another core cannot wake up the powered-down core using an SGI interrupt.

// One workaround is to have another A53 core use IRQ0, which is controlled by the IOMUX
// GPR, to generate an external interrupt that wakes up the powered-down core.
// The SW workaround is implemented in the default BSP release. The workaround commit tag is
// "MLK-16804-04 driver: irqchip: Add IPI SW workaround for imx8mq" on the linux-imx project.
static uint64_t mx8_gpr_virt = 0;

static uint32_t ipi_base = 0;

// this header uses the arm_gicv3_gic_* variables above
#include <dev/interrupt/arm_gicv3_regs.h>

static uint gic_max_int;

static bool gic_is_valid_interrupt(unsigned int vector, uint32_t flags) {
    return (vector < gic_max_int);
}

static uint32_t gic_get_base_vector() {
    // ARM Generic Interrupt Controller v3&4 chapter 2.2
    // INTIDs 0-15 are local CPU interrupts
    return 16;
}

static uint32_t gic_get_max_vector() {
    return gic_max_int;
}

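// Wait for a pending register write to be observed by polling bit 31 of the given control
// register (the Register Write Pending bit on GICD_CTLR), giving up after a bounded number
// of iterations.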
static void gic_wait_for_rwp(uint64_t reg) {
    int count = 1000000;
    while (GICREG(0, reg) & (1 << 31)) {
        count -= 1;
        if (!count) {
            LTRACEF("arm_gicv3: rwp timeout 0x%x\n", GICREG(0, reg));
            return;
        }
    }
}

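// Enable or disable a single interrupt. SGIs and PPIs (INTIDs 0-31) are banked per CPU in
// the redistributors, so they are toggled via GICR_ISENABLER0/GICR_ICENABLER0 on every CPU;
// SPIs (INTIDs 32 and up) are toggled at the distributor.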
static void gic_set_enable(uint vector, bool enable) {
    int reg = vector / 32;
    uint32_t mask = (uint32_t)(1ULL << (vector % 32));

    if (vector < 32) {
        for (uint i = 0; i < arch_max_num_cpus(); i++) {
            if (enable) {
                GICREG(0, GICR_ISENABLER0(i)) = mask;
            } else {
                GICREG(0, GICR_ICENABLER0(i)) = mask;
            }
            gic_wait_for_rwp(GICR_CTLR(i));
        }
    } else {
        if (enable) {
            GICREG(0, GICD_ISENABLER(reg)) = mask;
        } else {
            GICREG(0, GICD_ICENABLER(reg)) = mask;
        }
        gic_wait_for_rwp(GICD_CTLR);
    }
}

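// Early per-CPU init: configure this CPU's redistributor and bring up the system register
// view of the CPU interface (the ICC_* registers).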
static void gic_init_percpu_early() {
    uint cpu = arch_curr_cpu_num();

    // redistributor config: configure sgi/ppi as non-secure group 1.
    GICREG(0, GICR_IGROUPR0(cpu)) = ~0;
    gic_wait_for_rwp(GICR_CTLR(cpu));

    // redistributor config: clear and mask sgi/ppi.
    GICREG(0, GICR_ICENABLER0(cpu)) = 0xffffffff;
    GICREG(0, GICR_ICPENDR0(cpu)) = ~0;
    gic_wait_for_rwp(GICR_CTLR(cpu));

    // TODO lpi init

    // enable system register interface
    uint32_t sre = gic_read_sre();
    if (!(sre & 0x1)) {
        gic_write_sre(sre | 0x1);
        sre = gic_read_sre();
        assert(sre & 0x1);
    }

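    // ICC_PMR_EL1 masks interrupts by priority; 0xff is the least restrictive value, so
    // interrupts of any priority may be signaled to this CPU.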
    // set priority threshold to max.
    gic_write_pmr(0xff);

    // TODO EOI deactivates interrupt - revisit.
    gic_write_ctlr(0);

    // enable group 1 interrupts.
    gic_write_igrpen(1);
}

static zx_status_t gic_init() {
    LTRACE_ENTRY;

    DEBUG_ASSERT(arch_ints_disabled());

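    // GICD_PIDR2 bits [7:4] (ArchRev) identify the GIC architecture revision.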
    uint pidr2 = GICREG(0, GICD_PIDR2);
    uint rev = BITS_SHIFT(pidr2, 7, 4);
    if (rev != GICV3 && rev != GICV4) {
        return ZX_ERR_NOT_FOUND;
    }

    uint32_t typer = GICREG(0, GICD_TYPER);
    uint32_t idbits = BITS_SHIFT(typer, 23, 19);
    gic_max_int = (idbits + 1) * 32;

    // disable the distributor
    GICREG(0, GICD_CTLR) = 0;
    gic_wait_for_rwp(GICD_CTLR);
    ISB;

    // distributor config: mask and clear all spis, set group 1.
    uint i;
    for (i = 32; i < gic_max_int; i += 32) {
        GICREG(0, GICD_ICENABLER(i / 32)) = ~0;
        GICREG(0, GICD_ICPENDR(i / 32)) = ~0;
        GICREG(0, GICD_IGROUPR(i / 32)) = ~0;
        GICREG(0, GICD_IGRPMODR(i / 32)) = 0;
    }
    gic_wait_for_rwp(GICD_CTLR);

    // enable distributor with ARE, group 1 enable
    GICREG(0, GICD_CTLR) = CTLR_ENALBE_G0 | CTLR_ENABLE_G1NS | CTLR_ARE_S;
    gic_wait_for_rwp(GICD_CTLR);

    // ensure we're running on cpu 0 and that cpu 0 corresponds to affinity 0.0.0.0
    DEBUG_ASSERT(arch_curr_cpu_num() == 0);
    DEBUG_ASSERT(arch_cpu_num_to_cpu_id(0u) == 0);     // AFF0
    DEBUG_ASSERT(arch_cpu_num_to_cluster_id(0u) == 0); // AFF1

    // TODO(maniscalco): If/when we support AFF2/AFF3, be sure to assert those here.

    // set spi to target cpu 0 (affinity 0.0.0.0). must do this after ARE enable
    uint max_cpu = BITS_SHIFT(typer, 7, 5);
    if (max_cpu > 0) {
        for (i = 32; i < gic_max_int; i++) {
            GICREG64(0, GICD_IROUTER(i)) = 0;
        }
    }

    gic_init_percpu_early();

    mb();
    ISB;

    return ZX_OK;
}

static zx_status_t arm_gic_sgi(u_int irq, u_int flags, u_int cpu_mask) {
    if (flags != ARM_GIC_SGI_FLAG_NS) {
        return ZX_ERR_INVALID_ARGS;
    }

    if (irq >= 16) {
        return ZX_ERR_INVALID_ARGS;
    }

    smp_mb();

    uint cpu = 0;
    uint cluster = 0;
    uint64_t val = 0;
    while (cpu_mask && cpu < arch_max_num_cpus()) {
        u_int mask = 0;
        while (arch_cpu_num_to_cluster_id(cpu) == cluster) {
            if (cpu_mask & (1u << cpu)) {
                mask |= 1u << arch_cpu_num_to_cpu_id(cpu);
                cpu_mask &= ~(1u << cpu);
            }
            cpu += 1;
        }

        // Without the RS field set, we can only deal with the first
        // 16 cpus within a single cluster
        DEBUG_ASSERT((mask & 0xffff) == mask);

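        // ICC_SGI1R_EL1 layout (with RS/Aff2/Aff3 left at zero): INTID in bits [27:24],
        // Aff1 (the cluster) in bits [23:16], and the target list in bits [15:0].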
        val = ((irq & 0xf) << 24) |
              ((cluster & 0xff) << 16) |
              (mask & 0xffff);

        gic_write_sgi1r(val);
        cluster += 1;
        // Workaround for the IMX8M errata described at the top of this file.
        if (mx8_gpr_virt) {
            uint32_t regVal;
            // set irq32 pending to wake up the core
            regVal = *(volatile uint32_t*)(mx8_gpr_virt + 0x4);
            regVal |= (1 << 12);
            *(volatile uint32_t*)(mx8_gpr_virt + 0x4) = regVal;
            // delay
            spin(50);
            regVal &= ~(1 << 12);
            *(volatile uint32_t*)(mx8_gpr_virt + 0x4) = regVal;
        }
    }

    return ZX_OK;
}

static zx_status_t gic_mask_interrupt(unsigned int vector) {
    LTRACEF("vector %u\n", vector);

    if (vector >= gic_max_int) {
        return ZX_ERR_INVALID_ARGS;
    }

    gic_set_enable(vector, false);

    return ZX_OK;
}

static zx_status_t gic_unmask_interrupt(unsigned int vector) {
    LTRACEF("vector %u\n", vector);

    if (vector >= gic_max_int) {
        return ZX_ERR_INVALID_ARGS;
    }

    gic_set_enable(vector, true);

    return ZX_OK;
}

static zx_status_t gic_configure_interrupt(unsigned int vector,
                                           enum interrupt_trigger_mode tm,
                                           enum interrupt_polarity pol) {
    LTRACEF("vector %u, trigger mode %d, polarity %d\n", vector, tm, pol);

    if (vector <= 15 || vector >= gic_max_int) {
        return ZX_ERR_INVALID_ARGS;
    }

    if (pol != IRQ_POLARITY_ACTIVE_HIGH) {
        // TODO: polarity should actually be configured through a GPIO controller
        return ZX_ERR_NOT_SUPPORTED;
    }

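    // GICD_ICFGRn holds two configuration bits per interrupt; setting bit 1 of the pair
    // selects edge-triggered, clearing it selects level-sensitive.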
    uint reg = vector / 16;
    uint mask = 0x2 << ((vector % 16) * 2);
    uint32_t val = GICREG(0, GICD_ICFGR(reg));
    if (tm == IRQ_TRIGGER_MODE_EDGE) {
        val |= mask;
    } else {
        val &= ~mask;
    }
    GICREG(0, GICD_ICFGR(reg)) = val;

    return ZX_OK;
}

static zx_status_t gic_get_interrupt_config(unsigned int vector,
                                            enum interrupt_trigger_mode* tm,
                                            enum interrupt_polarity* pol) {
    LTRACEF("vector %u\n", vector);

    if (vector >= gic_max_int) {
        return ZX_ERR_INVALID_ARGS;
    }

    if (tm) {
        *tm = IRQ_TRIGGER_MODE_EDGE;
    }
    if (pol) {
        *pol = IRQ_POLARITY_ACTIVE_HIGH;
    }

    return ZX_OK;
}

static unsigned int gic_remap_interrupt(unsigned int vector) {
    LTRACEF("vector %u\n", vector);
    return vector;
}

// called from assembly
static void gic_handle_irq(iframe* frame) {
    // get the current vector
    uint32_t iar = gic_read_iar();
    unsigned vector = iar & 0x3ff;

    LTRACEF_LEVEL(2, "iar %#x, vector %u\n", iar, vector);

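    // INTIDs 1020-1023 are reserved for special purposes; 1023 means no pending interrupt.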
    if (vector >= 0x3fe) {
        // spurious
        // TODO check this
        return;
    }

    // tracking external hardware irqs in this variable
    if (vector >= 32) {
        CPU_STATS_INC(interrupts);
    }

    uint cpu = arch_curr_cpu_num();

    ktrace_tiny(TAG_IRQ_ENTER, (vector << 8) | cpu);

    LTRACEF_LEVEL(2, "iar 0x%x cpu %u currthread %p vector %u pc %#" PRIxPTR "\n",
                  iar, cpu, get_current_thread(), vector, (uintptr_t)IFRAME_PC(frame));

    // deliver the interrupt
    struct int_handler_struct* handler = pdev_get_int_handler(vector);
    if (handler->handler) {
        handler->handler(handler->arg);
    }

    gic_write_eoir(vector);

    LTRACEF_LEVEL(2, "cpu %u exit\n", cpu);

    ktrace_tiny(TAG_IRQ_EXIT, (vector << 8) | cpu);
}

static void gic_handle_fiq(iframe* frame) {
    PANIC_UNIMPLEMENTED;
}

static zx_status_t gic_send_ipi(cpu_mask_t target, mp_ipi_t ipi) {
    uint gic_ipi_num = ipi + ipi_base;

    // filter out targets outside of the range of cpus we care about
    target &= (cpu_mask_t)(((1UL << arch_max_num_cpus()) - 1));
    if (target != 0) {
        LTRACEF("target 0x%x, gic_ipi %u\n", target, gic_ipi_num);
        arm_gic_sgi(gic_ipi_num, ARM_GIC_SGI_FLAG_NS, target);
    }

    return ZX_OK;
}

static void arm_ipi_halt_handler(void*) {
    LTRACEF("cpu %u\n", arch_curr_cpu_num());

    arch_disable_ints();
    for (;;) {
    }
}

static void gic_init_percpu() {
    mp_set_curr_cpu_online(true);
    unmask_interrupt(MP_IPI_GENERIC + ipi_base);
    unmask_interrupt(MP_IPI_RESCHEDULE + ipi_base);
    unmask_interrupt(MP_IPI_INTERRUPT + ipi_base);
    unmask_interrupt(MP_IPI_HALT + ipi_base);
}

static void gic_shutdown() {
    // Turn off all GIC0 interrupts at the distributor.
    GICREG(0, GICD_CTLR) = 0;
}

// Returns true if any PPIs are enabled on the calling CPU.
static bool is_ppi_enabled() {
    DEBUG_ASSERT(arch_ints_disabled());

    // PPIs are 16-31.
    uint32_t mask = 0xffff0000;

    uint cpu_num = arch_curr_cpu_num();
    uint32_t reg = GICREG(0, GICR_ICENABLER0(cpu_num));
    if ((reg & mask) != 0) {
        return true;
    }

    return false;
}

// Returns true if any SPIs are enabled on the calling CPU.
static bool is_spi_enabled() {
    DEBUG_ASSERT(arch_ints_disabled());

    uint cpu_num = arch_curr_cpu_num();

    // TODO(maniscalco): If/when we support AFF2/AFF3, update the mask below.
    uint aff0 = arch_cpu_num_to_cpu_id(cpu_num);
    uint aff1 = arch_cpu_num_to_cluster_id(cpu_num);
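    // GICD_IROUTER<n> holds the target Aff0 in bits [7:0] and Aff1 in bits [15:8].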
    uint64_t aff_mask = (aff1 << 8) + aff0;

    // Check each SPI to see if it's routed to this CPU.
    for (uint i = 32u; i < gic_max_int; ++i) {
        if ((GICREG64(0, GICD_IROUTER(i)) & aff_mask) != 0) {
            return true;
        }
    }

    return false;
}

static void gic_shutdown_cpu() {
    DEBUG_ASSERT(arch_ints_disabled());

    // Before we shut down the GIC, make sure we've migrated/disabled any and all peripheral
    // interrupts targeted at this CPU (PPIs and SPIs).
    DEBUG_ASSERT(!is_ppi_enabled());
    DEBUG_ASSERT(!is_spi_enabled());
    // TODO(maniscalco): If/when we start using LPIs, make sure none are targeted at this CPU.

    // Disable group 1 interrupts at the CPU interface.
    gic_write_igrpen(0);
}

static bool gic_msi_is_supported() {
    return false;
}

static bool gic_msi_supports_masking() {
    return false;
}

static void gic_msi_mask_unmask(const msi_block_t* block, uint msi_id, bool mask) {
    PANIC_UNIMPLEMENTED;
}

static zx_status_t gic_msi_alloc_block(uint requested_irqs,
                                       bool can_target_64bit,
                                       bool is_msix,
                                       msi_block_t* out_block) {
    PANIC_UNIMPLEMENTED;
}

static void gic_msi_free_block(msi_block_t* block) {
    PANIC_UNIMPLEMENTED;
}

static void gic_msi_register_handler(const msi_block_t* block,
                                     uint msi_id,
                                     int_handler handler,
                                     void* ctx) {
    PANIC_UNIMPLEMENTED;
}

static const struct pdev_interrupt_ops gic_ops = {
    .mask = gic_mask_interrupt,
    .unmask = gic_unmask_interrupt,
    .configure = gic_configure_interrupt,
    .get_config = gic_get_interrupt_config,
    .is_valid = gic_is_valid_interrupt,
    .get_base_vector = gic_get_base_vector,
    .get_max_vector = gic_get_max_vector,
    .remap = gic_remap_interrupt,
    .send_ipi = gic_send_ipi,
    .init_percpu_early = gic_init_percpu_early,
    .init_percpu = gic_init_percpu,
    .handle_irq = gic_handle_irq,
    .handle_fiq = gic_handle_fiq,
    .shutdown = gic_shutdown,
    .shutdown_cpu = gic_shutdown_cpu,
    .msi_is_supported = gic_msi_is_supported,
    .msi_supports_masking = gic_msi_supports_masking,
    .msi_mask_unmask = gic_msi_mask_unmask,
    .msi_alloc_block = gic_msi_alloc_block,
    .msi_free_block = gic_msi_free_block,
    .msi_register_handler = gic_msi_register_handler,
};

static void arm_gic_v3_init(const void* driver_data, uint32_t length) {
    ASSERT(length >= sizeof(dcfg_arm_gicv3_driver_t));
    auto driver = static_cast<const dcfg_arm_gicv3_driver_t*>(driver_data);
    ASSERT(driver->mmio_phys);

    LTRACE_ENTRY;

    // If a GIC driver is already registered to the GIC interface, it means we are running
    // GICv2 and do not need to initialize GICv3. Both GICv3 and GICv2 are listed in
    // board.mdi, so both drivers get initialized.
    if (gicv3_is_gic_registered()) {
        return;
    }

    if (driver->mx8_gpr_phys) {
        printf("arm-gic-v3: Applying Errata e11171 for NXP MX8!\n");
        mx8_gpr_virt = periph_paddr_to_vaddr(driver->mx8_gpr_phys);
        ASSERT(mx8_gpr_virt);
    }

    arm_gicv3_gic_base = periph_paddr_to_vaddr(driver->mmio_phys);
    ASSERT(arm_gicv3_gic_base);
    arm_gicv3_gicd_offset = driver->gicd_offset;
    arm_gicv3_gicr_offset = driver->gicr_offset;
    arm_gicv3_gicr_stride = driver->gicr_stride;
    ipi_base = driver->ipi_base;

    if (gic_init() != ZX_OK) {
        if (driver->optional) {
            // failed to detect gic v3 but it's marked optional. continue
            return;
        }
        printf("GICv3: failed to detect GICv3, interrupts will be broken\n");
        return;
    }

    dprintf(SPEW, "detected GICv3\n");

    pdev_register_interrupts(&gic_ops);

    zx_status_t status =
        gic_register_sgi_handler(MP_IPI_GENERIC + ipi_base, &mp_mbx_generic_irq);
    DEBUG_ASSERT(status == ZX_OK);
    status =
        gic_register_sgi_handler(MP_IPI_RESCHEDULE + ipi_base, &mp_mbx_reschedule_irq);
    DEBUG_ASSERT(status == ZX_OK);
    status = gic_register_sgi_handler(MP_IPI_INTERRUPT + ipi_base, &mp_mbx_interrupt_irq);
    DEBUG_ASSERT(status == ZX_OK);
    status = gic_register_sgi_handler(MP_IPI_HALT + ipi_base, &arm_ipi_halt_handler);
    DEBUG_ASSERT(status == ZX_OK);

    gicv3_hw_interface_register();

    LTRACE_EXIT;
}

LK_PDEV_INIT(arm_gic_v3_init, KDRV_ARM_GIC_V3, arm_gic_v3_init, LK_INIT_LEVEL_PLATFORM_EARLY);