/*
 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <drivers/timer/arm_generic.h>

/*
 * Initialise the ARM Generic Timer for kernel use.
 *
 * On debug builds this first sanity-checks that the counter frequency the
 * hardware/firmware reports (CNTFRQ) matches the platform's configured
 * TIMER_CLOCK_HZ, warning (but not failing) on a mismatch. A CNTFRQ of 0
 * is ignored, since it means firmware never programmed the register.
 */
BOOT_CODE void initGenericTimer(void)
{
    if (config_set(CONFIG_DEBUG_BUILD)) {
        /* check the frequency is correct */
        word_t gpt_cntfrq = 0;
        SYSTEM_READ_WORD(CNTFRQ, gpt_cntfrq);
        /* The CNTFRQ register is 32-bits and is safe to compare with TIMER_CLOCK_HZ. */
        if (gpt_cntfrq != 0 && gpt_cntfrq != TIMER_CLOCK_HZ) {
            printf("Warning:  gpt_cntfrq %lu, expected %u\n", gpt_cntfrq,
                   (uint32_t) TIMER_CLOCK_HZ);
        }
    }

#ifdef CONFIG_KERNEL_MCS
    /* Deassert any pending deadline: ackDeadlineIRQ() programs the timer
     * comparator to UINT64_MAX, so no deadline is outstanding at boot. */
    ackDeadlineIRQ();
    /* BIT(0) is the ENABLE bit of the timer control register per the Arm
     * Generic Timer architecture; this starts the timer with IMASK clear. */
    SYSTEM_WRITE_WORD(CNT_CTL, BIT(0));
#else /* CONFIG_KERNEL_MCS */
    /* Tick-based kernel: arm the first timer interrupt. */
    resetTimer();
#endif /* !CONFIG_KERNEL_MCS */
}

/*
 * The exynos5 platforms require custom hardware initialisation before the
 * generic timer is usable. They need to overwrite initTimer before calling
 * initGenericTimer because of this. We cannot use a `weak` symbol definition
 * in this case because the kernel is built as a single file and multiple
 * symbol definitions with the same name are not allowed. We therefore resort
 * to ifdef'ing out this initTimer definition for exynos5 platforms.
 */
#ifndef CONFIG_PLAT_EXYNOS5
/* Default timer bring-up entry point; simply defers to the generic timer. */
BOOT_CODE void initTimer(void)
{
    initGenericTimer();
}
#endif

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT

#include <arch/object/vcpu.h>
#include <armv/vcpu.h>

/* Read the 64-bit physical counter (CNTPCT).
 * The annotations below are consumed by the verification tooling; do not
 * move or reformat them. */
/** MODIFIES: */
/** DONT_TRANSLATE */
static inline uint64_t read_cntpct(void)
{
    uint64_t val;
    SYSTEM_READ_64(CNTPCT, val);
    return val;
}

/*
 * Save the guest's virtual-timer state into the VCPU object when the VCPU is
 * switched away from, and quiesce the hardware virtual timer.
 *
 * Note the ordering: the control register is saved FIRST and then zeroed in
 * hardware, so the virtual timer cannot fire while this VCPU is off-core
 * (writing 0 clears the ENABLE bit of CNTV_CTL per the Arm architecture).
 */
static void save_virt_timer(vcpu_t *vcpu)
{
    /* Save control register */
    vcpu_save_reg(vcpu, seL4_VCPUReg_CNTV_CTL);
    vcpu_hw_write_reg(seL4_VCPUReg_CNTV_CTL, 0);
    /* Save Compare Value and Offset registers */
#ifdef CONFIG_ARCH_AARCH64
    vcpu_save_reg(vcpu, seL4_VCPUReg_CNTV_CVAL);
    vcpu_save_reg(vcpu, seL4_VCPUReg_CNTVOFF);
    vcpu_save_reg(vcpu, seL4_VCPUReg_CNTKCTL_EL1);
    check_export_arch_timer();
#else
    /* AArch32: the 64-bit CVAL/CNTVOFF values are split across two
     * word-sized VCPU register slots each. */
    uint64_t cval = get_cntv_cval_64();
    uint64_t cntvoff = get_cntv_off_64();
    vcpu_write_reg(vcpu, seL4_VCPUReg_CNTV_CVALhigh, (word_t)(cval >> 32));
    vcpu_write_reg(vcpu, seL4_VCPUReg_CNTV_CVALlow, (word_t)cval);
    vcpu_write_reg(vcpu, seL4_VCPUReg_CNTVOFFhigh, (word_t)(cntvoff >> 32));
    vcpu_write_reg(vcpu, seL4_VCPUReg_CNTVOFFlow, (word_t)cntvoff);
#endif
#ifdef CONFIG_VTIMER_UPDATE_VOFFSET
    /* Save counter value at the time the vcpu is disabled, so that
     * restore_virt_timer() can advance CNTVOFF by the physical time this
     * VCPU spent descheduled (hiding that time from the guest). */
    vcpu->virtTimer.last_pcount = read_cntpct();
#endif
}

/*
 * Restore the guest's virtual-timer state from the VCPU object when the VCPU
 * is switched back onto a core.
 *
 * When CONFIG_VTIMER_UPDATE_VOFFSET is set, CNTVOFF is increased by the
 * number of physical counter ticks that elapsed since save_virt_timer()
 * recorded last_pcount, so the guest's virtual time does not advance while
 * it was descheduled. The control register is restored LAST, mirroring the
 * save order, so the timer only becomes live once its comparator and offset
 * are back in place.
 */
static void restore_virt_timer(vcpu_t *vcpu)
{
    /* Restore virtual timer state */
#ifdef CONFIG_ARCH_AARCH64
    vcpu_restore_reg(vcpu, seL4_VCPUReg_CNTV_CVAL);
    vcpu_restore_reg(vcpu, seL4_VCPUReg_CNTKCTL_EL1);
#else
    /* AArch32: reassemble the 64-bit compare value from its two halves. */
    uint32_t cval_high = vcpu_read_reg(vcpu, seL4_VCPUReg_CNTV_CVALhigh);
    uint32_t cval_low = vcpu_read_reg(vcpu, seL4_VCPUReg_CNTV_CVALlow);
    uint64_t cval = ((uint64_t)cval_high << 32) | (uint64_t) cval_low;
    set_cntv_cval_64(cval);
#endif

    /* Set virtual timer offset */
#ifdef CONFIG_VTIMER_UPDATE_VOFFSET
    /* Unsigned subtraction is wrap-safe should CNTPCT ever roll over. */
    uint64_t pcount_delta;
    uint64_t current_cntpct = read_cntpct();
    pcount_delta = current_cntpct - vcpu->virtTimer.last_pcount;
#endif
#ifdef CONFIG_ARCH_AARCH64
#ifdef CONFIG_VTIMER_UPDATE_VOFFSET
    /* Fold the descheduled time into the saved offset, then write it back
     * to the VCPU object so the vcpu_restore_reg below installs it. */
    uint64_t offset = vcpu_read_reg(vcpu, seL4_VCPUReg_CNTVOFF);
    offset += pcount_delta;
    vcpu_write_reg(vcpu, seL4_VCPUReg_CNTVOFF, offset);
#endif
    vcpu_restore_reg(vcpu, seL4_VCPUReg_CNTVOFF);
#else
    uint32_t offset_high = vcpu_read_reg(vcpu, seL4_VCPUReg_CNTVOFFhigh);
    uint32_t offset_low = vcpu_read_reg(vcpu, seL4_VCPUReg_CNTVOFFlow);
    uint64_t offset = ((uint64_t)offset_high << 32) | (uint64_t) offset_low;
#ifdef CONFIG_VTIMER_UPDATE_VOFFSET
    offset += pcount_delta;
    vcpu_write_reg(vcpu, seL4_VCPUReg_CNTVOFFhigh, (word_t)(offset >> 32));
    vcpu_write_reg(vcpu, seL4_VCPUReg_CNTVOFFlow, (word_t) offset);
#endif
    set_cntv_off_64(offset);
#endif
    /* For verification, need to ensure we don't unmask an inactive interrupt;
     * the virtual timer should never get disabled, but the knowledge is not
     * available at this point */
    /* Restore interrupt mask state */
    if (likely(isIRQActive(CORE_IRQ_TO_IRQT(CURRENT_CPU_INDEX(), INTERRUPT_VTIMER_EVENT)))) {
        maskInterrupt(vcpu->vppi_masked[irqVPPIEventIndex(CORE_IRQ_TO_IRQT(CURRENT_CPU_INDEX(), INTERRUPT_VTIMER_EVENT))],
                      CORE_IRQ_TO_IRQT(CURRENT_CPU_INDEX(), INTERRUPT_VTIMER_EVENT));
    }
    /* Restore virtual timer control register */
    vcpu_restore_reg(vcpu, seL4_VCPUReg_CNTV_CTL);
}

#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */