/*	$NetBSD: tprof_armv8.c,v 1.6 2020/10/30 18:54:37 skrll Exp $	*/

/*-
 * Copyright (c) 2018 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tprof_armv8.c,v 1.6 2020/10/30 18:54:37 skrll Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/xcall.h>

#include <dev/tprof/tprof.h>

#include <arm/armreg.h>
#include <arm/cpufunc.h>

#include <dev/tprof/tprof_armv8.h>

/*
 * tprof(4) sampling backend for the ARMv8 Performance Monitors Extension
 * (PMU).  One event counter is programmed to overflow roughly every
 * counter_val events; the overflow interrupt (armv8_pmu_intr) records a
 * profiling sample and reloads the counter.
 */

/* Parameters of the profile in progress; copied in armv8_pmu_start(). */
static tprof_param_t armv8_pmu_param;
/* Index of the PMU event counter used for sampling. */
static const u_int armv8_pmu_counter = 1;
/* Desired number of events between samples (set by estimate_freq). */
static uint32_t counter_val;
/* Counter reload value: near the 32-bit wrap so overflow fires after ~counter_val events. */
static uint32_t counter_reset_val;

/*
 * Return true if this CPU's PMU advertises support for "event" in the
 * PMCEID0_EL0/PMCEID1_EL0 common-event bitmaps.  Events 0-31 are looked
 * up in PMCEID0, 32-63 in PMCEID1; anything >= 64 is reported as
 * unimplemented here.
 */
static bool
armv8_pmu_event_implemented(uint16_t event)
{
	uint64_t eid[2];

	if (event >= 64)
		return false;

	eid[0] = reg_pmceid0_el0_read();
	eid[1] = reg_pmceid1_el0_read();

	/* Events 0-31 -> eid[0], 32-63 -> eid[1]; bit index is event % 32. */
	const u_int idx = event / 32;
	const u_int bit = event % 32;

	if (eid[idx] & __BIT(bit))
		return true;

	return false;
}

/*
 * Program the event type register of "counter".  PMSELR_EL0 selects
 * which counter the indirect PMXEV* registers refer to; the isb()
 * ensures the selection takes effect before PMXEVTYPER_EL0 is written.
 */
static void
armv8_pmu_set_pmevtyper(u_int counter, uint64_t val)
{
	reg_pmselr_el0_write(counter);
	isb();
	reg_pmxevtyper_el0_write(val);
}

/* Load the event counter register (PMXEVCNTR_EL0) of "counter" with val. */
static void
armv8_pmu_set_pmevcntr(u_int counter, uint32_t val)
{
	reg_pmselr_el0_write(counter);
	isb();
	reg_pmxevcntr_el0_write(val);
}

/*
 * Per-CPU start routine, run on every CPU via xc_broadcast(): program
 * the sampling counter for the event in armv8_pmu_param, arm its
 * overflow interrupt and start it counting.  arg1/arg2 are unused
 * xcall arguments.
 */
static void
armv8_pmu_start_cpu(void *arg1, void *arg2)
{
	const uint32_t counter_mask = __BIT(armv8_pmu_counter);
	uint64_t pmevtyper;

	/* Disable event counter */
	reg_pmcntenclr_el0_write(counter_mask);

	/* Configure event counter */
	pmevtyper = __SHIFTIN(armv8_pmu_param.p_event, PMEVTYPER_EVTCOUNT);
	/*
	 * The U/P bits *exclude* counting at EL0/EL1 respectively, so
	 * set each one when the corresponding TPROF flag is absent.
	 */
	if (!ISSET(armv8_pmu_param.p_flags, TPROF_PARAM_USER))
		pmevtyper |= PMEVTYPER_U;
	if (!ISSET(armv8_pmu_param.p_flags, TPROF_PARAM_KERN))
		pmevtyper |= PMEVTYPER_P;

	armv8_pmu_set_pmevtyper(armv8_pmu_counter, pmevtyper);

	/* Enable overflow interrupts */
	reg_pmintenset_el1_write(counter_mask);

	/* Clear any stale overflow flag before starting */
	reg_pmovsclr_el0_write(counter_mask);

	/* Initialize event counter value */
	armv8_pmu_set_pmevcntr(armv8_pmu_counter, counter_reset_val);

	/* Enable event counter */
	reg_pmcntenset_el0_write(counter_mask);
}

/*
 * Per-CPU stop routine: disable the sampling counter and its overflow
 * interrupt.  arg1/arg2 are unused xcall arguments.
 */
static void
armv8_pmu_stop_cpu(void *arg1, void *arg2)
{
	const uint32_t counter_mask = __BIT(armv8_pmu_counter);

	/* Disable overflow interrupts */
	reg_pmintenclr_el1_write(counter_mask);

	/* Disable event counter */
	reg_pmcntenclr_el0_write(counter_mask);
}

/*
 * Report the sampling frequency (samples per second) and derive
 * counter_val, the number of events between overflows needed to hit
 * it.  Targets 10000 samples/s based on the CPU cycle frequency; if
 * cpu_cc_freq is unknown (division yields 0) a 4 GHz clock is assumed.
 * With PMCR_EL0.D set the cycle counter ticks once per 64 cycles, so
 * the interval is scaled down accordingly.
 *
 * NOTE(review): the estimate presumes the profiled event fires at
 * roughly the CPU clock rate (e.g. a cycle-count event); for rarer
 * events the actual sample rate will be lower.
 */
static uint64_t
armv8_pmu_estimate_freq(void)
{
	uint64_t cpufreq = curcpu()->ci_data.cpu_cc_freq;
	uint64_t freq = 10000;
	uint32_t pmcr;

	counter_val = cpufreq / freq;
	if (counter_val == 0)
		counter_val = 4000000000ULL / freq;

	pmcr = reg_pmcr_el0_read();
	if (pmcr & PMCR_D)
		counter_val /= 64;

	return freq;
}

/* Backend identification reported through the tprof framework. */
static uint32_t
armv8_pmu_ident(void)
{
	return TPROF_IDENT_ARMV8_GENERIC;
}

/*
 * Start profiling: validate the requested event against this CPU's
 * PMU, compute the counter reload value, then configure and start the
 * counter on every CPU.  Returns 0 on success or EINVAL if the event
 * is not implemented.
 */
static int
armv8_pmu_start(const tprof_param_t *param)
{
	uint64_t xc;

	if (!armv8_pmu_event_implemented(param->p_event)) {
		printf("%s: event %#" PRIx64 " not implemented on this CPU\n",
		    __func__, param->p_event);
		return EINVAL;
	}

	/*
	 * Start the 32-bit counter close to its wrap point so it
	 * overflows (and interrupts) after roughly counter_val events.
	 */
	counter_reset_val = -counter_val + 1;

	armv8_pmu_param = *param;
	xc = xc_broadcast(0, armv8_pmu_start_cpu, NULL, NULL);
	xc_wait(xc);

	return 0;
}

/* Stop profiling on all CPUs.  "param" is unused. */
static void
armv8_pmu_stop(const tprof_param_t *param)
{
	uint64_t xc;

	xc = xc_broadcast(0, armv8_pmu_stop_cpu, NULL, NULL);
	xc_wait(xc);
}

/* Operations vector registered with the tprof framework. */
static const tprof_backend_ops_t tprof_armv8_pmu_ops = {
	.tbo_estimate_freq = armv8_pmu_estimate_freq,
	.tbo_ident = armv8_pmu_ident,
	.tbo_start = armv8_pmu_start,
	.tbo_stop = armv8_pmu_stop,
};

/*
 * PMU overflow interrupt handler.  "priv" is the trapframe of the
 * interrupted context.  If our sampling counter overflowed, record a
 * sample at the interrupted PC and reload the counter; all pending
 * overflow flags are cleared before returning.  Returns 1 (handled).
 */
int
armv8_pmu_intr(void *priv)
{
	const struct trapframe * const tf = priv;
	const uint32_t counter_mask = __BIT(armv8_pmu_counter);
	tprof_frame_info_t tfi;

	const uint32_t pmovs = reg_pmovsset_el0_read();
	if ((pmovs & counter_mask) != 0) {
		tfi.tfi_pc = tf->tf_pc;
		/* Classify the sample by the PC's address range. */
		tfi.tfi_inkernel = tfi.tfi_pc >= VM_MIN_KERNEL_ADDRESS &&
		    tfi.tfi_pc < VM_MAX_KERNEL_ADDRESS;
		tprof_sample(NULL, &tfi);

		/* Re-arm the counter for the next sampling period. */
		armv8_pmu_set_pmevcntr(armv8_pmu_counter, counter_reset_val);
	}
	reg_pmovsclr_el0_write(pmovs);

	return 1;
}

/*
 * Attach-time initialization: quiesce the PMU (no EL0 access, no
 * overflow interrupts, event counters disabled) and register this
 * backend with the tprof framework.  Returns the value of
 * tprof_backend_register() (0 on success).
 */
int
armv8_pmu_init(void)
{
	/* Disable EL0 access to performance monitors */
	reg_pmuserenr_el0_write(0);

	/* Disable interrupts */
	reg_pmintenclr_el1_write(~0U);

	/* Disable event counters */
	reg_pmcntenclr_el0_write(PMCNTEN_P);

	return tprof_backend_register("tprof_armv8", &tprof_armv8_pmu_ops,
	    TPROF_BACKEND_VERSION);
}