/*
 * Copyright (c) 2005-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
25 * 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ 27 */ 28/* 29 * @OSF_FREE_COPYRIGHT@ 30 */ 31/* 32 * @APPLE_FREE_COPYRIGHT@ 33 */ 34 35/* 36 * Author: Bill Angell, Apple 37 * Date: 10/auht-five 38 * 39 * Random diagnostics, augmented Derek Kumar 2011 40 * 41 * 42 */ 43 44 45#include <kern/machine.h> 46#include <kern/processor.h> 47#include <mach/machine.h> 48#include <mach/processor_info.h> 49#include <mach/mach_types.h> 50#include <mach/boolean.h> 51#include <kern/thread.h> 52#include <kern/task.h> 53#include <kern/ipc_kobject.h> 54#include <mach/vm_param.h> 55#include <ipc/port.h> 56#include <ipc/ipc_entry.h> 57#include <ipc/ipc_space.h> 58#include <ipc/ipc_object.h> 59#include <ipc/ipc_port.h> 60#include <vm/vm_kern.h> 61#include <vm/vm_map.h> 62#include <vm/vm_page.h> 63#include <vm/pmap.h> 64#include <pexpert/pexpert.h> 65#include <console/video_console.h> 66#include <i386/cpu_data.h> 67#include <i386/Diagnostics.h> 68#include <i386/mp.h> 69#include <i386/pmCPU.h> 70#include <i386/tsc.h> 71#include <mach/i386/syscall_sw.h> 72#include <kern/kalloc.h> 73#include <sys/kdebug.h> 74#include <i386/machine_cpu.h> 75#include <i386/misc_protos.h> 76#include <i386/cpuid.h> 77 78#define PERMIT_PERMCHECK (0) 79 80diagWork dgWork; 81uint64_t lastRuptClear = 0ULL; 82boolean_t diag_pmc_enabled = FALSE; 83void cpu_powerstats(void *); 84 85typedef struct { 86 uint64_t caperf; 87 uint64_t cmperf; 88 uint64_t ccres[6]; 89 uint64_t crtimes[CPU_RTIME_BINS]; 90 uint64_t citimes[CPU_ITIME_BINS]; 91 uint64_t crtime_total; 92 uint64_t citime_total; 93 uint64_t cpu_idle_exits; 94 uint64_t cpu_insns; 95 uint64_t cpu_ucc; 96 uint64_t cpu_urc; 97} core_energy_stat_t; 98 99typedef struct { 100 uint64_t pkes_version; 101 uint64_t pkg_cres[2][7]; 102 uint64_t pkg_power_unit; 103 uint64_t pkg_energy; 104 uint64_t pp0_energy; 105 uint64_t pp1_energy; 106 uint64_t ddr_energy; 107 uint64_t llc_flushed_cycles; 108 uint64_t ring_ratio_instantaneous; 109 uint64_t IA_frequency_clipping_cause; 110 
uint64_t GT_frequency_clipping_cause; 111 uint64_t pkg_idle_exits; 112 uint64_t pkg_rtimes[CPU_RTIME_BINS]; 113 uint64_t pkg_itimes[CPU_ITIME_BINS]; 114 uint64_t mbus_delay_time; 115 uint64_t mint_delay_time; 116 uint32_t ncpus; 117 core_energy_stat_t cest[]; 118} pkg_energy_statistics_t; 119 120 121int 122diagCall64(x86_saved_state_t * state) 123{ 124 uint64_t curpos, i, j; 125 uint64_t selector, data; 126 uint64_t currNap, durNap; 127 x86_saved_state64_t *regs; 128 boolean_t diagflag; 129 uint32_t rval = 0; 130 131 assert(is_saved_state64(state)); 132 regs = saved_state64(state); 133 134 diagflag = ((dgWork.dgFlags & enaDiagSCs) != 0); 135 selector = regs->rdi; 136 137 switch (selector) { /* Select the routine */ 138 case dgRuptStat: /* Suck Interruption statistics */ 139 (void) ml_set_interrupts_enabled(TRUE); 140 data = regs->rsi; /* Get the number of processors */ 141 142 if (data == 0) { /* If no location is specified for data, clear all 143 * counts 144 */ 145 for (i = 0; i < real_ncpus; i++) { /* Cycle through 146 * processors */ 147 for (j = 0; j < 256; j++) 148 cpu_data_ptr[i]->cpu_hwIntCnt[j] = 0; 149 } 150 151 lastRuptClear = mach_absolute_time(); /* Get the time of clear */ 152 rval = 1; /* Normal return */ 153 break; 154 } 155 156 (void) copyout((char *) &real_ncpus, data, sizeof(real_ncpus)); /* Copy out number of 157 * processors */ 158 currNap = mach_absolute_time(); /* Get the time now */ 159 durNap = currNap - lastRuptClear; /* Get the last interval 160 * duration */ 161 if (durNap == 0) 162 durNap = 1; /* This is a very short time, make it 163 * bigger */ 164 165 curpos = data + sizeof(real_ncpus); /* Point to the next 166 * available spot */ 167 168 for (i = 0; i < real_ncpus; i++) { /* Move 'em all out */ 169 (void) copyout((char *) &durNap, curpos, 8); /* Copy out the time 170 * since last clear */ 171 (void) copyout((char *) &cpu_data_ptr[i]->cpu_hwIntCnt, curpos + 8, 256 * sizeof(uint32_t)); /* Copy out interrupt 172 * data for this 173 * 
processor */ 174 curpos = curpos + (256 * sizeof(uint32_t) + 8); /* Point to next out put 175 * slot */ 176 } 177 rval = 1; 178 break; 179 180 case dgPowerStat: 181 { 182 uint32_t c2l = 0, c2h = 0, c3l = 0, c3h = 0, c6l = 0, c6h = 0, c7l = 0, c7h = 0; 183 uint32_t pkg_unit_l = 0, pkg_unit_h = 0, pkg_ecl = 0, pkg_ech = 0; 184 185 pkg_energy_statistics_t pkes; 186 core_energy_stat_t cest; 187 188 bzero(&pkes, sizeof(pkes)); 189 bzero(&cest, sizeof(cest)); 190 191 pkes.pkes_version = 1ULL; 192 rdmsr_carefully(MSR_IA32_PKG_C2_RESIDENCY, &c2l, &c2h); 193 rdmsr_carefully(MSR_IA32_PKG_C3_RESIDENCY, &c3l, &c3h); 194 rdmsr_carefully(MSR_IA32_PKG_C6_RESIDENCY, &c6l, &c6h); 195 rdmsr_carefully(MSR_IA32_PKG_C7_RESIDENCY, &c7l, &c7h); 196 197 pkes.pkg_cres[0][0] = ((uint64_t)c2h << 32) | c2l; 198 pkes.pkg_cres[0][1] = ((uint64_t)c3h << 32) | c3l; 199 pkes.pkg_cres[0][2] = ((uint64_t)c6h << 32) | c6l; 200 pkes.pkg_cres[0][3] = ((uint64_t)c7h << 32) | c7l; 201 202 uint64_t c8r = ~0ULL, c9r = ~0ULL, c10r = ~0ULL; 203 204 rdmsr64_carefully(MSR_IA32_PKG_C8_RESIDENCY, &c8r); 205 rdmsr64_carefully(MSR_IA32_PKG_C9_RESIDENCY, &c9r); 206 rdmsr64_carefully(MSR_IA32_PKG_C10_RESIDENCY, &c10r); 207 208 pkes.pkg_cres[0][4] = c8r; 209 pkes.pkg_cres[0][5] = c9r; 210 pkes.pkg_cres[0][6] = c10r; 211 212 pkes.ddr_energy = ~0ULL; 213 rdmsr64_carefully(MSR_IA32_DDR_ENERGY_STATUS, &pkes.ddr_energy); 214 pkes.llc_flushed_cycles = ~0ULL; 215 rdmsr64_carefully(MSR_IA32_LLC_FLUSHED_RESIDENCY_TIMER, &pkes.llc_flushed_cycles); 216 217 pkes.ring_ratio_instantaneous = ~0ULL; 218 rdmsr64_carefully(MSR_IA32_RING_PERF_STATUS, &pkes.ring_ratio_instantaneous); 219 220 pkes.IA_frequency_clipping_cause = ~0ULL; 221 rdmsr64_carefully(MSR_IA32_IA_PERF_LIMIT_REASONS, &pkes.IA_frequency_clipping_cause); 222 223 pkes.GT_frequency_clipping_cause = ~0ULL; 224 rdmsr64_carefully(MSR_IA32_GT_PERF_LIMIT_REASONS, &pkes.GT_frequency_clipping_cause); 225 226 rdmsr_carefully(MSR_IA32_PKG_POWER_SKU_UNIT, &pkg_unit_l, &pkg_unit_h); 
227 rdmsr_carefully(MSR_IA32_PKG_ENERGY_STATUS, &pkg_ecl, &pkg_ech); 228 pkes.pkg_power_unit = ((uint64_t)pkg_unit_h << 32) | pkg_unit_l; 229 pkes.pkg_energy = ((uint64_t)pkg_ech << 32) | pkg_ecl; 230 231 rdmsr_carefully(MSR_IA32_PP0_ENERGY_STATUS, &pkg_ecl, &pkg_ech); 232 pkes.pp0_energy = ((uint64_t)pkg_ech << 32) | pkg_ecl; 233 234 rdmsr_carefully(MSR_IA32_PP1_ENERGY_STATUS, &pkg_ecl, &pkg_ech); 235 pkes.pp1_energy = ((uint64_t)pkg_ech << 32) | pkg_ecl; 236 237 pkes.pkg_idle_exits = current_cpu_datap()->lcpu.package->package_idle_exits; 238 pkes.ncpus = real_ncpus; 239 240 (void) ml_set_interrupts_enabled(TRUE); 241 242 copyout(&pkes, regs->rsi, sizeof(pkes)); 243 curpos = regs->rsi + sizeof(pkes); 244 245 mp_cpus_call(CPUMASK_ALL, ASYNC, cpu_powerstats, NULL); 246 247 for (i = 0; i < real_ncpus; i++) { 248 (void) ml_set_interrupts_enabled(FALSE); 249 250 cest.caperf = cpu_data_ptr[i]->cpu_aperf; 251 cest.cmperf = cpu_data_ptr[i]->cpu_mperf; 252 cest.ccres[0] = cpu_data_ptr[i]->cpu_c3res; 253 cest.ccres[1] = cpu_data_ptr[i]->cpu_c6res; 254 cest.ccres[2] = cpu_data_ptr[i]->cpu_c7res; 255 256 bcopy(&cpu_data_ptr[i]->cpu_rtimes[0], &cest.crtimes[0], sizeof(cest.crtimes)); 257 bcopy(&cpu_data_ptr[i]->cpu_itimes[0], &cest.citimes[0], sizeof(cest.citimes)); 258 259 cest.citime_total = cpu_data_ptr[i]->cpu_itime_total; 260 cest.crtime_total = cpu_data_ptr[i]->cpu_rtime_total; 261 cest.cpu_idle_exits = cpu_data_ptr[i]->cpu_idle_exits; 262 cest.cpu_insns = cpu_data_ptr[i]->cpu_cur_insns; 263 cest.cpu_ucc = cpu_data_ptr[i]->cpu_cur_ucc; 264 cest.cpu_urc = cpu_data_ptr[i]->cpu_cur_urc; 265 (void) ml_set_interrupts_enabled(TRUE); 266 267 copyout(&cest, curpos, sizeof(cest)); 268 curpos += sizeof(cest); 269 } 270 rval = 1; 271 } 272 break; 273 case dgEnaPMC: 274 { 275 boolean_t enable = TRUE; 276 uint32_t cpuinfo[4]; 277 /* Require architectural PMC v2 or higher, corresponding to 278 * Merom+, or equivalent virtualised facility. 
279 */ 280 do_cpuid(0xA, &cpuinfo[0]); 281 if ((cpuinfo[0] & 0xFF) >= 2) { 282 mp_cpus_call(CPUMASK_ALL, ASYNC, cpu_pmc_control, &enable); 283 diag_pmc_enabled = TRUE; 284 } 285 rval = 1; 286 } 287 break; 288#if DEBUG 289 case dgGzallocTest: 290 { 291 (void) ml_set_interrupts_enabled(TRUE); 292 if (diagflag) { 293 unsigned *ptr = (unsigned *)kalloc(1024); 294 kfree(ptr, 1024); 295 *ptr = 0x42; 296 } 297 } 298 break; 299#endif 300 301#if PERMIT_PERMCHECK 302 case dgPermCheck: 303 { 304 (void) ml_set_interrupts_enabled(TRUE); 305 if (diagflag) 306 rval = pmap_permissions_verify(kernel_pmap, kernel_map, 0, ~0ULL); 307 } 308 break; 309#endif /* PERMIT_PERMCHECK */ 310 default: /* Handle invalid ones */ 311 rval = 0; /* Return an exception */ 312 } 313 314 regs->rax = rval; 315 316 return rval; 317} 318 319void cpu_powerstats(__unused void *arg) { 320 cpu_data_t *cdp = current_cpu_datap(); 321 __unused int cnum = cdp->cpu_number; 322 uint32_t cl = 0, ch = 0, mpl = 0, mph = 0, apl = 0, aph = 0; 323 324 rdmsr_carefully(MSR_IA32_MPERF, &mpl, &mph); 325 rdmsr_carefully(MSR_IA32_APERF, &apl, &aph); 326 327 cdp->cpu_mperf = ((uint64_t)mph << 32) | mpl; 328 cdp->cpu_aperf = ((uint64_t)aph << 32) | apl; 329 330 uint64_t ctime = mach_absolute_time(); 331 cdp->cpu_rtime_total += ctime - cdp->cpu_ixtime; 332 cdp->cpu_ixtime = ctime; 333 334 rdmsr_carefully(MSR_IA32_CORE_C3_RESIDENCY, &cl, &ch); 335 cdp->cpu_c3res = ((uint64_t)ch << 32) | cl; 336 337 rdmsr_carefully(MSR_IA32_CORE_C6_RESIDENCY, &cl, &ch); 338 cdp->cpu_c6res = ((uint64_t)ch << 32) | cl; 339 340 rdmsr_carefully(MSR_IA32_CORE_C7_RESIDENCY, &cl, &ch); 341 cdp->cpu_c7res = ((uint64_t)ch << 32) | cl; 342 343 if (diag_pmc_enabled) { 344 uint64_t insns = read_pmc(FIXED_PMC0); 345 uint64_t ucc = read_pmc(FIXED_PMC1); 346 uint64_t urc = read_pmc(FIXED_PMC2); 347 cdp->cpu_cur_insns = insns; 348 cdp->cpu_cur_ucc = ucc; 349 cdp->cpu_cur_urc = urc; 350 } 351} 352 353void cpu_pmc_control(void *enablep) { 354 boolean_t enable = 
*(boolean_t *)enablep; 355 cpu_data_t *cdp = current_cpu_datap(); 356 357 if (enable) { 358 wrmsr64(0x38F, 0x70000000FULL); 359 wrmsr64(0x38D, 0x333); 360 set_cr4(get_cr4() | CR4_PCE); 361 362 } else { 363 wrmsr64(0x38F, 0); 364 wrmsr64(0x38D, 0); 365 set_cr4((get_cr4() & ~CR4_PCE)); 366 } 367 cdp->cpu_fixed_pmcs_enabled = enable; 368} 369