tsc.c revision 246116
/*-
 * Copyright (c) 1998-2003 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/x86/x86/tsc.c 246116 2013-01-30 12:43:10Z kib $");

#include "opt_compat.h"
#include "opt_clock.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/kernel.h>
#include <sys/power.h>
#include <sys/smp.h>
#include <sys/vdso.h>
#include <machine/clock.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#include "cpufreq_if.h"

uint64_t tsc_freq;
int tsc_is_invariant;
int tsc_perf_stat;

static eventhandler_tag tsc_levels_tag, tsc_pre_tag, tsc_post_tag;

SYSCTL_INT(_kern_timecounter, OID_AUTO, invariant_tsc, CTLFLAG_RDTUN,
    &tsc_is_invariant, 0, "Indicates whether the TSC is P-state invariant");
TUNABLE_INT("kern.timecounter.invariant_tsc", &tsc_is_invariant);

#ifdef SMP
static int smp_tsc;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc, CTLFLAG_RDTUN, &smp_tsc, 0,
    "Indicates whether the TSC is safe to use in SMP mode");
TUNABLE_INT("kern.timecounter.smp_tsc", &smp_tsc);

static int smp_tsc_shift = 1;
SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc_shift, CTLFLAG_RDTUN,
    &smp_tsc_shift, 0,
    "Shift to pre-apply for the maximum TSC frequency in SMP mode");
TUNABLE_INT("kern.timecounter.smp_tsc_shift", &smp_tsc_shift);
#endif

static int tsc_disabled;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc, CTLFLAG_RDTUN, &tsc_disabled, 0,
    "Disable x86 Time Stamp Counter");
TUNABLE_INT("machdep.disable_tsc", &tsc_disabled);

static int tsc_skip_calibration;
SYSCTL_INT(_machdep, OID_AUTO, disable_tsc_calibration, CTLFLAG_RDTUN,
    &tsc_skip_calibration, 0, "Disable TSC frequency calibration");
TUNABLE_INT("machdep.disable_tsc_calibration", &tsc_skip_calibration);

static void tsc_freq_changed(void *arg, const struct cf_level *level,
    int status);
static void tsc_freq_changing(void *arg, const struct cf_level *level,
    int *status);
static unsigned tsc_get_timecount(struct timecounter *tc);
static inline unsigned tsc_get_timecount_low(struct timecounter *tc);
static unsigned tsc_get_timecount_lfence(struct timecounter *tc);
static unsigned tsc_get_timecount_low_lfence(struct timecounter *tc);
static unsigned tsc_get_timecount_mfence(struct timecounter *tc);
static unsigned tsc_get_timecount_low_mfence(struct timecounter *tc);
static void tsc_levels_changed(void *arg, int unit);

static struct timecounter tsc_timecounter = {
        tsc_get_timecount,      /* get_timecount */
        0,                      /* no poll_pps */
        ~0u,                    /* counter_mask */
        0,                      /* frequency */
        "TSC",                  /* name */
        800,                    /* quality (adjusted in code) */
};
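
/*
 * VMware "backdoor" call, a minimal sketch of the protocol as used
 * below: an inl from port 0x5658 (VMW_HVPORT) with %eax = VMW_HVMAGIC
 * and %ecx = the command number returns results in %eax-%edx.  %ebx is
 * primed with UINT_MAX so that a supported call can be recognized by
 * %ebx changing (see the checks in tsc_freq_vmware()).
 */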

#define VMW_HVMAGIC             0x564d5868
#define VMW_HVPORT              0x5658
#define VMW_HVCMD_GETVERSION    10
#define VMW_HVCMD_GETHZ         45

static __inline void
vmware_hvcall(u_int cmd, u_int *p)
{

        __asm __volatile("inl %w3, %0"
            : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
            : "0" (VMW_HVMAGIC), "1" (UINT_MAX), "2" (cmd), "3" (VMW_HVPORT)
            : "memory");
}

static int
tsc_freq_vmware(void)
{
        char hv_sig[13];
        u_int regs[4];
        char *p;
        u_int hv_high;
        int i;

        /*
         * [RFC] CPUID usage for interaction between Hypervisors and Linux.
         * http://lkml.org/lkml/2008/10/1/246
         *
         * KB1009458: Mechanisms to determine if software is running in
         * a VMware virtual machine
         * http://kb.vmware.com/kb/1009458
         */
        hv_high = 0;
        if ((cpu_feature2 & CPUID2_HV) != 0) {
                do_cpuid(0x40000000, regs);
                hv_high = regs[0];
                for (i = 1, p = hv_sig; i < 4; i++, p += sizeof(regs) / 4)
                        memcpy(p, &regs[i], sizeof(regs[i]));
                *p = '\0';
                if (bootverbose) {
                        /*
                         * HV vendor    ID string
                         * ------------+--------------
                         * KVM          "KVMKVMKVM"
                         * Microsoft    "Microsoft Hv"
                         * VMware       "VMwareVMware"
                         * Xen          "XenVMMXenVMM"
                         */
                        printf("Hypervisor: Origin = \"%s\"\n", hv_sig);
                }
                if (strncmp(hv_sig, "VMwareVMware", 12) != 0)
                        return (0);
        } else {
                p = getenv("smbios.system.serial");
                if (p == NULL)
                        return (0);
                if (strncmp(p, "VMware-", 7) != 0 &&
                    strncmp(p, "VMW", 3) != 0) {
                        freeenv(p);
                        return (0);
                }
                freeenv(p);
                vmware_hvcall(VMW_HVCMD_GETVERSION, regs);
                if (regs[1] != VMW_HVMAGIC)
                        return (0);
        }
        if (hv_high >= 0x40000010) {
                do_cpuid(0x40000010, regs);
                tsc_freq = regs[0] * 1000;
        } else {
                vmware_hvcall(VMW_HVCMD_GETHZ, regs);
                if (regs[1] != UINT_MAX)
                        tsc_freq = regs[0] | ((uint64_t)regs[1] << 32);
        }
        tsc_is_invariant = 1;
        return (1);
}
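
/*
 * Worked example (illustrative) for the brand-string parse in
 * tsc_freq_intel() below: for a brand string ending in "2.40GHz",
 * p is left pointing at the 'H' of "Hz", p -= 5 moves it back to
 * the '2', and p[4] == 'G' selects i = 1000.  The p[1] == '.'
 * branch then computes 2*1000 + 4*100 + 0*10 = 2400, which is
 * scaled by i * 1000 to 2400000000 Hz.
 */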

static void
tsc_freq_intel(void)
{
        char brand[48];
        u_int regs[4];
        uint64_t freq;
        char *p;
        u_int i;

        /*
         * Intel Processor Identification and the CPUID Instruction
         * Application Note 485.
         * http://www.intel.com/assets/pdf/appnote/241618.pdf
         */
        if (cpu_exthigh >= 0x80000004) {
                p = brand;
                for (i = 0x80000002; i < 0x80000005; i++) {
                        do_cpuid(i, regs);
                        memcpy(p, regs, sizeof(regs));
                        p += sizeof(regs);
                }
                p = NULL;
                for (i = 0; i < sizeof(brand) - 1; i++)
                        if (brand[i] == 'H' && brand[i + 1] == 'z')
                                p = brand + i;
                if (p != NULL) {
                        p -= 5;
                        switch (p[4]) {
                        case 'M':
                                i = 1;
                                break;
                        case 'G':
                                i = 1000;
                                break;
                        case 'T':
                                i = 1000000;
                                break;
                        default:
                                return;
                        }
#define C2D(c)  ((c) - '0')
                        if (p[1] == '.') {
                                freq = C2D(p[0]) * 1000;
                                freq += C2D(p[2]) * 100;
                                freq += C2D(p[3]) * 10;
                                freq *= i * 1000;
                        } else {
                                freq = C2D(p[0]) * 1000;
                                freq += C2D(p[1]) * 100;
                                freq += C2D(p[2]) * 10;
                                freq += C2D(p[3]);
                                freq *= i * 1000000;
                        }
#undef C2D
                        tsc_freq = freq;
                }
        }
}

static void
probe_tsc_freq(void)
{
        u_int regs[4];
        uint64_t tsc1, tsc2;

        if (cpu_high >= 6) {
                do_cpuid(6, regs);
                if ((regs[2] & CPUID_PERF_STAT) != 0) {
                        /*
                         * XXX Some emulators expose host CPUID without actual
                         * support for these MSRs.  We must test whether they
                         * really work.
                         */
                        wrmsr(MSR_MPERF, 0);
                        wrmsr(MSR_APERF, 0);
                        DELAY(10);
                        if (rdmsr(MSR_MPERF) > 0 && rdmsr(MSR_APERF) > 0)
                                tsc_perf_stat = 1;
                }
        }

        if (tsc_freq_vmware())
                return;

        switch (cpu_vendor_id) {
        case CPU_VENDOR_AMD:
                if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
                    (vm_guest == VM_GUEST_NO &&
                    CPUID_TO_FAMILY(cpu_id) >= 0x10))
                        tsc_is_invariant = 1;
                if (cpu_feature & CPUID_SSE2) {
                        tsc_timecounter.tc_get_timecount =
                            tsc_get_timecount_mfence;
                }
                break;
        case CPU_VENDOR_INTEL:
                if ((amd_pminfo & AMDPM_TSC_INVARIANT) != 0 ||
                    (vm_guest == VM_GUEST_NO &&
                    ((CPUID_TO_FAMILY(cpu_id) == 0x6 &&
                    CPUID_TO_MODEL(cpu_id) >= 0xe) ||
                    (CPUID_TO_FAMILY(cpu_id) == 0xf &&
                    CPUID_TO_MODEL(cpu_id) >= 0x3))))
                        tsc_is_invariant = 1;
                if (cpu_feature & CPUID_SSE2) {
                        tsc_timecounter.tc_get_timecount =
                            tsc_get_timecount_lfence;
                }
                break;
        case CPU_VENDOR_CENTAUR:
                if (vm_guest == VM_GUEST_NO &&
                    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
                    CPUID_TO_MODEL(cpu_id) >= 0xf &&
                    (rdmsr(0x1203) & 0x100000000ULL) == 0)
                        tsc_is_invariant = 1;
                if (cpu_feature & CPUID_SSE2) {
                        tsc_timecounter.tc_get_timecount =
                            tsc_get_timecount_lfence;
                }
                break;
        }

        if (tsc_skip_calibration) {
                if (cpu_vendor_id == CPU_VENDOR_INTEL)
                        tsc_freq_intel();
                return;
        }

        if (bootverbose)
                printf("Calibrating TSC clock ... ");
        tsc1 = rdtsc();
        DELAY(1000000);
        tsc2 = rdtsc();
        tsc_freq = tsc2 - tsc1;
        if (bootverbose)
                printf("TSC clock: %ju Hz\n", (intmax_t)tsc_freq);
}
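
/*
 * Note on the calibration above: tsc_freq is simply the number of TSC
 * ticks counted across DELAY(1000000), one second of busy-waiting timed
 * by an independent reference (typically the i8254 at this point in
 * boot), so the result inherits that reference's accuracy; an error of
 * 100 ppm in DELAY() shows up as roughly 100 ppm in tsc_freq.
 */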

void
init_TSC(void)
{

        if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
                return;

        probe_tsc_freq();

        /*
         * Inform CPU accounting about our boot-time clock rate.  This will
         * be updated if someone loads a cpufreq driver after boot that
         * discovers a new max frequency.
         */
        if (tsc_freq != 0)
                set_cputicker(rdtsc, tsc_freq, !tsc_is_invariant);

        if (tsc_is_invariant)
                return;

        /* Register to find out about changes in CPU frequency. */
        tsc_pre_tag = EVENTHANDLER_REGISTER(cpufreq_pre_change,
            tsc_freq_changing, NULL, EVENTHANDLER_PRI_FIRST);
        tsc_post_tag = EVENTHANDLER_REGISTER(cpufreq_post_change,
            tsc_freq_changed, NULL, EVENTHANDLER_PRI_FIRST);
        tsc_levels_tag = EVENTHANDLER_REGISTER(cpufreq_levels_changed,
            tsc_levels_changed, NULL, EVENTHANDLER_PRI_ANY);
}

#ifdef SMP

/*
 * RDTSC is not a serializing instruction, and does not drain the
 * instruction stream, so we need to drain the stream before executing
 * it.  It could be fixed by use of RDTSCP, except the instruction is
 * not available everywhere.
 *
 * Use CPUID for draining in the boot-time SMP consistency test.  The
 * timecounters use MFENCE for AMD CPUs, and LFENCE for others (Intel
 * and VIA) when SSE2 is present, and nothing on older machines which
 * also do not issue RDTSC prematurely.  There, testing for SSE2 and
 * vendor is too cumbersome, and we learn about TSC presence from CPUID.
 *
 * Do not use do_cpuid(), since we do not need CPUID results, which
 * have to be written into memory with do_cpuid().
 */
#define TSC_READ(x)                                                     \
static void                                                             \
tsc_read_##x(void *arg)                                                 \
{                                                                       \
        uint64_t *tsc = arg;                                            \
        u_int cpu = PCPU_GET(cpuid);                                    \
                                                                        \
        __asm __volatile("cpuid" : : : "eax", "ebx", "ecx", "edx");     \
        tsc[cpu * 3 + x] = rdtsc();                                     \
}
TSC_READ(0)
TSC_READ(1)
TSC_READ(2)
#undef TSC_READ

#define N       1000

static void
comp_smp_tsc(void *arg)
{
        uint64_t *tsc;
        int64_t d1, d2;
        u_int cpu = PCPU_GET(cpuid);
        u_int i, j, size;

        size = (mp_maxid + 1) * 3;
        for (i = 0, tsc = arg; i < N; i++, tsc += size)
                CPU_FOREACH(j) {
                        if (j == cpu)
                                continue;
                        d1 = tsc[cpu * 3 + 1] - tsc[j * 3];
                        d2 = tsc[cpu * 3 + 2] - tsc[j * 3 + 1];
                        if (d1 <= 0 || d2 <= 0) {
                                smp_tsc = 0;
                                return;
                        }
                }
}

static int
test_smp_tsc(void)
{
        uint64_t *data, *tsc;
        u_int i, size;

        if (!smp_tsc && !tsc_is_invariant)
                return (-100);
        size = (mp_maxid + 1) * 3;
        data = malloc(sizeof(*data) * size * N, M_TEMP, M_WAITOK);
        for (i = 0, tsc = data; i < N; i++, tsc += size)
                smp_rendezvous(tsc_read_0, tsc_read_1, tsc_read_2, tsc);
        smp_tsc = 1;    /* XXX */
        smp_rendezvous(smp_no_rendevous_barrier, comp_smp_tsc,
            smp_no_rendevous_barrier, data);
        free(data, M_TEMP);
        if (bootverbose)
                printf("SMP: %sed TSC synchronization test\n",
                    smp_tsc ? "pass" : "fail");
        if (smp_tsc && tsc_is_invariant) {
                switch (cpu_vendor_id) {
                case CPU_VENDOR_AMD:
                        /*
                         * Starting with Family 15h processors, TSC clock
                         * source is in the north bridge.  Check whether
                         * we have a single-socket/multi-core platform.
                         * XXX Need more work for complex cases.
                         */
                        if (CPUID_TO_FAMILY(cpu_id) < 0x15 ||
                            (amd_feature2 & AMDID2_CMP) == 0 ||
                            smp_cpus > (cpu_procinfo2 & AMDID_CMP_CORES) + 1)
                                break;
                        return (1000);
                case CPU_VENDOR_INTEL:
                        /*
                         * XXX Assume Intel platforms have synchronized TSCs.
                         */
                        return (1000);
                }
                return (800);
        }
        return (-100);
}

#undef N
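
/*
 * How the test above works: each of the N rendezvous rounds makes every
 * CPU record three TSC samples (slots 0..2), with the rendezvous
 * barriers guaranteeing that sample k + 1 on any CPU is taken after
 * sample k on every other CPU.  comp_smp_tsc() then requires each local
 * sample to read strictly higher than the preceding remote sample; any
 * non-positive delta means the counters are skewed or drifting, and the
 * TSC is flagged unsafe for SMP timekeeping.
 */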

#endif /* SMP */

static void
init_TSC_tc(void)
{
        uint64_t max_freq;
        int shift;

        if ((cpu_feature & CPUID_TSC) == 0 || tsc_disabled)
                return;

        /*
         * Limit timecounter frequency to fit in an int and prevent it from
         * overflowing too fast.
         */
        max_freq = UINT_MAX;

        /*
         * We cannot use the TSC if we support APM.  Precise timekeeping
         * on an APM'ed machine is at best a fool's pursuit, since
         * any and all of the time spent in various SMM code can't
         * be reliably accounted for.  Reading the RTC is your only
         * source of reliable time info.  The i8254 loses too, of course,
         * but we need to have some kind of time...
         * We don't know at this point whether APM is going to be used
         * or not, nor when it might be activated.  Play it safe.
         */
        if (power_pm_get_type() == POWER_PM_TYPE_APM) {
                tsc_timecounter.tc_quality = -1000;
                if (bootverbose)
                        printf("TSC timecounter disabled: APM enabled.\n");
                goto init;
        }

        /*
         * We cannot use the TSC if it stops incrementing in deep sleep.
         * Currently only Intel CPUs are known for this problem unless
         * the invariant TSC bit is set.
         */
        if (cpu_can_deep_sleep && cpu_vendor_id == CPU_VENDOR_INTEL &&
            (amd_pminfo & AMDPM_TSC_INVARIANT) == 0) {
                tsc_timecounter.tc_quality = -1000;
                tsc_timecounter.tc_flags |= TC_FLAGS_C3STOP;
                if (bootverbose)
                        printf("TSC timecounter disabled: C3 enabled.\n");
                goto init;
        }

#ifdef SMP
        /*
         * We cannot use the TSC in SMP mode unless the TSCs on all CPUs
         * are synchronized.  If the user is sure that the system has
         * synchronized TSCs, set the kern.timecounter.smp_tsc tunable to
         * a non-zero value.  We also limit the frequency even lower to
         * avoid "temporal anomalies" as much as possible.  The TSC seems
         * unreliable in virtualized SMP environments, so it is set to a
         * negative quality in those cases.
         */
        if (smp_cpus > 1) {
                if (vm_guest != 0) {
                        tsc_timecounter.tc_quality = -100;
                } else {
                        tsc_timecounter.tc_quality = test_smp_tsc();
                        max_freq >>= smp_tsc_shift;
                }
        } else
#endif
        if (tsc_is_invariant)
                tsc_timecounter.tc_quality = 1000;
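
        /*
         * Find the smallest shift that keeps the advertised frequency
         * at or below max_freq.  Illustrative numbers: with a 2.8 GHz
         * TSC on an SMP host and the default smp_tsc_shift of 1,
         * max_freq is about 2.1 GHz, so shift becomes 1 and the
         * "TSC-low" timecounter advertises 1.4 GHz, discarding the
         * lowest TSC bit.
         */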
init:
        for (shift = 0; shift < 31 && (tsc_freq >> shift) > max_freq; shift++)
                ;
        if (shift > 0) {
                if (cpu_feature & CPUID_SSE2) {
                        if (cpu_vendor_id == CPU_VENDOR_AMD) {
                                tsc_timecounter.tc_get_timecount =
                                    tsc_get_timecount_low_mfence;
                        } else {
                                tsc_timecounter.tc_get_timecount =
                                    tsc_get_timecount_low_lfence;
                        }
                } else
                        tsc_timecounter.tc_get_timecount =
                            tsc_get_timecount_low;
                tsc_timecounter.tc_name = "TSC-low";
                if (bootverbose)
                        printf("TSC timecounter discards lower %d bit(s)\n",
                            shift);
        }
        if (tsc_freq != 0) {
                tsc_timecounter.tc_frequency = tsc_freq >> shift;
                tsc_timecounter.tc_priv = (void *)(intptr_t)shift;
                tc_init(&tsc_timecounter);
        }
}
SYSINIT(tsc_tc, SI_SUB_SMP, SI_ORDER_ANY, init_TSC_tc, NULL);

/*
 * When cpufreq levels change, find out about the (new) max frequency.  We
 * use this to update CPU accounting in case it got a lower estimate at boot.
 */
static void
tsc_levels_changed(void *arg, int unit)
{
        device_t cf_dev;
        struct cf_level *levels;
        int count, error;
        uint64_t max_freq;

        /* Only use values from the first CPU, assuming all are equal. */
        if (unit != 0)
                return;

        /* Find the appropriate cpufreq device instance. */
        cf_dev = devclass_get_device(devclass_find("cpufreq"), unit);
        if (cf_dev == NULL) {
                printf("tsc_levels_changed() called but no cpufreq device?\n");
                return;
        }

        /* Get settings from the device and find the max frequency. */
        count = 64;
        levels = malloc(count * sizeof(*levels), M_TEMP, M_NOWAIT);
        if (levels == NULL)
                return;
        error = CPUFREQ_LEVELS(cf_dev, levels, &count);
        if (error == 0 && count != 0) {
                max_freq = (uint64_t)levels[0].total_set.freq * 1000000;
                set_cputicker(rdtsc, max_freq, 1);
        } else
                printf("tsc_levels_changed: no max freq found\n");
        free(levels, M_TEMP);
}

/*
 * If the TSC timecounter is in use, veto the pending change.  It may be
 * possible in the future to handle a dynamically-changing timecounter rate.
 */
static void
tsc_freq_changing(void *arg, const struct cf_level *level, int *status)
{

        if (*status != 0 || timecounter != &tsc_timecounter)
                return;

        printf("timecounter TSC must not be in use when "
            "changing frequencies; change denied\n");
        *status = EBUSY;
}

/* Update TSC freq with the value indicated by the caller. */
static void
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
{
        uint64_t freq;

        /* If there was an error during the transition, don't do anything. */
        if (tsc_disabled || status != 0)
                return;

        /* Total setting for this level gives the new frequency in MHz. */
        freq = (uint64_t)level->total_set.freq * 1000000;
        atomic_store_rel_64(&tsc_freq, freq);
        tsc_timecounter.tc_frequency =
            freq >> (int)(intptr_t)tsc_timecounter.tc_priv;
}

static int
sysctl_machdep_tsc_freq(SYSCTL_HANDLER_ARGS)
{
        int error;
        uint64_t freq;

        freq = atomic_load_acq_64(&tsc_freq);
        if (freq == 0)
                return (EOPNOTSUPP);
        error = sysctl_handle_64(oidp, &freq, 0, req);
        if (error == 0 && req->newptr != NULL) {
                atomic_store_rel_64(&tsc_freq, freq);
                atomic_store_rel_64(&tsc_timecounter.tc_frequency,
                    freq >> (int)(intptr_t)tsc_timecounter.tc_priv);
        }
        return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, tsc_freq, CTLTYPE_U64 | CTLFLAG_RW,
    0, 0, sysctl_machdep_tsc_freq, "QU", "Time Stamp Counter frequency");

static u_int
tsc_get_timecount(struct timecounter *tc __unused)
{

        return (rdtsc32());
}

static inline u_int
tsc_get_timecount_low(struct timecounter *tc)
{
        uint32_t rv;

        __asm __volatile("rdtsc; shrd %%cl, %%edx, %0"
            : "=a" (rv) : "c" ((int)(intptr_t)tc->tc_priv) : "edx");
        return (rv);
}

static u_int
tsc_get_timecount_lfence(struct timecounter *tc __unused)
{

        lfence();
        return (rdtsc32());
}

static u_int
tsc_get_timecount_low_lfence(struct timecounter *tc)
{

        lfence();
        return (tsc_get_timecount_low(tc));
}

static u_int
tsc_get_timecount_mfence(struct timecounter *tc __unused)
{

        mfence();
        return (rdtsc32());
}

static u_int
tsc_get_timecount_low_mfence(struct timecounter *tc)
{

        mfence();
        return (tsc_get_timecount_low(tc));
}

uint32_t
cpu_fill_vdso_timehands(struct vdso_timehands *vdso_th)
{

        vdso_th->th_x86_shift = (int)(intptr_t)timecounter->tc_priv;
        bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
        return (timecounter == &tsc_timecounter);
}

#ifdef COMPAT_FREEBSD32
uint32_t
cpu_fill_vdso_timehands32(struct vdso_timehands32 *vdso_th32)
{

        vdso_th32->th_x86_shift = (int)(intptr_t)timecounter->tc_priv;
        bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res));
        return (timecounter == &tsc_timecounter);
}
#endif
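
/*
 * Note: th_x86_shift, filled in above, mirrors the shift applied by the
 * "TSC-low" timecounter, so userland vDSO time functions can presumably
 * reproduce the kernel's counter value as (rdtsc() >> th_x86_shift)
 * without entering the kernel.  The nonzero return value signals that
 * the TSC is the active timecounter and the fast path may be used.
 */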