/*
 * linux/include/asm-i386/tsc.h
 *
 * i386 TSC (Time Stamp Counter) related functions.
 */
#ifndef _ASM_i386_TSC_H
#define _ASM_i386_TSC_H

#include <asm/processor.h>

/*
 * Standard way to access the cycle counter.
 */
typedef unsigned long long cycles_t;

/*
 * Frequencies in kHz; defined and filled in elsewhere (presumably by the
 * TSC calibration code at boot — confirm against arch/i386 tsc.c).
 */
extern unsigned int cpu_khz;
extern unsigned int tsc_khz;

/*
 * Read the current TSC value, or 0 when no usable TSC is present.
 *
 * - Without CONFIG_X86_TSC the presence of a TSC is a runtime question,
 *   so check cpu_has_tsc and bail out with 0 if absent.
 * - The actual RDTSC (via rdtscll) is only compiled in for
 *   CONFIG_X86_GENERIC or CONFIG_X86_TSC builds; on other configs this
 *   function always returns 0.
 */
static inline cycles_t get_cycles(void)
{
	unsigned long long ret = 0;

#ifndef CONFIG_X86_TSC
	if (!cpu_has_tsc)
		return 0;
#endif

#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
	rdtscll(ret);
#endif
	return ret;
}

/* Like get_cycles, but make sure the CPU is synchronized. */
static __always_inline cycles_t get_cycles_sync(void)
{
	unsigned long long ret;
	unsigned eax, edx;

	/*
	 * Use RDTSCP if possible; it is guaranteed to be synchronous
	 * and doesn't cause a VMEXIT on Hypervisors.
	 *
	 * ".byte 0x0f,0x01,0xf9" is the raw RDTSCP opcode (EDX:EAX = TSC,
	 * ECX = TSC_AUX — hence the "ecx" clobber); the alternative patches
	 * it over a 3-byte NOP only on CPUs with X86_FEATURE_RDTSCP.
	 * The "a"/"d" inputs of 0 ensure eax/edx read as 0 when the NOP
	 * path (no RDTSCP) is taken.
	 */
	alternative_io(ASM_NOP3, ".byte 0x0f,0x01,0xf9", X86_FEATURE_RDTSCP,
		       ASM_OUTPUT2("=a" (eax), "=d" (edx)),
		       "a" (0U), "d" (0U) : "ecx", "memory");
	ret = (((unsigned long long)edx) << 32) | ((unsigned long long)eax);
	/*
	 * Non-zero means RDTSCP actually ran — use its result directly.
	 * Zero means the NOPs executed (no RDTSCP), so fall through to the
	 * serialized RDTSC path below.  (A genuinely zero TSC value is
	 * only possible in the first cycles after reset.)
	 */
	if (ret)
		return ret;

	/*
	 * Don't do an additional sync on CPUs where we know
	 * RDTSC is already synchronous:
	 *
	 * CPUID (with EAX=1) acts as the serializing barrier before RDTSC;
	 * it is patched out to a 2-byte NOP on X86_FEATURE_SYNC_RDTSC CPUs.
	 * CPUID clobbers EBX/ECX/EDX, hence the clobber list.
	 */
	alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
		       "=a" (eax), "0" (1) : "ebx","ecx","edx","memory");
	rdtscll(ret);

	return ret;
}

extern void tsc_init(void);
extern void mark_tsc_unstable(char *reason);
extern int unsynchronized_tsc(void);
extern void init_tsc_clocksource(void);

/*
 * Boot-time check whether the TSCs are synchronized across
 * all CPUs/cores:
 */
extern void check_tsc_sync_source(int cpu);
extern void check_tsc_sync_target(void);

#endif