/**
 * \file
 * \brief X86 inline asm utilities and defines
 */

/*
 * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */

#ifndef KERNEL_X86_H
#define KERNEL_X86_H

/***** MSRs *****/

#define MSR_IA32_EFER    0xc0000080     ///< Extended feature enables
#define MSR_IA32_STAR    0xc0000081     ///< System call segment selectors MSR
#define MSR_IA32_LSTAR   0xc0000082     ///< System call target address MSR
#define MSR_IA32_FMASK   0xc0000084     ///< System call flag mask MSR
#define MSR_IA32_FSBASE  0xc0000100     ///< 64-bit FS base register
#define MSR_IA32_GSBASE  0xc0000101     ///< 64-bit GS base register
#define MSR_AMD_HWCR     0xc0010015     ///< AMD hardware configuration
#define MSR_AMD_VMCR     0xc0010114     ///< Global aspects of SVM
#define MSR_AMD_VM_HSAVE 0xc0010117     ///< Physical address of host save area

/*** IA32_EFER flags ***/

#define IA32_EFER_SCE   (1 << 0)        ///< Fast system call enable
#define IA32_EFER_LME   (1 << 8)        ///< Long mode enable
#define IA32_EFER_LMA   (1 << 10)       ///< Long mode active
#define IA32_EFER_NXE   (1 << 11)       ///< No execute enable
#define IA32_EFER_SVME  (1 << 12)       ///< Switch to enable/disable SVM

/*** AMD_HWCR flags ***/

#define AMD_HWCR_FFDIS  (1 << 6)        ///< TLB flush filter disable

/*** AMD_VMCR flags ***/
#define AMD_VMCR_SVMDIS (1 << 4)        ///< SVM disabled indicator

// Register space access functions, needed by ia32_dev.h (since
// ia32_dev.h is generated by Mackerel, and expects these functions to
// be available).

#define ia32_msr_read_64(_d,_r) rdmsr(_r)
#define ia32_msr_write_64(_d,_r,_v) wrmsr(_r,_v)
#define ia32_msr_read_32(_d,_r) ((uint32_t)rdmsr(_r))
#define ia32_msr_write_32(_d,_r,_v) wrmsr(_r,_v)

#define amd64_cr0_rawrd(_d) rdcr0()
#define amd64_cr0_rawwr(_d,_v) wrcr0(_v)
#define amd64_cr2_rawrd(_d) rdcr2()
#define amd64_cr2_rawwr(_d,_v) wrcr2(_v)
#define amd64_cr3_rawrd(_d) rdcr3()
#define amd64_cr3_rawwr(_d,_v) wrcr3(_v)
#define amd64_cr4_rawrd(_d) rdcr4()
#define amd64_cr4_rawwr(_d,_v) wrcr4(_v)

#ifndef __ASSEMBLER__

#include <stdint.h>

static inline uint64_t rdcr0(void)
{
    uint64_t cr0;
    __asm volatile("mov %%cr0, %[cr0]" : [cr0] "=r" (cr0));
    return cr0;
}

static inline void wrcr0(uint64_t cr0)
{
    __asm volatile("mov %[cr0], %%cr0" :: [cr0] "r" (cr0));
}

static inline uint64_t rdcr2(void)
{
    uint64_t cr2;
    __asm volatile("mov %%cr2, %[cr2]" : [cr2] "=r" (cr2));
    return cr2;
}

static inline void wrcr2(uint64_t cr2)
{
    __asm volatile("mov %[cr2], %%cr2" :: [cr2] "r" (cr2));
}

static inline uint64_t rdcr3(void)
{
    uint64_t cr3;
    __asm volatile("mov %%cr3, %[cr3]" : [cr3] "=r" (cr3));
    return cr3;
}

static inline void wrcr3(uint64_t cr3)
{
    __asm volatile("mov %[cr3], %%cr3" :: [cr3] "r" (cr3));
}

static inline uint64_t rdcr4(void)
{
    uint64_t cr4;
    __asm volatile("mov %%cr4, %[cr4]" : [cr4] "=r" (cr4));
    return cr4;
}

static inline void wrcr4(uint64_t cr4)
{
    __asm volatile("mov %[cr4], %%cr4" :: [cr4] "r" (cr4));
}

static inline uint8_t inb(uint16_t port)
{
    uint8_t data;
    __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
    return data;
}

// Port output is compiled out (a no-op) when building for K1OM (__k1om__).
static inline void outb(uint16_t port, uint8_t data)
{
#if !defined(__k1om__)
    __asm __volatile("outb %0,%%dx" : : "a" (data), "d" (port));
#endif
}

static inline uint32_t ind(uint16_t port)
{
    uint32_t data;
    __asm __volatile("in %%dx,%0" : "=a" (data) : "d" (port));
    return data;
}

static inline void outd(uint16_t port, uint32_t data)
{
#if !defined(__k1om__)
    __asm __volatile("out %0,%%dx" : : "a" (data), "d" (port));
#endif
}

static inline uint16_t inw(uint16_t port)
{
    uint16_t data;
    __asm __volatile("inw %%dx,%0" : "=a" (data) : "d" (port));
    return data;
}

static inline void outw(uint16_t port, uint16_t data)
{
#if !defined(__k1om__)
    __asm __volatile("outw %0,%%dx" : : "a" (data), "d" (port));
#endif
}

/** \brief This function reads a model-specific register */
static inline uint64_t rdmsr(uint32_t msr_number)
{
    uint32_t eax, edx;
    __asm volatile ("rdmsr" : "=a" (eax), "=d" (edx) : "c" (msr_number));
    return ((uint64_t)edx << 32) | eax;
}

/** \brief This function writes a model-specific register */
static inline void wrmsr(uint32_t msr_number, uint64_t value)
{
    uint32_t eax, edx;

    eax = value & 0xffffffff;
    edx = value >> 32;
    __asm__ __volatile__ ("wrmsr" : : "a" (eax), "d" (edx), "c" (msr_number));
}

/** \brief Add bitmask to model-specific register */
static inline void addmsr(uint32_t msr_number, uint64_t mask)
{
    wrmsr(msr_number, rdmsr(msr_number) | mask);
}
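
/*
 * Illustrative sketch (not part of the original interface): the MSR defines
 * at the top of this file combine with addmsr() to set individual feature
 * bits via a read-modify-write. The helper name x86_enable_nx() below is
 * hypothetical and shown only as a usage example.
 */
static inline void x86_enable_nx(void)
{
    // Set the no-execute enable bit in IA32_EFER, leaving all other bits
    // unchanged.
    addmsr(MSR_IA32_EFER, IA32_EFER_NXE);
}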

/** \brief Triggers a breakpoint exception (INT 3) */
static inline void hw_breakpoint(void)
{
    __asm__ __volatile__("int $3" ::);
}

/** \brief Issue WBINVD instruction, writing back and invalidating all caches */
static inline void wbinvd(void)
{
    __asm volatile("wbinvd" ::: "memory");
}

/** \brief Clear the task-switched (TS) flag in CR0 */
static inline void clts(void)
{
    __asm volatile("clts");
}

#include <stdbool.h>

bool has_monitor_mwait(void);
void monitor_mwait(lvaddr_t base, uint64_t lastval, uint32_t extensions,
                   uint32_t hints);

#endif //__ASSEMBLER__

/*** Test whether real processor or M5 ***/

#define CPU_IS_M5_SIMULATOR                                             \
    ({uint32_t _eax, _ebx, _ecx, _edx;                                  \
      cpuid(0, &_eax, &_ebx, &_ecx, &_edx);                             \
      /* Expect "M5 Simulator" */                                       \
      /* 5320354d 6c756d69 726f7461 */                                  \
      ((_ebx == 0x5320354d && _edx == 0x6c756d69 && _ecx == 0x726f7461));})

#endif // KERNEL_X86_H