/*
 * VMI specific paravirt-ops implementation
 *
 * Copyright (C) 2005, VMware, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to zach@vmware.com
 *
 */

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <asm/vmi.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/vmi_time.h>
#include <asm/kmap_types.h>

/* Convenient for calling VMI functions indirectly in the ROM */
typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);

#define call_vrom_func(rom,func) \
	(((VROMFUNC *)(rom->func))())

#define call_vrom_long_func(rom,func,arg) \
	(((VROMLONGFUNC *)(rom->func)) (arg))

static struct vrom_header *vmi_rom;
static int disable_pge;
static int disable_pse;
static int disable_sep;
static int disable_tsc;
static int disable_mtrr;
static int disable_noidle;
static int disable_vmi_timer;

/* Cached VMI operations */
static struct {
	void (*cpuid)(void /* non-c */);
	void (*_set_ldt)(u32 selector);
	void (*set_tr)(u32 selector);
	void (*set_kernel_stack)(u32 selector, u32 esp0);
	void (*allocate_page)(u32, u32, u32, u32, u32);
	void (*release_page)(u32, u32);
	void (*set_pte)(pte_t, pte_t *, unsigned);
	void (*update_pte)(pte_t *, unsigned);
	void (*set_linear_mapping)(int, void *, u32, u32);
	void (*_flush_tlb)(int);
	void (*set_initial_ap_state)(int, int);
	void (*halt)(void);
	void (*set_lazy_mode)(int mode);
} vmi_ops;

/* Cached VMI timer operations */
struct vmi_timer_ops vmi_timer_ops;

/*
 * VMI patching routines.
 */

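/*
 * Illustrative sketch (assumed call-site layout, not taken from the ROM):
 * a patchable paravirt call site initially holds an indirect call like
 *
 *	call *(paravirt_ops+PARAVIRT_PATCH(irq_disable)*4)
 *
 * When the ROM reports VMI_RELOCATION_CALL_REL for that operation,
 * patch_internal() below rewrites the first five bytes in place to a
 * direct "call rel32" (opcode 0xe8) targeting rel->eip, and the
 * paravirt patching core nop-pads whatever is left of the site.
 */
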
#define MNEM_CALL 0xe8
#define MNEM_JMP  0xe9
#define MNEM_RET  0xc3

#define IRQ_PATCH_INT_MASK 0
#define IRQ_PATCH_DISABLE  5

static inline void patch_offset(unsigned char *eip, unsigned char *dest)
{
	*(unsigned long *)(eip+1) = dest-eip-5;
}

static unsigned patch_internal(int call, unsigned len, void *insns)
{
	u64 reloc;
	struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
	reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
	switch(rel->type) {
	case VMI_RELOCATION_CALL_REL:
		BUG_ON(len < 5);
		*(char *)insns = MNEM_CALL;
		patch_offset(insns, rel->eip);
		return 5;

	case VMI_RELOCATION_JUMP_REL:
		BUG_ON(len < 5);
		*(char *)insns = MNEM_JMP;
		patch_offset(insns, rel->eip);
		return 5;

	case VMI_RELOCATION_NOP:
		/* obliterate the whole thing */
		return 0;

	case VMI_RELOCATION_NONE:
		/* leave native code in place */
		break;

	default:
		BUG();
	}
	return len;
}

/*
 * Apply patch if appropriate, return length of new instruction
 * sequence.  The callee does nop padding for us.
 */
static unsigned vmi_patch(u8 type, u16 clobbers, void *insns, unsigned len)
{
	switch (type) {
	case PARAVIRT_PATCH(irq_disable):
		return patch_internal(VMI_CALL_DisableInterrupts, len, insns);
	case PARAVIRT_PATCH(irq_enable):
		return patch_internal(VMI_CALL_EnableInterrupts, len, insns);
	case PARAVIRT_PATCH(restore_fl):
		return patch_internal(VMI_CALL_SetInterruptMask, len, insns);
	case PARAVIRT_PATCH(save_fl):
		return patch_internal(VMI_CALL_GetInterruptMask, len, insns);
	case PARAVIRT_PATCH(iret):
		return patch_internal(VMI_CALL_IRET, len, insns);
	case PARAVIRT_PATCH(irq_enable_sysexit):
		return patch_internal(VMI_CALL_SYSEXIT, len, insns);
	default:
		break;
	}
	return len;
}

/* CPUID has non-C semantics, and paravirt-ops API doesn't match hardware ISA */
static void vmi_cpuid(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx)
{
	int override = 0;
	if (*eax == 1)
		override = 1;
	asm volatile ("call *%6"
		      : "=a" (*eax),
			"=b" (*ebx),
			"=c" (*ecx),
			"=d" (*edx)
		      : "0" (*eax), "2" (*ecx), "r" (vmi_ops.cpuid));
	if (override) {
		/* X86_FEATURE_* are bit numbers, not masks (see parse_vmi) */
		if (disable_pse)
			*edx &= ~(1 << X86_FEATURE_PSE);
		if (disable_pge)
			*edx &= ~(1 << X86_FEATURE_PGE);
		if (disable_sep)
			*edx &= ~(1 << X86_FEATURE_SEP);
		if (disable_tsc)
			*edx &= ~(1 << X86_FEATURE_TSC);
		if (disable_mtrr)
			*edx &= ~(1 << X86_FEATURE_MTRR);
	}
}

static inline void vmi_maybe_load_tls(struct desc_struct *gdt, int nr, struct desc_struct *new)
{
	if (gdt[nr].a != new->a || gdt[nr].b != new->b)
		write_gdt_entry(gdt, nr, new->a, new->b);
}

static void vmi_load_tls(struct thread_struct *t, unsigned int cpu)
{
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0]);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1]);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2]);
}

static void vmi_set_ldt(const void *addr, unsigned entries)
{
	unsigned cpu = smp_processor_id();
	u32 low, high;

	pack_descriptor(&low, &high, (unsigned long)addr,
			entries * sizeof(struct desc_struct) - 1,
			DESCTYPE_LDT, 0);
	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, low, high);
	vmi_ops._set_ldt(entries ? GDT_ENTRY_LDT*sizeof(struct desc_struct) : 0);
}

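/*
 * Note: the value handed to the ROM above is a raw selector, i.e. the
 * byte offset of the LDT descriptor within the GDT (GDT_ENTRY_LDT * 8,
 * since struct desc_struct is 8 bytes on i386), with TI and RPL zero.
 */
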
static void vmi_set_tr(void)
{
	vmi_ops.set_tr(GDT_ENTRY_TSS*sizeof(struct desc_struct));
}

static void vmi_load_esp0(struct tss_struct *tss,
			  struct thread_struct *thread)
{
	tss->x86_tss.esp0 = thread->esp0;

	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
	vmi_ops.set_kernel_stack(__KERNEL_DS, tss->x86_tss.esp0);
}

static void vmi_flush_tlb_user(void)
{
	vmi_ops._flush_tlb(VMI_FLUSH_TLB);
}

static void vmi_flush_tlb_kernel(void)
{
	vmi_ops._flush_tlb(VMI_FLUSH_TLB | VMI_FLUSH_GLOBAL);
}

/* Stub to do nothing at all; used for delays and unimplemented calls */
static void vmi_nop(void)
{
}

#ifdef CONFIG_DEBUG_PAGE_TYPE

#ifdef CONFIG_X86_PAE
#define MAX_BOOT_PTS (2048+4+1)
#else
#define MAX_BOOT_PTS (1024+1)
#endif

/*
 * During boot, mem_map is not yet available in paging_init, so stash
 * all the boot page allocations here.
 */
static struct {
	u32 pfn;
	int type;
} boot_page_allocations[MAX_BOOT_PTS];
static int num_boot_page_allocations;
static int boot_allocations_applied;

void vmi_apply_boot_page_allocations(void)
{
	int i;
	BUG_ON(!mem_map);
	for (i = 0; i < num_boot_page_allocations; i++) {
		struct page *page = pfn_to_page(boot_page_allocations[i].pfn);
		page->type = boot_page_allocations[i].type &
				~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
	}
	boot_allocations_applied = 1;
}

static void record_page_type(u32 pfn, int type)
{
	BUG_ON(num_boot_page_allocations >= MAX_BOOT_PTS);
	boot_page_allocations[num_boot_page_allocations].pfn = pfn;
	boot_page_allocations[num_boot_page_allocations].type = type;
	num_boot_page_allocations++;
}

static void check_zeroed_page(u32 pfn, int type, struct page *page)
{
	u32 *ptr;
	int i;
	int limit = PAGE_SIZE / sizeof(int);

	if (page_address(page))
		ptr = (u32 *)page_address(page);
	else
		ptr = (u32 *)__va(pfn << PAGE_SHIFT);
	/*
	 * When cloning the root in non-PAE mode, only the userspace
	 * pdes need to be zeroed.
	 */
	if (type & VMI_PAGE_CLONE)
		limit = USER_PTRS_PER_PGD;
	for (i = 0; i < limit; i++)
		BUG_ON(ptr[i]);
}

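/*
 * Lifecycle sketch of the debug machinery above (hypothetical pfn):
 * an early vmi_allocate_pd(pfn) only records (pfn, VMI_PAGE_L2); once
 * mem_map exists, vmi_apply_boot_page_allocations() copies the stashed
 * types into struct page, after which vmi_check_page_type() can BUG()
 * on any page used with a conflicting type.
 */
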
/*
 * We stash the page type into struct page so we can verify the page
 * types are used properly.
 */
static void vmi_set_page_type(u32 pfn, int type)
{
	/* PAE can have multiple roots per page - don't track */
	if (PTRS_PER_PMD > 1 && (type & VMI_PAGE_PDP))
		return;

	if (boot_allocations_applied) {
		struct page *page = pfn_to_page(pfn);
		if (type != VMI_PAGE_NORMAL)
			BUG_ON(page->type);
		else
			BUG_ON(page->type == VMI_PAGE_NORMAL);
		page->type = type & ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
		if (type & VMI_PAGE_ZEROED)
			check_zeroed_page(pfn, type, page);
	} else {
		record_page_type(pfn, type);
	}
}

static void vmi_check_page_type(u32 pfn, int type)
{
	/* PAE can have multiple roots per page - skip checks */
	if (PTRS_PER_PMD > 1 && (type & VMI_PAGE_PDP))
		return;

	type &= ~(VMI_PAGE_ZEROED | VMI_PAGE_CLONE);
	if (boot_allocations_applied) {
		struct page *page = pfn_to_page(pfn);
		BUG_ON((page->type ^ type) & VMI_PAGE_PAE);
		BUG_ON(type == VMI_PAGE_NORMAL && page->type);
		BUG_ON((type & page->type) == 0);
	}
}
#else
#define vmi_set_page_type(p,t) do { } while (0)
#define vmi_check_page_type(p,t) do { } while (0)
#endif

#ifdef CONFIG_HIGHPTE
static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
{
	void *va = kmap_atomic(page, type);

	/*
	 * Internally, the VMI ROM must map virtual addresses to physical
	 * addresses for processing MMU updates.  By the time MMU updates
	 * are issued, this information is typically already lost.
	 * Fortunately, the VMI provides a cache of mapping slots for active
	 * page tables.
	 *
	 * We use slot zero for the linear mapping of physical memory, and
	 * in HIGHPTE kernels, slots 1 and 2 for KM_PTE0 and KM_PTE1.
	 *
	 * args:                 SLOT                 VA    COUNT PFN
	 */
	BUG_ON(type != KM_PTE0 && type != KM_PTE1);
	vmi_ops.set_linear_mapping((type - KM_PTE0)+1, va, 1, page_to_pfn(page));

	return va;
}
#endif

static void vmi_allocate_pt(u32 pfn)
{
	vmi_set_page_type(pfn, VMI_PAGE_L1);
	vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
}

static void vmi_allocate_pd(u32 pfn)
{
	/*
	 * This call comes in very early, before mem_map is setup.
	 * It is called only for swapper_pg_dir, which already has
	 * data on it.
	 */
	vmi_set_page_type(pfn, VMI_PAGE_L2);
	vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0);
}

static void vmi_allocate_pd_clone(u32 pfn, u32 clonepfn, u32 start, u32 count)
{
	vmi_set_page_type(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE);
	vmi_check_page_type(clonepfn, VMI_PAGE_L2);
	vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count);
}

static void vmi_release_pt(u32 pfn)
{
	vmi_ops.release_page(pfn, VMI_PAGE_L1);
	vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
}

static void vmi_release_pd(u32 pfn)
{
	vmi_ops.release_page(pfn, VMI_PAGE_L2);
	vmi_set_page_type(pfn, VMI_PAGE_NORMAL);
}

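/*
 * Sketch of the allocation-hook contract above: the hypervisor learns a
 * page's role before the page can be wired into a live page table
 * (alloc_pt/alloc_pd -> VMI_PAGE_L1/L2) and is told again when the page
 * reverts to normal use (release_pt/release_pd), so it can drop any
 * shadow state. The clone variant additionally copies the pde range
 * [start, start+count) from clonepfn, which is how new non-PAE process
 * roots inherit the kernel mappings of swapper_pg_dir.
 */
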
/*
 * Helper macros for MMU update flags.  We can defer updates until a flush
 * or page invalidation only if the update is to the current address space
 * (otherwise, there is no flush).  We must check against init_mm, since
 * this could be a kernel update, which usually passes init_mm, although
 * sometimes this check can be skipped if we know the particular function
 * is only called on user mode PTEs.  We could change the kernel to pass
 * current->active_mm here, but in particular, I was unsure if changing
 * mm/highmem.c to do this would still be correct on other architectures.
 */
#define is_current_as(mm, mustbeuser) ((mm) == current->active_mm ||	\
				       (!mustbeuser && (mm) == &init_mm))
#define vmi_flags_addr(mm, addr, level, user)				\
	((level) | (is_current_as(mm, user) ?				\
		(VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))
#define vmi_flags_addr_defer(mm, addr, level, user)			\
	((level) | (is_current_as(mm, user) ?				\
		(VMI_PAGE_DEFER | VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))

static void vmi_update_pte(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.update_pte(ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_update_pte_defer(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.update_pte(ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_set_pte(pte_t *ptep, pte_t pte)
{
	/* set_pmd_pte() can call this at the PD level, hence the dual type check */
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE | VMI_PAGE_PD);
	vmi_ops.set_pte(pte, ptep, VMI_PAGE_PT);
}

static void vmi_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
#ifdef CONFIG_X86_PAE
	const pte_t pte = { pmdval.pmd, pmdval.pmd >> 32 };
	vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PMD);
#else
	const pte_t pte = { pmdval.pud.pgd.pgd };
	vmi_check_page_type(__pa(pmdp) >> PAGE_SHIFT, VMI_PAGE_PGD);
#endif
	vmi_ops.set_pte(pte, (pte_t *)pmdp, VMI_PAGE_PD);
}

#ifdef CONFIG_X86_PAE

static void vmi_set_pte_atomic(pte_t *ptep, pte_t pteval)
{
	set_64bit((unsigned long long *)ptep, pte_val(pteval));
	vmi_ops.update_pte(ptep, VMI_PAGE_PT);
}

static void vmi_set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 1));
}

static void vmi_set_pud(pud_t *pudp, pud_t pudval)
{
	/* Um, eww */
	const pte_t pte = { pudval.pgd.pgd, pudval.pgd.pgd >> 32 };
	vmi_check_page_type(__pa(pudp) >> PAGE_SHIFT, VMI_PAGE_PGD);
	vmi_ops.set_pte(pte, (pte_t *)pudp, VMI_PAGE_PDP);
}

static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	const pte_t pte = { 0 };
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_pmd_clear(pmd_t *pmd)
{
	const pte_t pte = { 0 };
	vmi_check_page_type(__pa(pmd) >> PAGE_SHIFT, VMI_PAGE_PMD);
	vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
}
#endif

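/*
 * Design note on the PAE paths above: a 64-bit pte cannot be updated
 * with one 32-bit store, so vmi_set_pte_atomic() writes it via
 * set_64bit() (a cmpxchg8b-based helper) before notifying the
 * hypervisor, while vmi_set_pte_present() may use the deferred flavor
 * on the assumption that its callers flush before the new mapping must
 * be visible.
 */
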
#ifdef CONFIG_SMP
static void __devinit
vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
		     unsigned long start_esp)
{
	struct vmi_ap_state ap;

	/* Default everything to zero.  This is fine for most GPRs. */
	memset(&ap, 0, sizeof(struct vmi_ap_state));

	ap.gdtr_limit = GDT_SIZE - 1;
	ap.gdtr_base = (unsigned long) get_cpu_gdt_table(phys_apicid);

	ap.idtr_limit = IDT_ENTRIES * 8 - 1;
	ap.idtr_base = (unsigned long) idt_table;

	ap.ldtr = 0;

	ap.cs = __KERNEL_CS;
	ap.eip = (unsigned long) start_eip;
	ap.ss = __KERNEL_DS;
	ap.esp = (unsigned long) start_esp;

	ap.ds = __USER_DS;
	ap.es = __USER_DS;
	ap.fs = __KERNEL_PERCPU;
	ap.gs = 0;

	ap.eflags = 0;

#ifdef CONFIG_X86_PAE
	/* efer should match BSP efer. */
	if (cpu_has_nx) {
		unsigned l, h;
		rdmsr(MSR_EFER, l, h);
		ap.efer = (unsigned long long) h << 32 | l;
	}
#endif

	ap.cr3 = __pa(swapper_pg_dir);
	/* Protected mode, paging, AM, WP, NE, MP. */
	ap.cr0 = 0x80050023;
	ap.cr4 = mmu_cr4_features;
	vmi_ops.set_initial_ap_state((u32)&ap, phys_apicid);
}
#endif

static void vmi_set_lazy_mode(enum paravirt_lazy_mode mode)
{
	static DEFINE_PER_CPU(enum paravirt_lazy_mode, lazy_mode);

	if (!vmi_ops.set_lazy_mode)
		return;

	/* Modes should never nest or overlap */
	BUG_ON(__get_cpu_var(lazy_mode) && !(mode == PARAVIRT_LAZY_NONE ||
					     mode == PARAVIRT_LAZY_FLUSH));

	if (mode == PARAVIRT_LAZY_FLUSH) {
		vmi_ops.set_lazy_mode(0);
		vmi_ops.set_lazy_mode(__get_cpu_var(lazy_mode));
	} else {
		vmi_ops.set_lazy_mode(mode);
		__get_cpu_var(lazy_mode) = mode;
	}
}

static inline int __init check_vmi_rom(struct vrom_header *rom)
{
	struct pci_header *pci;
	struct pnp_header *pnp;
	const char *manufacturer = "UNKNOWN";
	const char *product = "UNKNOWN";
	const char *license = "unspecified";

	if (rom->rom_signature != 0xaa55)
		return 0;
	if (rom->vrom_signature != VMI_SIGNATURE)
		return 0;
	if (rom->api_version_maj != VMI_API_REV_MAJOR ||
	    rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
		printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
		       rom->api_version_maj,
		       rom->api_version_min);
		return 0;
	}

	/*
	 * Relying on the VMI_SIGNATURE field is not 100% safe, so check
	 * the PCI header and device type to make sure this is really a
	 * VMI device.
	 */
	if (!rom->pci_header_offs) {
		printk(KERN_WARNING "VMI: ROM does not contain PCI header.\n");
		return 0;
	}

	pci = (struct pci_header *)((char *)rom+rom->pci_header_offs);
	if (pci->vendorID != PCI_VENDOR_ID_VMWARE ||
	    pci->deviceID != PCI_DEVICE_ID_VMWARE_VMI) {
		/* Allow it to run... anyway, but warn */
		printk(KERN_WARNING "VMI: ROM from unknown manufacturer\n");
	}

	if (rom->pnp_header_offs) {
		pnp = (struct pnp_header *)((char *)rom+rom->pnp_header_offs);
		if (pnp->manufacturer_offset)
			manufacturer = (const char *)rom+pnp->manufacturer_offset;
		if (pnp->product_offset)
			product = (const char *)rom+pnp->product_offset;
	}

	if (rom->license_offs)
		license = (char *)rom+rom->license_offs;

	printk(KERN_INFO "VMI: Found %s %s, API version %d.%d, ROM version %d.%d\n",
	       manufacturer, product,
	       rom->api_version_maj, rom->api_version_min,
	       pci->rom_version_maj, pci->rom_version_min);

	/*
	 * Don't allow BSD/MIT here for now because we don't want to end up
	 * with any binary only shim layers.
	 */
	if (strcmp(license, "GPL") && strcmp(license, "GPL v2")) {
		printk(KERN_WARNING "VMI: Non GPL license `%s' found for ROM. Not used.\n",
		       license);
		return 0;
	}

	return 1;
}

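/*
 * Example of what the detection above might print (hypothetical guest):
 *
 *	VMI: Found VMware, Inc. VMI BIOS, API version 3.0, ROM version 1.0
 *
 * The strings come from the ROM's PnP header and vary by product; the
 * signatures, the API version check, and the license gate acceptance.
 */
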
/*
 * Probe for the VMI option ROM
 */
static inline int __init probe_vmi_rom(void)
{
	unsigned long base;

	/* VMI ROM is in option ROM area, check signature */
	for (base = 0xC0000; base < 0xE0000; base += 2048) {
		struct vrom_header *romstart;
		romstart = (struct vrom_header *)isa_bus_to_virt(base);
		if (check_vmi_rom(romstart)) {
			vmi_rom = romstart;
			return 1;
		}
	}
	return 0;
}

/*
 * VMI setup common to all processors
 */
void vmi_bringup(void)
{
	/* We must establish the lowmem mapping for MMU ops to work */
	if (vmi_ops.set_linear_mapping)
		vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, max_low_pfn, 0);
}

/*
 * Return a pointer to a VMI function or NULL if unimplemented
 */
static void *vmi_get_function(int vmicall)
{
	u64 reloc;
	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
	reloc = call_vrom_long_func(vmi_rom, get_reloc, vmicall);
	BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);
	if (rel->type == VMI_RELOCATION_CALL_REL)
		return (void *)rel->eip;
	else
		return NULL;
}

/*
 * Helper macro for making the VMI paravirt-ops fill code readable.
 * For unimplemented operations, fall back to default, unless nop
 * is returned by the ROM.
 */
#define para_fill(opname, vmicall)				\
do {								\
	reloc = call_vrom_long_func(vmi_rom, get_reloc,		\
				    VMI_CALL_##vmicall);	\
	if (rel->type == VMI_RELOCATION_CALL_REL)		\
		paravirt_ops.opname = (void *)rel->eip;		\
	else if (rel->type == VMI_RELOCATION_NOP)		\
		paravirt_ops.opname = (void *)vmi_nop;		\
	else if (rel->type != VMI_RELOCATION_NONE)		\
		printk(KERN_WARNING "VMI: Unknown relocation "	\
				    "type %d for " #vmicall"\n",\
		       rel->type);				\
} while (0)

/*
 * Helper macro for making the VMI paravirt-ops fill code readable.
 * For cached operations which do not match the VMI ROM ABI and must
 * go through a translation stub.  Ignore NOPs, since it is not clear
 * a NOP VMI function corresponds to a NOP paravirt-op when the
 * functions are not in 1-1 correspondence.
 */
#define para_wrap(opname, wrapper, cache, vmicall)		\
do {								\
	reloc = call_vrom_long_func(vmi_rom, get_reloc,		\
				    VMI_CALL_##vmicall);	\
	BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);		\
	if (rel->type == VMI_RELOCATION_CALL_REL) {		\
		paravirt_ops.opname = wrapper;			\
		vmi_ops.cache = (void *)rel->eip;		\
	}							\
} while (0)

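/*
 * For reference, para_fill(clts, CLTS) expands roughly to:
 *
 *	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_CLTS);
 *	if (rel->type == VMI_RELOCATION_CALL_REL)
 *		paravirt_ops.clts = (void *)rel->eip;
 *	else if (rel->type == VMI_RELOCATION_NOP)
 *		paravirt_ops.clts = (void *)vmi_nop;
 *
 * so each op either points straight into the ROM, becomes a nop, or is
 * left at its native default.
 */
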
/*
 * Activate the VMI interface and switch into paravirtualized mode
 */
static inline int __init activate_vmi(void)
{
	short kernel_cs;
	u64 reloc;
	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;

	if (call_vrom_func(vmi_rom, vmi_init) != 0) {
		printk(KERN_ERR "VMI ROM failed to initialize!\n");
		return 0;
	}
	savesegment(cs, kernel_cs);

	paravirt_ops.paravirt_enabled = 1;
	paravirt_ops.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;

	paravirt_ops.patch = vmi_patch;
	paravirt_ops.name = "vmi";

	/*
	 * Many of these operations are ABI compatible with VMI.
	 * This means we can fill in the paravirt-ops with direct
	 * pointers into the VMI ROM.  If the calling convention for
	 * these operations changes, this code needs to be updated.
	 *
	 * Exceptions:
	 *  CPUID paravirt-op uses pointers, not the native ISA
	 *  halt has no VMI equivalent; all VMI halts are "safe"
	 *  no MSR support yet - just trap and emulate.  VMI uses the
	 *    same ABI as the native ISA, but Linux wants exceptions
	 *    from bogus MSR read / write handled
	 *  rdpmc is not yet used in Linux
	 */

	/* CPUID is special, so very special it gets wrapped like a present */
	para_wrap(cpuid, vmi_cpuid, cpuid, CPUID);

	para_fill(clts, CLTS);
	para_fill(get_debugreg, GetDR);
	para_fill(set_debugreg, SetDR);
	para_fill(read_cr0, GetCR0);
	para_fill(read_cr2, GetCR2);
	para_fill(read_cr3, GetCR3);
	para_fill(read_cr4, GetCR4);
	para_fill(write_cr0, SetCR0);
	para_fill(write_cr2, SetCR2);
	para_fill(write_cr3, SetCR3);
	para_fill(write_cr4, SetCR4);
	para_fill(save_fl, GetInterruptMask);
	para_fill(restore_fl, SetInterruptMask);
	para_fill(irq_disable, DisableInterrupts);
	para_fill(irq_enable, EnableInterrupts);

	para_fill(wbinvd, WBINVD);
	para_fill(read_tsc, RDTSC);

	/* The following we emulate with trap and emulate for now */
	/* paravirt_ops.read_msr = vmi_rdmsr */
	/* paravirt_ops.write_msr = vmi_wrmsr */
	/* paravirt_ops.rdpmc = vmi_rdpmc */

	/* TR interface doesn't pass TR value, wrap */
	para_wrap(load_tr_desc, vmi_set_tr, set_tr, SetTR);

	/* LDT is special, too */
	para_wrap(set_ldt, vmi_set_ldt, _set_ldt, SetLDT);

	para_fill(load_gdt, SetGDT);
	para_fill(load_idt, SetIDT);
	para_fill(store_gdt, GetGDT);
	para_fill(store_idt, GetIDT);
	para_fill(store_tr, GetTR);
	paravirt_ops.load_tls = vmi_load_tls;
	para_fill(write_ldt_entry, WriteLDTEntry);
	para_fill(write_gdt_entry, WriteGDTEntry);
	para_fill(write_idt_entry, WriteIDTEntry);
	para_wrap(load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack);
	para_fill(set_iopl_mask, SetIOPLMask);
	para_fill(io_delay, IODelay);
	para_wrap(set_lazy_mode, vmi_set_lazy_mode, set_lazy_mode, SetLazyMode);

	/* user and kernel flush are just handled with different flags to FlushTLB */
	para_wrap(flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB);
	para_wrap(flush_tlb_kernel, vmi_flush_tlb_kernel, _flush_tlb, FlushTLB);
	para_fill(flush_tlb_single, InvalPage);

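	/*
	 * Note: both TLB wrappers funnel into the single FlushTLB ROM
	 * entry; vmi_flush_tlb_kernel() additionally passes
	 * VMI_FLUSH_GLOBAL so global (kernel) mappings are invalidated
	 * too, mirroring the native difference between reloading CR3
	 * and toggling CR4.PGE.
	 */
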
	/*
	 * Until a standard flag format can be agreed on, we need to
	 * implement these as wrappers in Linux.  Get the VMI ROM
	 * function pointers for the two backend calls.
	 */
#ifdef CONFIG_X86_PAE
	vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxELong);
	vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxELong);
#else
	vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxE);
	vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxE);
#endif

	if (vmi_ops.set_pte) {
		paravirt_ops.set_pte = vmi_set_pte;
		paravirt_ops.set_pte_at = vmi_set_pte_at;
		paravirt_ops.set_pmd = vmi_set_pmd;
#ifdef CONFIG_X86_PAE
		paravirt_ops.set_pte_atomic = vmi_set_pte_atomic;
		paravirt_ops.set_pte_present = vmi_set_pte_present;
		paravirt_ops.set_pud = vmi_set_pud;
		paravirt_ops.pte_clear = vmi_pte_clear;
		paravirt_ops.pmd_clear = vmi_pmd_clear;
#endif
	}

	if (vmi_ops.update_pte) {
		paravirt_ops.pte_update = vmi_update_pte;
		paravirt_ops.pte_update_defer = vmi_update_pte_defer;
	}

	vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
	if (vmi_ops.allocate_page) {
		paravirt_ops.alloc_pt = vmi_allocate_pt;
		paravirt_ops.alloc_pd = vmi_allocate_pd;
		paravirt_ops.alloc_pd_clone = vmi_allocate_pd_clone;
	}

	vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
	if (vmi_ops.release_page) {
		paravirt_ops.release_pt = vmi_release_pt;
		paravirt_ops.release_pd = vmi_release_pd;
	}

	/* Set linear is needed in all cases */
	vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
#ifdef CONFIG_HIGHPTE
	if (vmi_ops.set_linear_mapping)
		paravirt_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
#endif

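	/*
	 * Sanity sketch of the fallback behavior above: if the ROM
	 * implements none of the page table calls, vmi_get_function()
	 * returns NULL for each, the "if (vmi_ops.X)" blocks are all
	 * skipped, and the kernel keeps the native paravirt-ops
	 * defaults, so a sparse ROM degrades gracefully.
	 */
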
	/*
	 * These MUST always be patched.  Don't support indirect jumps
	 * through these operations, as the VMI interface may use either
	 * a jump or a call to get to these operations, depending on
	 * the backend.  They are performance critical anyway, so requiring
	 * a patch is not a big problem.
	 */
	paravirt_ops.irq_enable_sysexit = (void *)0xfeedbab0;
	paravirt_ops.iret = (void *)0xbadbab0;

#ifdef CONFIG_SMP
	para_wrap(startup_ipi_hook, vmi_startup_ipi_hook, set_initial_ap_state, SetInitialAPState);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
	para_fill(apic_read, APICRead);
	para_fill(apic_write, APICWrite);
	para_fill(apic_write_atomic, APICWrite);
#endif

	/*
	 * Check for VMI timer functionality by probing for a cycle frequency method
	 */
	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_GetCycleFrequency);
	if (!disable_vmi_timer && rel->type != VMI_RELOCATION_NONE) {
		vmi_timer_ops.get_cycle_frequency = (void *)rel->eip;
		vmi_timer_ops.get_cycle_counter =
			vmi_get_function(VMI_CALL_GetCycleCounter);
		vmi_timer_ops.get_wallclock =
			vmi_get_function(VMI_CALL_GetWallclockTime);
		vmi_timer_ops.wallclock_updated =
			vmi_get_function(VMI_CALL_WallclockUpdated);
		vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm);
		vmi_timer_ops.cancel_alarm =
			vmi_get_function(VMI_CALL_CancelAlarm);
		paravirt_ops.time_init = vmi_time_init;
		paravirt_ops.get_wallclock = vmi_get_wallclock;
		paravirt_ops.set_wallclock = vmi_set_wallclock;
#ifdef CONFIG_X86_LOCAL_APIC
		paravirt_ops.setup_boot_clock = vmi_time_bsp_init;
		paravirt_ops.setup_secondary_clock = vmi_time_ap_init;
#endif
		paravirt_ops.get_scheduled_cycles = vmi_get_sched_cycles;
		paravirt_ops.get_cpu_khz = vmi_cpu_khz;

		/* We have true wallclock functions; disable CMOS clock sync */
		no_sync_cmos_clock = 1;
	} else {
		disable_noidle = 1;
		disable_vmi_timer = 1;
	}

	para_fill(safe_halt, Halt);

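	/*
	 * Timer fallback note: if the ROM lacks GetCycleFrequency (or
	 * "vmi=disable_timer" was given), both the VMI timer and the
	 * no-idle optimization are disabled above, and timekeeping
	 * stays on the native paths with CMOS clock sync left enabled.
	 */
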
	/*
	 * Alternative instruction rewriting doesn't happen soon enough
	 * to convert VMI_IRET to a call instead of a jump; so we have
	 * to do this before IRQs get reenabled.  Fortunately, it is
	 * idempotent.
	 */
	apply_paravirt(__parainstructions, __parainstructions_end);

	vmi_bringup();

	return 1;
}

#undef para_fill

void __init vmi_init(void)
{
	unsigned long flags;

	if (!vmi_rom)
		probe_vmi_rom();
	else
		check_vmi_rom(vmi_rom);

	/* In case probing for or validating the ROM failed, bail */
	if (!vmi_rom)
		return;

	reserve_top_address(-vmi_rom->virtual_top);

	local_irq_save(flags);
	activate_vmi();

#ifdef CONFIG_X86_IO_APIC
	/* This is virtual hardware; timer routing is wired correctly */
	no_timer_check = 1;
#endif
	local_irq_restore(flags & X86_EFLAGS_IF);
}

static int __init parse_vmi(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "disable_pge")) {
		clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
		disable_pge = 1;
	} else if (!strcmp(arg, "disable_pse")) {
		clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
		disable_pse = 1;
	} else if (!strcmp(arg, "disable_sep")) {
		clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability);
		disable_sep = 1;
	} else if (!strcmp(arg, "disable_tsc")) {
		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
		disable_tsc = 1;
	} else if (!strcmp(arg, "disable_mtrr")) {
		clear_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability);
		disable_mtrr = 1;
	} else if (!strcmp(arg, "disable_timer")) {
		disable_vmi_timer = 1;
		disable_noidle = 1;
	} else if (!strcmp(arg, "disable_noidle"))
		disable_noidle = 1;
	return 0;
}

early_param("vmi", parse_vmi);

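/*
 * Usage sketch: each option is a single token, one per "vmi=" instance
 * on the kernel command line, e.g.
 *
 *	vmi=disable_tsc vmi=disable_noidle
 *
 * (Illustrative; note that "disable_timer" implies "disable_noidle".)
 */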