vmm.c revision 276403
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/amd64/vmm/vmm.c 276403 2014-12-30 08:24:14Z neel $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/vmm.c 276403 2014-12-30 08:24:14Z neel $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>

#include <machine/cpu.h>
#include <machine/vm.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <x86/psl.h>
#include <x86/apicreg.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>

#include "vmm_ioport.h"
#include "vmm_ktr.h"
#include "vmm_host.h"
#include "vmm_mem.h"
#include "vmm_util.h"
#include "vatpic.h"
#include "vatpit.h"
#include "vhpet.h"
#include "vioapic.h"
#include "vlapic.h"
#include "vmm_ipi.h"
#include "vmm_stat.h"
#include "vmm_lapic.h"

#include "io/ppt.h"
#include "io/iommu.h"

struct vlapic;

/*
 * Initialization:
 * (a) allocated when vcpu is created
 * (i) initialized when vcpu is created and when it is reinitialized
 * (o) initialized the first time the vcpu is created
 * (x) initialized before use
 */
struct vcpu {
	struct mtx	mtx;		/* (o) protects 'state' and 'hostcpu' */
	enum vcpu_state	state;		/* (o) vcpu state */
	int		hostcpu;	/* (o) vcpu's host cpu */
	struct vlapic	*vlapic;	/* (i) APIC device model */
	enum x2apic_state x2apic_state;	/* (i) APIC mode */
	uint64_t	exitintinfo;	/* (i) events pending at VM exit */
	int		nmi_pending;	/* (i) NMI pending */
	int		extint_pending;	/* (i) INTR pending */
	struct vm_exception exception;	/* (x) exception collateral */
	int		exception_pending; /* (i) exception pending */
	struct savefpu	*guestfpu;	/* (a,i) guest fpu state */
	uint64_t	guest_xcr0;	/* (i) guest %xcr0 register */
	void		*stats;		/* (a,i) statistics */
	struct vm_exit	exitinfo;	/* (x) exit reason and collateral */
};

#define	vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx))
#define	vcpu_lock_init(v)	mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define	vcpu_lock(v)		mtx_lock_spin(&((v)->mtx))
#define	vcpu_unlock(v)		mtx_unlock_spin(&((v)->mtx))
#define	vcpu_assert_locked(v)	mtx_assert(&((v)->mtx), MA_OWNED)

struct mem_seg {
	vm_paddr_t	gpa;
	size_t		len;
	boolean_t	wired;
	vm_object_t	object;
};
#define	VM_MAX_MEMORY_SEGMENTS	2

/*
 * Initialization:
 * (o) initialized the first time the VM is created
 * (i) initialized when VM is created and when it is reinitialized
 * (x) initialized before use
 */
struct vm {
	void		*cookie;		/* (i) cpu-specific data */
	void		*iommu;			/* (x) iommu-specific data */
	struct vhpet	*vhpet;			/* (i) virtual HPET */
	struct vioapic	*vioapic;		/* (i) virtual ioapic */
	struct vatpic	*vatpic;		/* (i) virtual atpic */
	struct vatpit	*vatpit;		/* (i) virtual atpit */
	volatile cpuset_t active_cpus;		/* (i) active vcpus */
	int		suspend;		/* (i) stop VM execution */
	volatile cpuset_t suspended_cpus;	/* (i) suspended vcpus */
	volatile cpuset_t halted_cpus;		/* (x) cpus in a hard halt */
	cpuset_t	rendezvous_req_cpus;	/* (x) rendezvous requested */
	cpuset_t	rendezvous_done_cpus;	/* (x) rendezvous finished */
	void		*rendezvous_arg;	/* (x) rendezvous func/arg */
	vm_rendezvous_func_t rendezvous_func;
	struct mtx	rendezvous_mtx;		/* (o) rendezvous lock */
	int		num_mem_segs;		/* (o) guest memory segments */
	struct mem_seg	mem_segs[VM_MAX_MEMORY_SEGMENTS];
	struct vmspace	*vmspace;		/* (o) guest's address space */
	char		name[VM_MAX_NAMELEN];	/* (o) virtual machine name */
	struct vcpu	vcpu[VM_MAXCPU];	/* (i) guest vcpus */
};

static int vmm_initialized;

static struct vmm_ops *ops;
#define	VMM_INIT(num)	(ops != NULL ? (*ops->init)(num) : 0)
#define	VMM_CLEANUP()	(ops != NULL ? (*ops->cleanup)() : 0)
#define	VMM_RESUME()	(ops != NULL ? (*ops->resume)() : 0)

#define	VMINIT(vm, pmap) (ops != NULL ? (*ops->vminit)(vm, pmap): NULL)
#define	VMRUN(vmi, vcpu, rip, pmap, rptr, sptr) \
	(ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip, pmap, rptr, sptr) : ENXIO)
#define	VMCLEANUP(vmi)	(ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
#define	VMSPACE_ALLOC(min, max) \
	(ops != NULL ? (*ops->vmspace_alloc)(min, max) : NULL)
#define	VMSPACE_FREE(vmspace) \
	(ops != NULL ? (*ops->vmspace_free)(vmspace) : ENXIO)
#define	VMGETREG(vmi, vcpu, num, retval) \
	(ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETREG(vmi, vcpu, num, val) \
	(ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
#define	VMGETDESC(vmi, vcpu, num, desc) \
	(ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMSETDESC(vmi, vcpu, num, desc) \
	(ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMGETCAP(vmi, vcpu, num, retval) \
	(ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETCAP(vmi, vcpu, num, val) \
	(ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)
#define	VLAPIC_INIT(vmi, vcpu) \
	(ops != NULL ? (*ops->vlapic_init)(vmi, vcpu) : NULL)
#define	VLAPIC_CLEANUP(vmi, vlapic) \
	(ops != NULL ? (*ops->vlapic_cleanup)(vmi, vlapic) : NULL)

#define	fpu_start_emulating()	load_cr0(rcr0() | CR0_TS)
#define	fpu_stop_emulating()	clts()
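
/*
 * Note on the two macros above: setting CR0.TS makes the next FPU
 * instruction executed by the host trap with #NM, which catches any
 * accidental host use of the FPU while it holds guest state; clts()
 * clears TS again around the explicit fpurestore()/fpusave() calls
 * further down in this file.
 */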

static MALLOC_DEFINE(M_VM, "vm", "vm");

/* statistics */
static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");

SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);

/*
 * Halt the guest if all vcpus are executing a HLT instruction with
 * interrupts disabled.
 */
static int halt_detection_enabled = 1;
TUNABLE_INT("hw.vmm.halt_detection", &halt_detection_enabled);
SYSCTL_INT(_hw_vmm, OID_AUTO, halt_detection, CTLFLAG_RDTUN,
    &halt_detection_enabled, 0,
    "Halt VM if all vcpus execute HLT with interrupts disabled");
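
/*
 * Because the knob above is declared with CTLFLAG_RDTUN it is set as a
 * loader tunable, e.g. hw.vmm.halt_detection="0" in /boot/loader.conf,
 * and is read-only via sysctl(8) afterwards.
 */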

static int vmm_ipinum;
SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
    "IPI vector used for vcpu notifications");

static int trace_guest_exceptions;
SYSCTL_INT(_hw_vmm, OID_AUTO, trace_guest_exceptions, CTLFLAG_RDTUN,
    &trace_guest_exceptions, 0,
    "Trap into hypervisor on all guest exceptions and reflect them back");

static void
vcpu_cleanup(struct vm *vm, int i, bool destroy)
{
	struct vcpu *vcpu = &vm->vcpu[i];

	VLAPIC_CLEANUP(vm->cookie, vcpu->vlapic);
	if (destroy) {
		vmm_stat_free(vcpu->stats);
		fpu_save_area_free(vcpu->guestfpu);
	}
}

static void
vcpu_init(struct vm *vm, int vcpu_id, bool create)
{
	struct vcpu *vcpu;

	KASSERT(vcpu_id >= 0 && vcpu_id < VM_MAXCPU,
	    ("vcpu_init: invalid vcpu %d", vcpu_id));

	vcpu = &vm->vcpu[vcpu_id];

	if (create) {
		KASSERT(!vcpu_lock_initialized(vcpu), ("vcpu %d already "
		    "initialized", vcpu_id));
		vcpu_lock_init(vcpu);
		vcpu->state = VCPU_IDLE;
		vcpu->hostcpu = NOCPU;
		vcpu->guestfpu = fpu_save_area_alloc();
		vcpu->stats = vmm_stat_alloc();
	}

	vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id);
	vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
	vcpu->exitintinfo = 0;
	vcpu->nmi_pending = 0;
	vcpu->extint_pending = 0;
	vcpu->exception_pending = 0;
	vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
	fpu_save_area_reset(vcpu->guestfpu);
	vmm_stat_init(vcpu->stats);
}

int
vcpu_trace_exceptions(struct vm *vm, int vcpuid)
{

	return (trace_guest_exceptions);
}

struct vm_exit *
vm_exitinfo(struct vm *vm, int cpuid)
{
	struct vcpu *vcpu;

	if (cpuid < 0 || cpuid >= VM_MAXCPU)
		panic("vm_exitinfo: invalid cpuid %d", cpuid);

	vcpu = &vm->vcpu[cpuid];

	return (&vcpu->exitinfo);
}

static void
vmm_resume(void)
{
	VMM_RESUME();
}

static int
vmm_init(void)
{
	int error;

	vmm_host_state_init();

	vmm_ipinum = vmm_ipi_alloc();
	if (vmm_ipinum == 0)
		vmm_ipinum = IPI_AST;

	error = vmm_mem_init();
	if (error)
		return (error);

	if (vmm_is_intel())
		ops = &vmm_ops_intel;
	else if (vmm_is_amd())
		ops = &vmm_ops_amd;
	else
		return (ENXIO);

	vmm_resume_p = vmm_resume;

	return (VMM_INIT(vmm_ipinum));
}

static int
vmm_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		vmmdev_init();
		if (ppt_avail_devices() > 0)
			iommu_init();
		error = vmm_init();
		if (error == 0)
			vmm_initialized = 1;
		break;
	case MOD_UNLOAD:
		error = vmmdev_cleanup();
		if (error == 0) {
			vmm_resume_p = NULL;
			iommu_cleanup();
			if (vmm_ipinum != IPI_AST)
				vmm_ipi_free(vmm_ipinum);
			error = VMM_CLEANUP();
			/*
			 * Something bad happened - prevent new
			 * VMs from being created
			 */
			if (error)
				vmm_initialized = 0;
		}
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static moduledata_t vmm_kmod = {
	"vmm",
	vmm_handler,
	NULL
};

/*
 * vmm initialization has the following dependencies:
 *
 * - iommu initialization must happen after the pci passthru driver has had
 *   a chance to attach to any passthru devices (after SI_SUB_CONFIGURE).
 *
 * - VT-x initialization requires smp_rendezvous() and therefore must happen
 *   after SMP is fully functional (after SI_SUB_SMP).
 */
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);

static void
vm_init(struct vm *vm, bool create)
{
	int i;

	vm->cookie = VMINIT(vm, vmspace_pmap(vm->vmspace));
	vm->iommu = NULL;
	vm->vioapic = vioapic_init(vm);
	vm->vhpet = vhpet_init(vm);
	vm->vatpic = vatpic_init(vm);
	vm->vatpit = vatpit_init(vm);

	CPU_ZERO(&vm->active_cpus);

	vm->suspend = 0;
	CPU_ZERO(&vm->suspended_cpus);

	for (i = 0; i < VM_MAXCPU; i++)
		vcpu_init(vm, i, create);
}

int
vm_create(const char *name, struct vm **retvm)
{
	struct vm *vm;
	struct vmspace *vmspace;

	/*
	 * If vmm.ko could not be successfully initialized then don't attempt
	 * to create the virtual machine.
	 */
	if (!vmm_initialized)
		return (ENXIO);

	if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
		return (EINVAL);

	vmspace = VMSPACE_ALLOC(VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
	if (vmspace == NULL)
		return (ENOMEM);

	vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
	strcpy(vm->name, name);
	vm->num_mem_segs = 0;
	vm->vmspace = vmspace;
	mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);

	vm_init(vm, true);

	*retvm = vm;
	return (0);
}

static void
vm_free_mem_seg(struct vm *vm, struct mem_seg *seg)
{

	if (seg->object != NULL)
		vmm_mem_free(vm->vmspace, seg->gpa, seg->len);

	bzero(seg, sizeof(*seg));
}

static void
vm_cleanup(struct vm *vm, bool destroy)
{
	int i;

	ppt_unassign_all(vm);

	if (vm->iommu != NULL)
		iommu_destroy_domain(vm->iommu);

	vatpit_cleanup(vm->vatpit);
	vhpet_cleanup(vm->vhpet);
	vatpic_cleanup(vm->vatpic);
	vioapic_cleanup(vm->vioapic);

	for (i = 0; i < VM_MAXCPU; i++)
		vcpu_cleanup(vm, i, destroy);

	VMCLEANUP(vm->cookie);

	if (destroy) {
		for (i = 0; i < vm->num_mem_segs; i++)
			vm_free_mem_seg(vm, &vm->mem_segs[i]);

		vm->num_mem_segs = 0;

		VMSPACE_FREE(vm->vmspace);
		vm->vmspace = NULL;
	}
}

void
vm_destroy(struct vm *vm)
{
	vm_cleanup(vm, true);
	free(vm, M_VM);
}

int
vm_reinit(struct vm *vm)
{
	int error;

	/*
	 * A virtual machine can be reset only if all vcpus are suspended.
	 */
	if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
		vm_cleanup(vm, false);
		vm_init(vm, false);
		error = 0;
	} else {
		error = EBUSY;
	}

	return (error);
}

const char *
vm_name(struct vm *vm)
{
	return (vm->name);
}

int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	vm_object_t obj;

	if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
		return (ENOMEM);
	else
		return (0);
}

int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{

	vmm_mmio_free(vm->vmspace, gpa, len);
	return (0);
}

boolean_t
vm_mem_allocated(struct vm *vm, vm_paddr_t gpa)
{
	int i;
	vm_paddr_t gpabase, gpalimit;

	for (i = 0; i < vm->num_mem_segs; i++) {
		gpabase = vm->mem_segs[i].gpa;
		gpalimit = gpabase + vm->mem_segs[i].len;
		if (gpa >= gpabase && gpa < gpalimit)
			return (TRUE);		/* 'gpa' is regular memory */
	}

	if (ppt_is_mmio(vm, gpa))
		return (TRUE);			/* 'gpa' is pci passthru mmio */

	return (FALSE);
}

int
vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	int available, allocated;
	struct mem_seg *seg;
	vm_object_t object;
	vm_paddr_t g;

	if ((gpa & PAGE_MASK) || (len & PAGE_MASK) || len == 0)
		return (EINVAL);

	available = allocated = 0;
	g = gpa;
	while (g < gpa + len) {
		if (vm_mem_allocated(vm, g))
			allocated++;
		else
			available++;

		g += PAGE_SIZE;
	}

	/*
	 * If there are some allocated and some available pages in the address
	 * range then it is an error.
	 */
	if (allocated && available)
		return (EINVAL);

	/*
	 * If the entire address range being requested has already been
	 * allocated then there isn't anything more to do.
	 */
	if (allocated && available == 0)
		return (0);

	if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
		return (E2BIG);

	seg = &vm->mem_segs[vm->num_mem_segs];

	if ((object = vmm_mem_alloc(vm->vmspace, gpa, len)) == NULL)
		return (ENOMEM);

	seg->gpa = gpa;
	seg->len = len;
	seg->object = object;
	seg->wired = FALSE;

	vm->num_mem_segs++;

	return (0);
}

static vm_paddr_t
vm_maxmem(struct vm *vm)
{
	int i;
	vm_paddr_t gpa, maxmem;

	maxmem = 0;
	for (i = 0; i < vm->num_mem_segs; i++) {
		gpa = vm->mem_segs[i].gpa + vm->mem_segs[i].len;
		if (gpa > maxmem)
			maxmem = gpa;
	}
	return (maxmem);
}

static void
vm_gpa_unwire(struct vm *vm)
{
	int i, rv;
	struct mem_seg *seg;

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		if (!seg->wired)
			continue;

		rv = vm_map_unwire(&vm->vmspace->vm_map,
		    seg->gpa, seg->gpa + seg->len,
		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		KASSERT(rv == KERN_SUCCESS, ("vm(%s) memory segment "
		    "%#lx/%ld could not be unwired: %d",
		    vm_name(vm), seg->gpa, seg->len, rv));

		seg->wired = FALSE;
	}
}

static int
vm_gpa_wire(struct vm *vm)
{
	int i, rv;
	struct mem_seg *seg;

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		if (seg->wired)
			continue;

		/* XXX rlimits? */
		rv = vm_map_wire(&vm->vmspace->vm_map,
		    seg->gpa, seg->gpa + seg->len,
		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		if (rv != KERN_SUCCESS)
			break;

		seg->wired = TRUE;
	}

	if (i < vm->num_mem_segs) {
		/*
		 * Undo the wiring before returning an error.
		 */
		vm_gpa_unwire(vm);
		return (EAGAIN);
	}

	return (0);
}

static void
vm_iommu_modify(struct vm *vm, boolean_t map)
{
	int i, sz;
	vm_paddr_t gpa, hpa;
	struct mem_seg *seg;
	void *vp, *cookie, *host_domain;

	sz = PAGE_SIZE;
	host_domain = iommu_host_domain();

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		KASSERT(seg->wired, ("vm(%s) memory segment %#lx/%ld not wired",
		    vm_name(vm), seg->gpa, seg->len));

		gpa = seg->gpa;
		while (gpa < seg->gpa + seg->len) {
			vp = vm_gpa_hold(vm, gpa, PAGE_SIZE, VM_PROT_WRITE,
			    &cookie);
			KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
			    vm_name(vm), gpa));

			vm_gpa_release(cookie);

			hpa = DMAP_TO_PHYS((uintptr_t)vp);
			if (map) {
				iommu_create_mapping(vm->iommu, gpa, hpa, sz);
				iommu_remove_mapping(host_domain, hpa, sz);
			} else {
				iommu_remove_mapping(vm->iommu, gpa, sz);
				iommu_create_mapping(host_domain, hpa, hpa, sz);
			}

			gpa += PAGE_SIZE;
		}
	}

	/*
	 * Invalidate the cached translations associated with the domain
	 * from which pages were removed.
	 */
	if (map)
		iommu_invalidate_tlb(host_domain);
	else
		iommu_invalidate_tlb(vm->iommu);
}

#define	vm_iommu_unmap(vm)	vm_iommu_modify((vm), FALSE)
#define	vm_iommu_map(vm)	vm_iommu_modify((vm), TRUE)

int
vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func)
{
	int error;

	error = ppt_unassign_device(vm, bus, slot, func);
	if (error)
		return (error);

	if (ppt_assigned_devices(vm) == 0) {
		vm_iommu_unmap(vm);
		vm_gpa_unwire(vm);
	}
	return (0);
}

int
vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
{
	int error;
	vm_paddr_t maxaddr;

	/*
	 * Virtual machines with pci passthru devices get special treatment:
	 * - the guest physical memory is wired
	 * - the iommu is programmed to do the 'gpa' to 'hpa' translation
	 *
	 * We need to do this before the first pci passthru device is attached.
	 */
	if (ppt_assigned_devices(vm) == 0) {
		KASSERT(vm->iommu == NULL,
		    ("vm_assign_pptdev: iommu must be NULL"));
		maxaddr = vm_maxmem(vm);
		vm->iommu = iommu_create_domain(maxaddr);

		error = vm_gpa_wire(vm);
		if (error)
			return (error);

		vm_iommu_map(vm);
	}

	error = ppt_assign_device(vm, bus, slot, func);
	return (error);
}

void *
vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
    void **cookie)
{
	int count, pageoff;
	vm_page_t m;

	pageoff = gpa & PAGE_MASK;
	if (len > PAGE_SIZE - pageoff)
		panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);

	count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
	    trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);

	if (count == 1) {
		*cookie = m;
		return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
	} else {
		*cookie = NULL;
		return (NULL);
	}
}

void
vm_gpa_release(void *cookie)
{
	vm_page_t m = cookie;

	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
}

int
vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
    struct vm_memory_segment *seg)
{
	int i;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if (gpabase == vm->mem_segs[i].gpa) {
			seg->gpa = vm->mem_segs[i].gpa;
			seg->len = vm->mem_segs[i].len;
			seg->wired = vm->mem_segs[i].wired;
			return (0);
		}
	}
	return (-1);
}

int
vm_get_memobj(struct vm *vm, vm_paddr_t gpa, size_t len,
    vm_offset_t *offset, struct vm_object **object)
{
	int i;
	size_t seg_len;
	vm_paddr_t seg_gpa;
	vm_object_t seg_obj;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if ((seg_obj = vm->mem_segs[i].object) == NULL)
			continue;

		seg_gpa = vm->mem_segs[i].gpa;
		seg_len = vm->mem_segs[i].len;

		if (gpa >= seg_gpa && gpa < seg_gpa + seg_len) {
			*offset = gpa - seg_gpa;
			*object = seg_obj;
			vm_object_reference(seg_obj);
			return (0);
		}
	}

	return (EINVAL);
}

int
vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMGETREG(vm->cookie, vcpu, reg, retval));
}

int
vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMSETREG(vm->cookie, vcpu, reg, val));
}

static boolean_t
is_descriptor_table(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_IDTR:
	case VM_REG_GUEST_GDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

static boolean_t
is_segment_register(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_TR:
	case VM_REG_GUEST_LDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

int
vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMGETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMSETDESC(vm->cookie, vcpu, reg, desc));
}

static void
restore_guest_fpustate(struct vcpu *vcpu)
{

	/* flush host state to the pcb */
	fpuexit(curthread);

	/* restore guest FPU state */
	fpu_stop_emulating();
	fpurestore(vcpu->guestfpu);

	/* restore guest XCR0 if XSAVE is enabled in the host */
	if (rcr4() & CR4_XSAVE)
		load_xcr(0, vcpu->guest_xcr0);

	/*
	 * The FPU is now "dirty" with the guest's state so turn on emulation
	 * to trap any access to the FPU by the host.
	 */
	fpu_start_emulating();
}

static void
save_guest_fpustate(struct vcpu *vcpu)
{

	if ((rcr0() & CR0_TS) == 0)
		panic("fpu emulation not enabled in host!");

	/* save guest XCR0 and restore host XCR0 */
	if (rcr4() & CR4_XSAVE) {
		vcpu->guest_xcr0 = rxcr(0);
		load_xcr(0, vmm_get_host_xcr0());
	}

	/* save guest FPU state */
	fpu_stop_emulating();
	fpusave(vcpu->guestfpu);
	fpu_start_emulating();
}

static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");

static int
vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
    bool from_idle)
{
	int error;

	vcpu_assert_locked(vcpu);

	/*
	 * State transitions from the vmmdev_ioctl() must always begin from
	 * the VCPU_IDLE state. This guarantees that there is only a single
	 * ioctl() operating on a vcpu at any point.
	 */
	if (from_idle) {
		while (vcpu->state != VCPU_IDLE)
			msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
	} else {
		KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
		    "vcpu idle state"));
	}

	if (vcpu->state == VCPU_RUNNING) {
		KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
		    "mismatch for running vcpu", curcpu, vcpu->hostcpu));
	} else {
		KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
		    "vcpu that is not running", vcpu->hostcpu));
	}

	/*
	 * The following state transitions are allowed:
	 * IDLE -> FROZEN -> IDLE
	 * FROZEN -> RUNNING -> FROZEN
	 * FROZEN -> SLEEPING -> FROZEN
	 */
	switch (vcpu->state) {
	case VCPU_IDLE:
	case VCPU_RUNNING:
	case VCPU_SLEEPING:
		error = (newstate != VCPU_FROZEN);
		break;
	case VCPU_FROZEN:
		error = (newstate == VCPU_FROZEN);
		break;
	default:
		error = 1;
		break;
	}

	if (error)
		return (EBUSY);

	vcpu->state = newstate;
	if (newstate == VCPU_RUNNING)
		vcpu->hostcpu = curcpu;
	else
		vcpu->hostcpu = NOCPU;

	if (newstate == VCPU_IDLE)
		wakeup(&vcpu->state);

	return (0);
}

static void
vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
		panic("Error %d setting state to %d\n", error, newstate);
}

static void
vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0)
		panic("Error %d setting state to %d", error, newstate);
}

static void
vm_set_rendezvous_func(struct vm *vm, vm_rendezvous_func_t func)
{

	KASSERT(mtx_owned(&vm->rendezvous_mtx), ("rendezvous_mtx not locked"));

	/*
	 * Update 'rendezvous_func' and execute a write memory barrier to
	 * ensure that it is visible across all host cpus. This is not needed
	 * for correctness but it does ensure that all the vcpus will notice
	 * that the rendezvous is requested immediately.
	 */
	vm->rendezvous_func = func;
	wmb();
}

#define	RENDEZVOUS_CTR0(vm, vcpuid, fmt)				\
	do {								\
		if (vcpuid >= 0)					\
			VCPU_CTR0(vm, vcpuid, fmt);			\
		else							\
			VM_CTR0(vm, fmt);				\
	} while (0)

static void
vm_handle_rendezvous(struct vm *vm, int vcpuid)
{

	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
	    ("vm_handle_rendezvous: invalid vcpuid %d", vcpuid));

	mtx_lock(&vm->rendezvous_mtx);
	while (vm->rendezvous_func != NULL) {
		/* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
		CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus);

		if (vcpuid != -1 &&
		    CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
		    !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
			VCPU_CTR0(vm, vcpuid, "Calling rendezvous func");
			(*vm->rendezvous_func)(vm, vcpuid, vm->rendezvous_arg);
			CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
		}
		if (CPU_CMP(&vm->rendezvous_req_cpus,
		    &vm->rendezvous_done_cpus) == 0) {
			VCPU_CTR0(vm, vcpuid, "Rendezvous completed");
			vm_set_rendezvous_func(vm, NULL);
			wakeup(&vm->rendezvous_func);
			break;
		}
		RENDEZVOUS_CTR0(vm, vcpuid, "Wait for rendezvous completion");
		mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
		    "vmrndv", 0);
	}
	mtx_unlock(&vm->rendezvous_mtx);
}

/*
 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
 */
static int
vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
{
	struct vcpu *vcpu;
	const char *wmesg;
	int error, t, vcpu_halted, vm_halted;

	KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));

	vcpu = &vm->vcpu[vcpuid];
	vcpu_halted = 0;
	vm_halted = 0;

	/*
	 * The typical way to halt a cpu is to execute: "sti; hlt"
	 *
	 * STI sets RFLAGS.IF to enable interrupts. However, the processor
	 * remains in an "interrupt shadow" for an additional instruction
	 * following the STI. This guarantees that the "sti; hlt" sequence is
	 * atomic and a pending interrupt will be recognized after the HLT.
	 *
	 * After the HLT emulation is done the vcpu is no longer in an
	 * interrupt shadow and a pending interrupt can be injected on
	 * the next entry into the guest.
	 */
	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
	KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
	    __func__, error));

	vcpu_lock(vcpu);
	while (1) {
		/*
		 * Do a final check for pending NMI or interrupts before
		 * really putting this thread to sleep. Also check for
		 * software events that would cause this vcpu to wakeup.
		 *
		 * These interrupts/events could have happened after the
		 * vcpu returned from VMRUN() and before it acquired the
		 * vcpu lock above.
		 */
		if (vm->rendezvous_func != NULL || vm->suspend)
			break;
		if (vm_nmi_pending(vm, vcpuid))
			break;
		if (!intr_disabled) {
			if (vm_extint_pending(vm, vcpuid) ||
			    vlapic_pending_intr(vcpu->vlapic, NULL)) {
				break;
			}
		}

		/* Don't go to sleep if the vcpu thread needs to yield */
		if (vcpu_should_yield(vm, vcpuid))
			break;

		/*
		 * Some Linux guests implement "halt" by having all vcpus
		 * execute HLT with interrupts disabled. 'halted_cpus' keeps
		 * track of the vcpus that have entered this state. When all
		 * vcpus enter the halted state the virtual machine is halted.
		 */
		if (intr_disabled) {
			wmesg = "vmhalt";
			VCPU_CTR0(vm, vcpuid, "Halted");
			if (!vcpu_halted && halt_detection_enabled) {
				vcpu_halted = 1;
				CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus);
			}
			if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) {
				vm_halted = 1;
				break;
			}
		} else {
			wmesg = "vmidle";
		}

		t = ticks;
		vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
		/*
		 * XXX msleep_spin() cannot be interrupted by signals so
		 * wake up periodically to check pending signals.
		 */
		msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
		vcpu_require_state_locked(vcpu, VCPU_FROZEN);
		vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
	}

	if (vcpu_halted)
		CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus);

	vcpu_unlock(vcpu);

	if (vm_halted)
		vm_suspend(vm, VM_SUSPEND_HALT);

	return (0);
}

static int
vm_handle_paging(struct vm *vm, int vcpuid, bool *retu)
{
	int rv, ftype;
	struct vm_map *map;
	struct vcpu *vcpu;
	struct vm_exit *vme;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	ftype = vme->u.paging.fault_type;
	KASSERT(ftype == VM_PROT_READ ||
	    ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
	    ("vm_handle_paging: invalid fault_type %d", ftype));

	if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
		rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
		    vme->u.paging.gpa, ftype);
		if (rv == 0) {
			VCPU_CTR2(vm, vcpuid, "%s bit emulation for gpa %#lx",
			    ftype == VM_PROT_READ ? "accessed" : "dirty",
			    vme->u.paging.gpa);
			goto done;
		}
	}

	map = &vm->vmspace->vm_map;
	rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL);

	VCPU_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, "
	    "ftype = %d", rv, vme->u.paging.gpa, ftype);

	if (rv != KERN_SUCCESS)
		return (EFAULT);
done:
	/* restart execution at the faulting instruction */
	vme->inst_length = 0;

	return (0);
}

static int
vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
{
	struct vie *vie;
	struct vcpu *vcpu;
	struct vm_exit *vme;
	uint64_t gla, gpa;
	struct vm_guest_paging *paging;
	mem_region_read_t mread;
	mem_region_write_t mwrite;
	enum vm_cpu_mode cpu_mode;
	int cs_d, error, length;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	gla = vme->u.inst_emul.gla;
	gpa = vme->u.inst_emul.gpa;
	cs_d = vme->u.inst_emul.cs_d;
	vie = &vme->u.inst_emul.vie;
	paging = &vme->u.inst_emul.paging;
	cpu_mode = paging->cpu_mode;

	VCPU_CTR1(vm, vcpuid, "inst_emul fault accessing gpa %#lx", gpa);

	/* Fetch, decode and emulate the faulting instruction */
	if (vie->num_valid == 0) {
		/*
		 * If the instruction length is not known then assume a
		 * maximum size instruction.
		 */
		length = vme->inst_length ? vme->inst_length : VIE_INST_SIZE;
		error = vmm_fetch_instruction(vm, vcpuid, paging, vme->rip,
		    length, vie);
	} else {
		/*
		 * The instruction bytes have already been copied into 'vie'
		 */
		error = 0;
	}
	if (error == 1)
		return (0);		/* Resume guest to handle page fault */
	else if (error == -1)
		return (EFAULT);
	else if (error != 0)
		panic("%s: vmm_fetch_instruction error %d", __func__, error);

	if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, cs_d, vie) != 0)
		return (EFAULT);

	/*
	 * If the instruction length was not specified then update it now.
	 */
	if (vme->inst_length == 0)
		vme->inst_length = vie->num_processed;

	/* return to userland unless this is an in-kernel emulated device */
	if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
		mread = lapic_mmio_read;
		mwrite = lapic_mmio_write;
	} else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
		mread = vioapic_mmio_read;
		mwrite = vioapic_mmio_write;
	} else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
		mread = vhpet_mmio_read;
		mwrite = vhpet_mmio_write;
	} else {
		*retu = true;
		return (0);
	}

	error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, paging,
	    mread, mwrite, retu);

	return (error);
}

static int
vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu)
{
	int i, done;
	struct vcpu *vcpu;

	done = 0;
	vcpu = &vm->vcpu[vcpuid];

	CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus);

	/*
	 * Wait until all 'active_cpus' have suspended themselves.
	 *
	 * Since a VM may be suspended at any time including when one or
	 * more vcpus are doing a rendezvous we need to call the rendezvous
	 * handler while we are waiting to prevent a deadlock.
	 */
	vcpu_lock(vcpu);
	while (1) {
		if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
			VCPU_CTR0(vm, vcpuid, "All vcpus suspended");
			break;
		}

		if (vm->rendezvous_func == NULL) {
			VCPU_CTR0(vm, vcpuid, "Sleeping during suspend");
			vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
			msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
			vcpu_require_state_locked(vcpu, VCPU_FROZEN);
		} else {
			VCPU_CTR0(vm, vcpuid, "Rendezvous during suspend");
			vcpu_unlock(vcpu);
			vm_handle_rendezvous(vm, vcpuid);
			vcpu_lock(vcpu);
		}
	}
	vcpu_unlock(vcpu);

	/*
	 * Wakeup the other sleeping vcpus and return to userspace.
	 */
	for (i = 0; i < VM_MAXCPU; i++) {
		if (CPU_ISSET(i, &vm->suspended_cpus)) {
			vcpu_notify_event(vm, i, false);
		}
	}

	*retu = true;
	return (0);
}

int
vm_suspend(struct vm *vm, enum vm_suspend_how how)
{
	int i;

	if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST)
		return (EINVAL);

	if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) {
		VM_CTR2(vm, "virtual machine already suspended %d/%d",
		    vm->suspend, how);
		return (EALREADY);
	}

	VM_CTR1(vm, "virtual machine successfully suspended %d", how);

	/*
	 * Notify all active vcpus that they are now suspended.
	 */
	for (i = 0; i < VM_MAXCPU; i++) {
		if (CPU_ISSET(i, &vm->active_cpus))
			vcpu_notify_event(vm, i, false);
	}

	return (0);
}

void
vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip)
{
	struct vm_exit *vmexit;

	KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST,
	    ("vm_exit_suspended: invalid suspend type %d", vm->suspend));

	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->rip = rip;
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_SUSPENDED;
	vmexit->u.suspended.how = vm->suspend;
}

void
vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip)
{
	struct vm_exit *vmexit;

	KASSERT(vm->rendezvous_func != NULL, ("rendezvous not in progress"));

	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->rip = rip;
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
	vmm_stat_incr(vm, vcpuid, VMEXIT_RENDEZVOUS, 1);
}

void
vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip)
{
	struct vm_exit *vmexit;

	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->rip = rip;
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_BOGUS;
	vmm_stat_incr(vm, vcpuid, VMEXIT_ASTPENDING, 1);
}

int
vm_run(struct vm *vm, struct vm_run *vmrun)
{
	int error, vcpuid;
	struct vcpu *vcpu;
	struct pcb *pcb;
	uint64_t tscval, rip;
	struct vm_exit *vme;
	bool retu, intr_disabled;
	pmap_t pmap;
	void *rptr, *sptr;

	vcpuid = vmrun->cpuid;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (!CPU_ISSET(vcpuid, &vm->active_cpus))
		return (EINVAL);

	if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
		return (EINVAL);

	rptr = &vm->rendezvous_func;
	sptr = &vm->suspend;
	pmap = vmspace_pmap(vm->vmspace);
	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;
	rip = vmrun->rip;
restart:
	critical_enter();

	KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
	    ("vm_run: absurd pm_active"));

	tscval = rdtsc();

	pcb = PCPU_GET(curpcb);
	set_pcb_flags(pcb, PCB_FULL_IRET);

	restore_guest_fpustate(vcpu);

	vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
	error = VMRUN(vm->cookie, vcpuid, rip, pmap, rptr, sptr);
	vcpu_require_state(vm, vcpuid, VCPU_FROZEN);

	save_guest_fpustate(vcpu);

	vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);

	critical_exit();

	if (error == 0) {
		retu = false;
		switch (vme->exitcode) {
		case VM_EXITCODE_SUSPENDED:
			error = vm_handle_suspend(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_IOAPIC_EOI:
			vioapic_process_eoi(vm, vcpuid,
			    vme->u.ioapic_eoi.vector);
			break;
		case VM_EXITCODE_RENDEZVOUS:
			vm_handle_rendezvous(vm, vcpuid);
			error = 0;
			break;
		case VM_EXITCODE_HLT:
			intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0);
			error = vm_handle_hlt(vm, vcpuid, intr_disabled, &retu);
			break;
		case VM_EXITCODE_PAGING:
			error = vm_handle_paging(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_INST_EMUL:
			error = vm_handle_inst_emul(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_INOUT:
		case VM_EXITCODE_INOUT_STR:
			error = vm_handle_inout(vm, vcpuid, vme, &retu);
			break;
		case VM_EXITCODE_MONITOR:
		case VM_EXITCODE_MWAIT:
			vm_inject_ud(vm, vcpuid);
			break;
		default:
			retu = true;	/* handled in userland */
			break;
		}
	}

	if (error == 0 && retu == false) {
		rip = vme->rip + vme->inst_length;
		goto restart;
	}

	/* copy the exit information */
	bcopy(vme, &vmrun->vm_exit, sizeof(struct vm_exit));
	return (error);
}
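
/*
 * Sketch of how userland drives vm_run() (the helper name below is
 * hypothetical, not part of this file or of libvmmapi): exit reasons
 * that are not retired in the kernel above are returned through the
 * VM_RUN ioctl and handled in the process, which then re-enters with
 * an updated %rip.
 *
 *	struct vm_run vmrun;
 *
 *	vmrun.cpuid = vcpu;
 *	vmrun.rip = rip;
 *	while (ioctl(vmfd, VM_RUN, &vmrun) == 0) {
 *		handle_exit(&vmrun.vm_exit);	// hypothetical helper
 *		vmrun.rip = vmrun.vm_exit.rip + vmrun.vm_exit.inst_length;
 *	}
 */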
1596 */ 1597 break; 1598 } 1599 1600 switch (vector) { 1601 case IDT_PF: 1602 case IDT_VE: 1603 return (EXC_PAGEFAULT); 1604 case IDT_DE: 1605 case IDT_TS: 1606 case IDT_NP: 1607 case IDT_SS: 1608 case IDT_GP: 1609 return (EXC_CONTRIBUTORY); 1610 default: 1611 return (EXC_BENIGN); 1612 } 1613} 1614 1615static int 1616nested_fault(struct vm *vm, int vcpuid, uint64_t info1, uint64_t info2, 1617 uint64_t *retinfo) 1618{ 1619 enum exc_class exc1, exc2; 1620 int type1, vector1; 1621 1622 KASSERT(info1 & VM_INTINFO_VALID, ("info1 %#lx is not valid", info1)); 1623 KASSERT(info2 & VM_INTINFO_VALID, ("info2 %#lx is not valid", info2)); 1624 1625 /* 1626 * If an exception occurs while attempting to call the double-fault 1627 * handler the processor enters shutdown mode (aka triple fault). 1628 */ 1629 type1 = info1 & VM_INTINFO_TYPE; 1630 vector1 = info1 & 0xff; 1631 if (type1 == VM_INTINFO_HWEXCEPTION && vector1 == IDT_DF) { 1632 VCPU_CTR2(vm, vcpuid, "triple fault: info1(%#lx), info2(%#lx)", 1633 info1, info2); 1634 vm_suspend(vm, VM_SUSPEND_TRIPLEFAULT); 1635 *retinfo = 0; 1636 return (0); 1637 } 1638 1639 /* 1640 * Table 6-5 "Conditions for Generating a Double Fault", Intel SDM, Vol3 1641 */ 1642 exc1 = exception_class(info1); 1643 exc2 = exception_class(info2); 1644 if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) || 1645 (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) { 1646 /* Convert nested fault into a double fault. */ 1647 *retinfo = IDT_DF; 1648 *retinfo |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION; 1649 *retinfo |= VM_INTINFO_DEL_ERRCODE; 1650 } else { 1651 /* Handle exceptions serially */ 1652 *retinfo = info2; 1653 } 1654 return (1); 1655} 1656 1657static uint64_t 1658vcpu_exception_intinfo(struct vcpu *vcpu) 1659{ 1660 uint64_t info = 0; 1661 1662 if (vcpu->exception_pending) { 1663 info = vcpu->exception.vector & 0xff; 1664 info |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION; 1665 if (vcpu->exception.error_code_valid) { 1666 info |= VM_INTINFO_DEL_ERRCODE; 1667 info |= (uint64_t)vcpu->exception.error_code << 32; 1668 } 1669 } 1670 return (info); 1671} 1672 1673int 1674vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo) 1675{ 1676 struct vcpu *vcpu; 1677 uint64_t info1, info2; 1678 int valid; 1679 1680 KASSERT(vcpuid >= 0 && vcpuid < VM_MAXCPU, ("invalid vcpu %d", vcpuid)); 1681 1682 vcpu = &vm->vcpu[vcpuid]; 1683 1684 info1 = vcpu->exitintinfo; 1685 vcpu->exitintinfo = 0; 1686 1687 info2 = 0; 1688 if (vcpu->exception_pending) { 1689 info2 = vcpu_exception_intinfo(vcpu); 1690 vcpu->exception_pending = 0; 1691 VCPU_CTR2(vm, vcpuid, "Exception %d delivered: %#lx", 1692 vcpu->exception.vector, info2); 1693 } 1694 1695 if ((info1 & VM_INTINFO_VALID) && (info2 & VM_INTINFO_VALID)) { 1696 valid = nested_fault(vm, vcpuid, info1, info2, retinfo); 1697 } else if (info1 & VM_INTINFO_VALID) { 1698 *retinfo = info1; 1699 valid = 1; 1700 } else if (info2 & VM_INTINFO_VALID) { 1701 *retinfo = info2; 1702 valid = 1; 1703 } else { 1704 valid = 0; 1705 } 1706 1707 if (valid) { 1708 VCPU_CTR4(vm, vcpuid, "%s: info1(%#lx), info2(%#lx), " 1709 "retinfo(%#lx)", __func__, info1, info2, *retinfo); 1710 } 1711 1712 return (valid); 1713} 1714 1715int 1716vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2) 1717{ 1718 struct vcpu *vcpu; 1719 1720 if (vcpuid < 0 || vcpuid >= VM_MAXCPU) 1721 return (EINVAL); 1722 1723 vcpu = &vm->vcpu[vcpuid]; 1724 *info1 = vcpu->exitintinfo; 1725 *info2 = vcpu_exception_intinfo(vcpu); 1726 return (0); 1727} 1728 1729int 

static uint64_t
vcpu_exception_intinfo(struct vcpu *vcpu)
{
	uint64_t info = 0;

	if (vcpu->exception_pending) {
		info = vcpu->exception.vector & 0xff;
		info |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
		if (vcpu->exception.error_code_valid) {
			info |= VM_INTINFO_DEL_ERRCODE;
			info |= (uint64_t)vcpu->exception.error_code << 32;
		}
	}
	return (info);
}

int
vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo)
{
	struct vcpu *vcpu;
	uint64_t info1, info2;
	int valid;

	KASSERT(vcpuid >= 0 && vcpuid < VM_MAXCPU, ("invalid vcpu %d", vcpuid));

	vcpu = &vm->vcpu[vcpuid];

	info1 = vcpu->exitintinfo;
	vcpu->exitintinfo = 0;

	info2 = 0;
	if (vcpu->exception_pending) {
		info2 = vcpu_exception_intinfo(vcpu);
		vcpu->exception_pending = 0;
		VCPU_CTR2(vm, vcpuid, "Exception %d delivered: %#lx",
		    vcpu->exception.vector, info2);
	}

	if ((info1 & VM_INTINFO_VALID) && (info2 & VM_INTINFO_VALID)) {
		valid = nested_fault(vm, vcpuid, info1, info2, retinfo);
	} else if (info1 & VM_INTINFO_VALID) {
		*retinfo = info1;
		valid = 1;
	} else if (info2 & VM_INTINFO_VALID) {
		*retinfo = info2;
		valid = 1;
	} else {
		valid = 0;
	}

	if (valid) {
		VCPU_CTR4(vm, vcpuid, "%s: info1(%#lx), info2(%#lx), "
		    "retinfo(%#lx)", __func__, info1, info2, *retinfo);
	}

	return (valid);
}

int
vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];
	*info1 = vcpu->exitintinfo;
	*info2 = vcpu_exception_intinfo(vcpu);
	return (0);
}

int
vm_inject_exception(struct vm *vm, int vcpuid, struct vm_exception *exception)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (exception->vector < 0 || exception->vector >= 32)
		return (EINVAL);

	/*
	 * A double fault exception should never be injected directly into
	 * the guest. It is a derived exception that results from specific
	 * combinations of nested faults.
	 */
	if (exception->vector == IDT_DF)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	if (vcpu->exception_pending) {
		VCPU_CTR2(vm, vcpuid, "Unable to inject exception %d due to "
		    "pending exception %d", exception->vector,
		    vcpu->exception.vector);
		return (EBUSY);
	}

	vcpu->exception_pending = 1;
	vcpu->exception = *exception;
	VCPU_CTR1(vm, vcpuid, "Exception %d pending", exception->vector);
	return (0);
}

void
vm_inject_fault(void *vmarg, int vcpuid, int vector, int errcode_valid,
    int errcode)
{
	struct vm_exception exception;
	struct vm_exit *vmexit;
	struct vm *vm;
	int error;

	vm = vmarg;

	exception.vector = vector;
	exception.error_code = errcode;
	exception.error_code_valid = errcode_valid;
	error = vm_inject_exception(vm, vcpuid, &exception);
	KASSERT(error == 0, ("vm_inject_exception error %d", error));

	/*
	 * A fault-like exception allows the instruction to be restarted
	 * after the exception handler returns.
	 *
	 * By setting the inst_length to 0 we ensure that the instruction
	 * pointer remains at the faulting instruction.
	 */
	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->inst_length = 0;
}

void
vm_inject_pf(void *vmarg, int vcpuid, int error_code, uint64_t cr2)
{
	struct vm *vm;
	int error;

	vm = vmarg;
	VCPU_CTR2(vm, vcpuid, "Injecting page fault: error_code %#x, cr2 %#lx",
	    error_code, cr2);

	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2);
	KASSERT(error == 0, ("vm_set_register(cr2) error %d", error));

	vm_inject_fault(vm, vcpuid, IDT_PF, 1, error_code);
}

static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");

int
vm_inject_nmi(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	vcpu->nmi_pending = 1;
	vcpu_notify_event(vm, vcpuid, false);
	return (0);
}

int
vm_nmi_pending(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	return (vcpu->nmi_pending);
}

void
vm_nmi_clear(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	if (vcpu->nmi_pending == 0)
		panic("vm_nmi_clear: inconsistent nmi_pending state");

	vcpu->nmi_pending = 0;
	vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1);
}

static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu");

int
vm_inject_extint(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	vcpu->extint_pending = 1;
	vcpu_notify_event(vm, vcpuid, false);
	return (0);
}

int
vm_extint_pending(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_extint_pending: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	return (vcpu->extint_pending);
}

void
vm_extint_clear(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_extint_pending: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	if (vcpu->extint_pending == 0)
		panic("vm_extint_clear: inconsistent extint_pending state");

	vcpu->extint_pending = 0;
	vmm_stat_incr(vm, vcpuid, VCPU_EXTINT_COUNT, 1);
}

int
vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMGETCAP(vm->cookie, vcpu, type, retval));
}

int
vm_set_capability(struct vm *vm, int vcpu, int type, int val)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMSETCAP(vm->cookie, vcpu, type, val));
}

struct vlapic *
vm_lapic(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].vlapic);
}

struct vioapic *
vm_ioapic(struct vm *vm)
{

	return (vm->vioapic);
}

struct vhpet *
vm_hpet(struct vm *vm)
{

	return (vm->vhpet);
}

boolean_t
vmm_is_pptdev(int bus, int slot, int func)
{
	int found, i, n;
	int b, s, f;
	char *val, *cp, *cp2;

	/*
	 * XXX
	 * The length of an environment variable is limited to 128 bytes which
	 * puts an upper limit on the number of passthru devices that may be
	 * specified using a single environment variable.
	 *
	 * Work around this by scanning multiple environment variable
	 * names instead of a single one - yuck!
	 */
	const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL };

	/* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */
	found = 0;
	for (i = 0; names[i] != NULL && !found; i++) {
		cp = val = getenv(names[i]);
		while (cp != NULL && *cp != '\0') {
			if ((cp2 = strchr(cp, ' ')) != NULL)
				*cp2 = '\0';

			n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
			if (n == 3 && bus == b && slot == s && func == f) {
				found = 1;
				break;
			}

			if (cp2 != NULL)
				*cp2++ = ' ';

			cp = cp2;
		}
		freeenv(val);
	}
	return (found);
}

void *
vm_iommu_domain(struct vm *vm)
{

	return (vm->iommu);
}

int
vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate,
    bool from_idle)
{
	int error;
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_set_run_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	error = vcpu_set_state_locked(vcpu, newstate, from_idle);
	vcpu_unlock(vcpu);

	return (error);
}

enum vcpu_state
vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu)
{
	struct vcpu *vcpu;
	enum vcpu_state state;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_get_run_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	state = vcpu->state;
	if (hostcpu != NULL)
		*hostcpu = vcpu->hostcpu;
	vcpu_unlock(vcpu);

	return (state);
}

int
vm_activate_cpu(struct vm *vm, int vcpuid)
{

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (CPU_ISSET(vcpuid, &vm->active_cpus))
		return (EBUSY);

	VCPU_CTR0(vm, vcpuid, "activated");
	CPU_SET_ATOMIC(vcpuid, &vm->active_cpus);
	return (0);
}

cpuset_t
vm_active_cpus(struct vm *vm)
{

	return (vm->active_cpus);
}

cpuset_t
vm_suspended_cpus(struct vm *vm)
{

	return (vm->suspended_cpus);
}

void *
vcpu_stats(struct vm *vm, int vcpuid)
{

	return (vm->vcpu[vcpuid].stats);
}

int
vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	*state = vm->vcpu[vcpuid].x2apic_state;

	return (0);
}

int
vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (state >= X2APIC_STATE_LAST)
		return (EINVAL);

	vm->vcpu[vcpuid].x2apic_state = state;

	vlapic_set_x2apic_state(vm, vcpuid, state);

	return (0);
}

/*
 * This function is called to ensure that a vcpu "sees" a pending event
 * as soon as possible:
 * - If the vcpu thread is sleeping then it is woken up.
 * - If the vcpu is running on a different host_cpu then an IPI will be directed
 *   to the host_cpu to cause the vcpu to trap into the hypervisor.
 */
void
vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr)
{
	int hostcpu;
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	hostcpu = vcpu->hostcpu;
	if (vcpu->state == VCPU_RUNNING) {
		KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
		if (hostcpu != curcpu) {
			if (lapic_intr) {
				vlapic_post_intr(vcpu->vlapic, hostcpu,
				    vmm_ipinum);
			} else {
				ipi_cpu(hostcpu, vmm_ipinum);
			}
		} else {
			/*
			 * If the 'vcpu' is running on 'curcpu' then it must
			 * be sending a notification to itself (e.g. SELF_IPI).
			 * The pending event will be picked up when the vcpu
			 * transitions back to guest context.
			 */
		}
	} else {
		KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
		    "with hostcpu %d", vcpu->state, hostcpu));
		if (vcpu->state == VCPU_SLEEPING)
			wakeup_one(vcpu);
	}
	vcpu_unlock(vcpu);
}

struct vmspace *
vm_get_vmspace(struct vm *vm)
{

	return (vm->vmspace);
}

int
vm_apicid2vcpuid(struct vm *vm, int apicid)
{
	/*
	 * XXX apic id is assumed to be numerically identical to vcpu id
	 */
	return (apicid);
}

void
vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest,
    vm_rendezvous_func_t func, void *arg)
{
	int i;

	/*
	 * Enforce that this function is called without any locks
	 */
	WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous");
	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
	    ("vm_smp_rendezvous: invalid vcpuid %d", vcpuid));

restart:
	mtx_lock(&vm->rendezvous_mtx);
	if (vm->rendezvous_func != NULL) {
		/*
		 * If a rendezvous is already in progress then we need to
		 * call the rendezvous handler in case this 'vcpuid' is one
		 * of the targets of the rendezvous.
		 */
		RENDEZVOUS_CTR0(vm, vcpuid, "Rendezvous already in progress");
		mtx_unlock(&vm->rendezvous_mtx);
		vm_handle_rendezvous(vm, vcpuid);
		goto restart;
	}
	KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous "
	    "rendezvous is still in progress"));

	RENDEZVOUS_CTR0(vm, vcpuid, "Initiating rendezvous");
	vm->rendezvous_req_cpus = dest;
	CPU_ZERO(&vm->rendezvous_done_cpus);
	vm->rendezvous_arg = arg;
	vm_set_rendezvous_func(vm, func);
	mtx_unlock(&vm->rendezvous_mtx);

	/*
	 * Wake up any sleeping vcpus and trigger a VM-exit in any running
	 * vcpus so they handle the rendezvous as soon as possible.
	 */
	for (i = 0; i < VM_MAXCPU; i++) {
		if (CPU_ISSET(i, &dest))
			vcpu_notify_event(vm, i, false);
	}

	vm_handle_rendezvous(vm, vcpuid);
}
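
/*
 * Illustrative caller of vm_smp_rendezvous(); the callback name is made
 * up. 'func' is invoked once on each vcpu in 'dest' and the call returns
 * only after all of them have checked in. 'vcpuid' is the calling vcpu,
 * or -1 if the caller is not a vcpu thread.
 *
 *	static void
 *	example_rendezvous_cb(struct vm *vm, int vcpuid, void *arg)
 *	{
 *		// runs in the context of each target vcpu
 *	}
 *
 *	vm_smp_rendezvous(vm, vcpuid, vm_active_cpus(vm),
 *	    example_rendezvous_cb, NULL);
 */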

struct vatpic *
vm_atpic(struct vm *vm)
{
	return (vm->vatpic);
}

struct vatpit *
vm_atpit(struct vm *vm)
{
	return (vm->vatpit);
}

enum vm_reg_name
vm_segment_name(int seg)
{
	static enum vm_reg_name seg_names[] = {
		VM_REG_GUEST_ES,
		VM_REG_GUEST_CS,
		VM_REG_GUEST_SS,
		VM_REG_GUEST_DS,
		VM_REG_GUEST_FS,
		VM_REG_GUEST_GS
	};

	KASSERT(seg >= 0 && seg < nitems(seg_names),
	    ("%s: invalid segment encoding %d", __func__, seg));
	return (seg_names[seg]);
}

void
vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    int num_copyinfo)
{
	int idx;

	for (idx = 0; idx < num_copyinfo; idx++) {
		if (copyinfo[idx].cookie != NULL)
			vm_gpa_release(copyinfo[idx].cookie);
	}
	bzero(copyinfo, num_copyinfo * sizeof(struct vm_copyinfo));
}

int
vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
    int num_copyinfo)
{
	int error, idx, nused;
	size_t n, off, remaining;
	void *hva, *cookie;
	uint64_t gpa;

	bzero(copyinfo, sizeof(struct vm_copyinfo) * num_copyinfo);

	nused = 0;
	remaining = len;
	while (remaining > 0) {
		KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo"));
		error = vmm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa);
		if (error)
			return (error);
		off = gpa & PAGE_MASK;
		n = min(remaining, PAGE_SIZE - off);
		copyinfo[nused].gpa = gpa;
		copyinfo[nused].len = n;
		remaining -= n;
		gla += n;
		nused++;
	}

	for (idx = 0; idx < nused; idx++) {
		hva = vm_gpa_hold(vm, copyinfo[idx].gpa, copyinfo[idx].len,
		    prot, &cookie);
		if (hva == NULL)
			break;
		copyinfo[idx].hva = hva;
		copyinfo[idx].cookie = cookie;
	}

	if (idx != nused) {
		vm_copy_teardown(vm, vcpuid, copyinfo, num_copyinfo);
		return (-1);
	} else {
		return (0);
	}
}

void
vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo, void *kaddr,
    size_t len)
{
	char *dst;
	int idx;

	dst = kaddr;
	idx = 0;
	while (len > 0) {
		bcopy(copyinfo[idx].hva, dst, copyinfo[idx].len);
		len -= copyinfo[idx].len;
		dst += copyinfo[idx].len;
		idx++;
	}
}

void
vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
    struct vm_copyinfo *copyinfo, size_t len)
{
	const char *src;
	int idx;

	src = kaddr;
	idx = 0;
	while (len > 0) {
		bcopy(src, copyinfo[idx].hva, copyinfo[idx].len);
		len -= copyinfo[idx].len;
		src += copyinfo[idx].len;
		idx++;
	}
}
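
/*
 * Sketch of the calling sequence intended for the copy helpers above
 * (the array size is illustrative): a guest-linear region is pinned
 * page by page, copied, and then released.
 *
 *	struct vm_copyinfo copyinfo[2];	// one entry per page touched
 *
 *	if (vm_copy_setup(vm, vcpuid, paging, gla, len, VM_PROT_READ,
 *	    copyinfo, nitems(copyinfo)) == 0) {
 *		vm_copyin(vm, vcpuid, copyinfo, buf, len);
 *		vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
 *	}
 */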

/*
 * Return the amount of in-use and wired memory for the VM. Since
 * these are global stats, only return the values for vCPU 0.
 */
VMM_STAT_DECLARE(VMM_MEM_RESIDENT);
VMM_STAT_DECLARE(VMM_MEM_WIRED);

static void
vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
{

	if (vcpu == 0) {
		vmm_stat_set(vm, vcpu, VMM_MEM_RESIDENT,
		    PAGE_SIZE * vmspace_resident_count(vm->vmspace));
	}
}

static void
vm_get_wiredcnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
{

	if (vcpu == 0) {
		vmm_stat_set(vm, vcpu, VMM_MEM_WIRED,
		    PAGE_SIZE * pmap_wired_count(vmspace_pmap(vm->vmspace)));
	}
}

VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt);
VMM_STAT_FUNC(VMM_MEM_WIRED, "Wired memory", vm_get_wiredcnt);