vmm.c revision 284899
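Core bhyve virtual machine monitor: VM/vCPU lifecycle, guest memory segments, passthrough/IOMMU wiring, event injection, and the vm_run() exit-handling loop.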
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/amd64/vmm/vmm.c 284899 2015-06-28 01:21:55Z neel $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/vmm.c 284899 2015-06-28 01:21:55Z neel $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>

#include <machine/cpu.h>
#include <machine/vm.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <x86/psl.h>
#include <x86/apicreg.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>

#include "vmm_ioport.h"
#include "vmm_ktr.h"
#include "vmm_host.h"
#include "vmm_mem.h"
#include "vmm_util.h"
#include "vatpic.h"
#include "vatpit.h"
#include "vhpet.h"
#include "vioapic.h"
#include "vlapic.h"
#include "vpmtmr.h"
#include "vrtc.h"
#include "vmm_ipi.h"
#include "vmm_stat.h"
#include "vmm_lapic.h"

#include "io/ppt.h"
#include "io/iommu.h"

struct vlapic;

/*
 * Initialization:
 * (a) allocated when vcpu is created
 * (i) initialized when vcpu is created and when it is reinitialized
 * (o) initialized the first time the vcpu is created
 * (x) initialized before use
 */
struct vcpu {
	struct mtx	mtx;		/* (o) protects 'state' and 'hostcpu' */
	enum vcpu_state	state;		/* (o) vcpu state */
	int		hostcpu;	/* (o) vcpu's host cpu */
	struct vlapic	*vlapic;	/* (i) APIC device model */
	enum x2apic_state x2apic_state;	/* (i) APIC mode */
	uint64_t	exitintinfo;	/* (i) events pending at VM exit */
	int		nmi_pending;	/* (i) NMI pending */
	int		extint_pending;	/* (i) INTR pending */
	int		exception_pending; /* (i) exception pending */
	int		exc_vector;	/* (x) exception collateral */
	int		exc_errcode_valid;
	uint32_t	exc_errcode;
	struct savefpu	*guestfpu;	/* (a,i) guest fpu state */
	uint64_t	guest_xcr0;	/* (i) guest %xcr0 register */
	void		*stats;		/* (a,i) statistics */
	struct vm_exit	exitinfo;	/* (x) exit reason and collateral */
	uint64_t	nextrip;	/* (x) next instruction to execute */
};

#define	vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx))
#define	vcpu_lock_init(v)	mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define	vcpu_lock(v)		mtx_lock_spin(&((v)->mtx))
#define	vcpu_unlock(v)		mtx_unlock_spin(&((v)->mtx))
#define	vcpu_assert_locked(v)	mtx_assert(&((v)->mtx), MA_OWNED)

struct mem_seg {
	vm_paddr_t	gpa;
	size_t		len;
	boolean_t	wired;
	vm_object_t	object;
};
#define	VM_MAX_MEMORY_SEGMENTS	2

/*
 * Initialization:
 * (o) initialized the first time the VM is created
 * (i) initialized when VM is created and when it is reinitialized
 * (x) initialized before use
 */
struct vm {
	void		*cookie;		/* (i) cpu-specific data */
	void		*iommu;			/* (x) iommu-specific data */
	struct vhpet	*vhpet;			/* (i) virtual HPET */
	struct vioapic	*vioapic;		/* (i) virtual ioapic */
	struct vatpic	*vatpic;		/* (i) virtual atpic */
	struct vatpit	*vatpit;		/* (i) virtual atpit */
	struct vpmtmr	*vpmtmr;		/* (i) virtual ACPI PM timer */
	struct vrtc	*vrtc;			/* (o) virtual RTC */
	volatile cpuset_t active_cpus;		/* (i) active vcpus */
	int		suspend;		/* (i) stop VM execution */
	volatile cpuset_t suspended_cpus;	/* (i) suspended vcpus */
	volatile cpuset_t halted_cpus;		/* (x) cpus in a hard halt */
	cpuset_t	rendezvous_req_cpus;	/* (x) rendezvous requested */
	cpuset_t	rendezvous_done_cpus;	/* (x) rendezvous finished */
	void		*rendezvous_arg;	/* (x) rendezvous func/arg */
	vm_rendezvous_func_t rendezvous_func;
	struct mtx	rendezvous_mtx;		/* (o) rendezvous lock */
	int		num_mem_segs;		/* (o) guest memory segments */
	struct mem_seg	mem_segs[VM_MAX_MEMORY_SEGMENTS];
	struct vmspace	*vmspace;		/* (o) guest's address space */
	char		name[VM_MAX_NAMELEN];	/* (o) virtual machine name */
	struct vcpu	vcpu[VM_MAXCPU];	/* (i) guest vcpus */
};

static int vmm_initialized;

static struct vmm_ops *ops;
#define	VMM_INIT(num)	(ops != NULL ? (*ops->init)(num) : 0)
#define	VMM_CLEANUP()	(ops != NULL ? (*ops->cleanup)() : 0)
#define	VMM_RESUME()	(ops != NULL ? (*ops->resume)() : 0)

#define	VMINIT(vm, pmap) (ops != NULL ? (*ops->vminit)(vm, pmap): NULL)
#define	VMRUN(vmi, vcpu, rip, pmap, rptr, sptr) \
	(ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip, pmap, rptr, sptr) : ENXIO)
#define	VMCLEANUP(vmi)	(ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
#define	VMSPACE_ALLOC(min, max) \
	(ops != NULL ? (*ops->vmspace_alloc)(min, max) : NULL)
#define	VMSPACE_FREE(vmspace) \
	(ops != NULL ? (*ops->vmspace_free)(vmspace) : ENXIO)
#define	VMGETREG(vmi, vcpu, num, retval)		\
	(ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETREG(vmi, vcpu, num, val)		\
	(ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
#define	VMGETDESC(vmi, vcpu, num, desc)		\
	(ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMSETDESC(vmi, vcpu, num, desc)		\
	(ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMGETCAP(vmi, vcpu, num, retval)	\
	(ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETCAP(vmi, vcpu, num, val)		\
	(ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)
#define	VLAPIC_INIT(vmi, vcpu)			\
	(ops != NULL ? (*ops->vlapic_init)(vmi, vcpu) : NULL)
#define	VLAPIC_CLEANUP(vmi, vlapic)		\
	(ops != NULL ? (*ops->vlapic_cleanup)(vmi, vlapic) : NULL)

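/*
 * While guest FPU state is loaded in the hardware, fpu_start_emulating()
 * sets CR0.TS so that any host FPU access traps with #NM instead of
 * silently clobbering that state; fpu_stop_emulating() clears TS again
 * via clts().  See restore_guest_fpustate()/save_guest_fpustate() below.
 */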
#define	fpu_start_emulating()	load_cr0(rcr0() | CR0_TS)
#define	fpu_stop_emulating()	clts()

static MALLOC_DEFINE(M_VM, "vm", "vm");

/* statistics */
static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");

SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);

/*
 * Halt the guest if all vcpus are executing a HLT instruction with
 * interrupts disabled.
 */
static int halt_detection_enabled = 1;
TUNABLE_INT("hw.vmm.halt_detection", &halt_detection_enabled);
SYSCTL_INT(_hw_vmm, OID_AUTO, halt_detection, CTLFLAG_RDTUN,
    &halt_detection_enabled, 0,
    "Halt VM if all vcpus execute HLT with interrupts disabled");

static int vmm_ipinum;
SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
    "IPI vector used for vcpu notifications");

static int trace_guest_exceptions;
SYSCTL_INT(_hw_vmm, OID_AUTO, trace_guest_exceptions, CTLFLAG_RDTUN,
    &trace_guest_exceptions, 0,
    "Trap into hypervisor on all guest exceptions and reflect them back");

static int vmm_force_iommu = 0;
TUNABLE_INT("hw.vmm.force_iommu", &vmm_force_iommu);
SYSCTL_INT(_hw_vmm, OID_AUTO, force_iommu, CTLFLAG_RDTUN, &vmm_force_iommu, 0,
    "Force use of I/O MMU even if no passthrough devices were found.");

static void
vcpu_cleanup(struct vm *vm, int i, bool destroy)
{
	struct vcpu *vcpu = &vm->vcpu[i];

	VLAPIC_CLEANUP(vm->cookie, vcpu->vlapic);
	if (destroy) {
		vmm_stat_free(vcpu->stats);
		fpu_save_area_free(vcpu->guestfpu);
	}
}

static void
vcpu_init(struct vm *vm, int vcpu_id, bool create)
{
	struct vcpu *vcpu;

	KASSERT(vcpu_id >= 0 && vcpu_id < VM_MAXCPU,
	    ("vcpu_init: invalid vcpu %d", vcpu_id));

	vcpu = &vm->vcpu[vcpu_id];

	if (create) {
		KASSERT(!vcpu_lock_initialized(vcpu), ("vcpu %d already "
		    "initialized", vcpu_id));
		vcpu_lock_init(vcpu);
		vcpu->state = VCPU_IDLE;
		vcpu->hostcpu = NOCPU;
		vcpu->guestfpu = fpu_save_area_alloc();
		vcpu->stats = vmm_stat_alloc();
	}

	vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id);
	vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
	vcpu->exitintinfo = 0;
	vcpu->nmi_pending = 0;
	vcpu->extint_pending = 0;
	vcpu->exception_pending = 0;
	vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
	fpu_save_area_reset(vcpu->guestfpu);
	vmm_stat_init(vcpu->stats);
}

int
vcpu_trace_exceptions(struct vm *vm, int vcpuid)
{

	return (trace_guest_exceptions);
}

struct vm_exit *
vm_exitinfo(struct vm *vm, int cpuid)
{
	struct vcpu *vcpu;

	if (cpuid < 0 || cpuid >= VM_MAXCPU)
		panic("vm_exitinfo: invalid cpuid %d", cpuid);

	vcpu = &vm->vcpu[cpuid];

	return (&vcpu->exitinfo);
}

static void
vmm_resume(void)
{
	VMM_RESUME();
}

static int
vmm_init(void)
{
	int error;

	vmm_host_state_init();

	vmm_ipinum = vmm_ipi_alloc();
	if (vmm_ipinum == 0)
		vmm_ipinum = IPI_AST;

	error = vmm_mem_init();
	if (error)
		return (error);

	if (vmm_is_intel())
		ops = &vmm_ops_intel;
	else if (vmm_is_amd())
		ops = &vmm_ops_amd;
	else
		return (ENXIO);

	vmm_resume_p = vmm_resume;

	return (VMM_INIT(vmm_ipinum));
}

static int
vmm_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		vmmdev_init();
		if (vmm_force_iommu || ppt_avail_devices() > 0)
			iommu_init();
		error = vmm_init();
		if (error == 0)
			vmm_initialized = 1;
		break;
	case MOD_UNLOAD:
		error = vmmdev_cleanup();
		if (error == 0) {
			vmm_resume_p = NULL;
			iommu_cleanup();
			if (vmm_ipinum != IPI_AST)
				vmm_ipi_free(vmm_ipinum);
			error = VMM_CLEANUP();
			/*
			 * Something bad happened - prevent new
			 * VMs from being created
			 */
			if (error)
				vmm_initialized = 0;
		}
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static moduledata_t vmm_kmod = {
	"vmm",
	vmm_handler,
	NULL
};

/*
 * vmm initialization has the following dependencies:
 *
 * - iommu initialization must happen after the pci passthru driver has had
 *   a chance to attach to any passthru devices (after SI_SUB_CONFIGURE).
 *
 * - VT-x initialization requires smp_rendezvous() and therefore must happen
 *   after SMP is fully functional (after SI_SUB_SMP).
 */
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);

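/*
 * vm_init() runs both when a VM is first created (create == true) and
 * again from vm_reinit(); state tagged (o) in struct vm above, such as
 * the virtual RTC, is only set up in the create case, while (i) state
 * is reinitialized on every pass.
 */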
static void
vm_init(struct vm *vm, bool create)
{
	int i;

	vm->cookie = VMINIT(vm, vmspace_pmap(vm->vmspace));
	vm->iommu = NULL;
	vm->vioapic = vioapic_init(vm);
	vm->vhpet = vhpet_init(vm);
	vm->vatpic = vatpic_init(vm);
	vm->vatpit = vatpit_init(vm);
	vm->vpmtmr = vpmtmr_init(vm);
	if (create)
		vm->vrtc = vrtc_init(vm);

	CPU_ZERO(&vm->active_cpus);

	vm->suspend = 0;
	CPU_ZERO(&vm->suspended_cpus);

	for (i = 0; i < VM_MAXCPU; i++)
		vcpu_init(vm, i, create);
}

int
vm_create(const char *name, struct vm **retvm)
{
	struct vm *vm;
	struct vmspace *vmspace;

	/*
	 * If vmm.ko could not be successfully initialized then don't attempt
	 * to create the virtual machine.
	 */
	if (!vmm_initialized)
		return (ENXIO);

	if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
		return (EINVAL);

	vmspace = VMSPACE_ALLOC(0, VM_MAXUSER_ADDRESS);
	if (vmspace == NULL)
		return (ENOMEM);

	vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
	strcpy(vm->name, name);
	vm->num_mem_segs = 0;
	vm->vmspace = vmspace;
	mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);

	vm_init(vm, true);

	*retvm = vm;
	return (0);
}

static void
vm_free_mem_seg(struct vm *vm, struct mem_seg *seg)
{

	if (seg->object != NULL)
		vmm_mem_free(vm->vmspace, seg->gpa, seg->len);

	bzero(seg, sizeof(*seg));
}

static void
vm_cleanup(struct vm *vm, bool destroy)
{
	int i;

	ppt_unassign_all(vm);

	if (vm->iommu != NULL)
		iommu_destroy_domain(vm->iommu);

	if (destroy)
		vrtc_cleanup(vm->vrtc);
	else
		vrtc_reset(vm->vrtc);
	vpmtmr_cleanup(vm->vpmtmr);
	vatpit_cleanup(vm->vatpit);
	vhpet_cleanup(vm->vhpet);
	vatpic_cleanup(vm->vatpic);
	vioapic_cleanup(vm->vioapic);

	for (i = 0; i < VM_MAXCPU; i++)
		vcpu_cleanup(vm, i, destroy);

	VMCLEANUP(vm->cookie);

	if (destroy) {
		for (i = 0; i < vm->num_mem_segs; i++)
			vm_free_mem_seg(vm, &vm->mem_segs[i]);

		vm->num_mem_segs = 0;

		VMSPACE_FREE(vm->vmspace);
		vm->vmspace = NULL;
	}
}

void
vm_destroy(struct vm *vm)
{
	vm_cleanup(vm, true);
	free(vm, M_VM);
}

int
vm_reinit(struct vm *vm)
{
	int error;

	/*
	 * A virtual machine can be reset only if all vcpus are suspended.
	 */
	if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
		vm_cleanup(vm, false);
		vm_init(vm, false);
		error = 0;
	} else {
		error = EBUSY;
	}

	return (error);
}

const char *
vm_name(struct vm *vm)
{
	return (vm->name);
}

int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	vm_object_t obj;

	if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
		return (ENOMEM);
	else
		return (0);
}

int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{

	vmm_mmio_free(vm->vmspace, gpa, len);
	return (0);
}

boolean_t
vm_mem_allocated(struct vm *vm, vm_paddr_t gpa)
{
	int i;
	vm_paddr_t gpabase, gpalimit;

	for (i = 0; i < vm->num_mem_segs; i++) {
		gpabase = vm->mem_segs[i].gpa;
		gpalimit = gpabase + vm->mem_segs[i].len;
		if (gpa >= gpabase && gpa < gpalimit)
			return (TRUE);		/* 'gpa' is regular memory */
	}

	if (ppt_is_mmio(vm, gpa))
		return (TRUE);			/* 'gpa' is pci passthru mmio */

	return (FALSE);
}

int
vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	int available, allocated;
	struct mem_seg *seg;
	vm_object_t object;
	vm_paddr_t g;

	if ((gpa & PAGE_MASK) || (len & PAGE_MASK) || len == 0)
		return (EINVAL);

	available = allocated = 0;
	g = gpa;
	while (g < gpa + len) {
		if (vm_mem_allocated(vm, g))
			allocated++;
		else
			available++;

		g += PAGE_SIZE;
	}

	/*
	 * If there are some allocated and some available pages in the address
	 * range then it is an error.
	 */
	if (allocated && available)
		return (EINVAL);

	/*
	 * If the entire address range being requested has already been
	 * allocated then there isn't anything more to do.
	 */
	if (allocated && available == 0)
		return (0);

	if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
		return (E2BIG);

	seg = &vm->mem_segs[vm->num_mem_segs];

	if ((object = vmm_mem_alloc(vm->vmspace, gpa, len)) == NULL)
		return (ENOMEM);

	seg->gpa = gpa;
	seg->len = len;
	seg->object = object;
	seg->wired = FALSE;

	vm->num_mem_segs++;

	return (0);
}

static vm_paddr_t
vm_maxmem(struct vm *vm)
{
	int i;
	vm_paddr_t gpa, maxmem;

	maxmem = 0;
	for (i = 0; i < vm->num_mem_segs; i++) {
		gpa = vm->mem_segs[i].gpa + vm->mem_segs[i].len;
		if (gpa > maxmem)
			maxmem = gpa;
	}
	return (maxmem);
}

static void
vm_gpa_unwire(struct vm *vm)
{
	int i, rv;
	struct mem_seg *seg;

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		if (!seg->wired)
			continue;

		rv = vm_map_unwire(&vm->vmspace->vm_map,
		    seg->gpa, seg->gpa + seg->len,
		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		KASSERT(rv == KERN_SUCCESS, ("vm(%s) memory segment "
		    "%#lx/%ld could not be unwired: %d",
		    vm_name(vm), seg->gpa, seg->len, rv));

		seg->wired = FALSE;
	}
}

static int
vm_gpa_wire(struct vm *vm)
{
	int i, rv;
	struct mem_seg *seg;

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		if (seg->wired)
			continue;

		/* XXX rlimits? */
		rv = vm_map_wire(&vm->vmspace->vm_map,
		    seg->gpa, seg->gpa + seg->len,
		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		if (rv != KERN_SUCCESS)
			break;

		seg->wired = TRUE;
	}

	if (i < vm->num_mem_segs) {
		/*
		 * Undo the wiring before returning an error.
		 */
		vm_gpa_unwire(vm);
		return (EAGAIN);
	}

	return (0);
}

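/*
 * Map or unmap every wired guest page in the VM's IOMMU domain.  When
 * mapping, each page's host physical address is removed from the host
 * domain and a gpa -> hpa translation is installed in the VM's domain,
 * so DMA from a passthru device operates on guest physical addresses;
 * unmapping restores the host domain's identity mapping.
 */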
static void
vm_iommu_modify(struct vm *vm, boolean_t map)
{
	int i, sz;
	vm_paddr_t gpa, hpa;
	struct mem_seg *seg;
	void *vp, *cookie, *host_domain;

	sz = PAGE_SIZE;
	host_domain = iommu_host_domain();

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		KASSERT(seg->wired, ("vm(%s) memory segment %#lx/%ld not wired",
		    vm_name(vm), seg->gpa, seg->len));

		gpa = seg->gpa;
		while (gpa < seg->gpa + seg->len) {
			vp = vm_gpa_hold(vm, gpa, PAGE_SIZE, VM_PROT_WRITE,
			    &cookie);
			KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
			    vm_name(vm), gpa));

			vm_gpa_release(cookie);

			hpa = DMAP_TO_PHYS((uintptr_t)vp);
			if (map) {
				iommu_create_mapping(vm->iommu, gpa, hpa, sz);
				iommu_remove_mapping(host_domain, hpa, sz);
			} else {
				iommu_remove_mapping(vm->iommu, gpa, sz);
				iommu_create_mapping(host_domain, hpa, hpa, sz);
			}

			gpa += PAGE_SIZE;
		}
	}

	/*
	 * Invalidate the cached translations associated with the domain
	 * from which pages were removed.
	 */
	if (map)
		iommu_invalidate_tlb(host_domain);
	else
		iommu_invalidate_tlb(vm->iommu);
}

#define	vm_iommu_unmap(vm)	vm_iommu_modify((vm), FALSE)
#define	vm_iommu_map(vm)	vm_iommu_modify((vm), TRUE)

int
vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func)
{
	int error;

	error = ppt_unassign_device(vm, bus, slot, func);
	if (error)
		return (error);

	if (ppt_assigned_devices(vm) == 0) {
		vm_iommu_unmap(vm);
		vm_gpa_unwire(vm);
	}
	return (0);
}

int
vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
{
	int error;
	vm_paddr_t maxaddr;

	/*
	 * Virtual machines with pci passthru devices get special treatment:
	 * - the guest physical memory is wired
	 * - the iommu is programmed to do the 'gpa' to 'hpa' translation
	 *
	 * We need to do this before the first pci passthru device is attached.
	 */
	if (ppt_assigned_devices(vm) == 0) {
		KASSERT(vm->iommu == NULL,
		    ("vm_assign_pptdev: iommu must be NULL"));
		maxaddr = vm_maxmem(vm);
		vm->iommu = iommu_create_domain(maxaddr);

		error = vm_gpa_wire(vm);
		if (error)
			return (error);

		vm_iommu_map(vm);
	}

	error = ppt_assign_device(vm, bus, slot, func);
	return (error);
}

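/*
 * Wire the single guest page backing 'gpa' and return a pointer to it
 * in the host's direct map; 'len' must not cross a page boundary.  The
 * page remains held until vm_gpa_release(*cookie).  A typical caller:
 *
 *	void *cookie;
 *	void *hva = vm_gpa_hold(vm, gpa, size, VM_PROT_READ, &cookie);
 *	if (hva != NULL) {
 *		... access guest memory through 'hva' ...
 *		vm_gpa_release(cookie);
 *	}
 */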
void *
vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
	    void **cookie)
{
	int count, pageoff;
	vm_page_t m;

	pageoff = gpa & PAGE_MASK;
	if (len > PAGE_SIZE - pageoff)
		panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);

	count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
	    trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);

	if (count == 1) {
		*cookie = m;
		return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
	} else {
		*cookie = NULL;
		return (NULL);
	}
}

void
vm_gpa_release(void *cookie)
{
	vm_page_t m = cookie;

	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
}

int
vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
		  struct vm_memory_segment *seg)
{
	int i;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if (gpabase == vm->mem_segs[i].gpa) {
			seg->gpa = vm->mem_segs[i].gpa;
			seg->len = vm->mem_segs[i].len;
			seg->wired = vm->mem_segs[i].wired;
			return (0);
		}
	}
	return (-1);
}

int
vm_get_memobj(struct vm *vm, vm_paddr_t gpa, size_t len,
	      vm_offset_t *offset, struct vm_object **object)
{
	int i;
	size_t seg_len;
	vm_paddr_t seg_gpa;
	vm_object_t seg_obj;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if ((seg_obj = vm->mem_segs[i].object) == NULL)
			continue;

		seg_gpa = vm->mem_segs[i].gpa;
		seg_len = vm->mem_segs[i].len;

		if (gpa >= seg_gpa && gpa < seg_gpa + seg_len) {
			*offset = gpa - seg_gpa;
			*object = seg_obj;
			vm_object_reference(seg_obj);
			return (0);
		}
	}

	return (EINVAL);
}

int
vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMGETREG(vm->cookie, vcpu, reg, retval));
}

int
vm_set_register(struct vm *vm, int vcpuid, int reg, uint64_t val)
{
	struct vcpu *vcpu;
	int error;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	error = VMSETREG(vm->cookie, vcpuid, reg, val);
	if (error || reg != VM_REG_GUEST_RIP)
		return (error);

	/* Set 'nextrip' to match the value of %rip */
	VCPU_CTR1(vm, vcpuid, "Setting nextrip to %#lx", val);
	vcpu = &vm->vcpu[vcpuid];
	vcpu->nextrip = val;
	return (0);
}

static boolean_t
is_descriptor_table(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_IDTR:
	case VM_REG_GUEST_GDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

static boolean_t
is_segment_register(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_TR:
	case VM_REG_GUEST_LDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

int
vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
		struct seg_desc *desc)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMGETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
		struct seg_desc *desc)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMSETDESC(vm->cookie, vcpu, reg, desc));
}

static void
restore_guest_fpustate(struct vcpu *vcpu)
{

	/* flush host state to the pcb */
	fpuexit(curthread);

	/* restore guest FPU state */
	fpu_stop_emulating();
	fpurestore(vcpu->guestfpu);

	/* restore guest XCR0 if XSAVE is enabled in the host */
	if (rcr4() & CR4_XSAVE)
		load_xcr(0, vcpu->guest_xcr0);

	/*
	 * The FPU is now "dirty" with the guest's state so turn on emulation
	 * to trap any access to the FPU by the host.
	 */
	fpu_start_emulating();
}

static void
save_guest_fpustate(struct vcpu *vcpu)
{

	if ((rcr0() & CR0_TS) == 0)
		panic("fpu emulation not enabled in host!");

	/* save guest XCR0 and restore host XCR0 */
	if (rcr4() & CR4_XSAVE) {
		vcpu->guest_xcr0 = rxcr(0);
		load_xcr(0, vmm_get_host_xcr0());
	}

	/* save guest FPU state */
	fpu_stop_emulating();
	fpusave(vcpu->guestfpu);
	fpu_start_emulating();
}

static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");

static int
vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
    bool from_idle)
{
	int error;

	vcpu_assert_locked(vcpu);

	/*
	 * State transitions from the vmmdev_ioctl() must always begin from
	 * the VCPU_IDLE state. This guarantees that there is only a single
	 * ioctl() operating on a vcpu at any point.
	 */
	if (from_idle) {
		while (vcpu->state != VCPU_IDLE)
			msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
	} else {
		KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
		    "vcpu idle state"));
	}

	if (vcpu->state == VCPU_RUNNING) {
		KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
		    "mismatch for running vcpu", curcpu, vcpu->hostcpu));
	} else {
		KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
		    "vcpu that is not running", vcpu->hostcpu));
	}

	/*
	 * The following state transitions are allowed:
	 * IDLE -> FROZEN -> IDLE
	 * FROZEN -> RUNNING -> FROZEN
	 * FROZEN -> SLEEPING -> FROZEN
	 */
	switch (vcpu->state) {
	case VCPU_IDLE:
	case VCPU_RUNNING:
	case VCPU_SLEEPING:
		error = (newstate != VCPU_FROZEN);
		break;
	case VCPU_FROZEN:
		error = (newstate == VCPU_FROZEN);
		break;
	default:
		error = 1;
		break;
	}

	if (error)
		return (EBUSY);

	vcpu->state = newstate;
	if (newstate == VCPU_RUNNING)
		vcpu->hostcpu = curcpu;
	else
		vcpu->hostcpu = NOCPU;

	if (newstate == VCPU_IDLE)
		wakeup(&vcpu->state);

	return (0);
}

static void
vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
		panic("Error %d setting state to %d\n", error, newstate);
}

static void
vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0)
		panic("Error %d setting state to %d", error, newstate);
}

static void
vm_set_rendezvous_func(struct vm *vm, vm_rendezvous_func_t func)
{

	KASSERT(mtx_owned(&vm->rendezvous_mtx), ("rendezvous_mtx not locked"));

	/*
	 * Update 'rendezvous_func' and execute a write memory barrier to
	 * ensure that it is visible across all host cpus. This is not needed
	 * for correctness but it does ensure that all the vcpus will notice
	 * that the rendezvous is requested immediately.
	 */
	vm->rendezvous_func = func;
	wmb();
}

#define	RENDEZVOUS_CTR0(vm, vcpuid, fmt)				\
	do {								\
		if (vcpuid >= 0)					\
			VCPU_CTR0(vm, vcpuid, fmt);			\
		else							\
			VM_CTR0(vm, fmt);				\
	} while (0)

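/*
 * A vcpu calls this when it notices that 'rendezvous_func' is set (or
 * right after initiating a rendezvous): it runs the function on its own
 * behalf, marks itself done, and sleeps until every requested vcpu has
 * done the same.  'vcpuid' may be -1 when called from a non-vcpu context.
 */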
static void
vm_handle_rendezvous(struct vm *vm, int vcpuid)
{

	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
	    ("vm_handle_rendezvous: invalid vcpuid %d", vcpuid));

	mtx_lock(&vm->rendezvous_mtx);
	while (vm->rendezvous_func != NULL) {
		/* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
		CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus);

		if (vcpuid != -1 &&
		    CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
		    !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
			VCPU_CTR0(vm, vcpuid, "Calling rendezvous func");
			(*vm->rendezvous_func)(vm, vcpuid, vm->rendezvous_arg);
			CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
		}
		if (CPU_CMP(&vm->rendezvous_req_cpus,
		    &vm->rendezvous_done_cpus) == 0) {
			VCPU_CTR0(vm, vcpuid, "Rendezvous completed");
			vm_set_rendezvous_func(vm, NULL);
			wakeup(&vm->rendezvous_func);
			break;
		}
		RENDEZVOUS_CTR0(vm, vcpuid, "Wait for rendezvous completion");
		mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
		    "vmrndv", 0);
	}
	mtx_unlock(&vm->rendezvous_mtx);
}

/*
 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
 */
static int
vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
{
	struct vcpu *vcpu;
	const char *wmesg;
	int t, vcpu_halted, vm_halted;

	KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));

	vcpu = &vm->vcpu[vcpuid];
	vcpu_halted = 0;
	vm_halted = 0;

	vcpu_lock(vcpu);
	while (1) {
		/*
		 * Do a final check for pending NMI or interrupts before
		 * really putting this thread to sleep. Also check for
		 * software events that would cause this vcpu to wakeup.
		 *
		 * These interrupts/events could have happened after the
		 * vcpu returned from VMRUN() and before it acquired the
		 * vcpu lock above.
		 */
		if (vm->rendezvous_func != NULL || vm->suspend)
			break;
		if (vm_nmi_pending(vm, vcpuid))
			break;
		if (!intr_disabled) {
			if (vm_extint_pending(vm, vcpuid) ||
			    vlapic_pending_intr(vcpu->vlapic, NULL)) {
				break;
			}
		}

		/* Don't go to sleep if the vcpu thread needs to yield */
		if (vcpu_should_yield(vm, vcpuid))
			break;

		/*
		 * Some Linux guests implement "halt" by having all vcpus
		 * execute HLT with interrupts disabled. 'halted_cpus' keeps
		 * track of the vcpus that have entered this state. When all
		 * vcpus enter the halted state the virtual machine is halted.
		 */
		if (intr_disabled) {
			wmesg = "vmhalt";
			VCPU_CTR0(vm, vcpuid, "Halted");
			if (!vcpu_halted && halt_detection_enabled) {
				vcpu_halted = 1;
				CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus);
			}
			if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) {
				vm_halted = 1;
				break;
			}
		} else {
			wmesg = "vmidle";
		}

		t = ticks;
		vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
		/*
		 * XXX msleep_spin() cannot be interrupted by signals so
		 * wake up periodically to check pending signals.
		 */
		msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
		vcpu_require_state_locked(vcpu, VCPU_FROZEN);
		vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
	}

	if (vcpu_halted)
		CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus);

	vcpu_unlock(vcpu);

	if (vm_halted)
		vm_suspend(vm, VM_SUSPEND_HALT);

	return (0);
}

static int
vm_handle_paging(struct vm *vm, int vcpuid, bool *retu)
{
	int rv, ftype;
	struct vm_map *map;
	struct vcpu *vcpu;
	struct vm_exit *vme;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
	    __func__, vme->inst_length));

	ftype = vme->u.paging.fault_type;
	KASSERT(ftype == VM_PROT_READ ||
	    ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
	    ("vm_handle_paging: invalid fault_type %d", ftype));

	if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
		rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
		    vme->u.paging.gpa, ftype);
		if (rv == 0) {
			VCPU_CTR2(vm, vcpuid, "%s bit emulation for gpa %#lx",
			    ftype == VM_PROT_READ ? "accessed" : "dirty",
			    vme->u.paging.gpa);
			goto done;
		}
	}

	map = &vm->vmspace->vm_map;
	rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL);

	VCPU_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, "
	    "ftype = %d", rv, vme->u.paging.gpa, ftype);

	if (rv != KERN_SUCCESS)
		return (EFAULT);
done:
	return (0);
}

static int
vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
{
	struct vie *vie;
	struct vcpu *vcpu;
	struct vm_exit *vme;
	uint64_t gla, gpa, cs_base;
	struct vm_guest_paging *paging;
	mem_region_read_t mread;
	mem_region_write_t mwrite;
	enum vm_cpu_mode cpu_mode;
	int cs_d, error, length;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	gla = vme->u.inst_emul.gla;
	gpa = vme->u.inst_emul.gpa;
	cs_base = vme->u.inst_emul.cs_base;
	cs_d = vme->u.inst_emul.cs_d;
	vie = &vme->u.inst_emul.vie;
	paging = &vme->u.inst_emul.paging;
	cpu_mode = paging->cpu_mode;

	VCPU_CTR1(vm, vcpuid, "inst_emul fault accessing gpa %#lx", gpa);

	/* Fetch, decode and emulate the faulting instruction */
	if (vie->num_valid == 0) {
		/*
		 * If the instruction length is not known then assume a
		 * maximum size instruction.
		 */
		length = vme->inst_length ? vme->inst_length : VIE_INST_SIZE;
		error = vmm_fetch_instruction(vm, vcpuid, paging, vme->rip +
		    cs_base, length, vie);
	} else {
		/*
		 * The instruction bytes have already been copied into 'vie'
		 */
		error = 0;
	}
	if (error == 1)
		return (0);		/* Resume guest to handle page fault */
	else if (error == -1)
		return (EFAULT);
	else if (error != 0)
		panic("%s: vmm_fetch_instruction error %d", __func__, error);

	if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, cs_d, vie) != 0)
		return (EFAULT);

	/*
	 * If the instruction length was not specified then update it now
	 * along with 'nextrip'.
	 */
	if (vme->inst_length == 0) {
		vme->inst_length = vie->num_processed;
		vcpu->nextrip += vie->num_processed;
	}

	/* return to userland unless this is an in-kernel emulated device */
	if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
		mread = lapic_mmio_read;
		mwrite = lapic_mmio_write;
	} else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
		mread = vioapic_mmio_read;
		mwrite = vioapic_mmio_write;
	} else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
		mread = vhpet_mmio_read;
		mwrite = vhpet_mmio_write;
	} else {
		*retu = true;
		return (0);
	}

	error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, paging,
	    mread, mwrite, retu);

	return (error);
}

static int
vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu)
{
	int i, done;
	struct vcpu *vcpu;

	done = 0;
	vcpu = &vm->vcpu[vcpuid];

	CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus);

	/*
	 * Wait until all 'active_cpus' have suspended themselves.
	 *
	 * Since a VM may be suspended at any time including when one or
	 * more vcpus are doing a rendezvous we need to call the rendezvous
	 * handler while we are waiting to prevent a deadlock.
	 */
	vcpu_lock(vcpu);
	while (1) {
		if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
			VCPU_CTR0(vm, vcpuid, "All vcpus suspended");
			break;
		}

		if (vm->rendezvous_func == NULL) {
			VCPU_CTR0(vm, vcpuid, "Sleeping during suspend");
			vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
			msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
			vcpu_require_state_locked(vcpu, VCPU_FROZEN);
		} else {
			VCPU_CTR0(vm, vcpuid, "Rendezvous during suspend");
			vcpu_unlock(vcpu);
			vm_handle_rendezvous(vm, vcpuid);
			vcpu_lock(vcpu);
		}
	}
	vcpu_unlock(vcpu);

	/*
	 * Wakeup the other sleeping vcpus and return to userspace.
	 */
	for (i = 0; i < VM_MAXCPU; i++) {
		if (CPU_ISSET(i, &vm->suspended_cpus)) {
			vcpu_notify_event(vm, i, false);
		}
	}

	*retu = true;
	return (0);
}

int
vm_suspend(struct vm *vm, enum vm_suspend_how how)
{
	int i;

	if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST)
		return (EINVAL);

	if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) {
		VM_CTR2(vm, "virtual machine already suspended %d/%d",
		    vm->suspend, how);
		return (EALREADY);
	}

	VM_CTR1(vm, "virtual machine successfully suspended %d", how);

	/*
	 * Notify all active vcpus that they are now suspended.
	 */
	for (i = 0; i < VM_MAXCPU; i++) {
		if (CPU_ISSET(i, &vm->active_cpus))
			vcpu_notify_event(vm, i, false);
	}

	return (0);
}

void
vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip)
{
	struct vm_exit *vmexit;

	KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST,
	    ("vm_exit_suspended: invalid suspend type %d", vm->suspend));

	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->rip = rip;
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_SUSPENDED;
	vmexit->u.suspended.how = vm->suspend;
}

void
vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip)
{
	struct vm_exit *vmexit;

	KASSERT(vm->rendezvous_func != NULL, ("rendezvous not in progress"));

	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->rip = rip;
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
	vmm_stat_incr(vm, vcpuid, VMEXIT_RENDEZVOUS, 1);
}

void
vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip)
{
	struct vm_exit *vmexit;

	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->rip = rip;
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_BOGUS;
	vmm_stat_incr(vm, vcpuid, VMEXIT_ASTPENDING, 1);
}

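/*
 * Run loop for a single vcpu.  Exits that can be serviced entirely in
 * the kernel (HLT, nested page faults, MMIO to the emulated local APIC,
 * IOAPIC and HPET, ...) jump back to 'restart'; everything else sets
 * 'retu' and returns the exit details to userspace in 'vmrun->vm_exit'.
 */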
int
vm_run(struct vm *vm, struct vm_run *vmrun)
{
	int error, vcpuid;
	struct vcpu *vcpu;
	struct pcb *pcb;
	uint64_t tscval;
	struct vm_exit *vme;
	bool retu, intr_disabled;
	pmap_t pmap;
	void *rptr, *sptr;

	vcpuid = vmrun->cpuid;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (!CPU_ISSET(vcpuid, &vm->active_cpus))
		return (EINVAL);

	if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
		return (EINVAL);

	rptr = &vm->rendezvous_func;
	sptr = &vm->suspend;
	pmap = vmspace_pmap(vm->vmspace);
	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;
restart:
	critical_enter();

	KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
	    ("vm_run: absurd pm_active"));

	tscval = rdtsc();

	pcb = PCPU_GET(curpcb);
	set_pcb_flags(pcb, PCB_FULL_IRET);

	restore_guest_fpustate(vcpu);

	vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
	error = VMRUN(vm->cookie, vcpuid, vcpu->nextrip, pmap, rptr, sptr);
	vcpu_require_state(vm, vcpuid, VCPU_FROZEN);

	save_guest_fpustate(vcpu);

	vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);

	critical_exit();

	if (error == 0) {
		retu = false;
		vcpu->nextrip = vme->rip + vme->inst_length;
		switch (vme->exitcode) {
		case VM_EXITCODE_SUSPENDED:
			error = vm_handle_suspend(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_IOAPIC_EOI:
			vioapic_process_eoi(vm, vcpuid,
			    vme->u.ioapic_eoi.vector);
			break;
		case VM_EXITCODE_RENDEZVOUS:
			vm_handle_rendezvous(vm, vcpuid);
			error = 0;
			break;
		case VM_EXITCODE_HLT:
			intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0);
			error = vm_handle_hlt(vm, vcpuid, intr_disabled, &retu);
			break;
		case VM_EXITCODE_PAGING:
			error = vm_handle_paging(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_INST_EMUL:
			error = vm_handle_inst_emul(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_INOUT:
		case VM_EXITCODE_INOUT_STR:
			error = vm_handle_inout(vm, vcpuid, vme, &retu);
			break;
		case VM_EXITCODE_MONITOR:
		case VM_EXITCODE_MWAIT:
			vm_inject_ud(vm, vcpuid);
			break;
		default:
			retu = true;	/* handled in userland */
			break;
		}
	}

	if (error == 0 && retu == false)
		goto restart;

	/* copy the exit information */
	bcopy(vme, &vmrun->vm_exit, sizeof(struct vm_exit));
	return (error);
}

int
vm_restart_instruction(void *arg, int vcpuid)
{
	struct vm *vm;
	struct vcpu *vcpu;
	enum vcpu_state state;
	uint64_t rip;
	int error;

	vm = arg;
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];
	state = vcpu_get_state(vm, vcpuid, NULL);
	if (state == VCPU_RUNNING) {
		/*
		 * When a vcpu is "running" the next instruction is determined
		 * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'.
		 * Thus setting 'inst_length' to zero will cause the current
		 * instruction to be restarted.
		 */
		vcpu->exitinfo.inst_length = 0;
		VCPU_CTR1(vm, vcpuid, "restarting instruction at %#lx by "
		    "setting inst_length to zero", vcpu->exitinfo.rip);
	} else if (state == VCPU_FROZEN) {
		/*
		 * When a vcpu is "frozen" it is outside the critical section
		 * around VMRUN() and 'nextrip' points to the next instruction.
		 * Thus instruction restart is achieved by setting 'nextrip'
		 * to the vcpu's %rip.
		 */
		error = vm_get_register(vm, vcpuid, VM_REG_GUEST_RIP, &rip);
		KASSERT(!error, ("%s: error %d getting rip", __func__, error));
		VCPU_CTR2(vm, vcpuid, "restarting instruction by updating "
		    "nextrip from %#lx to %#lx", vcpu->nextrip, rip);
		vcpu->nextrip = rip;
	} else {
		panic("%s: invalid state %d", __func__, state);
	}
	return (0);
}

int
vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t info)
{
	struct vcpu *vcpu;
	int type, vector;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	if (info & VM_INTINFO_VALID) {
		type = info & VM_INTINFO_TYPE;
		vector = info & 0xff;
		if (type == VM_INTINFO_NMI && vector != IDT_NMI)
			return (EINVAL);
		if (type == VM_INTINFO_HWEXCEPTION && vector >= 32)
			return (EINVAL);
		if (info & VM_INTINFO_RSVD)
			return (EINVAL);
	} else {
		info = 0;
	}
	VCPU_CTR2(vm, vcpuid, "%s: info1(%#lx)", __func__, info);
	vcpu->exitintinfo = info;
	return (0);
}

enum exc_class {
	EXC_BENIGN,
	EXC_CONTRIBUTORY,
	EXC_PAGEFAULT
};

#define	IDT_VE	20	/* Virtualization Exception (Intel specific) */

static enum exc_class
exception_class(uint64_t info)
{
	int type, vector;

	KASSERT(info & VM_INTINFO_VALID, ("intinfo must be valid: %#lx", info));
	type = info & VM_INTINFO_TYPE;
	vector = info & 0xff;

	/* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */
	switch (type) {
	case VM_INTINFO_HWINTR:
	case VM_INTINFO_SWINTR:
	case VM_INTINFO_NMI:
		return (EXC_BENIGN);
	default:
		/*
		 * Hardware exception.
		 *
		 * SVM and VT-x use identical type values to represent NMI,
		 * hardware interrupt and software interrupt.
		 *
		 * SVM uses type '3' for all exceptions. VT-x uses type '3'
		 * for exceptions except #BP and #OF. #BP and #OF use a type
		 * value of '5' or '6'. Therefore we don't check for explicit
		 * values of 'type' to classify 'intinfo' into a hardware
		 * exception.
		 */
		break;
	}

	switch (vector) {
	case IDT_PF:
	case IDT_VE:
		return (EXC_PAGEFAULT);
	case IDT_DE:
	case IDT_TS:
	case IDT_NP:
	case IDT_SS:
	case IDT_GP:
		return (EXC_CONTRIBUTORY);
	default:
		return (EXC_BENIGN);
	}
}

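/*
 * Example applications of Table 6-5: a #GP raised while delivering a
 * #NP (both contributory) becomes a #DF, as does a contributory fault
 * or #PF raised while delivering a #PF; any other combination, such as
 * a #PF raised while delivering a #DE, is delivered serially.
 */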
static int
nested_fault(struct vm *vm, int vcpuid, uint64_t info1, uint64_t info2,
    uint64_t *retinfo)
{
	enum exc_class exc1, exc2;
	int type1, vector1;

	KASSERT(info1 & VM_INTINFO_VALID, ("info1 %#lx is not valid", info1));
	KASSERT(info2 & VM_INTINFO_VALID, ("info2 %#lx is not valid", info2));

	/*
	 * If an exception occurs while attempting to call the double-fault
	 * handler the processor enters shutdown mode (aka triple fault).
	 */
	type1 = info1 & VM_INTINFO_TYPE;
	vector1 = info1 & 0xff;
	if (type1 == VM_INTINFO_HWEXCEPTION && vector1 == IDT_DF) {
		VCPU_CTR2(vm, vcpuid, "triple fault: info1(%#lx), info2(%#lx)",
		    info1, info2);
		vm_suspend(vm, VM_SUSPEND_TRIPLEFAULT);
		*retinfo = 0;
		return (0);
	}

	/*
	 * Table 6-5 "Conditions for Generating a Double Fault", Intel SDM, Vol3
	 */
	exc1 = exception_class(info1);
	exc2 = exception_class(info2);
	if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) ||
	    (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) {
		/* Convert nested fault into a double fault. */
		*retinfo = IDT_DF;
		*retinfo |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
		*retinfo |= VM_INTINFO_DEL_ERRCODE;
	} else {
		/* Handle exceptions serially */
		*retinfo = info2;
	}
	return (1);
}

static uint64_t
vcpu_exception_intinfo(struct vcpu *vcpu)
{
	uint64_t info = 0;

	if (vcpu->exception_pending) {
		info = vcpu->exc_vector & 0xff;
		info |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
		if (vcpu->exc_errcode_valid) {
			info |= VM_INTINFO_DEL_ERRCODE;
			info |= (uint64_t)vcpu->exc_errcode << 32;
		}
	}
	return (info);
}

int
vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo)
{
	struct vcpu *vcpu;
	uint64_t info1, info2;
	int valid;

	KASSERT(vcpuid >= 0 && vcpuid < VM_MAXCPU, ("invalid vcpu %d", vcpuid));

	vcpu = &vm->vcpu[vcpuid];

	info1 = vcpu->exitintinfo;
	vcpu->exitintinfo = 0;

	info2 = 0;
	if (vcpu->exception_pending) {
		info2 = vcpu_exception_intinfo(vcpu);
		vcpu->exception_pending = 0;
		VCPU_CTR2(vm, vcpuid, "Exception %d delivered: %#lx",
		    vcpu->exc_vector, info2);
	}

	if ((info1 & VM_INTINFO_VALID) && (info2 & VM_INTINFO_VALID)) {
		valid = nested_fault(vm, vcpuid, info1, info2, retinfo);
	} else if (info1 & VM_INTINFO_VALID) {
		*retinfo = info1;
		valid = 1;
	} else if (info2 & VM_INTINFO_VALID) {
		*retinfo = info2;
		valid = 1;
	} else {
		valid = 0;
	}

	if (valid) {
		VCPU_CTR4(vm, vcpuid, "%s: info1(%#lx), info2(%#lx), "
		    "retinfo(%#lx)", __func__, info1, info2, *retinfo);
	}

	return (valid);
}

int
vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];
	*info1 = vcpu->exitintinfo;
	*info2 = vcpu_exception_intinfo(vcpu);
	return (0);
}

int
vm_inject_exception(struct vm *vm, int vcpuid, int vector, int errcode_valid,
    uint32_t errcode, int restart_instruction)
{
	struct vcpu *vcpu;
	int error;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (vector < 0 || vector >= 32)
		return (EINVAL);

	/*
	 * A double fault exception should never be injected directly into
	 * the guest. It is a derived exception that results from specific
	 * combinations of nested faults.
	 */
	if (vector == IDT_DF)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	if (vcpu->exception_pending) {
		VCPU_CTR2(vm, vcpuid, "Unable to inject exception %d due to "
		    "pending exception %d", vector, vcpu->exc_vector);
		return (EBUSY);
	}

	/*
	 * From section 26.6.1 "Interruptibility State" in Intel SDM:
	 *
	 * Event blocking by "STI" or "MOV SS" is cleared after guest executes
	 * one instruction or incurs an exception.
	 */
	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
	KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
	    __func__, error));

	if (restart_instruction)
		vm_restart_instruction(vm, vcpuid);

	vcpu->exception_pending = 1;
	vcpu->exc_vector = vector;
	vcpu->exc_errcode = errcode;
	vcpu->exc_errcode_valid = errcode_valid;
	VCPU_CTR1(vm, vcpuid, "Exception %d pending", vector);
	return (0);
}

void
vm_inject_fault(void *vmarg, int vcpuid, int vector, int errcode_valid,
    int errcode)
{
	struct vm *vm;
	int error, restart_instruction;

	vm = vmarg;
	restart_instruction = 1;

	error = vm_inject_exception(vm, vcpuid, vector, errcode_valid,
	    errcode, restart_instruction);
	KASSERT(error == 0, ("vm_inject_exception error %d", error));
}

void
vm_inject_pf(void *vmarg, int vcpuid, int error_code, uint64_t cr2)
{
	struct vm *vm;
	int error;

	vm = vmarg;
	VCPU_CTR2(vm, vcpuid, "Injecting page fault: error_code %#x, cr2 %#lx",
	    error_code, cr2);

	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2);
	KASSERT(error == 0, ("vm_set_register(cr2) error %d", error));

	vm_inject_fault(vm, vcpuid, IDT_PF, 1, error_code);
}

static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");

int
vm_inject_nmi(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	vcpu->nmi_pending = 1;
	vcpu_notify_event(vm, vcpuid, false);
	return (0);
}

int
vm_nmi_pending(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	return (vcpu->nmi_pending);
}

void
vm_nmi_clear(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_nmi_clear: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	if (vcpu->nmi_pending == 0)
		panic("vm_nmi_clear: inconsistent nmi_pending state");

	vcpu->nmi_pending = 0;
	vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1);
}

static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu");

int
vm_inject_extint(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	vcpu->extint_pending = 1;
	vcpu_notify_event(vm, vcpuid, false);
	return (0);
}

int
vm_extint_pending(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_extint_pending: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	return (vcpu->extint_pending);
}

void
vm_extint_clear(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_extint_clear: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	if (vcpu->extint_pending == 0)
		panic("vm_extint_clear: inconsistent extint_pending state");

	vcpu->extint_pending = 0;
	vmm_stat_incr(vm, vcpuid, VCPU_EXTINT_COUNT, 1);
}

int
vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMGETCAP(vm->cookie, vcpu, type, retval));
}

int
vm_set_capability(struct vm *vm, int vcpu, int type, int val)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMSETCAP(vm->cookie, vcpu, type, val));
}

struct vlapic *
vm_lapic(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].vlapic);
}

struct vioapic *
vm_ioapic(struct vm *vm)
{

	return (vm->vioapic);
}

struct vhpet *
vm_hpet(struct vm *vm)
{

	return (vm->vhpet);
}

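/*
 * PCI passthru devices are named by bus/slot/function in the kernel
 * environment, e.g. in loader.conf:
 *
 *	pptdevs="2/0/0 4/5/6"
 *
 * with further devices spilling over into 'pptdevs2' and 'pptdevs3'.
 */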
boolean_t
vmm_is_pptdev(int bus, int slot, int func)
{
	int found, i, n;
	int b, s, f;
	char *val, *cp, *cp2;

	/*
	 * XXX
	 * The length of an environment variable is limited to 128 bytes which
	 * puts an upper limit on the number of passthru devices that may be
	 * specified using a single environment variable.
	 *
	 * Work around this by scanning multiple environment variable
	 * names instead of a single one - yuck!
	 */
	const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL };

	/* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */
	found = 0;
	for (i = 0; names[i] != NULL && !found; i++) {
		cp = val = getenv(names[i]);
		while (cp != NULL && *cp != '\0') {
			if ((cp2 = strchr(cp, ' ')) != NULL)
				*cp2 = '\0';

			n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
			if (n == 3 && bus == b && slot == s && func == f) {
				found = 1;
				break;
			}

			if (cp2 != NULL)
				*cp2++ = ' ';

			cp = cp2;
		}
		freeenv(val);
	}
	return (found);
}

void *
vm_iommu_domain(struct vm *vm)
{

	return (vm->iommu);
}

int
vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate,
    bool from_idle)
{
	int error;
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vcpu_set_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	error = vcpu_set_state_locked(vcpu, newstate, from_idle);
	vcpu_unlock(vcpu);

	return (error);
}

enum vcpu_state
vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu)
{
	struct vcpu *vcpu;
	enum vcpu_state state;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vcpu_get_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	state = vcpu->state;
	if (hostcpu != NULL)
		*hostcpu = vcpu->hostcpu;
	vcpu_unlock(vcpu);

	return (state);
}

int
vm_activate_cpu(struct vm *vm, int vcpuid)
{

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (CPU_ISSET(vcpuid, &vm->active_cpus))
		return (EBUSY);

	VCPU_CTR0(vm, vcpuid, "activated");
	CPU_SET_ATOMIC(vcpuid, &vm->active_cpus);
	return (0);
}

cpuset_t
vm_active_cpus(struct vm *vm)
{

	return (vm->active_cpus);
}

cpuset_t
vm_suspended_cpus(struct vm *vm)
{

	return (vm->suspended_cpus);
}

void *
vcpu_stats(struct vm *vm, int vcpuid)
{

	return (vm->vcpu[vcpuid].stats);
}

int
vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	*state = vm->vcpu[vcpuid].x2apic_state;

	return (0);
}

int
vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (state >= X2APIC_STATE_LAST)
		return (EINVAL);

	vm->vcpu[vcpuid].x2apic_state = state;

	vlapic_set_x2apic_state(vm, vcpuid, state);

	return (0);
}

/*
 * This function is called to ensure that a vcpu "sees" a pending event
 * as soon as possible:
 * - If the vcpu thread is sleeping then it is woken up.
 * - If the vcpu is running on a different host_cpu then an IPI will be directed
 *   to the host_cpu to cause the vcpu to trap into the hypervisor.
 */
void
vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr)
{
	int hostcpu;
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	hostcpu = vcpu->hostcpu;
	if (vcpu->state == VCPU_RUNNING) {
		KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
		if (hostcpu != curcpu) {
			if (lapic_intr) {
				vlapic_post_intr(vcpu->vlapic, hostcpu,
				    vmm_ipinum);
			} else {
				ipi_cpu(hostcpu, vmm_ipinum);
			}
		} else {
			/*
			 * If the 'vcpu' is running on 'curcpu' then it must
			 * be sending a notification to itself (e.g. SELF_IPI).
			 * The pending event will be picked up when the vcpu
			 * transitions back to guest context.
			 */
		}
	} else {
		KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
		    "with hostcpu %d", vcpu->state, hostcpu));
		if (vcpu->state == VCPU_SLEEPING)
			wakeup_one(vcpu);
	}
	vcpu_unlock(vcpu);
}

struct vmspace *
vm_get_vmspace(struct vm *vm)
{

	return (vm->vmspace);
}

int
vm_apicid2vcpuid(struct vm *vm, int apicid)
{
	/*
	 * XXX apic id is assumed to be numerically identical to vcpu id
	 */
	return (apicid);
}

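/*
 * Run 'func(vm, vcpuid, arg)' on every vcpu in 'dest', each in the
 * context of its own vcpu thread, and do not return until all of them
 * have completed.  The initiating vcpu participates by calling
 * vm_handle_rendezvous() itself after kicking the targeted vcpus.
 */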
void
vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest,
    vm_rendezvous_func_t func, void *arg)
{
	int i;

	/*
	 * Enforce that this function is called without any locks
	 */
	WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous");
	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
	    ("vm_smp_rendezvous: invalid vcpuid %d", vcpuid));

restart:
	mtx_lock(&vm->rendezvous_mtx);
	if (vm->rendezvous_func != NULL) {
		/*
		 * If a rendezvous is already in progress then we need to
		 * call the rendezvous handler in case this 'vcpuid' is one
		 * of the targets of the rendezvous.
		 */
		RENDEZVOUS_CTR0(vm, vcpuid, "Rendezvous already in progress");
		mtx_unlock(&vm->rendezvous_mtx);
		vm_handle_rendezvous(vm, vcpuid);
		goto restart;
	}
	KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous "
	    "rendezvous is still in progress"));

	RENDEZVOUS_CTR0(vm, vcpuid, "Initiating rendezvous");
	vm->rendezvous_req_cpus = dest;
	CPU_ZERO(&vm->rendezvous_done_cpus);
	vm->rendezvous_arg = arg;
	vm_set_rendezvous_func(vm, func);
	mtx_unlock(&vm->rendezvous_mtx);

	/*
	 * Wake up any sleeping vcpus and trigger a VM-exit in any running
	 * vcpus so they handle the rendezvous as soon as possible.
	 */
	for (i = 0; i < VM_MAXCPU; i++) {
		if (CPU_ISSET(i, &dest))
			vcpu_notify_event(vm, i, false);
	}

	vm_handle_rendezvous(vm, vcpuid);
}

struct vatpic *
vm_atpic(struct vm *vm)
{
	return (vm->vatpic);
}

struct vatpit *
vm_atpit(struct vm *vm)
{
	return (vm->vatpit);
}

struct vpmtmr *
vm_pmtmr(struct vm *vm)
{

	return (vm->vpmtmr);
}

struct vrtc *
vm_rtc(struct vm *vm)
{

	return (vm->vrtc);
}

enum vm_reg_name
vm_segment_name(int seg)
{
	static enum vm_reg_name seg_names[] = {
		VM_REG_GUEST_ES,
		VM_REG_GUEST_CS,
		VM_REG_GUEST_SS,
		VM_REG_GUEST_DS,
		VM_REG_GUEST_FS,
		VM_REG_GUEST_GS
	};

	KASSERT(seg >= 0 && seg < nitems(seg_names),
	    ("%s: invalid segment encoding %d", __func__, seg));
	return (seg_names[seg]);
}

void
vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    int num_copyinfo)
{
	int idx;

	for (idx = 0; idx < num_copyinfo; idx++) {
		if (copyinfo[idx].cookie != NULL)
			vm_gpa_release(copyinfo[idx].cookie);
	}
	bzero(copyinfo, num_copyinfo * sizeof(struct vm_copyinfo));
}

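/*
 * Typical use of the copy API below: vm_copy_setup() translates and
 * holds the guest pages backing [gla, gla + len), vm_copyin() or
 * vm_copyout() then moves the bytes through the per-page host mappings,
 * and vm_copy_teardown() (above) finally drops the page holds.
 */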
int
vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
    int num_copyinfo)
{
	int error, idx, nused;
	size_t n, off, remaining;
	void *hva, *cookie;
	uint64_t gpa;

	bzero(copyinfo, sizeof(struct vm_copyinfo) * num_copyinfo);

	nused = 0;
	remaining = len;
	while (remaining > 0) {
		KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo"));
		error = vm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa);
		if (error)
			return (error);
		off = gpa & PAGE_MASK;
		n = min(remaining, PAGE_SIZE - off);
		copyinfo[nused].gpa = gpa;
		copyinfo[nused].len = n;
		remaining -= n;
		gla += n;
		nused++;
	}

	for (idx = 0; idx < nused; idx++) {
		hva = vm_gpa_hold(vm, copyinfo[idx].gpa, copyinfo[idx].len,
		    prot, &cookie);
		if (hva == NULL)
			break;
		copyinfo[idx].hva = hva;
		copyinfo[idx].cookie = cookie;
	}

	if (idx != nused) {
		vm_copy_teardown(vm, vcpuid, copyinfo, num_copyinfo);
		return (-1);
	} else {
		return (0);
	}
}

void
vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo, void *kaddr,
    size_t len)
{
	char *dst;
	int idx;

	dst = kaddr;
	idx = 0;
	while (len > 0) {
		bcopy(copyinfo[idx].hva, dst, copyinfo[idx].len);
		len -= copyinfo[idx].len;
		dst += copyinfo[idx].len;
		idx++;
	}
}

void
vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
    struct vm_copyinfo *copyinfo, size_t len)
{
	const char *src;
	int idx;

	src = kaddr;
	idx = 0;
	while (len > 0) {
		bcopy(src, copyinfo[idx].hva, copyinfo[idx].len);
		len -= copyinfo[idx].len;
		src += copyinfo[idx].len;
		idx++;
	}
}

/*
 * Return the amount of in-use and wired memory for the VM. Since
 * these are global stats, only return the values for vCPU 0.
 */
VMM_STAT_DECLARE(VMM_MEM_RESIDENT);
VMM_STAT_DECLARE(VMM_MEM_WIRED);

static void
vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
{

	if (vcpu == 0) {
		vmm_stat_set(vm, vcpu, VMM_MEM_RESIDENT,
		    PAGE_SIZE * vmspace_resident_count(vm->vmspace));
	}
}

static void
vm_get_wiredcnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
{

	if (vcpu == 0) {
		vmm_stat_set(vm, vcpu, VMM_MEM_WIRED,
		    PAGE_SIZE * pmap_wired_count(vmspace_pmap(vm->vmspace)));
	}
}

VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt);
VMM_STAT_FUNC(VMM_MEM_WIRED, "Wired memory", vm_get_wiredcnt);