vmm.c revision 305673
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/amd64/vmm/vmm.c 305673 2016-09-09 20:30:36Z jhb $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/vmm.c 305673 2016-09-09 20:30:36Z jhb $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>

#include <machine/cpu.h>
#include <machine/vm.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <x86/psl.h>
#include <x86/apicreg.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>

#include "vmm_ioport.h"
#include "vmm_ktr.h"
#include "vmm_host.h"
#include "vmm_mem.h"
#include "vmm_util.h"
#include "vatpic.h"
#include "vatpit.h"
#include "vhpet.h"
#include "vioapic.h"
#include "vlapic.h"
#include "vpmtmr.h"
#include "vrtc.h"
#include "vmm_ipi.h"
#include "vmm_stat.h"
#include "vmm_lapic.h"

#include "io/ppt.h"
#include "io/iommu.h"
struct vlapic;

/*
 * Initialization:
 * (a) allocated when vcpu is created
 * (i) initialized when vcpu is created and when it is reinitialized
 * (o) initialized the first time the vcpu is created
 * (x) initialized before use
 */
struct vcpu {
    struct mtx      mtx;            /* (o) protects 'state' and 'hostcpu' */
    enum vcpu_state state;          /* (o) vcpu state */
    int             hostcpu;        /* (o) vcpu's host cpu */
    int             reqidle;        /* (i) request vcpu to idle */
    struct vlapic   *vlapic;        /* (i) APIC device model */
    enum x2apic_state x2apic_state; /* (i) APIC mode */
    uint64_t        exitintinfo;    /* (i) events pending at VM exit */
    int             nmi_pending;    /* (i) NMI pending */
    int             extint_pending; /* (i) INTR pending */
    int             exception_pending; /* (i) exception pending */
    int             exc_vector;     /* (x) exception collateral */
    int             exc_errcode_valid;
    uint32_t        exc_errcode;
    struct savefpu  *guestfpu;      /* (a,i) guest fpu state */
    uint64_t        guest_xcr0;     /* (i) guest %xcr0 register */
    void            *stats;         /* (a,i) statistics */
    struct vm_exit  exitinfo;       /* (x) exit reason and collateral */
    uint64_t        nextrip;        /* (x) next instruction to execute */
};

#define vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx))
#define vcpu_lock_init(v)       mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define vcpu_lock(v)            mtx_lock_spin(&((v)->mtx))
#define vcpu_unlock(v)          mtx_unlock_spin(&((v)->mtx))
#define vcpu_assert_locked(v)   mtx_assert(&((v)->mtx), MA_OWNED)

struct mem_seg {
    size_t          len;
    bool            sysmem;
    struct vm_object *object;
};
#define VM_MAX_MEMSEGS  2

struct mem_map {
    vm_paddr_t      gpa;
    size_t          len;
    vm_ooffset_t    segoff;
    int             segid;
    int             prot;
    int             flags;
};
#define VM_MAX_MEMMAPS  4

/*
 * Initialization:
 * (o) initialized the first time the VM is created
 * (i) initialized when VM is created and when it is reinitialized
 * (x) initialized before use
 */
struct vm {
    void            *cookie;        /* (i) cpu-specific data */
    void            *iommu;         /* (x) iommu-specific data */
    struct vhpet    *vhpet;         /* (i) virtual HPET */
    struct vioapic  *vioapic;       /* (i) virtual ioapic */
    struct vatpic   *vatpic;        /* (i) virtual atpic */
    struct vatpit   *vatpit;        /* (i) virtual atpit */
    struct vpmtmr   *vpmtmr;        /* (i) virtual ACPI PM timer */
    struct vrtc     *vrtc;          /* (o) virtual RTC */
    volatile cpuset_t active_cpus;  /* (i) active vcpus */
    int             suspend;        /* (i) stop VM execution */
    volatile cpuset_t suspended_cpus; /* (i) suspended vcpus */
    volatile cpuset_t halted_cpus;  /* (x) cpus in a hard halt */
    cpuset_t        rendezvous_req_cpus;  /* (x) rendezvous requested */
    cpuset_t        rendezvous_done_cpus; /* (x) rendezvous finished */
    void            *rendezvous_arg;      /* (x) rendezvous func/arg */
    vm_rendezvous_func_t rendezvous_func;
    struct mtx      rendezvous_mtx;       /* (o) rendezvous lock */
    struct mem_map  mem_maps[VM_MAX_MEMMAPS]; /* (i) guest address space */
    struct mem_seg  mem_segs[VM_MAX_MEMSEGS]; /* (o) guest memory regions */
    struct vmspace  *vmspace;       /* (o) guest's address space */
    char            name[VM_MAX_NAMELEN]; /* (o) virtual machine name */
    struct vcpu     vcpu[VM_MAXCPU];      /* (i) guest vcpus */
};

static int vmm_initialized;

static struct vmm_ops *ops;
#define VMM_INIT(num)   (ops != NULL ? (*ops->init)(num) : 0)
#define VMM_CLEANUP()   (ops != NULL ? (*ops->cleanup)() : 0)
#define VMM_RESUME()    (ops != NULL ? (*ops->resume)() : 0)

#define VMINIT(vm, pmap) (ops != NULL ? (*ops->vminit)(vm, pmap) : NULL)
#define VMRUN(vmi, vcpu, rip, pmap, evinfo) \
    (ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip, pmap, evinfo) : ENXIO)
#define VMCLEANUP(vmi)  (ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
#define VMSPACE_ALLOC(min, max) \
    (ops != NULL ? (*ops->vmspace_alloc)(min, max) : NULL)
#define VMSPACE_FREE(vmspace) \
    (ops != NULL ? (*ops->vmspace_free)(vmspace) : ENXIO)
#define VMGETREG(vmi, vcpu, num, retval) \
    (ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
#define VMSETREG(vmi, vcpu, num, val) \
    (ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
#define VMGETDESC(vmi, vcpu, num, desc) \
    (ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define VMSETDESC(vmi, vcpu, num, desc) \
    (ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define VMGETCAP(vmi, vcpu, num, retval) \
    (ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
#define VMSETCAP(vmi, vcpu, num, val) \
    (ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)
#define VLAPIC_INIT(vmi, vcpu) \
    (ops != NULL ? (*ops->vlapic_init)(vmi, vcpu) : NULL)
#define VLAPIC_CLEANUP(vmi, vlapic) \
    (ops != NULL ? (*ops->vlapic_cleanup)(vmi, vlapic) : NULL)
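/*
 * The wrappers above dispatch through 'ops', which vmm_init() below points
 * at either vmm_ops_intel (VT-x) or vmm_ops_amd (SVM) once the module has
 * probed the CPU.  Until then 'ops' is NULL and each wrapper fails safe,
 * returning ENXIO, NULL or 0 instead of calling through a null function
 * table.
 */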
#define fpu_start_emulating()   load_cr0(rcr0() | CR0_TS)
#define fpu_stop_emulating()    clts()
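/*
 * Background on the two macros above: setting CR0.TS makes the next
 * FPU/SSE instruction raise #NM (device-not-available), which is how the
 * host traps any stray FPU use while the guest's state is loaded; CLTS
 * clears the flag again.  See restore_guest_fpustate() and
 * save_guest_fpustate() below for how this is used around VMRUN().
 */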
static MALLOC_DEFINE(M_VM, "vm", "vm");

/* statistics */
static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");

SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);

/*
 * Halt the guest if all vcpus are executing a HLT instruction with
 * interrupts disabled.
 */
static int halt_detection_enabled = 1;
TUNABLE_INT("hw.vmm.halt_detection", &halt_detection_enabled);
SYSCTL_INT(_hw_vmm, OID_AUTO, halt_detection, CTLFLAG_RDTUN,
    &halt_detection_enabled, 0,
    "Halt VM if all vcpus execute HLT with interrupts disabled");

static int vmm_ipinum;
SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
    "IPI vector used for vcpu notifications");

static int trace_guest_exceptions;
SYSCTL_INT(_hw_vmm, OID_AUTO, trace_guest_exceptions, CTLFLAG_RDTUN,
    &trace_guest_exceptions, 0,
    "Trap into hypervisor on all guest exceptions and reflect them back");

static int vmm_force_iommu = 0;
TUNABLE_INT("hw.vmm.force_iommu", &vmm_force_iommu);
SYSCTL_INT(_hw_vmm, OID_AUTO, force_iommu, CTLFLAG_RDTUN, &vmm_force_iommu, 0,
    "Force use of I/O MMU even if no passthrough devices were found.");

static void vm_free_memmap(struct vm *vm, int ident);
static bool sysmem_mapping(struct vm *vm, struct mem_map *mm);
static void vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr);

#ifdef KTR
static const char *
vcpu_state2str(enum vcpu_state state)
{

    switch (state) {
    case VCPU_IDLE:
        return ("idle");
    case VCPU_FROZEN:
        return ("frozen");
    case VCPU_RUNNING:
        return ("running");
    case VCPU_SLEEPING:
        return ("sleeping");
    default:
        return ("unknown");
    }
}
#endif

static void
vcpu_cleanup(struct vm *vm, int i, bool destroy)
{
    struct vcpu *vcpu = &vm->vcpu[i];

    VLAPIC_CLEANUP(vm->cookie, vcpu->vlapic);
    if (destroy) {
        vmm_stat_free(vcpu->stats);
        fpu_save_area_free(vcpu->guestfpu);
    }
}

static void
vcpu_init(struct vm *vm, int vcpu_id, bool create)
{
    struct vcpu *vcpu;

    KASSERT(vcpu_id >= 0 && vcpu_id < VM_MAXCPU,
        ("vcpu_init: invalid vcpu %d", vcpu_id));

    vcpu = &vm->vcpu[vcpu_id];

    if (create) {
        KASSERT(!vcpu_lock_initialized(vcpu), ("vcpu %d already "
            "initialized", vcpu_id));
        vcpu_lock_init(vcpu);
        vcpu->state = VCPU_IDLE;
        vcpu->hostcpu = NOCPU;
        vcpu->guestfpu = fpu_save_area_alloc();
        vcpu->stats = vmm_stat_alloc();
    }

    vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id);
    vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
    vcpu->reqidle = 0;
    vcpu->exitintinfo = 0;
    vcpu->nmi_pending = 0;
    vcpu->extint_pending = 0;
    vcpu->exception_pending = 0;
    vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
    fpu_save_area_reset(vcpu->guestfpu);
    vmm_stat_init(vcpu->stats);
}
int
vcpu_trace_exceptions(struct vm *vm, int vcpuid)
{

    return (trace_guest_exceptions);
}

struct vm_exit *
vm_exitinfo(struct vm *vm, int cpuid)
{
    struct vcpu *vcpu;

    if (cpuid < 0 || cpuid >= VM_MAXCPU)
        panic("vm_exitinfo: invalid cpuid %d", cpuid);

    vcpu = &vm->vcpu[cpuid];

    return (&vcpu->exitinfo);
}

static void
vmm_resume(void)
{
    VMM_RESUME();
}

static int
vmm_init(void)
{
    int error;

    vmm_host_state_init();

    vmm_ipinum = vmm_ipi_alloc();
    if (vmm_ipinum == 0)
        vmm_ipinum = IPI_AST;

    error = vmm_mem_init();
    if (error)
        return (error);

    if (vmm_is_intel())
        ops = &vmm_ops_intel;
    else if (vmm_is_amd())
        ops = &vmm_ops_amd;
    else
        return (ENXIO);

    vmm_resume_p = vmm_resume;

    return (VMM_INIT(vmm_ipinum));
}

static int
vmm_handler(module_t mod, int what, void *arg)
{
    int error;

    switch (what) {
    case MOD_LOAD:
        vmmdev_init();
        if (vmm_force_iommu || ppt_avail_devices() > 0)
            iommu_init();
        error = vmm_init();
        if (error == 0)
            vmm_initialized = 1;
        break;
    case MOD_UNLOAD:
        error = vmmdev_cleanup();
        if (error == 0) {
            vmm_resume_p = NULL;
            iommu_cleanup();
            if (vmm_ipinum != IPI_AST)
                vmm_ipi_free(vmm_ipinum);
            error = VMM_CLEANUP();
            /*
             * Something bad happened - prevent new
             * VMs from being created
             */
            if (error)
                vmm_initialized = 0;
        }
        break;
    default:
        error = 0;
        break;
    }
    return (error);
}

static moduledata_t vmm_kmod = {
    "vmm",
    vmm_handler,
    NULL
};

/*
 * vmm initialization has the following dependencies:
 *
 * - iommu initialization must happen after the pci passthru driver has had
 *   a chance to attach to any passthru devices (after SI_SUB_CONFIGURE).
 *
 * - VT-x initialization requires smp_rendezvous() and therefore must happen
 *   after SMP is fully functional (after SI_SUB_SMP).
 */
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);

static void
vm_init(struct vm *vm, bool create)
{
    int i;

    vm->cookie = VMINIT(vm, vmspace_pmap(vm->vmspace));
    vm->iommu = NULL;
    vm->vioapic = vioapic_init(vm);
    vm->vhpet = vhpet_init(vm);
    vm->vatpic = vatpic_init(vm);
    vm->vatpit = vatpit_init(vm);
    vm->vpmtmr = vpmtmr_init(vm);
    if (create)
        vm->vrtc = vrtc_init(vm);

    CPU_ZERO(&vm->active_cpus);

    vm->suspend = 0;
    CPU_ZERO(&vm->suspended_cpus);

    for (i = 0; i < VM_MAXCPU; i++)
        vcpu_init(vm, i, create);
}

int
vm_create(const char *name, struct vm **retvm)
{
    struct vm *vm;
    struct vmspace *vmspace;

    /*
     * If vmm.ko could not be successfully initialized then don't attempt
     * to create the virtual machine.
     */
    if (!vmm_initialized)
        return (ENXIO);

    if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
        return (EINVAL);

    vmspace = VMSPACE_ALLOC(0, VM_MAXUSER_ADDRESS);
    if (vmspace == NULL)
        return (ENOMEM);

    vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
    strcpy(vm->name, name);
    vm->vmspace = vmspace;
    mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);

    vm_init(vm, true);

    *retvm = vm;
    return (0);
}
static void
vm_cleanup(struct vm *vm, bool destroy)
{
    struct mem_map *mm;
    int i;

    ppt_unassign_all(vm);

    if (vm->iommu != NULL)
        iommu_destroy_domain(vm->iommu);

    if (destroy)
        vrtc_cleanup(vm->vrtc);
    else
        vrtc_reset(vm->vrtc);
    vpmtmr_cleanup(vm->vpmtmr);
    vatpit_cleanup(vm->vatpit);
    vhpet_cleanup(vm->vhpet);
    vatpic_cleanup(vm->vatpic);
    vioapic_cleanup(vm->vioapic);

    for (i = 0; i < VM_MAXCPU; i++)
        vcpu_cleanup(vm, i, destroy);

    VMCLEANUP(vm->cookie);

    /*
     * System memory is removed from the guest address space only when
     * the VM is destroyed. This is because the mapping remains the same
     * across VM reset.
     *
     * Device memory can be relocated by the guest (e.g. using PCI BARs)
     * so those mappings are removed on a VM reset.
     */
    for (i = 0; i < VM_MAX_MEMMAPS; i++) {
        mm = &vm->mem_maps[i];
        if (destroy || !sysmem_mapping(vm, mm))
            vm_free_memmap(vm, i);
    }

    if (destroy) {
        for (i = 0; i < VM_MAX_MEMSEGS; i++)
            vm_free_memseg(vm, i);

        VMSPACE_FREE(vm->vmspace);
        vm->vmspace = NULL;
    }
}

void
vm_destroy(struct vm *vm)
{
    vm_cleanup(vm, true);
    free(vm, M_VM);
}

int
vm_reinit(struct vm *vm)
{
    int error;

    /*
     * A virtual machine can be reset only if all vcpus are suspended.
     */
    if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
        vm_cleanup(vm, false);
        vm_init(vm, false);
        error = 0;
    } else {
        error = EBUSY;
    }

    return (error);
}

const char *
vm_name(struct vm *vm)
{
    return (vm->name);
}

int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
    vm_object_t obj;

    if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
        return (ENOMEM);
    else
        return (0);
}

int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{

    vmm_mmio_free(vm->vmspace, gpa, len);
    return (0);
}

/*
 * Return 'true' if 'gpa' is allocated in the guest address space.
 *
 * This function is called in the context of a running vcpu which acts as
 * an implicit lock on 'vm->mem_maps[]'.
 */
bool
vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa)
{
    struct mem_map *mm;
    int i;

#ifdef INVARIANTS
    int hostcpu, state;
    state = vcpu_get_state(vm, vcpuid, &hostcpu);
    KASSERT(state == VCPU_RUNNING && hostcpu == curcpu,
        ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu));
#endif

    for (i = 0; i < VM_MAX_MEMMAPS; i++) {
        mm = &vm->mem_maps[i];
        if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len)
            return (true);          /* 'gpa' is sysmem or devmem */
    }

    if (ppt_is_mmio(vm, gpa))
        return (true);              /* 'gpa' is pci passthru mmio */

    return (false);
}

int
vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem)
{
    struct mem_seg *seg;
    vm_object_t obj;

    if (ident < 0 || ident >= VM_MAX_MEMSEGS)
        return (EINVAL);

    if (len == 0 || (len & PAGE_MASK))
        return (EINVAL);

    seg = &vm->mem_segs[ident];
    if (seg->object != NULL) {
        if (seg->len == len && seg->sysmem == sysmem)
            return (EEXIST);
        else
            return (EINVAL);
    }

    obj = vm_object_allocate(OBJT_DEFAULT, len >> PAGE_SHIFT);
    if (obj == NULL)
        return (ENOMEM);

    seg->len = len;
    seg->object = obj;
    seg->sysmem = sysmem;
    return (0);
}

int
vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
    vm_object_t *objptr)
{
    struct mem_seg *seg;

    if (ident < 0 || ident >= VM_MAX_MEMSEGS)
        return (EINVAL);

    seg = &vm->mem_segs[ident];
    if (len)
        *len = seg->len;
    if (sysmem)
        *sysmem = seg->sysmem;
    if (objptr)
        *objptr = seg->object;
    return (0);
}

void
vm_free_memseg(struct vm *vm, int ident)
{
    struct mem_seg *seg;

    KASSERT(ident >= 0 && ident < VM_MAX_MEMSEGS,
        ("%s: invalid memseg ident %d", __func__, ident));

    seg = &vm->mem_segs[ident];
    if (seg->object != NULL) {
        vm_object_deallocate(seg->object);
        bzero(seg, sizeof(struct mem_seg));
    }
}
int
vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
    size_t len, int prot, int flags)
{
    struct mem_seg *seg;
    struct mem_map *m, *map;
    vm_ooffset_t last;
    int i, error;

    if (prot == 0 || (prot & ~(VM_PROT_ALL)) != 0)
        return (EINVAL);

    if (flags & ~VM_MEMMAP_F_WIRED)
        return (EINVAL);

    if (segid < 0 || segid >= VM_MAX_MEMSEGS)
        return (EINVAL);

    seg = &vm->mem_segs[segid];
    if (seg->object == NULL)
        return (EINVAL);

    last = first + len;
    if (first < 0 || first >= last || last > seg->len)
        return (EINVAL);

    if ((gpa | first | last) & PAGE_MASK)
        return (EINVAL);

    map = NULL;
    for (i = 0; i < VM_MAX_MEMMAPS; i++) {
        m = &vm->mem_maps[i];
        if (m->len == 0) {
            map = m;
            break;
        }
    }

    if (map == NULL)
        return (ENOSPC);

    error = vm_map_find(&vm->vmspace->vm_map, seg->object, first, &gpa,
        len, 0, VMFS_NO_SPACE, prot, prot, 0);
    if (error != KERN_SUCCESS)
        return (EFAULT);

    vm_object_reference(seg->object);

    if (flags & VM_MEMMAP_F_WIRED) {
        error = vm_map_wire(&vm->vmspace->vm_map, gpa, gpa + len,
            VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
        if (error != KERN_SUCCESS) {
            vm_map_remove(&vm->vmspace->vm_map, gpa, gpa + len);
            return (EFAULT);
        }
    }

    map->gpa = gpa;
    map->len = len;
    map->segoff = first;
    map->segid = segid;
    map->prot = prot;
    map->flags = flags;
    return (0);
}
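/*
 * A minimal sketch of how the memseg calls compose (hedged illustration,
 * not code from this file): a caller such as the vmmdev ioctl path first
 * creates a segment and then maps all or part of it into the guest
 * physical address space:
 *
 *      error = vm_alloc_memseg(vm, 0, 256 * 1024 * 1024, true);
 *      if (error == 0)
 *              error = vm_mmap_memseg(vm, 0, 0, 0, 256 * 1024 * 1024,
 *                  VM_PROT_ALL, VM_MEMMAP_F_WIRED);
 *
 * The segment (mem_seg) owns the backing vm_object; the mapping (mem_map)
 * records where in the guest address space a slice of it lives.
 */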
int
vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
{
    struct mem_map *mm, *mmnext;
    int i;

    mmnext = NULL;
    for (i = 0; i < VM_MAX_MEMMAPS; i++) {
        mm = &vm->mem_maps[i];
        if (mm->len == 0 || mm->gpa < *gpa)
            continue;
        if (mmnext == NULL || mm->gpa < mmnext->gpa)
            mmnext = mm;
    }

    if (mmnext != NULL) {
        *gpa = mmnext->gpa;
        if (segid)
            *segid = mmnext->segid;
        if (segoff)
            *segoff = mmnext->segoff;
        if (len)
            *len = mmnext->len;
        if (prot)
            *prot = mmnext->prot;
        if (flags)
            *flags = mmnext->flags;
        return (0);
    } else {
        return (ENOENT);
    }
}

static void
vm_free_memmap(struct vm *vm, int ident)
{
    struct mem_map *mm;
    int error;

    mm = &vm->mem_maps[ident];
    if (mm->len) {
        error = vm_map_remove(&vm->vmspace->vm_map, mm->gpa,
            mm->gpa + mm->len);
        KASSERT(error == KERN_SUCCESS, ("%s: vm_map_remove error %d",
            __func__, error));
        bzero(mm, sizeof(struct mem_map));
    }
}

static __inline bool
sysmem_mapping(struct vm *vm, struct mem_map *mm)
{

    if (mm->len != 0 && vm->mem_segs[mm->segid].sysmem)
        return (true);
    else
        return (false);
}

static vm_paddr_t
sysmem_maxaddr(struct vm *vm)
{
    struct mem_map *mm;
    vm_paddr_t maxaddr;
    int i;

    maxaddr = 0;
    for (i = 0; i < VM_MAX_MEMMAPS; i++) {
        mm = &vm->mem_maps[i];
        if (sysmem_mapping(vm, mm)) {
            if (maxaddr < mm->gpa + mm->len)
                maxaddr = mm->gpa + mm->len;
        }
    }
    return (maxaddr);
}

static void
vm_iommu_modify(struct vm *vm, boolean_t map)
{
    int i, sz;
    vm_paddr_t gpa, hpa;
    struct mem_map *mm;
    void *vp, *cookie, *host_domain;

    sz = PAGE_SIZE;
    host_domain = iommu_host_domain();

    for (i = 0; i < VM_MAX_MEMMAPS; i++) {
        mm = &vm->mem_maps[i];
        if (!sysmem_mapping(vm, mm))
            continue;

        if (map) {
            KASSERT((mm->flags & VM_MEMMAP_F_IOMMU) == 0,
                ("iommu map found invalid memmap %#lx/%#lx/%#x",
                mm->gpa, mm->len, mm->flags));
            if ((mm->flags & VM_MEMMAP_F_WIRED) == 0)
                continue;
            mm->flags |= VM_MEMMAP_F_IOMMU;
        } else {
            if ((mm->flags & VM_MEMMAP_F_IOMMU) == 0)
                continue;
            mm->flags &= ~VM_MEMMAP_F_IOMMU;
            KASSERT((mm->flags & VM_MEMMAP_F_WIRED) != 0,
                ("iommu unmap found invalid memmap %#lx/%#lx/%#x",
                mm->gpa, mm->len, mm->flags));
        }

        gpa = mm->gpa;
        while (gpa < mm->gpa + mm->len) {
            vp = vm_gpa_hold(vm, -1, gpa, PAGE_SIZE, VM_PROT_WRITE,
                &cookie);
            KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
                vm_name(vm), gpa));

            vm_gpa_release(cookie);

            hpa = DMAP_TO_PHYS((uintptr_t)vp);
            if (map) {
                iommu_create_mapping(vm->iommu, gpa, hpa, sz);
                iommu_remove_mapping(host_domain, hpa, sz);
            } else {
                iommu_remove_mapping(vm->iommu, gpa, sz);
                iommu_create_mapping(host_domain, hpa, hpa, sz);
            }

            gpa += PAGE_SIZE;
        }
    }

    /*
     * Invalidate the cached translations associated with the domain
     * from which pages were removed.
     */
    if (map)
        iommu_invalidate_tlb(host_domain);
    else
        iommu_invalidate_tlb(vm->iommu);
}

#define vm_iommu_unmap(vm)      vm_iommu_modify((vm), FALSE)
#define vm_iommu_map(vm)        vm_iommu_modify((vm), TRUE)
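/*
 * To summarize vm_iommu_modify(): each wired sysmem page is moved between
 * two IOMMU domains.  On map, a gpa -> hpa translation is installed in the
 * VM's domain and the page's identity (hpa -> hpa) translation is removed
 * from the host domain, so an assigned device can only DMA into its own
 * guest's memory.  On unmap the exchange is reversed.
 */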
int
vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func)
{
    int error;

    error = ppt_unassign_device(vm, bus, slot, func);
    if (error)
        return (error);

    if (ppt_assigned_devices(vm) == 0)
        vm_iommu_unmap(vm);

    return (0);
}

int
vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
{
    int error;
    vm_paddr_t maxaddr;

    /* Set up the IOMMU to do the 'gpa' to 'hpa' translation */
    if (ppt_assigned_devices(vm) == 0) {
        KASSERT(vm->iommu == NULL,
            ("vm_assign_pptdev: iommu must be NULL"));
        maxaddr = sysmem_maxaddr(vm);
        vm->iommu = iommu_create_domain(maxaddr);
        vm_iommu_map(vm);
    }

    error = ppt_assign_device(vm, bus, slot, func);
    return (error);
}

void *
vm_gpa_hold(struct vm *vm, int vcpuid, vm_paddr_t gpa, size_t len, int reqprot,
    void **cookie)
{
    int i, count, pageoff;
    struct mem_map *mm;
    vm_page_t m;
#ifdef INVARIANTS
    /*
     * All vcpus are frozen by ioctls that modify the memory map
     * (e.g. VM_MMAP_MEMSEG). Therefore 'vm->memmap[]' stability is
     * guaranteed if at least one vcpu is in the VCPU_FROZEN state.
     */
    int state;
    KASSERT(vcpuid >= -1 && vcpuid < VM_MAXCPU, ("%s: invalid vcpuid %d",
        __func__, vcpuid));
    for (i = 0; i < VM_MAXCPU; i++) {
        if (vcpuid != -1 && vcpuid != i)
            continue;
        state = vcpu_get_state(vm, i, NULL);
        KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d",
            __func__, state));
    }
#endif
    pageoff = gpa & PAGE_MASK;
    if (len > PAGE_SIZE - pageoff)
        panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);

    count = 0;
    for (i = 0; i < VM_MAX_MEMMAPS; i++) {
        mm = &vm->mem_maps[i];
        if (sysmem_mapping(vm, mm) && gpa >= mm->gpa &&
            gpa < mm->gpa + mm->len) {
            count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
                trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);
            break;
        }
    }

    if (count == 1) {
        *cookie = m;
        return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
    } else {
        *cookie = NULL;
        return (NULL);
    }
}

void
vm_gpa_release(void *cookie)
{
    vm_page_t m = cookie;

    vm_page_lock(m);
    vm_page_unhold(m);
    vm_page_unlock(m);
}
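/*
 * Typical use of the pair above (hedged illustration, not code from this
 * file): hold a guest page, access it through the direct map, then release
 * the hold so the page can be paged out or freed again:
 *
 *      void *cookie;
 *      uint8_t *p;
 *
 *      p = vm_gpa_hold(vm, vcpuid, gpa, 1, VM_PROT_READ, &cookie);
 *      if (p != NULL) {
 *              uint8_t byte = *p;      // 'gpa' is backed; read one byte
 *              vm_gpa_release(cookie);
 *      }
 *
 * Note that a single call never spans a page boundary; callers that need
 * more than PAGE_SIZE - pageoff bytes must loop.
 */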
int
vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
{

    if (vcpu < 0 || vcpu >= VM_MAXCPU)
        return (EINVAL);

    if (reg >= VM_REG_LAST)
        return (EINVAL);

    return (VMGETREG(vm->cookie, vcpu, reg, retval));
}

int
vm_set_register(struct vm *vm, int vcpuid, int reg, uint64_t val)
{
    struct vcpu *vcpu;
    int error;

    if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
        return (EINVAL);

    if (reg >= VM_REG_LAST)
        return (EINVAL);

    error = VMSETREG(vm->cookie, vcpuid, reg, val);
    if (error || reg != VM_REG_GUEST_RIP)
        return (error);

    /* Set 'nextrip' to match the value of %rip */
    VCPU_CTR1(vm, vcpuid, "Setting nextrip to %#lx", val);
    vcpu = &vm->vcpu[vcpuid];
    vcpu->nextrip = val;
    return (0);
}

static boolean_t
is_descriptor_table(int reg)
{

    switch (reg) {
    case VM_REG_GUEST_IDTR:
    case VM_REG_GUEST_GDTR:
        return (TRUE);
    default:
        return (FALSE);
    }
}

static boolean_t
is_segment_register(int reg)
{

    switch (reg) {
    case VM_REG_GUEST_ES:
    case VM_REG_GUEST_CS:
    case VM_REG_GUEST_SS:
    case VM_REG_GUEST_DS:
    case VM_REG_GUEST_FS:
    case VM_REG_GUEST_GS:
    case VM_REG_GUEST_TR:
    case VM_REG_GUEST_LDTR:
        return (TRUE);
    default:
        return (FALSE);
    }
}

int
vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc)
{

    if (vcpu < 0 || vcpu >= VM_MAXCPU)
        return (EINVAL);

    if (!is_segment_register(reg) && !is_descriptor_table(reg))
        return (EINVAL);

    return (VMGETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
    struct seg_desc *desc)
{
    if (vcpu < 0 || vcpu >= VM_MAXCPU)
        return (EINVAL);

    if (!is_segment_register(reg) && !is_descriptor_table(reg))
        return (EINVAL);

    return (VMSETDESC(vm->cookie, vcpu, reg, desc));
}

static void
restore_guest_fpustate(struct vcpu *vcpu)
{

    /* flush host state to the pcb */
    fpuexit(curthread);

    /* restore guest FPU state */
    fpu_stop_emulating();
    fpurestore(vcpu->guestfpu);

    /* restore guest XCR0 if XSAVE is enabled in the host */
    if (rcr4() & CR4_XSAVE)
        load_xcr(0, vcpu->guest_xcr0);

    /*
     * The FPU is now "dirty" with the guest's state so turn on emulation
     * to trap any access to the FPU by the host.
     */
    fpu_start_emulating();
}

static void
save_guest_fpustate(struct vcpu *vcpu)
{

    if ((rcr0() & CR0_TS) == 0)
        panic("fpu emulation not enabled in host!");

    /* save guest XCR0 and restore host XCR0 */
    if (rcr4() & CR4_XSAVE) {
        vcpu->guest_xcr0 = rxcr(0);
        load_xcr(0, vmm_get_host_xcr0());
    }

    /* save guest FPU state */
    fpu_stop_emulating();
    fpusave(vcpu->guestfpu);
    fpu_start_emulating();
}
static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");

static int
vcpu_set_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate,
    bool from_idle)
{
    struct vcpu *vcpu;
    int error;

    vcpu = &vm->vcpu[vcpuid];
    vcpu_assert_locked(vcpu);

    /*
     * State transitions from the vmmdev_ioctl() must always begin from
     * the VCPU_IDLE state. This guarantees that there is only a single
     * ioctl() operating on a vcpu at any point.
     */
    if (from_idle) {
        while (vcpu->state != VCPU_IDLE) {
            vcpu->reqidle = 1;
            vcpu_notify_event_locked(vcpu, false);
            VCPU_CTR1(vm, vcpuid, "vcpu state change from %s to "
                "idle requested", vcpu_state2str(vcpu->state));
            msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
        }
    } else {
        KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
            "vcpu idle state"));
    }

    if (vcpu->state == VCPU_RUNNING) {
        KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
            "mismatch for running vcpu", curcpu, vcpu->hostcpu));
    } else {
        KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
            "vcpu that is not running", vcpu->hostcpu));
    }

    /*
     * The following state transitions are allowed:
     * IDLE -> FROZEN -> IDLE
     * FROZEN -> RUNNING -> FROZEN
     * FROZEN -> SLEEPING -> FROZEN
     */
    switch (vcpu->state) {
    case VCPU_IDLE:
    case VCPU_RUNNING:
    case VCPU_SLEEPING:
        error = (newstate != VCPU_FROZEN);
        break;
    case VCPU_FROZEN:
        error = (newstate == VCPU_FROZEN);
        break;
    default:
        error = 1;
        break;
    }

    if (error)
        return (EBUSY);

    VCPU_CTR2(vm, vcpuid, "vcpu state changed from %s to %s",
        vcpu_state2str(vcpu->state), vcpu_state2str(newstate));

    vcpu->state = newstate;
    if (newstate == VCPU_RUNNING)
        vcpu->hostcpu = curcpu;
    else
        vcpu->hostcpu = NOCPU;

    if (newstate == VCPU_IDLE)
        wakeup(&vcpu->state);

    return (0);
}

static void
vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
    int error;

    if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
        panic("Error %d setting state to %d\n", error, newstate);
}

static void
vcpu_require_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
    int error;

    if ((error = vcpu_set_state_locked(vm, vcpuid, newstate, false)) != 0)
        panic("Error %d setting state to %d", error, newstate);
}

static void
vm_set_rendezvous_func(struct vm *vm, vm_rendezvous_func_t func)
{

    KASSERT(mtx_owned(&vm->rendezvous_mtx), ("rendezvous_mtx not locked"));

    /*
     * Update 'rendezvous_func' and execute a write memory barrier to
     * ensure that it is visible across all host cpus. This is not needed
     * for correctness but it does ensure that all the vcpus will notice
     * that the rendezvous is requested immediately.
     */
    vm->rendezvous_func = func;
    wmb();
}

#define RENDEZVOUS_CTR0(vm, vcpuid, fmt)                                \
    do {                                                                \
        if (vcpuid >= 0)                                                \
            VCPU_CTR0(vm, vcpuid, fmt);                                 \
        else                                                            \
            VM_CTR0(vm, fmt);                                           \
    } while (0)

static void
vm_handle_rendezvous(struct vm *vm, int vcpuid)
{

    KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
        ("vm_handle_rendezvous: invalid vcpuid %d", vcpuid));

    mtx_lock(&vm->rendezvous_mtx);
    while (vm->rendezvous_func != NULL) {
        /* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
        CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus);

        if (vcpuid != -1 &&
            CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
            !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
            VCPU_CTR0(vm, vcpuid, "Calling rendezvous func");
            (*vm->rendezvous_func)(vm, vcpuid, vm->rendezvous_arg);
            CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
        }
        if (CPU_CMP(&vm->rendezvous_req_cpus,
            &vm->rendezvous_done_cpus) == 0) {
            VCPU_CTR0(vm, vcpuid, "Rendezvous completed");
            vm_set_rendezvous_func(vm, NULL);
            wakeup(&vm->rendezvous_func);
            break;
        }
        RENDEZVOUS_CTR0(vm, vcpuid, "Wait for rendezvous completion");
        mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
            "vmrndv", 0);
    }
    mtx_unlock(&vm->rendezvous_mtx);
}
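/*
 * The rendezvous protocol in brief: a requester populates
 * 'rendezvous_req_cpus' and 'rendezvous_func', and every vcpu named in the
 * request set eventually calls vm_handle_rendezvous().  Each one runs the
 * callback exactly once, marks itself in 'rendezvous_done_cpus', and the
 * vcpu that completes the set clears the function pointer and wakes the
 * sleepers.  A vcpuid of -1 denotes a caller that only waits for
 * completion and never executes the callback itself.
 */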
/*
 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
 */
static int
vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
{
    struct vcpu *vcpu;
    const char *wmesg;
    int t, vcpu_halted, vm_halted;

    KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));

    vcpu = &vm->vcpu[vcpuid];
    vcpu_halted = 0;
    vm_halted = 0;

    vcpu_lock(vcpu);
    while (1) {
        /*
         * Do a final check for pending NMI or interrupts before
         * really putting this thread to sleep. Also check for
         * software events that would cause this vcpu to wakeup.
         *
         * These interrupts/events could have happened after the
         * vcpu returned from VMRUN() and before it acquired the
         * vcpu lock above.
         */
        if (vm->rendezvous_func != NULL || vm->suspend || vcpu->reqidle)
            break;
        if (vm_nmi_pending(vm, vcpuid))
            break;
        if (!intr_disabled) {
            if (vm_extint_pending(vm, vcpuid) ||
                vlapic_pending_intr(vcpu->vlapic, NULL)) {
                break;
            }
        }

        /* Don't go to sleep if the vcpu thread needs to yield */
        if (vcpu_should_yield(vm, vcpuid))
            break;

        /*
         * Some Linux guests implement "halt" by having all vcpus
         * execute HLT with interrupts disabled. 'halted_cpus' keeps
         * track of the vcpus that have entered this state. When all
         * vcpus enter the halted state the virtual machine is halted.
         */
        if (intr_disabled) {
            wmesg = "vmhalt";
            VCPU_CTR0(vm, vcpuid, "Halted");
            if (!vcpu_halted && halt_detection_enabled) {
                vcpu_halted = 1;
                CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus);
            }
            if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) {
                vm_halted = 1;
                break;
            }
        } else {
            wmesg = "vmidle";
        }

        t = ticks;
        vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
        /*
         * XXX msleep_spin() cannot be interrupted by signals so
         * wake up periodically to check pending signals.
         */
        msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
        vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
        vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
    }

    if (vcpu_halted)
        CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus);

    vcpu_unlock(vcpu);

    if (vm_halted)
        vm_suspend(vm, VM_SUSPEND_HALT);

    return (0);
}
static int
vm_handle_paging(struct vm *vm, int vcpuid, bool *retu)
{
    int rv, ftype;
    struct vm_map *map;
    struct vcpu *vcpu;
    struct vm_exit *vme;

    vcpu = &vm->vcpu[vcpuid];
    vme = &vcpu->exitinfo;

    KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
        __func__, vme->inst_length));

    ftype = vme->u.paging.fault_type;
    KASSERT(ftype == VM_PROT_READ ||
        ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
        ("vm_handle_paging: invalid fault_type %d", ftype));

    if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
        rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
            vme->u.paging.gpa, ftype);
        if (rv == 0) {
            VCPU_CTR2(vm, vcpuid, "%s bit emulation for gpa %#lx",
                ftype == VM_PROT_READ ? "accessed" : "dirty",
                vme->u.paging.gpa);
            goto done;
        }
    }

    map = &vm->vmspace->vm_map;
    rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL);

    VCPU_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, "
        "ftype = %d", rv, vme->u.paging.gpa, ftype);

    if (rv != KERN_SUCCESS)
        return (EFAULT);
done:
    return (0);
}

static int
vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
{
    struct vie *vie;
    struct vcpu *vcpu;
    struct vm_exit *vme;
    uint64_t gla, gpa, cs_base;
    struct vm_guest_paging *paging;
    mem_region_read_t mread;
    mem_region_write_t mwrite;
    enum vm_cpu_mode cpu_mode;
    int cs_d, error, fault;

    vcpu = &vm->vcpu[vcpuid];
    vme = &vcpu->exitinfo;

    KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
        __func__, vme->inst_length));

    gla = vme->u.inst_emul.gla;
    gpa = vme->u.inst_emul.gpa;
    cs_base = vme->u.inst_emul.cs_base;
    cs_d = vme->u.inst_emul.cs_d;
    vie = &vme->u.inst_emul.vie;
    paging = &vme->u.inst_emul.paging;
    cpu_mode = paging->cpu_mode;

    VCPU_CTR1(vm, vcpuid, "inst_emul fault accessing gpa %#lx", gpa);

    /* Fetch, decode and emulate the faulting instruction */
    if (vie->num_valid == 0) {
        error = vmm_fetch_instruction(vm, vcpuid, paging, vme->rip +
            cs_base, VIE_INST_SIZE, vie, &fault);
    } else {
        /*
         * The instruction bytes have already been copied into 'vie'
         */
        error = fault = 0;
    }
    if (error || fault)
        return (error);

    if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, cs_d, vie) != 0) {
        VCPU_CTR1(vm, vcpuid, "Error decoding instruction at %#lx",
            vme->rip + cs_base);
        *retu = true;   /* dump instruction bytes in userspace */
        return (0);
    }

    /*
     * Update 'nextrip' based on the length of the emulated instruction.
     */
    vme->inst_length = vie->num_processed;
    vcpu->nextrip += vie->num_processed;
    VCPU_CTR1(vm, vcpuid, "nextrip updated to %#lx after instruction "
        "decoding", vcpu->nextrip);

    /* return to userland unless this is an in-kernel emulated device */
    if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
        mread = lapic_mmio_read;
        mwrite = lapic_mmio_write;
    } else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
        mread = vioapic_mmio_read;
        mwrite = vioapic_mmio_write;
    } else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
        mread = vhpet_mmio_read;
        mwrite = vhpet_mmio_write;
    } else {
        *retu = true;
        return (0);
    }

    error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, paging,
        mread, mwrite, retu);

    return (error);
}
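/*
 * vm_handle_inst_emul() above is a three-stage pipeline:
 *
 *   1. vmm_fetch_instruction()   - copy the instruction bytes at the
 *      guest's %rip into 'vie' (skipped when they were already captured);
 *   2. vmm_decode_instruction()  - decode operands and length;
 *   3. vmm_emulate_instruction() - perform the memory access through the
 *      mread/mwrite callbacks chosen above.
 *
 * Only the local APIC, the I/O APIC and the HPET are emulated in the
 * kernel; every other MMIO address is bounced to userland via 'retu'.
 */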
static int
vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu)
{
    int i, done;
    struct vcpu *vcpu;

    done = 0;
    vcpu = &vm->vcpu[vcpuid];

    CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus);

    /*
     * Wait until all 'active_cpus' have suspended themselves.
     *
     * Since a VM may be suspended at any time including when one or
     * more vcpus are doing a rendezvous we need to call the rendezvous
     * handler while we are waiting to prevent a deadlock.
     */
    vcpu_lock(vcpu);
    while (1) {
        if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
            VCPU_CTR0(vm, vcpuid, "All vcpus suspended");
            break;
        }

        if (vm->rendezvous_func == NULL) {
            VCPU_CTR0(vm, vcpuid, "Sleeping during suspend");
            vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
            msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
            vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
        } else {
            VCPU_CTR0(vm, vcpuid, "Rendezvous during suspend");
            vcpu_unlock(vcpu);
            vm_handle_rendezvous(vm, vcpuid);
            vcpu_lock(vcpu);
        }
    }
    vcpu_unlock(vcpu);

    /*
     * Wakeup the other sleeping vcpus and return to userspace.
     */
    for (i = 0; i < VM_MAXCPU; i++) {
        if (CPU_ISSET(i, &vm->suspended_cpus)) {
            vcpu_notify_event(vm, i, false);
        }
    }

    *retu = true;
    return (0);
}

static int
vm_handle_reqidle(struct vm *vm, int vcpuid, bool *retu)
{
    struct vcpu *vcpu = &vm->vcpu[vcpuid];

    vcpu_lock(vcpu);
    KASSERT(vcpu->reqidle, ("invalid vcpu reqidle %d", vcpu->reqidle));
    vcpu->reqidle = 0;
    vcpu_unlock(vcpu);
    *retu = true;
    return (0);
}

int
vm_suspend(struct vm *vm, enum vm_suspend_how how)
{
    int i;

    if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST)
        return (EINVAL);

    if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) {
        VM_CTR2(vm, "virtual machine already suspended %d/%d",
            vm->suspend, how);
        return (EALREADY);
    }

    VM_CTR1(vm, "virtual machine successfully suspended %d", how);

    /*
     * Notify all active vcpus that they are now suspended.
     */
    for (i = 0; i < VM_MAXCPU; i++) {
        if (CPU_ISSET(i, &vm->active_cpus))
            vcpu_notify_event(vm, i, false);
    }

    return (0);
}

void
vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip)
{
    struct vm_exit *vmexit;

    KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST,
        ("vm_exit_suspended: invalid suspend type %d", vm->suspend));

    vmexit = vm_exitinfo(vm, vcpuid);
    vmexit->rip = rip;
    vmexit->inst_length = 0;
    vmexit->exitcode = VM_EXITCODE_SUSPENDED;
    vmexit->u.suspended.how = vm->suspend;
}

void
vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip)
{
    struct vm_exit *vmexit;

    KASSERT(vm->rendezvous_func != NULL, ("rendezvous not in progress"));

    vmexit = vm_exitinfo(vm, vcpuid);
    vmexit->rip = rip;
    vmexit->inst_length = 0;
    vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
    vmm_stat_incr(vm, vcpuid, VMEXIT_RENDEZVOUS, 1);
}

void
vm_exit_reqidle(struct vm *vm, int vcpuid, uint64_t rip)
{
    struct vm_exit *vmexit;

    vmexit = vm_exitinfo(vm, vcpuid);
    vmexit->rip = rip;
    vmexit->inst_length = 0;
    vmexit->exitcode = VM_EXITCODE_REQIDLE;
    vmm_stat_incr(vm, vcpuid, VMEXIT_REQIDLE, 1);
}

void
vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip)
{
    struct vm_exit *vmexit;

    vmexit = vm_exitinfo(vm, vcpuid);
    vmexit->rip = rip;
    vmexit->inst_length = 0;
    vmexit->exitcode = VM_EXITCODE_BOGUS;
    vmm_stat_incr(vm, vcpuid, VMEXIT_ASTPENDING, 1);
}
int
vm_run(struct vm *vm, struct vm_run *vmrun)
{
    struct vm_eventinfo evinfo;
    int error, vcpuid;
    struct vcpu *vcpu;
    struct pcb *pcb;
    uint64_t tscval;
    struct vm_exit *vme;
    bool retu, intr_disabled;
    pmap_t pmap;

    vcpuid = vmrun->cpuid;

    if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
        return (EINVAL);

    if (!CPU_ISSET(vcpuid, &vm->active_cpus))
        return (EINVAL);

    if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
        return (EINVAL);

    pmap = vmspace_pmap(vm->vmspace);
    vcpu = &vm->vcpu[vcpuid];
    vme = &vcpu->exitinfo;
    evinfo.rptr = &vm->rendezvous_func;
    evinfo.sptr = &vm->suspend;
    evinfo.iptr = &vcpu->reqidle;
restart:
    critical_enter();

    KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
        ("vm_run: absurd pm_active"));

    tscval = rdtsc();

    pcb = PCPU_GET(curpcb);
    set_pcb_flags(pcb, PCB_FULL_IRET);

    restore_guest_fpustate(vcpu);

    vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
    error = VMRUN(vm->cookie, vcpuid, vcpu->nextrip, pmap, &evinfo);
    vcpu_require_state(vm, vcpuid, VCPU_FROZEN);

    save_guest_fpustate(vcpu);

    vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);

    critical_exit();

    if (error == 0) {
        retu = false;
        vcpu->nextrip = vme->rip + vme->inst_length;
        switch (vme->exitcode) {
        case VM_EXITCODE_REQIDLE:
            error = vm_handle_reqidle(vm, vcpuid, &retu);
            break;
        case VM_EXITCODE_SUSPENDED:
            error = vm_handle_suspend(vm, vcpuid, &retu);
            break;
        case VM_EXITCODE_IOAPIC_EOI:
            vioapic_process_eoi(vm, vcpuid,
                vme->u.ioapic_eoi.vector);
            break;
        case VM_EXITCODE_RENDEZVOUS:
            vm_handle_rendezvous(vm, vcpuid);
            error = 0;
            break;
        case VM_EXITCODE_HLT:
            intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0);
            error = vm_handle_hlt(vm, vcpuid, intr_disabled, &retu);
            break;
        case VM_EXITCODE_PAGING:
            error = vm_handle_paging(vm, vcpuid, &retu);
            break;
        case VM_EXITCODE_INST_EMUL:
            error = vm_handle_inst_emul(vm, vcpuid, &retu);
            break;
        case VM_EXITCODE_INOUT:
        case VM_EXITCODE_INOUT_STR:
            error = vm_handle_inout(vm, vcpuid, vme, &retu);
            break;
        case VM_EXITCODE_MONITOR:
        case VM_EXITCODE_MWAIT:
            vm_inject_ud(vm, vcpuid);
            break;
        default:
            retu = true;    /* handled in userland */
            break;
        }
    }

    if (error == 0 && retu == false)
        goto restart;

    VCPU_CTR2(vm, vcpuid, "retu %d/%d", error, vme->exitcode);

    /* copy the exit information */
    bcopy(vme, &vmrun->vm_exit, sizeof(struct vm_exit));
    return (error);
}
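/*
 * For orientation, a rough sketch of the userland side of this loop
 * (illustrative only; the real consumer is bhyve driving the VM_RUN
 * ioctl, and 'handle_exit_in_userland' is a hypothetical name):
 *
 *      struct vm_run vmrun = { .cpuid = vcpuid };
 *
 *      for (;;) {
 *              if (ioctl(vmfd, VM_RUN, &vmrun) != 0)
 *                      break;
 *              // vm_run() only returns when an exit could not be
 *              // handled in the kernel (retu == true) or on error.
 *              if (handle_exit_in_userland(&vmrun.vm_exit) != 0)
 *                      break;
 *      }
 */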
int
vm_restart_instruction(void *arg, int vcpuid)
{
    struct vm *vm;
    struct vcpu *vcpu;
    enum vcpu_state state;
    uint64_t rip;
    int error;

    vm = arg;
    if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
        return (EINVAL);

    vcpu = &vm->vcpu[vcpuid];
    state = vcpu_get_state(vm, vcpuid, NULL);
    if (state == VCPU_RUNNING) {
        /*
         * When a vcpu is "running" the next instruction is determined
         * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'.
         * Thus setting 'inst_length' to zero will cause the current
         * instruction to be restarted.
         */
        vcpu->exitinfo.inst_length = 0;
        VCPU_CTR1(vm, vcpuid, "restarting instruction at %#lx by "
            "setting inst_length to zero", vcpu->exitinfo.rip);
    } else if (state == VCPU_FROZEN) {
        /*
         * When a vcpu is "frozen" it is outside the critical section
         * around VMRUN() and 'nextrip' points to the next instruction.
         * Thus instruction restart is achieved by setting 'nextrip'
         * to the vcpu's %rip.
         */
        error = vm_get_register(vm, vcpuid, VM_REG_GUEST_RIP, &rip);
        KASSERT(!error, ("%s: error %d getting rip", __func__, error));
        VCPU_CTR2(vm, vcpuid, "restarting instruction by updating "
            "nextrip from %#lx to %#lx", vcpu->nextrip, rip);
        vcpu->nextrip = rip;
    } else {
        panic("%s: invalid state %d", __func__, state);
    }
    return (0);
}

int
vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t info)
{
    struct vcpu *vcpu;
    int type, vector;

    if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
        return (EINVAL);

    vcpu = &vm->vcpu[vcpuid];

    if (info & VM_INTINFO_VALID) {
        type = info & VM_INTINFO_TYPE;
        vector = info & 0xff;
        if (type == VM_INTINFO_NMI && vector != IDT_NMI)
            return (EINVAL);
        if (type == VM_INTINFO_HWEXCEPTION && vector >= 32)
            return (EINVAL);
        if (info & VM_INTINFO_RSVD)
            return (EINVAL);
    } else {
        info = 0;
    }
    VCPU_CTR2(vm, vcpuid, "%s: info1(%#lx)", __func__, info);
    vcpu->exitintinfo = info;
    return (0);
}

enum exc_class {
    EXC_BENIGN,
    EXC_CONTRIBUTORY,
    EXC_PAGEFAULT
};

#define IDT_VE  20      /* Virtualization Exception (Intel specific) */

static enum exc_class
exception_class(uint64_t info)
{
    int type, vector;

    KASSERT(info & VM_INTINFO_VALID, ("intinfo must be valid: %#lx", info));
    type = info & VM_INTINFO_TYPE;
    vector = info & 0xff;

    /* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */
    switch (type) {
    case VM_INTINFO_HWINTR:
    case VM_INTINFO_SWINTR:
    case VM_INTINFO_NMI:
        return (EXC_BENIGN);
    default:
        /*
         * Hardware exception.
         *
         * SVM and VT-x use identical type values to represent NMI,
         * hardware interrupt and software interrupt.
         *
         * SVM uses type '3' for all exceptions. VT-x uses type '3'
         * for exceptions except #BP and #OF. #BP and #OF use a type
         * value of '5' or '6'. Therefore we don't check for explicit
         * values of 'type' to classify 'intinfo' into a hardware
         * exception.
         */
        break;
    }

    switch (vector) {
    case IDT_PF:
    case IDT_VE:
        return (EXC_PAGEFAULT);
    case IDT_DE:
    case IDT_TS:
    case IDT_NP:
    case IDT_SS:
    case IDT_GP:
        return (EXC_CONTRIBUTORY);
    default:
        return (EXC_BENIGN);
    }
}
static int
nested_fault(struct vm *vm, int vcpuid, uint64_t info1, uint64_t info2,
    uint64_t *retinfo)
{
    enum exc_class exc1, exc2;
    int type1, vector1;

    KASSERT(info1 & VM_INTINFO_VALID, ("info1 %#lx is not valid", info1));
    KASSERT(info2 & VM_INTINFO_VALID, ("info2 %#lx is not valid", info2));

    /*
     * If an exception occurs while attempting to call the double-fault
     * handler the processor enters shutdown mode (aka triple fault).
     */
    type1 = info1 & VM_INTINFO_TYPE;
    vector1 = info1 & 0xff;
    if (type1 == VM_INTINFO_HWEXCEPTION && vector1 == IDT_DF) {
        VCPU_CTR2(vm, vcpuid, "triple fault: info1(%#lx), info2(%#lx)",
            info1, info2);
        vm_suspend(vm, VM_SUSPEND_TRIPLEFAULT);
        *retinfo = 0;
        return (0);
    }

    /*
     * Table 6-5 "Conditions for Generating a Double Fault", Intel SDM, Vol3
     */
    exc1 = exception_class(info1);
    exc2 = exception_class(info2);
    if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) ||
        (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) {
        /* Convert nested fault into a double fault. */
        *retinfo = IDT_DF;
        *retinfo |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
        *retinfo |= VM_INTINFO_DEL_ERRCODE;
    } else {
        /* Handle exceptions serially */
        *retinfo = info2;
    }
    return (1);
}
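/*
 * The double-fault conditions above, distilled into a table
 * (first exception down the side, second exception across the top):
 *
 *      first \ second   benign    contributory    page fault
 *      benign           serial    serial          serial
 *      contributory     serial    #DF             serial
 *      page fault       serial    #DF             #DF
 *
 * A #DF that itself faults is a triple fault, handled earlier in the
 * function by suspending the VM.
 */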
static uint64_t
vcpu_exception_intinfo(struct vcpu *vcpu)
{
    uint64_t info = 0;

    if (vcpu->exception_pending) {
        info = vcpu->exc_vector & 0xff;
        info |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
        if (vcpu->exc_errcode_valid) {
            info |= VM_INTINFO_DEL_ERRCODE;
            info |= (uint64_t)vcpu->exc_errcode << 32;
        }
    }
    return (info);
}

int
vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo)
{
    struct vcpu *vcpu;
    uint64_t info1, info2;
    int valid;

    KASSERT(vcpuid >= 0 && vcpuid < VM_MAXCPU, ("invalid vcpu %d", vcpuid));

    vcpu = &vm->vcpu[vcpuid];

    info1 = vcpu->exitintinfo;
    vcpu->exitintinfo = 0;

    info2 = 0;
    if (vcpu->exception_pending) {
        info2 = vcpu_exception_intinfo(vcpu);
        vcpu->exception_pending = 0;
        VCPU_CTR2(vm, vcpuid, "Exception %d delivered: %#lx",
            vcpu->exc_vector, info2);
    }

    if ((info1 & VM_INTINFO_VALID) && (info2 & VM_INTINFO_VALID)) {
        valid = nested_fault(vm, vcpuid, info1, info2, retinfo);
    } else if (info1 & VM_INTINFO_VALID) {
        *retinfo = info1;
        valid = 1;
    } else if (info2 & VM_INTINFO_VALID) {
        *retinfo = info2;
        valid = 1;
    } else {
        valid = 0;
    }

    if (valid) {
        VCPU_CTR4(vm, vcpuid, "%s: info1(%#lx), info2(%#lx), "
            "retinfo(%#lx)", __func__, info1, info2, *retinfo);
    }

    return (valid);
}

int
vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2)
{
    struct vcpu *vcpu;

    if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
        return (EINVAL);

    vcpu = &vm->vcpu[vcpuid];
    *info1 = vcpu->exitintinfo;
    *info2 = vcpu_exception_intinfo(vcpu);
    return (0);
}

int
vm_inject_exception(struct vm *vm, int vcpuid, int vector, int errcode_valid,
    uint32_t errcode, int restart_instruction)
{
    struct vcpu *vcpu;
    uint64_t regval;
    int error;

    if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
        return (EINVAL);

    if (vector < 0 || vector >= 32)
        return (EINVAL);

    /*
     * A double fault exception should never be injected directly into
     * the guest. It is a derived exception that results from specific
     * combinations of nested faults.
     */
    if (vector == IDT_DF)
        return (EINVAL);

    vcpu = &vm->vcpu[vcpuid];

    if (vcpu->exception_pending) {
        VCPU_CTR2(vm, vcpuid, "Unable to inject exception %d due to "
            "pending exception %d", vector, vcpu->exc_vector);
        return (EBUSY);
    }

    if (errcode_valid) {
        /*
         * Exceptions don't deliver an error code in real mode.
         */
        error = vm_get_register(vm, vcpuid, VM_REG_GUEST_CR0, &regval);
        KASSERT(!error, ("%s: error %d getting CR0", __func__, error));
        if (!(regval & CR0_PE))
            errcode_valid = 0;
    }

    /*
     * From section 26.6.1 "Interruptibility State" in Intel SDM:
     *
     * Event blocking by "STI" or "MOV SS" is cleared after guest executes
     * one instruction or incurs an exception.
     */
    error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
    KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
        __func__, error));

    if (restart_instruction)
        vm_restart_instruction(vm, vcpuid);

    vcpu->exception_pending = 1;
    vcpu->exc_vector = vector;
    vcpu->exc_errcode = errcode;
    vcpu->exc_errcode_valid = errcode_valid;
    VCPU_CTR1(vm, vcpuid, "Exception %d pending", vector);
    return (0);
}

void
vm_inject_fault(void *vmarg, int vcpuid, int vector, int errcode_valid,
    int errcode)
{
    struct vm *vm;
    int error, restart_instruction;

    vm = vmarg;
    restart_instruction = 1;

    error = vm_inject_exception(vm, vcpuid, vector, errcode_valid,
        errcode, restart_instruction);
    KASSERT(error == 0, ("vm_inject_exception error %d", error));
}

void
vm_inject_pf(void *vmarg, int vcpuid, int error_code, uint64_t cr2)
{
    struct vm *vm;
    int error;

    vm = vmarg;
    VCPU_CTR2(vm, vcpuid, "Injecting page fault: error_code %#x, cr2 %#lx",
        error_code, cr2);

    error = vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2);
    KASSERT(error == 0, ("vm_set_register(cr2) error %d", error));

    vm_inject_fault(vm, vcpuid, IDT_PF, 1, error_code);
}

static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu");

int
vm_inject_extint(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	vcpu->extint_pending = 1;
	vcpu_notify_event(vm, vcpuid, false);
	return (0);
}

int
vm_extint_pending(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_extint_pending: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	return (vcpu->extint_pending);
}

void
vm_extint_clear(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_extint_clear: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	if (vcpu->extint_pending == 0)
		panic("vm_extint_clear: inconsistent extint_pending state");

	vcpu->extint_pending = 0;
	vmm_stat_incr(vm, vcpuid, VCPU_EXTINT_COUNT, 1);
}
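
/*
 * The capability accessors below are thin wrappers around the vendor
 * backend (VT-x or SVM) via the VMGETCAP/VMSETCAP ops, so the set of
 * supported capabilities varies with the host CPU.  As an illustrative
 * (hypothetical) example, a caller forcing a VM-exit on HLT would do
 * something like:
 *
 *	error = vm_set_capability(vm, vcpuid, VM_CAP_HALT_EXIT, 1);
 */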

int
vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMGETCAP(vm->cookie, vcpu, type, retval));
}

int
vm_set_capability(struct vm *vm, int vcpu, int type, int val)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMSETCAP(vm->cookie, vcpu, type, val));
}

struct vlapic *
vm_lapic(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].vlapic);
}

struct vioapic *
vm_ioapic(struct vm *vm)
{

	return (vm->vioapic);
}

struct vhpet *
vm_hpet(struct vm *vm)
{

	return (vm->vhpet);
}

boolean_t
vmm_is_pptdev(int bus, int slot, int func)
{
	int found, i, n;
	int b, s, f;
	char *val, *cp, *cp2;

	/*
	 * XXX
	 * The length of an environment variable is limited to 128 bytes which
	 * puts an upper limit on the number of passthru devices that may be
	 * specified using a single environment variable.
	 *
	 * Work around this by scanning multiple environment variable
	 * names instead of a single one - yuck!
	 */
	const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL };

	/* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */
	found = 0;
	for (i = 0; names[i] != NULL && !found; i++) {
		cp = val = getenv(names[i]);
		while (cp != NULL && *cp != '\0') {
			if ((cp2 = strchr(cp, ' ')) != NULL)
				*cp2 = '\0';

			n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
			if (n == 3 && bus == b && slot == s && func == f) {
				found = 1;
				break;
			}

			if (cp2 != NULL)
				*cp2++ = ' ';

			cp = cp2;
		}
		freeenv(val);
	}
	return (found);
}

void *
vm_iommu_domain(struct vm *vm)
{

	return (vm->iommu);
}
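
/*
 * A vcpu is in one of four states at any time: idle, frozen, running or
 * sleeping.  The transition rules themselves live in
 * vcpu_set_state_locked(); the wrappers below only take care of holding
 * the vcpu lock around the state change and the state read.
 */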

int
vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate,
    bool from_idle)
{
	int error;
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vcpu_set_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	error = vcpu_set_state_locked(vm, vcpuid, newstate, from_idle);
	vcpu_unlock(vcpu);

	return (error);
}

enum vcpu_state
vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu)
{
	struct vcpu *vcpu;
	enum vcpu_state state;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vcpu_get_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	state = vcpu->state;
	if (hostcpu != NULL)
		*hostcpu = vcpu->hostcpu;
	vcpu_unlock(vcpu);

	return (state);
}

int
vm_activate_cpu(struct vm *vm, int vcpuid)
{

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (CPU_ISSET(vcpuid, &vm->active_cpus))
		return (EBUSY);

	VCPU_CTR0(vm, vcpuid, "activated");
	CPU_SET_ATOMIC(vcpuid, &vm->active_cpus);
	return (0);
}

cpuset_t
vm_active_cpus(struct vm *vm)
{

	return (vm->active_cpus);
}

cpuset_t
vm_suspended_cpus(struct vm *vm)
{

	return (vm->suspended_cpus);
}

void *
vcpu_stats(struct vm *vm, int vcpuid)
{

	return (vm->vcpu[vcpuid].stats);
}

int
vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	*state = vm->vcpu[vcpuid].x2apic_state;

	return (0);
}

int
vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (state >= X2APIC_STATE_LAST)
		return (EINVAL);

	vm->vcpu[vcpuid].x2apic_state = state;

	vlapic_set_x2apic_state(vm, vcpuid, state);

	return (0);
}

/*
 * This function is called to ensure that a vcpu "sees" a pending event
 * as soon as possible:
 * - If the vcpu thread is sleeping then it is woken up.
 * - If the vcpu is running on a different host_cpu then an IPI will be
 *   directed to the host_cpu to cause the vcpu to trap into the hypervisor.
 */
static void
vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr)
{
	int hostcpu;

	hostcpu = vcpu->hostcpu;
	if (vcpu->state == VCPU_RUNNING) {
		KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
		if (hostcpu != curcpu) {
			if (lapic_intr) {
				vlapic_post_intr(vcpu->vlapic, hostcpu,
				    vmm_ipinum);
			} else {
				ipi_cpu(hostcpu, vmm_ipinum);
			}
		} else {
			/*
			 * If the 'vcpu' is running on 'curcpu' then it must
			 * be sending a notification to itself (e.g. SELF_IPI).
			 * The pending event will be picked up when the vcpu
			 * transitions back to guest context.
			 */
		}
	} else {
		KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
		    "with hostcpu %d", vcpu->state, hostcpu));
		if (vcpu->state == VCPU_SLEEPING)
			wakeup_one(vcpu);
	}
}

void
vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr)
{
	struct vcpu *vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	vcpu_notify_event_locked(vcpu, lapic_intr);
	vcpu_unlock(vcpu);
}

struct vmspace *
vm_get_vmspace(struct vm *vm)
{

	return (vm->vmspace);
}

int
vm_apicid2vcpuid(struct vm *vm, int apicid)
{
	/*
	 * XXX apic id is assumed to be numerically identical to vcpu id
	 */
	return (apicid);
}
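
/*
 * vm_smp_rendezvous() below implements a barrier-style operation: the
 * initiator publishes 'func' and the target cpuset under rendezvous_mtx,
 * kicks every target vcpu out of the guest, and then takes part in the
 * rendezvous itself via vm_handle_rendezvous(), which returns only after
 * all targeted vcpus have run 'func'.  One illustrative user (it lives
 * outside this file) is the vioapic, which uses the rendezvous to update
 * per-vcpu interrupt state on all vcpus at once.
 */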

void
vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest,
    vm_rendezvous_func_t func, void *arg)
{
	int i;

	/*
	 * Enforce that this function is called without any locks
	 */
	WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous");
	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
	    ("vm_smp_rendezvous: invalid vcpuid %d", vcpuid));

restart:
	mtx_lock(&vm->rendezvous_mtx);
	if (vm->rendezvous_func != NULL) {
		/*
		 * If a rendezvous is already in progress then we need to
		 * call the rendezvous handler in case this 'vcpuid' is one
		 * of the targets of the rendezvous.
		 */
		RENDEZVOUS_CTR0(vm, vcpuid, "Rendezvous already in progress");
		mtx_unlock(&vm->rendezvous_mtx);
		vm_handle_rendezvous(vm, vcpuid);
		goto restart;
	}
	KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous "
	    "rendezvous is still in progress"));

	RENDEZVOUS_CTR0(vm, vcpuid, "Initiating rendezvous");
	vm->rendezvous_req_cpus = dest;
	CPU_ZERO(&vm->rendezvous_done_cpus);
	vm->rendezvous_arg = arg;
	vm_set_rendezvous_func(vm, func);
	mtx_unlock(&vm->rendezvous_mtx);

	/*
	 * Wake up any sleeping vcpus and trigger a VM-exit in any running
	 * vcpus so they handle the rendezvous as soon as possible.
	 */
	for (i = 0; i < VM_MAXCPU; i++) {
		if (CPU_ISSET(i, &dest))
			vcpu_notify_event(vm, i, false);
	}

	vm_handle_rendezvous(vm, vcpuid);
}

struct vatpic *
vm_atpic(struct vm *vm)
{
	return (vm->vatpic);
}

struct vatpit *
vm_atpit(struct vm *vm)
{
	return (vm->vatpit);
}

struct vpmtmr *
vm_pmtmr(struct vm *vm)
{

	return (vm->vpmtmr);
}

struct vrtc *
vm_rtc(struct vm *vm)
{

	return (vm->vrtc);
}

enum vm_reg_name
vm_segment_name(int seg)
{
	static enum vm_reg_name seg_names[] = {
		VM_REG_GUEST_ES,
		VM_REG_GUEST_CS,
		VM_REG_GUEST_SS,
		VM_REG_GUEST_DS,
		VM_REG_GUEST_FS,
		VM_REG_GUEST_GS
	};

	KASSERT(seg >= 0 && seg < nitems(seg_names),
	    ("%s: invalid segment encoding %d", __func__, seg));
	return (seg_names[seg]);
}
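
/*
 * The copy helpers below are used as a three-step sequence by the
 * instruction emulation code: vm_copy_setup() splits a guest linear
 * address range into page-sized chunks (translating gla to gpa and
 * pinning each page behind a host mapping), vm_copyin()/vm_copyout()
 * then move bytes through the recorded 'hva' segments, and
 * vm_copy_teardown() drops the page holds.  A sketch of a caller, with
 * error handling elided:
 *
 *	struct vm_copyinfo copyinfo[2];
 *	int error, fault;
 *
 *	error = vm_copy_setup(vm, vcpuid, paging, gla, len, PROT_READ,
 *	    copyinfo, nitems(copyinfo), &fault);
 *	if (error == 0 && fault == 0) {
 *		vm_copyin(vm, vcpuid, copyinfo, buf, len);
 *		vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
 *	}
 */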

void
vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    int num_copyinfo)
{
	int idx;

	for (idx = 0; idx < num_copyinfo; idx++) {
		if (copyinfo[idx].cookie != NULL)
			vm_gpa_release(copyinfo[idx].cookie);
	}
	bzero(copyinfo, num_copyinfo * sizeof(struct vm_copyinfo));
}

int
vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
    int num_copyinfo, int *fault)
{
	int error, idx, nused;
	size_t n, off, remaining;
	void *hva, *cookie;
	uint64_t gpa;

	bzero(copyinfo, sizeof(struct vm_copyinfo) * num_copyinfo);

	nused = 0;
	remaining = len;
	while (remaining > 0) {
		KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo"));
		error = vm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa, fault);
		if (error || *fault)
			return (error);
		off = gpa & PAGE_MASK;
		n = min(remaining, PAGE_SIZE - off);
		copyinfo[nused].gpa = gpa;
		copyinfo[nused].len = n;
		remaining -= n;
		gla += n;
		nused++;
	}

	for (idx = 0; idx < nused; idx++) {
		hva = vm_gpa_hold(vm, vcpuid, copyinfo[idx].gpa,
		    copyinfo[idx].len, prot, &cookie);
		if (hva == NULL)
			break;
		copyinfo[idx].hva = hva;
		copyinfo[idx].cookie = cookie;
	}

	if (idx != nused) {
		vm_copy_teardown(vm, vcpuid, copyinfo, num_copyinfo);
		return (EFAULT);
	} else {
		*fault = 0;
		return (0);
	}
}

void
vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo, void *kaddr,
    size_t len)
{
	char *dst;
	int idx;

	dst = kaddr;
	idx = 0;
	while (len > 0) {
		bcopy(copyinfo[idx].hva, dst, copyinfo[idx].len);
		len -= copyinfo[idx].len;
		dst += copyinfo[idx].len;
		idx++;
	}
}

void
vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
    struct vm_copyinfo *copyinfo, size_t len)
{
	const char *src;
	int idx;

	src = kaddr;
	idx = 0;
	while (len > 0) {
		bcopy(src, copyinfo[idx].hva, copyinfo[idx].len);
		len -= copyinfo[idx].len;
		src += copyinfo[idx].len;
		idx++;
	}
}

/*
 * Return the amount of in-use and wired memory for the VM.  Since
 * these are global stats, only return the values for vCPU 0.
 */
VMM_STAT_DECLARE(VMM_MEM_RESIDENT);
VMM_STAT_DECLARE(VMM_MEM_WIRED);

static void
vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
{

	if (vcpu == 0) {
		vmm_stat_set(vm, vcpu, VMM_MEM_RESIDENT,
		    PAGE_SIZE * vmspace_resident_count(vm->vmspace));
	}
}

static void
vm_get_wiredcnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
{

	if (vcpu == 0) {
		vmm_stat_set(vm, vcpu, VMM_MEM_WIRED,
		    PAGE_SIZE * pmap_wired_count(vmspace_pmap(vm->vmspace)));
	}
}

VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt);
VMM_STAT_FUNC(VMM_MEM_WIRED, "Wired memory", vm_get_wiredcnt);