Lines matching refs:vm in /freebsd-12-stable/sys/amd64/vmm/

49 #include <vm/vm.h>
50 #include <vm/vm_object.h>
51 #include <vm/vm_page.h>
52 #include <vm/pmap.h>
53 #include <vm/vm_map.h>
54 #include <vm/vm_extern.h>
55 #include <vm/vm_param.h>
145 struct vm {
169 /* The following describe the vm cpu topology */
183 #define VMINIT(vm, pmap) (ops != NULL ? (*ops->vminit)(vm, pmap): NULL)
213 static MALLOC_DEFINE(M_VM, "vm", "vm");
238 static void vm_free_memmap(struct vm *vm, int ident);
239 static bool sysmem_mapping(struct vm *vm, struct mem_map *mm);
263 vcpu_cleanup(struct vm *vm, int i, bool destroy)
265 struct vcpu *vcpu = &vm->vcpu[i];
267 VLAPIC_CLEANUP(vm->cookie, vcpu->vlapic);
275 vcpu_init(struct vm *vm, int vcpu_id, bool create)
279 KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus,
282 vcpu = &vm->vcpu[vcpu_id];
294 vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id);
295 vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
307 vcpu_trace_exceptions(struct vm *vm, int vcpuid)
314 vm_exitinfo(struct vm *vm, int cpuid)
318 if (cpuid < 0 || cpuid >= vm->maxcpus)
321 vcpu = &vm->vcpu[cpuid];
411 vm_init(struct vm *vm, bool create)
415 vm->cookie = VMINIT(vm, vmspace_pmap(vm->vmspace));
416 vm->iommu = NULL;
417 vm->vioapic = vioapic_init(vm);
418 vm->vhpet = vhpet_init(vm);
419 vm->vatpic = vatpic_init(vm);
420 vm->vatpit = vatpit_init(vm);
421 vm->vpmtmr = vpmtmr_init(vm);
423 vm->vrtc = vrtc_init(vm);
425 CPU_ZERO(&vm->active_cpus);
426 CPU_ZERO(&vm->debug_cpus);
428 vm->suspend = 0;
429 CPU_ZERO(&vm->suspended_cpus);
431 for (i = 0; i < vm->maxcpus; i++)
432 vcpu_init(vm, i, create);
442 vm_create(const char *name, struct vm **retvm)
444 struct vm *vm;
461 vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
462 strcpy(vm->name, name);
463 vm->vmspace = vmspace;
464 mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);
466 vm->sockets = 1;
467 vm->cores = cores_per_package; /* XXX backwards compatibility */
468 vm->threads = threads_per_core; /* XXX backwards compatibility */
469 vm->maxcpus = VM_MAXCPU; /* XXX temp to keep code working */
471 vm_init(vm, true);
473 *retvm = vm;
478 vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
481 *sockets = vm->sockets;
482 *cores = vm->cores;
483 *threads = vm->threads;
484 *maxcpus = vm->maxcpus;
488 vm_get_maxcpus(struct vm *vm)
490 return (vm->maxcpus);
494 vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
499 if ((sockets * cores * threads) > vm->maxcpus)
502 vm->sockets = sockets;
503 vm->cores = cores;
504 vm->threads = threads;
505 vm->maxcpus = VM_MAXCPU; /* XXX temp to keep code working */
510 vm_cleanup(struct vm *vm, bool destroy)
515 ppt_unassign_all(vm);
517 if (vm->iommu != NULL)
518 iommu_destroy_domain(vm->iommu);
521 vrtc_cleanup(vm->vrtc);
523 vrtc_reset(vm->vrtc);
524 vpmtmr_cleanup(vm->vpmtmr);
525 vatpit_cleanup(vm->vatpit);
526 vhpet_cleanup(vm->vhpet);
527 vatpic_cleanup(vm->vatpic);
528 vioapic_cleanup(vm->vioapic);
530 for (i = 0; i < vm->maxcpus; i++)
531 vcpu_cleanup(vm, i, destroy);
533 VMCLEANUP(vm->cookie);
544 mm = &vm->mem_maps[i];
545 if (destroy || !sysmem_mapping(vm, mm))
546 vm_free_memmap(vm, i);
551 vm_free_memseg(vm, i);
553 VMSPACE_FREE(vm->vmspace);
554 vm->vmspace = NULL;
559 vm_destroy(struct vm *vm)
561 vm_cleanup(vm, true);
562 free(vm, M_VM);
566 vm_reinit(struct vm *vm)
573 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
574 vm_cleanup(vm, false);
575 vm_init(vm, false);
585 vm_name(struct vm *vm)
587 return (vm->name);
591 vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
595 if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
602 vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
605 vmm_mmio_free(vm->vmspace, gpa, len);
613 * an implicit lock on 'vm->mem_maps[]'.
616 vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa)
623 state = vcpu_get_state(vm, vcpuid, &hostcpu);
629 mm = &vm->mem_maps[i];
634 if (ppt_is_mmio(vm, gpa))
641 vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem)
652 seg = &vm->mem_segs[ident];
671 vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
679 seg = &vm->mem_segs[ident];
690 vm_free_memseg(struct vm *vm, int ident)
697 seg = &vm->mem_segs[ident];
705 vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
722 seg = &vm->mem_segs[segid];
735 m = &vm->mem_maps[i];
745 error = vm_map_find(&vm->vmspace->vm_map, seg->object, first, &gpa,
753 error = vm_map_wire(&vm->vmspace->vm_map, gpa, gpa + len,
756 vm_map_remove(&vm->vmspace->vm_map, gpa, gpa + len);
772 vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
780 mm = &vm->mem_maps[i];
806 vm_free_memmap(struct vm *vm, int ident)
811 mm = &vm->mem_maps[ident];
813 error = vm_map_remove(&vm->vmspace->vm_map, mm->gpa,
822 sysmem_mapping(struct vm *vm, struct mem_map *mm)
825 if (mm->len != 0 && vm->mem_segs[mm->segid].sysmem)
832 vmm_sysmem_maxaddr(struct vm *vm)
840 mm = &vm->mem_maps[i];
841 if (sysmem_mapping(vm, mm)) {
850 vm_iommu_modify(struct vm *vm, bool map)
861 mm = &vm->mem_maps[i];
862 if (!sysmem_mapping(vm, mm))
883 vp = vm_gpa_hold(vm, -1, gpa, PAGE_SIZE, VM_PROT_WRITE,
885 KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
886 vm_name(vm), gpa));
892 iommu_create_mapping(vm->iommu, gpa, hpa, sz);
895 iommu_remove_mapping(vm->iommu, gpa, sz);
910 iommu_invalidate_tlb(vm->iommu);
913 #define vm_iommu_unmap(vm) vm_iommu_modify((vm), false)
914 #define vm_iommu_map(vm) vm_iommu_modify((vm), true)
917 vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func)
921 error = ppt_unassign_device(vm, bus, slot, func);
925 if (ppt_assigned_devices(vm) == 0)
926 vm_iommu_unmap(vm);
932 vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
938 if (ppt_assigned_devices(vm) == 0) {
939 KASSERT(vm->iommu == NULL,
941 maxaddr = vmm_sysmem_maxaddr(vm);
942 vm->iommu = iommu_create_domain(maxaddr);
943 if (vm->iommu == NULL)
945 vm_iommu_map(vm);
948 error = ppt_assign_device(vm, bus, slot, func);
953 vm_gpa_hold(struct vm *vm, int vcpuid, vm_paddr_t gpa, size_t len, int reqprot,
962 * (e.g. VM_MMAP_MEMSEG). Therefore 'vm->memmap[]' stability is
966 KASSERT(vcpuid >= -1 && vcpuid < vm->maxcpus, ("%s: invalid vcpuid %d",
968 for (i = 0; i < vm->maxcpus; i++) {
971 state = vcpu_get_state(vm, i, NULL);
982 mm = &vm->mem_maps[i];
984 count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
1010 vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
1013 if (vcpu < 0 || vcpu >= vm->maxcpus)
1019 return (VMGETREG(vm->cookie, vcpu, reg, retval));
1023 vm_set_register(struct vm *vm, int vcpuid, int reg, uint64_t val)
1028 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
1034 error = VMSETREG(vm->cookie, vcpuid, reg, val);
1039 VCPU_CTR1(vm, vcpuid, "Setting nextrip to %#lx", val);
1040 vcpu = &vm->vcpu[vcpuid];
1078 vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
1082 if (vcpu < 0 || vcpu >= vm->maxcpus)
1088 return (VMGETDESC(vm->cookie, vcpu, reg, desc));
1092 vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
1095 if (vcpu < 0 || vcpu >= vm->maxcpus)
1101 return (VMSETDESC(vm->cookie, vcpu, reg, desc));
1148 vcpu_set_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate,
1154 vcpu = &vm->vcpu[vcpuid];
1166 VCPU_CTR1(vm, vcpuid, "vcpu state change from %s to "
1206 VCPU_CTR2(vm, vcpuid, "vcpu state changed from %s to %s",
1222 vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
1226 if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
1231 vcpu_require_state_locked(struct vm *vm, int vcpuid, enum vcpu_state newstate)
1235 if ((error = vcpu_set_state_locked(vm, vcpuid, newstate, false)) != 0)
1239 #define RENDEZVOUS_CTR0(vm, vcpuid, fmt) \
1242 VCPU_CTR0(vm, vcpuid, fmt); \
1244 VM_CTR0(vm, fmt); \
1248 vm_handle_rendezvous(struct vm *vm, int vcpuid)
1253 KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < vm->maxcpus),
1258 mtx_lock(&vm->rendezvous_mtx);
1259 while (vm->rendezvous_func != NULL) {
1261 CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus);
1264 CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
1265 !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
1266 VCPU_CTR0(vm, vcpuid, "Calling rendezvous func");
1267 (*vm->rendezvous_func)(vm, vcpuid, vm->rendezvous_arg);
1268 CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
1270 if (CPU_CMP(&vm->rendezvous_req_cpus,
1271 &vm->rendezvous_done_cpus) == 0) {
1272 VCPU_CTR0(vm, vcpuid, "Rendezvous completed");
1273 vm->rendezvous_func = NULL;
1274 wakeup(&vm->rendezvous_func);
1277 RENDEZVOUS_CTR0(vm, vcpuid, "Wait for rendezvous completion");
1278 mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
1281 mtx_unlock(&vm->rendezvous_mtx);
1285 mtx_lock(&vm->rendezvous_mtx);
1288 mtx_unlock(&vm->rendezvous_mtx);
1296 vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
1303 KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));
1305 vcpu = &vm->vcpu[vcpuid];
1322 if (vm->rendezvous_func != NULL || vm->suspend || vcpu->reqidle)
1324 if (vm_nmi_pending(vm, vcpuid))
1327 if (vm_extint_pending(vm, vcpuid) ||
1334 if (vcpu_should_yield(vm, vcpuid))
1337 if (vcpu_debugged(vm, vcpuid))
1348 VCPU_CTR0(vm, vcpuid, "Halted");
1351 CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus);
1353 if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) {
1362 vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
1368 vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
1369 vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
1380 CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus);
1385 vm_suspend(vm, VM_SUSPEND_HALT);
1391 vm_handle_paging(struct vm *vm, int vcpuid, bool *retu)
1398 vcpu = &vm->vcpu[vcpuid];
1410 rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
1413 VCPU_CTR2(vm, vcpuid, "%s bit emulation for gpa %#lx",
1420 map = &vm->vmspace->vm_map;
1423 VCPU_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, "
1433 vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
1445 vcpu = &vm->vcpu[vcpuid];
1459 VCPU_CTR1(vm, vcpuid, "inst_emul fault accessing gpa %#lx", gpa);
1463 error = vmm_fetch_instruction(vm, vcpuid, paging, vme->rip +
1474 if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, cs_d, vie) != 0) {
1475 VCPU_CTR1(vm, vcpuid, "Error decoding instruction at %#lx",
1486 VCPU_CTR1(vm, vcpuid, "nextrip updated to %#lx after instruction "
1504 error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, paging,
1511 vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu)
1518 vcpu = &vm->vcpu[vcpuid];
1521 CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus);
1532 if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
1533 VCPU_CTR0(vm, vcpuid, "All vcpus suspended");
1537 if (vm->rendezvous_func == NULL) {
1538 VCPU_CTR0(vm, vcpuid, "Sleeping during suspend");
1539 vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
1541 vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
1548 VCPU_CTR0(vm, vcpuid, "Rendezvous during suspend");
1550 error = vm_handle_rendezvous(vm, vcpuid);
1559 for (i = 0; i < vm->maxcpus; i++) {
1560 if (CPU_ISSET(i, &vm->suspended_cpus)) {
1561 vcpu_notify_event(vm, i, false);
1570 vm_handle_reqidle(struct vm *vm, int vcpuid, bool *retu)
1572 struct vcpu *vcpu = &vm->vcpu[vcpuid];
1583 vm_suspend(struct vm *vm, enum vm_suspend_how how)
1590 if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) {
1591 VM_CTR2(vm, "virtual machine already suspended %d/%d",
1592 vm->suspend, how);
1596 VM_CTR1(vm, "virtual machine successfully suspended %d", how);
1601 for (i = 0; i < vm->maxcpus; i++) {
1602 if (CPU_ISSET(i, &vm->active_cpus))
1603 vcpu_notify_event(vm, i, false);
1610 vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip)
1614 KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST,
1615 ("vm_exit_suspended: invalid suspend type %d", vm->suspend));
1617 vmexit = vm_exitinfo(vm, vcpuid);
1621 vmexit->u.suspended.how = vm->suspend;
1625 vm_exit_debug(struct vm *vm, int vcpuid, uint64_t rip)
1629 vmexit = vm_exitinfo(vm, vcpuid);
1636 vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip)
1640 KASSERT(vm->rendezvous_func != NULL, ("rendezvous not in progress"));
1642 vmexit = vm_exitinfo(vm, vcpuid);
1646 vmm_stat_incr(vm, vcpuid, VMEXIT_RENDEZVOUS, 1);
1650 vm_exit_reqidle(struct vm *vm, int vcpuid, uint64_t rip)
1654 vmexit = vm_exitinfo(vm, vcpuid);
1658 vmm_stat_incr(vm, vcpuid, VMEXIT_REQIDLE, 1);
1662 vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip)
1666 vmexit = vm_exitinfo(vm, vcpuid);
1670 vmm_stat_incr(vm, vcpuid, VMEXIT_ASTPENDING, 1);
1674 vm_run(struct vm *vm, struct vm_run *vmrun)
1687 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
1690 if (!CPU_ISSET(vcpuid, &vm->active_cpus))
1693 if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
1696 pmap = vmspace_pmap(vm->vmspace);
1697 vcpu = &vm->vcpu[vcpuid];
1699 evinfo.rptr = &vm->rendezvous_func;
1700 evinfo.sptr = &vm->suspend;
1715 vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
1716 error = VMRUN(vm->cookie, vcpuid, vcpu->nextrip, pmap, &evinfo);
1717 vcpu_require_state(vm, vcpuid, VCPU_FROZEN);
1721 vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);
1730 error = vm_handle_reqidle(vm, vcpuid, &retu);
1733 error = vm_handle_suspend(vm, vcpuid, &retu);
1736 vioapic_process_eoi(vm, vcpuid,
1740 error = vm_handle_rendezvous(vm, vcpuid);
1744 error = vm_handle_hlt(vm, vcpuid, intr_disabled, &retu);
1747 error = vm_handle_paging(vm, vcpuid, &retu);
1750 error = vm_handle_inst_emul(vm, vcpuid, &retu);
1754 error = vm_handle_inout(vm, vcpuid, vme, &retu);
1759 vm_inject_ud(vm, vcpuid);
1770 VCPU_CTR2(vm, vcpuid, "retu %d/%d", error, vme->exitcode);
1780 struct vm *vm;
1786 vm = arg;
1787 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
1790 vcpu = &vm->vcpu[vcpuid];
1791 state = vcpu_get_state(vm, vcpuid, NULL);
1800 VCPU_CTR1(vm, vcpuid, "restarting instruction at %#lx by "
1809 error = vm_get_register(vm, vcpuid, VM_REG_GUEST_RIP, &rip);
1811 VCPU_CTR2(vm, vcpuid, "restarting instruction by updating "
1821 vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t info)
1826 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
1829 vcpu = &vm->vcpu[vcpuid];
1843 VCPU_CTR2(vm, vcpuid, "%s: info1(%#lx)", __func__, info);
1903 nested_fault(struct vm *vm, int vcpuid, uint64_t info1, uint64_t info2,
1919 VCPU_CTR2(vm, vcpuid, "triple fault: info1(%#lx), info2(%#lx)",
1921 vm_suspend(vm, VM_SUSPEND_TRIPLEFAULT);
1961 vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo)
1968 vcpuid < vm->maxcpus, ("invalid vcpu %d", vcpuid));
1970 vcpu = &vm->vcpu[vcpuid];
1979 VCPU_CTR2(vm, vcpuid, "Exception %d delivered: %#lx",
1984 valid = nested_fault(vm, vcpuid, info1, info2, retinfo);
1996 VCPU_CTR4(vm, vcpuid, "%s: info1(%#lx), info2(%#lx), "
2004 vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2)
2008 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2011 vcpu = &vm->vcpu[vcpuid];
2018 vm_inject_exception(struct vm *vm, int vcpuid, int vector, int errcode_valid,
2025 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2039 vcpu = &vm->vcpu[vcpuid];
2042 VCPU_CTR2(vm, vcpuid, "Unable to inject exception %d due to "
2051 error = vm_get_register(vm, vcpuid, VM_REG_GUEST_CR0, &regval);
2063 error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
2068 vm_restart_instruction(vm, vcpuid);
2074 VCPU_CTR1(vm, vcpuid, "Exception %d pending", vector);
2082 struct vm *vm;
2085 vm = vmarg;
2088 error = vm_inject_exception(vm, vcpuid, vector, errcode_valid,
2096 struct vm *vm;
2099 vm = vmarg;
2100 VCPU_CTR2(vm, vcpuid, "Injecting page fault: error_code %#x, cr2 %#lx",
2103 error = vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2);
2106 vm_inject_fault(vm, vcpuid, IDT_PF, 1, error_code);
2112 vm_inject_nmi(struct vm *vm, int vcpuid)
2116 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2119 vcpu = &vm->vcpu[vcpuid];
2122 vcpu_notify_event(vm, vcpuid, false);
2127 vm_nmi_pending(struct vm *vm, int vcpuid)
2131 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2134 vcpu = &vm->vcpu[vcpuid];
2140 vm_nmi_clear(struct vm *vm, int vcpuid)
2144 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2147 vcpu = &vm->vcpu[vcpuid];
2153 vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1);
2159 vm_inject_extint(struct vm *vm, int vcpuid)
2163 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2166 vcpu = &vm->vcpu[vcpuid];
2169 vcpu_notify_event(vm, vcpuid, false);
2174 vm_extint_pending(struct vm *vm, int vcpuid)
2178 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2181 vcpu = &vm->vcpu[vcpuid];
2187 vm_extint_clear(struct vm *vm, int vcpuid)
2191 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2194 vcpu = &vm->vcpu[vcpuid];
2200 vmm_stat_incr(vm, vcpuid, VCPU_EXTINT_COUNT, 1);
2204 vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
2206 if (vcpu < 0 || vcpu >= vm->maxcpus)
2212 return (VMGETCAP(vm->cookie, vcpu, type, retval));
2216 vm_set_capability(struct vm *vm, int vcpu, int type, int val)
2218 if (vcpu < 0 || vcpu >= vm->maxcpus)
2224 return (VMSETCAP(vm->cookie, vcpu, type, val));
2228 vm_lapic(struct vm *vm, int cpu)
2230 return (vm->vcpu[cpu].vlapic);
2234 vm_ioapic(struct vm *vm)
2237 return (vm->vioapic);
2241 vm_hpet(struct vm *vm)
2244 return (vm->vhpet);
2290 vm_iommu_domain(struct vm *vm)
2293 return (vm->iommu);
2297 vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate,
2303 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2306 vcpu = &vm->vcpu[vcpuid];
2309 error = vcpu_set_state_locked(vm, vcpuid, newstate, from_idle);
2316 vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu)
2321 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2324 vcpu = &vm->vcpu[vcpuid];
2336 vm_activate_cpu(struct vm *vm, int vcpuid)
2339 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2342 if (CPU_ISSET(vcpuid, &vm->active_cpus))
2345 VCPU_CTR0(vm, vcpuid, "activated");
2346 CPU_SET_ATOMIC(vcpuid, &vm->active_cpus);
2351 vm_suspend_cpu(struct vm *vm, int vcpuid)
2355 if (vcpuid < -1 || vcpuid >= vm->maxcpus)
2359 vm->debug_cpus = vm->active_cpus;
2360 for (i = 0; i < vm->maxcpus; i++) {
2361 if (CPU_ISSET(i, &vm->active_cpus))
2362 vcpu_notify_event(vm, i, false);
2365 if (!CPU_ISSET(vcpuid, &vm->active_cpus))
2368 CPU_SET_ATOMIC(vcpuid, &vm->debug_cpus);
2369 vcpu_notify_event(vm, vcpuid, false);
2375 vm_resume_cpu(struct vm *vm, int vcpuid)
2378 if (vcpuid < -1 || vcpuid >= vm->maxcpus)
2382 CPU_ZERO(&vm->debug_cpus);
2384 if (!CPU_ISSET(vcpuid, &vm->debug_cpus))
2387 CPU_CLR_ATOMIC(vcpuid, &vm->debug_cpus);
2393 vcpu_debugged(struct vm *vm, int vcpuid)
2396 return (CPU_ISSET(vcpuid, &vm->debug_cpus));
2400 vm_active_cpus(struct vm *vm)
2403 return (vm->active_cpus);
2407 vm_debug_cpus(struct vm *vm)
2410 return (vm->debug_cpus);
2414 vm_suspended_cpus(struct vm *vm)
2417 return (vm->suspended_cpus);
2421 vcpu_stats(struct vm *vm, int vcpuid)
2424 return (vm->vcpu[vcpuid].stats);
2428 vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
2430 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2433 *state = vm->vcpu[vcpuid].x2apic_state;
2439 vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
2441 if (vcpuid < 0 || vcpuid >= vm->maxcpus)
2447 vm->vcpu[vcpuid].x2apic_state = state;
2449 vlapic_set_x2apic_state(vm, vcpuid, state);
2493 vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr)
2495 struct vcpu *vcpu = &vm->vcpu[vcpuid];
2503 vm_get_vmspace(struct vm *vm)
2506 return (vm->vmspace);
2510 vm_apicid2vcpuid(struct vm *vm, int apicid)
2519 vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest,
2528 KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < vm->maxcpus),
2532 mtx_lock(&vm->rendezvous_mtx);
2533 if (vm->rendezvous_func != NULL) {
2539 RENDEZVOUS_CTR0(vm, vcpuid, "Rendezvous already in progress");
2540 mtx_unlock(&vm->rendezvous_mtx);
2541 error = vm_handle_rendezvous(vm, vcpuid);
2546 KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous "
2549 RENDEZVOUS_CTR0(vm, vcpuid, "Initiating rendezvous");
2550 vm->rendezvous_req_cpus = dest;
2551 CPU_ZERO(&vm->rendezvous_done_cpus);
2552 vm->rendezvous_arg = arg;
2553 vm->rendezvous_func = func;
2554 mtx_unlock(&vm->rendezvous_mtx);
2560 for (i = 0; i < vm->maxcpus; i++) {
2562 vcpu_notify_event(vm, i, false);
2565 return (vm_handle_rendezvous(vm, vcpuid));
2569 vm_atpic(struct vm *vm)
2571 return (vm->vatpic);
2575 vm_atpit(struct vm *vm)
2577 return (vm->vatpit);
2581 vm_pmtmr(struct vm *vm)
2584 return (vm->vpmtmr);
2588 vm_rtc(struct vm *vm)
2591 return (vm->vrtc);
2612 vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
2625 vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
2640 error = vm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa, fault);
2653 hva = vm_gpa_hold(vm, vcpuid, copyinfo[idx].gpa,
2662 vm_copy_teardown(vm, vcpuid, copyinfo, num_copyinfo);
2671 vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo, void *kaddr,
2688 vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
2712 vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
2716 vmm_stat_set(vm, vcpu, VMM_MEM_RESIDENT,
2717 PAGE_SIZE * vmspace_resident_count(vm->vmspace));
2722 vm_get_wiredcnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
2726 vmm_stat_set(vm, vcpu, VMM_MEM_WIRED,
2727 PAGE_SIZE * pmap_wired_count(vmspace_pmap(vm->vmspace)));
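
Taken together, the entry points in this listing (vm_create, vm_activate_cpu, vm_run, vm_cleanup/vm_destroy) form the core lifecycle of a bhyve guest inside the vmm kernel module. Below is a minimal sketch of how a caller such as the vmm device ioctl layer might drive one vcpu through that lifecycle; the struct vm_run contents, the exit handling, and the error paths are simplified assumptions for illustration, not code from the file above.

/*
 * Illustrative sketch only: one vcpu driven through the vm_create() /
 * vm_activate_cpu() / vm_run() / vm_destroy() entry points that appear
 * in the listing.  The prototypes are restated here (normally they come
 * from the vmm headers); struct vm_run is assumed to be filled in by the
 * caller before vm_run() is invoked.
 */
struct vm;
struct vm_run;

int  vm_create(const char *name, struct vm **retvm);
int  vm_activate_cpu(struct vm *vm, int vcpuid);
int  vm_run(struct vm *vm, struct vm_run *vmrun);
void vm_destroy(struct vm *vm);

static int
run_vcpu0_once(const char *name, struct vm_run *vmrun)
{
	struct vm *vm;
	int error;

	/* Allocate struct vm, its vmspace, and per-vcpu state. */
	error = vm_create(name, &vm);
	if (error != 0)
		return (error);

	/* vm_run() rejects vcpus that are not in vm->active_cpus. */
	error = vm_activate_cpu(vm, 0);
	if (error == 0) {
		/*
		 * Enter the guest.  vm_run() returns when an exit must be
		 * handled outside the kernel (retu == true) or on error.
		 */
		error = vm_run(vm, vmrun);
	}

	/* vm_cleanup(vm, true) under the hood, then the struct is freed. */
	vm_destroy(vm);
	return (error);
}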