Lines Matching refs:gpa

128 	vm_paddr_t	gpa;
587 vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
591 if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
598 vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
601 vmm_mmio_free(vm->vmspace, gpa, len);
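vm_map_mmio() and vm_unmap_mmio() are thin wrappers: the first backs a guest-physical range with host-physical pages through vmm_mmio_alloc() (failing if that returns NULL, line 591), the second tears the backing down with vmm_mmio_free(). Below is a minimal userspace model of the bookkeeping only; the fixed-size table and struct are stand-ins for the kernel's vmspace-backed state, not its real types.

#include <stdint.h>
#include <stddef.h>

/* Stand-in for the per-VM MMIO state; len == 0 marks a free slot. */
struct mmio_map {
	uint64_t gpa;	/* guest-physical base */
	uint64_t hpa;	/* host-physical base backing it */
	size_t	 len;
};

#define	NMAPS	8
static struct mmio_map mmio_maps[NMAPS];

static int
map_mmio(uint64_t gpa, size_t len, uint64_t hpa)
{
	for (int i = 0; i < NMAPS; i++) {
		if (mmio_maps[i].len == 0) {
			mmio_maps[i] = (struct mmio_map){ gpa, hpa, len };
			return (0);
		}
	}
	return (-1);	/* models vmm_mmio_alloc() returning NULL */
}

static void
unmap_mmio(uint64_t gpa, size_t len)
{
	for (int i = 0; i < NMAPS; i++) {
		if (mmio_maps[i].gpa == gpa && mmio_maps[i].len == len)
			mmio_maps[i].len = 0;
	}
}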
606 * Return 'true' if 'gpa' is allocated in the guest address space.
612 vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa)
626 if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len)
627 return (true); /* 'gpa' is sysmem or devmem */
630 if (ppt_is_mmio(vm, gpa))
631 return (true); /* 'gpa' is pci passthru mmio */
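The allocation test has two halves: 'gpa' counts as allocated if it lands inside any populated memory map (line 626, sysmem or devmem), and otherwise if PCI passthru claims it as MMIO (line 630). A standalone sketch of the first half, assuming a simplified mem_map with just a base and a length:

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

struct mem_map {
	uint64_t gpa;
	size_t	 len;	/* len == 0 marks an unused slot */
};

static bool
mem_allocated(const struct mem_map *maps, int nmaps, uint64_t gpa)
{
	for (int i = 0; i < nmaps; i++) {
		const struct mem_map *mm = &maps[i];

		if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len)
			return (true);	/* sysmem or devmem */
	}
	return (false);	/* the kernel also consults ppt_is_mmio() here */
}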
701 vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
726 if ((gpa | first | last) & PAGE_MASK)
741 error = vm_map_find(&vm->vmspace->vm_map, seg->object, first, &gpa,
749 error = vm_map_wire(&vm->vmspace->vm_map, gpa, gpa + len,
752 vm_map_remove(&vm->vmspace->vm_map, gpa, gpa + len);
757 map->gpa = gpa;
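vm_mmap_memseg() validates alignment up front: 'gpa', 'first' and 'last' must all be page aligned, which line 726 checks in one expression by OR-ing the three values, since any low bit set in any of them survives the OR. If the later vm_map_wire() fails (line 749), the freshly created entry is removed again (line 752) so no half-wired mapping is left behind. A standalone model of just the alignment test:

#include <stdint.h>

#define	PAGE_SIZE	4096UL		/* assumes 4 KB pages */
#define	PAGE_MASK	(PAGE_SIZE - 1)

/* Nonzero iff all three addresses are page aligned. */
static int
mmap_args_aligned(uint64_t gpa, uint64_t first, uint64_t last)
{
	return (((gpa | first | last) & PAGE_MASK) == 0);
}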
767 vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
776 if (mm->len == 0 || mm->gpa < *gpa)
778 if (mmnext == NULL || mm->gpa < mmnext->gpa)
783 *gpa = mmnext->gpa;
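vm_mmap_getnext() scans every populated map whose base is at or above the caller's '*gpa' and keeps the lowest one ('mmnext', lines 776-778), then reports its base back through the pointer (line 783). A standalone sketch of the scan, using the same simplified mem_map as above:

#include <stdint.h>
#include <stddef.h>

struct mem_map {
	uint64_t gpa;
	size_t	 len;	/* len == 0 marks an unused slot */
};

static const struct mem_map *
mmap_getnext(const struct mem_map *maps, int nmaps, uint64_t gpa)
{
	const struct mem_map *mmnext = NULL;

	for (int i = 0; i < nmaps; i++) {
		const struct mem_map *mm = &maps[i];

		if (mm->len == 0 || mm->gpa < gpa)
			continue;
		if (mmnext == NULL || mm->gpa < mmnext->gpa)
			mmnext = mm;
	}
	return (mmnext);	/* NULL: nothing at or above 'gpa' */
}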
808 error = vm_map_remove(&vm->vmspace->vm_map, mm->gpa,
809 mm->gpa + mm->len);
837 if (maxaddr < mm->gpa + mm->len)
838 maxaddr = mm->gpa + mm->len;
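Lines 837-838 compute the top of the guest-physical address space as a running maximum over mm->gpa + mm->len; the surrounding kernel loop additionally filters which mappings count (system memory, from context), which this sketch omits:

#include <stdint.h>
#include <stddef.h>

struct mem_map {
	uint64_t gpa;
	size_t	 len;
};

static uint64_t
max_guest_addr(const struct mem_map *maps, int nmaps)
{
	uint64_t maxaddr = 0;

	for (int i = 0; i < nmaps; i++) {
		if (maps[i].len != 0 && maxaddr < maps[i].gpa + maps[i].len)
			maxaddr = maps[i].gpa + maps[i].len;
	}
	return (maxaddr);
}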
848 vm_paddr_t gpa, hpa;
863 mm->gpa, mm->len, mm->flags));
873 mm->gpa, mm->len, mm->flags));
876 gpa = mm->gpa;
877 while (gpa < mm->gpa + mm->len) {
878 vp = vm_gpa_hold(vm, -1, gpa, PAGE_SIZE, VM_PROT_WRITE,
880 KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
881 vm_name(vm), gpa));
887 iommu_create_mapping(vm->iommu, gpa, hpa, sz);
890 iommu_remove_mapping(vm->iommu, gpa, sz);
894 gpa += PAGE_SIZE;
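Lines 876-894 are the core of the IOMMU update: walk the mapping one page at a time, pin each guest page with vm_gpa_hold() to learn the host-physical address behind it, then create or remove the IOMMU entry for that page. A userspace model of the walk; the two function-pointer hooks are hypothetical stand-ins for the vm_gpa_hold()/VM_PAGE_TO_PHYS() lookup and the iommu_create_mapping()/iommu_remove_mapping() calls:

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

#define	PAGE_SIZE	4096UL

typedef uint64_t (*resolve_hpa_t)(uint64_t gpa);
typedef void (*iommu_op_t)(uint64_t gpa, uint64_t hpa, size_t sz);

static void
iommu_walk(uint64_t base, size_t len, bool map,
    resolve_hpa_t resolve, iommu_op_t create, iommu_op_t remove)
{
	for (uint64_t gpa = base; gpa < base + len; gpa += PAGE_SIZE) {
		uint64_t hpa = resolve(gpa);	/* pin the page, read its hpa */

		if (map)
			create(gpa, hpa, PAGE_SIZE);
		else
			remove(gpa, hpa, PAGE_SIZE);	/* kernel passes only gpa/sz */
	}
}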
932 /* Set up the IOMMU to do the 'gpa' to 'hpa' translation */
948 vm_gpa_hold(struct vm *vm, int vcpuid, vm_paddr_t gpa, size_t len, int reqprot,
971 pageoff = gpa & PAGE_MASK;
973 panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);
978 if (sysmem_mapping(vm, mm) && gpa >= mm->gpa &&
979 gpa < mm->gpa + mm->len) {
981 trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);
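vm_gpa_hold() refuses requests that straddle a page boundary: the held page is the one containing 'gpa' (trunc_page(gpa) at line 981), so 'len' must fit between 'gpa' and the end of that page, which is exactly what the panic at line 973 enforces. A standalone model of the guard:

#include <stdint.h>
#include <stddef.h>

#define	PAGE_SIZE	4096UL
#define	PAGE_MASK	(PAGE_SIZE - 1)

/* Nonzero iff [gpa, gpa + len) stays inside a single page. */
static int
gpa_hold_ok(uint64_t gpa, size_t len)
{
	uint64_t pageoff = gpa & PAGE_MASK;

	return (len <= PAGE_SIZE - pageoff);	/* the kernel panics otherwise */
}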
1398 vme->u.paging.gpa, ftype);
1400 VCPU_CTR2(vm, vcpuid, "%s bit emulation for gpa %#lx",
1402 vme->u.paging.gpa);
1408 rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL);
1410 VCPU_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, "
1411 "ftype = %d", rv, vme->u.paging.gpa, ftype);
1425 uint64_t gla, gpa, cs_base;
1439 gpa = vme->u.inst_emul.gpa;
1446 VCPU_CTR1(vm, vcpuid, "inst_emul fault accessing gpa %#lx", gpa);
1477 if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
1480 } else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
1483 } else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
1491 error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, paging,
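The instruction-emulation path dispatches on the faulting 'gpa': the local APIC page, the virtual I/O APIC window, and the virtual HPET window each get their own read/write handlers, and anything outside those ranges is bounced back out to userspace device models before vmm_emulate_instruction() runs with the chosen handlers. The base addresses below are the conventional x86 values these devices sit at; the window sizes are my reading of the constants at lines 1477-1483 and should be treated as assumptions:

#include <stdint.h>

#define	PAGE_SIZE	4096UL
#define	APIC_BASE	0xFEE00000UL	/* DEFAULT_APIC_BASE */
#define	IOAPIC_BASE	0xFEC00000UL	/* VIOAPIC_BASE */
#define	IOAPIC_SIZE	PAGE_SIZE	/* assumed equal to VIOAPIC_SIZE */
#define	HPET_BASE	0xFED00000UL	/* VHPET_BASE */
#define	HPET_SIZE	1024UL		/* assumed equal to VHPET_SIZE */

enum mmio_dev { DEV_LAPIC, DEV_IOAPIC, DEV_HPET, DEV_USERSPACE };

static enum mmio_dev
classify_mmio(uint64_t gpa)
{
	if (gpa >= APIC_BASE && gpa < APIC_BASE + PAGE_SIZE)
		return (DEV_LAPIC);
	if (gpa >= IOAPIC_BASE && gpa < IOAPIC_BASE + IOAPIC_SIZE)
		return (DEV_IOAPIC);
	if (gpa >= HPET_BASE && gpa < HPET_BASE + HPET_SIZE)
		return (DEV_HPET);
	return (DEV_USERSPACE);	/* no in-kernel device claims the range */
}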
2544 uint64_t gpa;
2552 error = vm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa, fault);
2555 off = gpa & PAGE_MASK;
2557 copyinfo[nused].gpa = gpa;
2565 hva = vm_gpa_hold(vm, vcpuid, copyinfo[idx].gpa,
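vm_copy_setup() turns one guest-linear copy into per-page pieces: each iteration translates the current address (vm_gla2gpa() at line 2552), clips the piece at the page boundary (lines 2555-2557), and each piece's page is later held independently (line 2565). A standalone model of the splitting, where 'xlate' is a hypothetical stand-in for vm_gla2gpa() and is assumed not to fault:

#include <stdint.h>
#include <stddef.h>

#define	PAGE_SIZE	4096UL
#define	PAGE_MASK	(PAGE_SIZE - 1)

struct copyinfo {
	uint64_t gpa;
	size_t	 len;
};

static int
copy_setup(uint64_t gla, size_t len, uint64_t (*xlate)(uint64_t),
    struct copyinfo *ci, int maxpieces)
{
	int nused = 0;

	while (len > 0 && nused < maxpieces) {
		uint64_t gpa = xlate(gla);
		size_t off = gpa & PAGE_MASK;
		size_t n = (len < PAGE_SIZE - off) ? len : PAGE_SIZE - off;

		ci[nused].gpa = gpa;
		ci[nused].len = n;
		nused++;
		gla += n;
		len -= n;
	}
	return (nused);	/* number of pieces filled in */
}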