/* vmm_mem.c revision 295124 */
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/amd64/vmm/vmm_mem.c 295124 2016-02-01 14:56:11Z grehan $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/vmm_mem.c 295124 2016-02-01 14:56:11Z grehan $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sglist.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/md_var.h>

#include "vmm_mem.h"

/*
 * One-time initialization hook for the vmm memory subsystem.
 * Currently there is nothing to set up; always returns 0 (success).
 */
int
vmm_mem_init(void)
{

	return (0);
}

/*
 * Map the host physical address range [hpa, hpa + len) into the guest's
 * address space at guest physical address 'gpa', for pass-through MMIO.
 *
 * A single-segment sglist describing the host range is wrapped in an
 * OBJT_SG VM object, which is then inserted into the guest vmspace map
 * at exactly 'gpa' (VMFS_NO_SPACE: no address search) with read/write
 * protection.
 *
 * Returns the backing VM object on success, or NULL if the pager object
 * could not be allocated or the map insertion failed.  Panics if the
 * object's memory attribute cannot be set.
 */
vm_object_t
vmm_mmio_alloc(struct vmspace *vmspace, vm_paddr_t gpa, size_t len,
    vm_paddr_t hpa)
{
	int error;
	vm_object_t obj;
	struct sglist *sg;

	/*
	 * Single-entry scatter/gather list covering the host MMIO range.
	 * The append cannot fail here since the list has room for one
	 * segment, hence the KASSERT rather than runtime handling.
	 */
	sg = sglist_alloc(1, M_WAITOK);
	error = sglist_append_phys(sg, hpa, len);
	KASSERT(error == 0, ("error %d appending physaddr to sglist", error));

	obj = vm_pager_allocate(OBJT_SG, sg, len, VM_PROT_RW, 0, NULL);
	if (obj != NULL) {
		/*
		 * VT-x ignores the MTRR settings when figuring out the
		 * memory type for translations obtained through EPT.
		 *
		 * Therefore we explicitly force the pages provided by
		 * this object to be mapped as uncacheable.
		 */
		VM_OBJECT_WLOCK(obj);
		error = vm_object_set_memattr(obj, VM_MEMATTR_UNCACHEABLE);
		VM_OBJECT_WUNLOCK(obj);
		if (error != KERN_SUCCESS) {
			panic("vmm_mmio_alloc: vm_object_set_memattr error %d",
				error);
		}
		/* Insert at exactly 'gpa'; on failure drop our object ref. */
		error = vm_map_find(&vmspace->vm_map, obj, 0, &gpa, len, 0,
				    VMFS_NO_SPACE,
				    VM_PROT_RW, VM_PROT_RW, 0);
		if (error != KERN_SUCCESS) {
			vm_object_deallocate(obj);
			obj = NULL;
		}
	}

	/*
	 * Drop the reference on the sglist.
	 *
	 * If the scatter/gather object was successfully allocated then it
	 * has incremented the reference count on the sglist. Dropping the
	 * initial reference count ensures that the sglist will be freed
	 * when the object is deallocated.
	 *
	 * If the object could not be allocated then we end up freeing the
	 * sglist.
	 */
	sglist_free(sg);

	return (obj);
}

/*
 * Undo vmm_mmio_alloc(): remove the guest physical range
 * [gpa, gpa + len) from the guest vmspace map.  Removing the map entry
 * drops the reference on the backing VM object.
 */
void
vmm_mmio_free(struct vmspace *vmspace, vm_paddr_t gpa, size_t len)
{

	vm_map_remove(&vmspace->vm_map, gpa, gpa + len);
}

/*
 * Return the highest host physical address that may back guest memory,
 * i.e. the top of host physical memory (Maxmem pages, converted to a
 * byte address with ptoa()).
 */
vm_paddr_t
vmm_mem_maxaddr(void)
{

	return (ptoa(Maxmem));
}