vm_kern.c revision 316073
/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Kernel memory management.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/vm/vm_kern.c 316073 2017-03-28 06:07:59Z kib $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

vm_map_t kernel_map;
vm_map_t exec_map;
vm_map_t pipe_map;

const void *zero_region;
CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0);

/* NB: Used by kernel debuggers. */
const u_long vm_maxuser_address = VM_MAXUSER_ADDRESS;

SYSCTL_ULONG(_vm, OID_AUTO, min_kernel_address, CTLFLAG_RD,
    SYSCTL_NULL_ULONG_PTR, VM_MIN_KERNEL_ADDRESS, "Min kernel address");

SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
#if defined(__arm__) || defined(__sparc64__)
    &vm_max_kernel_address, 0,
#else
    SYSCTL_NULL_ULONG_PTR, VM_MAX_KERNEL_ADDRESS,
#endif
    "Max kernel address");

/*
 *	kva_alloc:
 *
 *	Allocate a virtual address range with no underlying object and
 *	no initial mapping to physical memory.  Any mapping from this
 *	range to physical memory must be explicitly created prior to
 *	its use, typically with pmap_qenter().  Any attempt to create
 *	a mapping on demand through vm_fault() will result in a panic.
 */
vm_offset_t
kva_alloc(size)
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);
	if (vmem_alloc(kernel_arena, size, M_BESTFIT | M_NOWAIT, &addr))
		return (0);

	return (addr);
}

/*
 *	kva_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kva_alloc.  This routine does not free any physical
 *	pages mapped into the region; the caller must remove such
 *	mappings, e.g. with pmap_qremove(), before calling it.
 *
 *	This routine may not block on kernel maps.
 */
void
kva_free(addr, size)
	vm_offset_t addr;
	vm_size_t size;
{

	size = round_page(size);
	vmem_free(kernel_arena, addr, size);
}
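
/*
 * Illustrative usage sketch, compiled out and not part of revision 316073.
 * The "example_*" names are hypothetical.  A typical consumer of kva_alloc()
 * supplies its own pages and maps them with pmap_qenter(), then undoes the
 * mapping with pmap_qremove() before releasing the virtual address range.
 */
#if 0
static vm_offset_t
example_map_pages(vm_page_t *ma, int npages)
{
	vm_offset_t va;

	va = kva_alloc(ptoa(npages));
	if (va == 0)
		return (0);
	pmap_qenter(va, ma, npages);	/* create the wired mappings */
	return (va);
}

static void
example_unmap_pages(vm_offset_t va, int npages)
{

	pmap_qremove(va, npages);	/* tear the mappings down first */
	kva_free(va, ptoa(npages));	/* then release the address range */
}
#endif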

/*
 * Allocates a region from the kernel address map and physical pages
 * within the specified address range to the kernel object.  Creates a
 * wired mapping from this region to these pages, and returns the
 * region's starting virtual address.  The allocated pages are not
 * necessarily physically contiguous.  If M_ZERO is specified through the
 * given flags, then the pages are zeroed before they are mapped.
 */
vm_offset_t
kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, vm_memattr_t memattr)
{
	vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
	vm_offset_t addr, i, offset;
	vm_page_t m;
	int pflags, tries;

	size = round_page(size);
	if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		tries = 0;
retry:
		m = vm_page_alloc_contig(object, atop(offset + i),
		    pflags, 1, low, high, PAGE_SIZE, 0, memattr);
		if (m == NULL) {
			VM_OBJECT_WUNLOCK(object);
			if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
				if (!vm_page_reclaim_contig(pflags, 1,
				    low, high, PAGE_SIZE, 0) &&
				    (flags & M_WAITOK) != 0)
					VM_WAIT;
				VM_OBJECT_WLOCK(object);
				tries++;
				goto retry;
			}
			kmem_unback(object, addr, i);
			vmem_free(vmem, addr, size);
			return (0);
		}
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
	}
	VM_OBJECT_WUNLOCK(object);
	return (addr);
}
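
/*
 * Illustrative usage sketch, compiled out and not part of revision 316073.
 * The buffer size, the physical address limit, and the "example_*" name are
 * assumptions of the example; a machine-dependent attribute such as
 * VM_MEMATTR_UNCACHEABLE could be passed instead of VM_MEMATTR_DEFAULT when
 * the mapping must not be cached.
 */
#if 0
static vm_offset_t
example_alloc_below_4g(void)
{

	/* 64KB of wired, zeroed kernel memory backed by pages below 4GB. */
	return (kmem_alloc_attr(kernel_arena, 64 * 1024, M_WAITOK | M_ZERO,
	    0, 0xffffffffUL, VM_MEMATTR_DEFAULT));
}
#endif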

/*
 * Allocates a region from the kernel address map and physically
 * contiguous pages within the specified address range to the kernel
 * object.  Creates a wired mapping from this region to these pages, and
 * returns the region's starting virtual address.  If M_ZERO is specified
 * through the given flags, then the pages are zeroed before they are
 * mapped.
 */
vm_offset_t
kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr)
{
	vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
	vm_offset_t addr, offset, tmp;
	vm_page_t end_m, m;
	u_long npages;
	int pflags, tries;

	size = round_page(size);
	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	npages = atop(size);
	VM_OBJECT_WLOCK(object);
	tries = 0;
retry:
	m = vm_page_alloc_contig(object, atop(offset), pflags,
	    npages, low, high, alignment, boundary, memattr);
	if (m == NULL) {
		VM_OBJECT_WUNLOCK(object);
		if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
			if (!vm_page_reclaim_contig(pflags, npages, low, high,
			    alignment, boundary) && (flags & M_WAITOK) != 0)
				VM_WAIT;
			VM_OBJECT_WLOCK(object);
			tries++;
			goto retry;
		}
		vmem_free(vmem, addr, size);
		return (0);
	}
	end_m = m + npages;
	tmp = addr;
	for (; m < end_m; m++) {
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, tmp, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
		tmp += PAGE_SIZE;
	}
	VM_OBJECT_WUNLOCK(object);
	return (addr);
}
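
/*
 * Illustrative usage sketch, compiled out and not part of revision 316073.
 * The constraints below (a two-page, page-aligned buffer that must not cross
 * a 64KB boundary and must sit below 4GB) are assumptions chosen to show how
 * the alignment and boundary arguments are used; the name is hypothetical.
 */
#if 0
static vm_offset_t
example_alloc_contig_buffer(void)
{

	return (kmem_alloc_contig(kernel_arena, 2 * PAGE_SIZE, M_WAITOK,
	    0, 0xffffffffUL, PAGE_SIZE, 64 * 1024, VM_MEMATTR_DEFAULT));
}
#endif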

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 *	superpage_align	Request that min is superpage aligned
 */
vm_map_t
kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
    vm_size_t size, boolean_t superpage_align)
{
	int ret;
	vm_map_t result;

	size = round_page(size);

	*min = vm_map_min(parent);
	ret = vm_map_find(parent, NULL, 0, min, size, 0, superpage_align ?
	    VMFS_SUPER_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
	    MAP_ACC_NO_CHARGE);
	if (ret != KERN_SUCCESS)
		panic("kmem_suballoc: bad status return of %d", ret);
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}

/*
 *	kmem_malloc:
 *
 *	Allocate wired-down pages in the kernel's address space.
 */
vm_offset_t
kmem_malloc(struct vmem *vmem, vm_size_t size, int flags)
{
	vm_offset_t addr;
	int rv;

	size = round_page(size);
	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
		return (0);

	rv = kmem_back((vmem == kmem_arena) ? kmem_object : kernel_object,
	    addr, size, flags);
	if (rv != KERN_SUCCESS) {
		vmem_free(vmem, addr, size);
		return (0);
	}
	return (addr);
}
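
/*
 * Illustrative usage sketch, compiled out and not part of revision 316073.
 * kmem_malloc() hands back wired, mapped kernel memory from the given arena
 * (it is the page-granularity backend behind consumers such as UMA); the
 * caller must pass the same arena and size to kmem_free(), defined below,
 * when releasing it.  The "example_*" name is hypothetical.
 */
#if 0
static void
example_kmem_roundtrip(void)
{
	vm_offset_t addr;

	addr = kmem_malloc(kernel_arena, 8 * PAGE_SIZE, M_WAITOK | M_ZERO);
	if (addr == 0)
		return;
	/* ... use the eight pages at "addr" ... */
	kmem_free(kernel_arena, addr, 8 * PAGE_SIZE);
}
#endif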

/*
 *	kmem_back:
 *
 *	Allocate physical pages for the specified virtual address range.
 */
int
kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
{
	vm_offset_t offset, i;
	vm_page_t m;
	int pflags;

	KASSERT(object == kmem_object || object == kernel_object,
	    ("kmem_back: only supports kernel objects."));

	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;

	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
retry:
		m = vm_page_alloc(object, atop(offset + i), pflags);

		/*
		 * Ran out of space, free everything up and return. Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			VM_OBJECT_WUNLOCK(object);
			if ((flags & M_NOWAIT) == 0) {
				VM_WAIT;
				VM_OBJECT_WLOCK(object);
				goto retry;
			}
			kmem_unback(object, addr, i);
			return (KERN_NO_SPACE);
		}
		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
		    ("kmem_malloc: page %p is managed", m));
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
	}
	VM_OBJECT_WUNLOCK(object);

	return (KERN_SUCCESS);
}

/*
 *	kmem_unback:
 *
 *	Unmap and free the physical pages underlying the specified virtual
 *	address range.
 *
 *	A physical page must exist within the specified object at each index
 *	that is being unmapped.
 */
void
kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
{
	vm_page_t m;
	vm_offset_t i, offset;

	KASSERT(object == kmem_object || object == kernel_object,
	    ("kmem_unback: only supports kernel objects."));

	pmap_remove(kernel_pmap, addr, addr + size);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(object, atop(offset + i));
		vm_page_unwire(m, PQ_NONE);
		vm_page_free(m);
	}
	VM_OBJECT_WUNLOCK(object);
}

/*
 *	kmem_free:
 *
 *	Free memory allocated with kmem_malloc.  The size must match the
 *	original allocation.
 */
void
kmem_free(struct vmem *vmem, vm_offset_t addr, vm_size_t size)
{

	size = round_page(size);
	kmem_unback((vmem == kmem_arena) ? kmem_object : kernel_object,
	    addr, size);
	vmem_free(vmem, addr, size);
}

/*
 *	kmap_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmap_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);
	if (!swap_reserve(size))
		return (0);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			swap_release(size);
			return (0);
		}
		map->needs_wakeup = TRUE;
		vm_map_unlock_and_wait(map, 0);
	}
	vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL,
	    VM_PROT_ALL, MAP_ACC_CHARGED);
	vm_map_unlock(map);
	return (addr);
}
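
/*
 * Illustrative usage sketch, compiled out and not part of revision 316073.
 * kmap_alloc_wait() is intended for pageable submaps created with
 * kmem_suballoc(), such as exec_map; the allocation can sleep until another
 * thread returns space with kmap_free_wakeup(), defined below.  The
 * "example_*" names are hypothetical.
 */
#if 0
static vm_offset_t
example_submap_alloc(vm_size_t size)
{

	return (kmap_alloc_wait(exec_map, size));	/* may sleep */
}

static void
example_submap_free(vm_offset_t addr, vm_size_t size)
{

	kmap_free_wakeup(exec_map, addr, size);		/* wakes any sleepers */
}
#endif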

/*
 *	kmap_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmap_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{

	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	if (map->needs_wakeup) {
		map->needs_wakeup = FALSE;
		vm_map_wakeup(map);
	}
	vm_map_unlock(map);
}

void
kmem_init_zero_region(void)
{
	vm_offset_t addr, i;
	vm_page_t m;

	/*
	 * Map a single physical page of zeros to a larger virtual range.
	 * This requires less looping in places that want large amounts of
	 * zeros, while not using much more physical resources.
	 */
	addr = kva_alloc(ZERO_REGION_SIZE);
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);
	for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE)
		pmap_qenter(addr + i, &m, 1);
	pmap_protect(kernel_pmap, addr, addr + ZERO_REGION_SIZE, VM_PROT_READ);

	zero_region = (const void *)addr;
}

/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(start, end)
	vm_offset_t start, end;
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	m->system_map = 1;
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	(void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
#ifdef __amd64__
	    KERNBASE,
#else
	    VM_MIN_KERNEL_ADDRESS,
#endif
	    start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
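
/*
 * Illustrative usage sketch, compiled out and not part of revision 316073.
 * It mirrors how a /dev/zero-style read can consume zero_region, set up by
 * kmem_init_zero_region() above: the whole region is backed by one physical
 * page, so large zero-fills need no allocation.  <sys/uio.h> would be needed
 * for uiomove(); the function name is hypothetical.
 */
#if 0
static int
example_zero_read(struct uio *uio)
{
	ssize_t len;
	int error;

	error = 0;
	while (uio->uio_resid > 0 && error == 0) {
		len = uio->uio_resid;
		if (len > ZERO_REGION_SIZE)
			len = ZERO_REGION_SIZE;
		error = uiomove(__DECONST(void *, zero_region), len, uio);
	}
	return (error);
}
#endif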

#ifdef DIAGNOSTIC
/*
 * Allow userspace to directly trigger the VM drain routine for testing
 * purposes.
 */
static int
debug_vm_lowmem(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = 0;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error)
		return (error);
	if ((i & ~(VM_LOW_KMEM | VM_LOW_PAGES)) != 0)
		return (EINVAL);
	if (i != 0)
		EVENTHANDLER_INVOKE(vm_lowmem, i);
	return (0);
}

SYSCTL_PROC(_debug, OID_AUTO, vm_lowmem, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
    debug_vm_lowmem, "I", "set to trigger vm_lowmem event with given flags");
#endif
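
/*
 * Illustrative usage sketch, compiled out and not part of revision 316073.
 * On a DIAGNOSTIC kernel the handler above can be driven from userland with
 * "sysctl debug.vm_lowmem=<flags>", where <flags> is a mask of VM_LOW_KMEM
 * and VM_LOW_PAGES.  A subsystem that wants to shed caches under memory
 * pressure registers for the same event; the "example_*" names are
 * hypothetical.
 */
#if 0
static void
example_lowmem_handler(void *arg __unused, int flags)
{

	if ((flags & VM_LOW_PAGES) != 0) {
		/* Release cached pages held by this subsystem. */
	}
}

static void
example_register_lowmem(void)
{

	EVENTHANDLER_REGISTER(vm_lowmem, example_lowmem_handler, NULL,
	    EVENTHANDLER_PRI_FIRST);
}
#endif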