vm_kern.c revision 269728
/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Kernel memory management.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_kern.c 269728 2014-08-08 17:12:03Z kib $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

vm_map_t kernel_map;
vm_map_t exec_map;
vm_map_t pipe_map;

const void *zero_region;
CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0);

SYSCTL_ULONG(_vm, OID_AUTO, min_kernel_address, CTLFLAG_RD,
    NULL, VM_MIN_KERNEL_ADDRESS, "Min kernel address");

SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
#if defined(__arm__) || defined(__sparc64__)
    &vm_max_kernel_address, 0,
#else
    NULL, VM_MAX_KERNEL_ADDRESS,
#endif
    "Max kernel address");

/*
 *	kva_alloc:
 *
 *	Allocate a virtual address range with no underlying object and
 *	no initial mapping to physical memory.  Any mapping from this
 *	range to physical memory must be explicitly created prior to
 *	its use, typically with pmap_qenter().  Any attempt to create
 *	a mapping on demand through vm_fault() will result in a panic.
 */
vm_offset_t
kva_alloc(size)
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);
	if (vmem_alloc(kernel_arena, size, M_BESTFIT | M_NOWAIT, &addr))
		return (0);

	return (addr);
}

/*
 *	kva_free:
 *
 *	Release a region of kernel virtual memory allocated with
 *	kva_alloc.  Only the virtual address range is released; any
 *	physical pages still mapped into the region must be unmapped
 *	and freed by the caller beforehand.
 *
 *	This routine may not block.
 */
void
kva_free(addr, size)
	vm_offset_t addr;
	vm_size_t size;
{

	size = round_page(size);
	vmem_free(kernel_arena, addr, size);
}
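/*
 * Usage sketch (editor's addition, not part of the original file): a
 * typical kva_alloc() consumer reserves a KVA range, explicitly maps
 * pages it already holds with pmap_qenter(), and tears the mapping
 * down with pmap_qremove() before releasing the range.  The function
 * names and the page array are hypothetical.
 */
#if 0
static vm_offset_t
example_map_pages(vm_page_t *ma, int npages)
{
	vm_offset_t addr;

	addr = kva_alloc(ptoa(npages));
	if (addr == 0)
		return (0);
	/* Create the mapping explicitly; a vm_fault() here would panic. */
	pmap_qenter(addr, ma, npages);
	return (addr);
}

static void
example_unmap_pages(vm_offset_t addr, int npages)
{

	/* Remove the mapping before the KVA range can be reused. */
	pmap_qremove(addr, npages);
	kva_free(addr, ptoa(npages));
}
#endif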
/*
 *	Allocates a region from the kernel address map and physical pages
 *	within the specified address range to the kernel object.  Creates a
 *	wired mapping from this region to these pages, and returns the
 *	region's starting virtual address.  The allocated pages are not
 *	necessarily physically contiguous.  If M_ZERO is specified through the
 *	given flags, then the pages are zeroed before they are mapped.
 */
vm_offset_t
kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, vm_memattr_t memattr)
{
	vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
	vm_offset_t addr;
	vm_ooffset_t offset;
	vm_page_t m;
	int pflags, tries;
	int i;

	size = round_page(size);
	if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		tries = 0;
retry:
		m = vm_page_alloc_contig(object, OFF_TO_IDX(offset + i),
		    pflags, 1, low, high, PAGE_SIZE, 0, memattr);
		if (m == NULL) {
			VM_OBJECT_WUNLOCK(object);
			if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
				vm_pageout_grow_cache(tries, low, high);
				VM_OBJECT_WLOCK(object);
				tries++;
				goto retry;
			}
			/*
			 * Unmap and free the pages.
			 */
			if (i != 0)
				pmap_remove(kernel_pmap, addr, addr + i);
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(object,
				    OFF_TO_IDX(offset + i));
				vm_page_unwire(m, PQ_INACTIVE);
				vm_page_free(m);
			}
			vmem_free(vmem, addr, size);
			return (0);
		}
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
	}
	VM_OBJECT_WUNLOCK(object);
	return (addr);
}
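/*
 * Usage sketch (editor's addition, not part of the original file):
 * allocate a zeroed, wired buffer whose pages all lie below 4GB, as a
 * hypothetical driver for a 32-bit DMA engine might.  The pages need
 * not be physically contiguous; release with kmem_free() on the same
 * arena and size.
 */
#if 0
static vm_offset_t
example_alloc_below_4g(vm_size_t size)
{

	return (kmem_alloc_attr(kernel_arena, size, M_WAITOK | M_ZERO,
	    0, (vm_paddr_t)0xffffffff, VM_MEMATTR_DEFAULT));
}
#endif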
/*
 *	Allocates a region from the kernel address map and physically
 *	contiguous pages within the specified address range to the kernel
 *	object.  Creates a wired mapping from this region to these pages, and
 *	returns the region's starting virtual address.  If M_ZERO is specified
 *	through the given flags, then the pages are zeroed before they are
 *	mapped.
 */
vm_offset_t
kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr)
{
	vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
	vm_offset_t addr, tmp;
	vm_ooffset_t offset;
	vm_page_t end_m, m;
	int pflags, tries;

	size = round_page(size);
	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	VM_OBJECT_WLOCK(object);
	tries = 0;
retry:
	m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags,
	    atop(size), low, high, alignment, boundary, memattr);
	if (m == NULL) {
		VM_OBJECT_WUNLOCK(object);
		if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
			vm_pageout_grow_cache(tries, low, high);
			VM_OBJECT_WLOCK(object);
			tries++;
			goto retry;
		}
		vmem_free(vmem, addr, size);
		return (0);
	}
	end_m = m + atop(size);
	tmp = addr;
	for (; m < end_m; m++) {
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, tmp, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
		tmp += PAGE_SIZE;
	}
	VM_OBJECT_WUNLOCK(object);
	return (addr);
}
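/*
 * Usage sketch (editor's addition, not part of the original file):
 * allocate a physically contiguous, 64KB-aligned descriptor ring that
 * does not cross a 4GB boundary, a common device DMA constraint.  The
 * parameter values are illustrative and assume a 64-bit vm_paddr_t.
 */
#if 0
static vm_offset_t
example_alloc_ring(void)
{

	return (kmem_alloc_contig(kernel_arena, 64 * 1024, M_WAITOK | M_ZERO,
	    0, ~(vm_paddr_t)0, 64 * 1024, (vm_paddr_t)1 << 32,
	    VM_MEMATTR_DEFAULT));
}
#endif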
/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 *	superpage_align	Request that min is superpage aligned
 */
vm_map_t
kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
    vm_size_t size, boolean_t superpage_align)
{
	int ret;
	vm_map_t result;

	size = round_page(size);

	*min = vm_map_min(parent);
	ret = vm_map_find(parent, NULL, 0, min, size, 0, superpage_align ?
	    VMFS_SUPER_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
	    MAP_ACC_NO_CHARGE);
	if (ret != KERN_SUCCESS)
		panic("kmem_suballoc: bad status return of %d", ret);
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}

/*
 *	kmem_malloc:
 *
 *	Allocate wired-down pages in the kernel's address space.
 */
vm_offset_t
kmem_malloc(struct vmem *vmem, vm_size_t size, int flags)
{
	vm_offset_t addr;
	int rv;

	size = round_page(size);
	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
		return (0);

	rv = kmem_back((vmem == kmem_arena) ? kmem_object : kernel_object,
	    addr, size, flags);
	if (rv != KERN_SUCCESS) {
		vmem_free(vmem, addr, size);
		return (0);
	}
	return (addr);
}
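/*
 * Usage sketch (editor's addition, not part of the original file):
 * kmem_malloc() and kmem_free() must be paired on the same arena and
 * the same (rounded) size.  These wrapper names are hypothetical.
 */
#if 0
static void *
example_wired_alloc(vm_size_t size)
{

	/* With M_NOWAIT this can fail and return NULL. */
	return ((void *)kmem_malloc(kmem_arena, size, M_NOWAIT | M_ZERO));
}

static void
example_wired_free(void *p, vm_size_t size)
{

	kmem_free(kmem_arena, (vm_offset_t)p, size);
}
#endif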
/*
 *	kmem_back:
 *
 *	Allocate physical pages for the specified virtual address range.
 */
int
kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
{
	vm_offset_t offset, i;
	vm_page_t m;
	int pflags;

	KASSERT(object == kmem_object || object == kernel_object,
	    ("kmem_back: only supports kernel objects."));

	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;

	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
retry:
		m = vm_page_alloc(object, OFF_TO_IDX(offset + i), pflags);

		/*
		 * Ran out of space, free everything up and return.  Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0) {
				VM_OBJECT_WUNLOCK(object);
				VM_WAIT;
				VM_OBJECT_WLOCK(object);
				goto retry;
			}
			/*
			 * Unmap and free the pages.
			 */
			if (i != 0)
				pmap_remove(kernel_pmap, addr, addr + i);
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(object,
				    OFF_TO_IDX(offset + i));
				vm_page_unwire(m, PQ_INACTIVE);
				vm_page_free(m);
			}
			VM_OBJECT_WUNLOCK(object);
			return (KERN_NO_SPACE);
		}
		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
		    ("kmem_back: page %p is managed", m));
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
	}
	VM_OBJECT_WUNLOCK(object);

	return (KERN_SUCCESS);
}

/*
 *	kmem_unback:
 *
 *	Unmap and free the physical pages backing the specified virtual
 *	address range.
 */
void
kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
{
	vm_page_t m;
	vm_offset_t offset;
	int i;

	KASSERT(object == kmem_object || object == kernel_object,
	    ("kmem_unback: only supports kernel objects."));

	pmap_remove(kernel_pmap, addr, addr + size);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(object, OFF_TO_IDX(offset + i));
		vm_page_unwire(m, PQ_INACTIVE);
		vm_page_free(m);
	}
	VM_OBJECT_WUNLOCK(object);
}

/*
 *	kmem_free:
 *
 *	Free memory allocated with kmem_malloc.  The size must match the
 *	original allocation.
 */
void
kmem_free(struct vmem *vmem, vm_offset_t addr, vm_size_t size)
{

	size = round_page(size);
	kmem_unback((vmem == kmem_arena) ? kmem_object : kernel_object,
	    addr, size);
	vmem_free(vmem, addr, size);
}
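/*
 * Usage sketch (editor's addition, not part of the original file):
 * kmem_back()/kmem_unback() can also be driven directly against a KVA
 * range from kva_alloc(), which is essentially what kmem_malloc() does
 * above.  This wrapper is hypothetical.
 */
#if 0
static vm_offset_t
example_backed_range(vm_size_t size)
{
	vm_offset_t addr;

	size = round_page(size);
	addr = kva_alloc(size);
	if (addr == 0)
		return (0);
	if (kmem_back(kernel_object, addr, size, M_NOWAIT) != KERN_SUCCESS) {
		kva_free(addr, size);
		return (0);
	}
	/* Teardown: kmem_unback(kernel_object, addr, size) + kva_free(). */
	return (addr);
}
#endif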
/*
 *	kmap_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmap_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);
	if (!swap_reserve(size))
		return (0);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			swap_release(size);
			return (0);
		}
		map->needs_wakeup = TRUE;
		vm_map_unlock_and_wait(map, 0);
	}
	vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL,
	    VM_PROT_ALL, MAP_ACC_CHARGED);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmap_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmap_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{

	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	if (map->needs_wakeup) {
		map->needs_wakeup = FALSE;
		vm_map_wakeup(map);
	}
	vm_map_unlock(map);
}

void
kmem_init_zero_region(void)
{
	vm_offset_t addr, i;
	vm_page_t m;

	/*
	 * Map a single physical page of zeros to a larger virtual range.
	 * This requires less looping in places that want large amounts of
	 * zeros, while not using much more physical resources.
	 */
	addr = kva_alloc(ZERO_REGION_SIZE);
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);
	for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE)
		pmap_qenter(addr + i, &m, 1);
	pmap_protect(kernel_pmap, addr, addr + ZERO_REGION_SIZE, VM_PROT_READ);

	zero_region = (const void *)addr;
}
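/*
 * Usage sketch (editor's addition, not part of the original file):
 * consumers read zeros from zero_region in ZERO_REGION_SIZE chunks
 * rather than keeping private zeroed buffers; /dev/zero works this
 * way.  Assumes <sys/uio.h>; the function name is hypothetical.
 */
#if 0
static int
example_copyout_zeros(struct uio *uio, size_t n)
{
	size_t chunk;
	int error;

	error = 0;
	while (n > 0 && error == 0) {
		chunk = MIN(n, ZERO_REGION_SIZE);
		error = uiomove(__DECONST(void *, zero_region), chunk, uio);
		n -= chunk;
	}
	return (error);
}
#endif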
/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(start, end)
	vm_offset_t start, end;
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	m->system_map = 1;
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	(void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
#ifdef __amd64__
	    KERNBASE,
#else
	    VM_MIN_KERNEL_ADDRESS,
#endif
	    start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}

#ifdef DIAGNOSTIC
/*
 * Allow userspace to directly trigger the VM drain routine for testing
 * purposes.
 */
static int
debug_vm_lowmem(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = 0;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error)
		return (error);
	if (i)
		EVENTHANDLER_INVOKE(vm_lowmem, 0);
	return (0);
}

SYSCTL_PROC(_debug, OID_AUTO, vm_lowmem, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
    debug_vm_lowmem, "I", "set to trigger vm_lowmem event");
#endif
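/*
 * Editor's note (not part of the original file): on a kernel built
 * with DIAGNOSTIC, the vm_lowmem event can be triggered from
 * userspace for testing with:
 *
 *	# sysctl debug.vm_lowmem=1
 */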