/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Kernel memory management.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/vm/vm_kern.c 327701 2018-01-08 16:36:33Z markj $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_radix.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

vm_map_t kernel_map;
vm_map_t exec_map;
vm_map_t pipe_map;

const void *zero_region;
CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0);

/* NB: Used by kernel debuggers. */
const u_long vm_maxuser_address = VM_MAXUSER_ADDRESS;

u_int exec_map_entry_size;
u_int exec_map_entries;

SYSCTL_ULONG(_vm, OID_AUTO, min_kernel_address, CTLFLAG_RD,
    SYSCTL_NULL_ULONG_PTR, VM_MIN_KERNEL_ADDRESS, "Min kernel address");

SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
#if defined(__arm__) || defined(__sparc64__)
    &vm_max_kernel_address, 0,
#else
    SYSCTL_NULL_ULONG_PTR, VM_MAX_KERNEL_ADDRESS,
#endif
    "Max kernel address");

/*
 *	kva_alloc:
 *
 *	Allocate a virtual address range with no underlying object and
 *	no initial mapping to physical memory.  Any mapping from this
 *	range to physical memory must be explicitly created prior to
 *	its use, typically with pmap_qenter().  Any attempt to create
 *	a mapping on demand through vm_fault() will result in a panic.
 */
vm_offset_t
kva_alloc(vm_size_t size)
{
        vm_offset_t addr;

        size = round_page(size);
        if (vmem_alloc(kernel_arena, size, M_BESTFIT | M_NOWAIT, &addr))
                return (0);

        return (addr);
}

/*
 *	kva_free:
 *
 *	Release a region of kernel virtual memory allocated with
 *	kva_alloc.  No physical pages are freed; any mapping the caller
 *	created over the region must be removed separately (e.g. with
 *	pmap_qremove()).
 *
 *	This routine may not block.
 */
void
kva_free(vm_offset_t addr, vm_size_t size)
{

        size = round_page(size);
        vmem_free(kernel_arena, addr, size);
}
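/*
 * Example (sketch): the typical kva_alloc()/kva_free() pairing.  The caller
 * supplies its own physical pages and creates the mapping explicitly, as the
 * comment above requires; "pages" and "npages" are hypothetical and assumed
 * to describe an array of wired pages.
 *
 *      vm_offset_t va;
 *
 *      va = kva_alloc(npages * PAGE_SIZE);
 *      if (va == 0)
 *              return (ENOMEM);
 *      pmap_qenter(va, pages, npages);
 *      ...use the buffer at va...
 *      pmap_qremove(va, npages);
 *      kva_free(va, npages * PAGE_SIZE);
 */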
/*
 *	Allocates a region from the kernel address map and physical pages
 *	within the specified address range to the kernel object.  Creates
 *	a wired mapping from this region to these pages, and returns the
 *	region's starting virtual address.  The allocated pages are not
 *	necessarily physically contiguous.  If M_ZERO is specified through
 *	the given flags, then the pages are zeroed before they are mapped.
 */
vm_offset_t
kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, vm_memattr_t memattr)
{
        vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
        vm_offset_t addr, i, offset;
        vm_page_t m;
        int pflags, tries;

        size = round_page(size);
        if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr))
                return (0);
        offset = addr - VM_MIN_KERNEL_ADDRESS;
        pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
        VM_OBJECT_WLOCK(object);
        for (i = 0; i < size; i += PAGE_SIZE) {
                tries = 0;
retry:
                m = vm_page_alloc_contig(object, atop(offset + i),
                    pflags, 1, low, high, PAGE_SIZE, 0, memattr);
                if (m == NULL) {
                        VM_OBJECT_WUNLOCK(object);
                        if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
                                if (!vm_page_reclaim_contig(pflags, 1,
                                    low, high, PAGE_SIZE, 0) &&
                                    (flags & M_WAITOK) != 0)
                                        VM_WAIT;
                                VM_OBJECT_WLOCK(object);
                                tries++;
                                goto retry;
                        }
                        kmem_unback(object, addr, i);
                        vmem_free(vmem, addr, size);
                        return (0);
                }
                if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
                        pmap_zero_page(m);
                m->valid = VM_PAGE_BITS_ALL;
                pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
                    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
        }
        VM_OBJECT_WUNLOCK(object);
        return (addr);
}
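/*
 * Example (sketch): a wired, zeroed allocation whose physical pages all fall
 * below 4GB, as a device limited to 32-bit addressing might need; "buf" and
 * "len" are hypothetical.  The pages need not be contiguous; for that, see
 * kmem_alloc_contig() below.
 *
 *      buf = (void *)kmem_alloc_attr(kmem_arena, len, M_WAITOK | M_ZERO,
 *          0, 0xffffffff, VM_MEMATTR_DEFAULT);
 */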
/*
 *	Allocates a region from the kernel address map and physically
 *	contiguous pages within the specified address range to the kernel
 *	object.  Creates a wired mapping from this region to these pages,
 *	and returns the region's starting virtual address.  If M_ZERO is
 *	specified through the given flags, then the pages are zeroed before
 *	they are mapped.
 */
vm_offset_t
kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr)
{
        vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
        vm_offset_t addr, offset, tmp;
        vm_page_t end_m, m;
        u_long npages;
        int pflags, tries;

        size = round_page(size);
        if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
                return (0);
        offset = addr - VM_MIN_KERNEL_ADDRESS;
        pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
        npages = atop(size);
        VM_OBJECT_WLOCK(object);
        tries = 0;
retry:
        m = vm_page_alloc_contig(object, atop(offset), pflags,
            npages, low, high, alignment, boundary, memattr);
        if (m == NULL) {
                VM_OBJECT_WUNLOCK(object);
                if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
                        if (!vm_page_reclaim_contig(pflags, npages, low, high,
                            alignment, boundary) && (flags & M_WAITOK) != 0)
                                VM_WAIT;
                        VM_OBJECT_WLOCK(object);
                        tries++;
                        goto retry;
                }
                vmem_free(vmem, addr, size);
                return (0);
        }
        end_m = m + npages;
        tmp = addr;
        for (; m < end_m; m++) {
                if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
                        pmap_zero_page(m);
                m->valid = VM_PAGE_BITS_ALL;
                pmap_enter(kernel_pmap, tmp, m, VM_PROT_ALL,
                    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
                tmp += PAGE_SIZE;
        }
        VM_OBJECT_WUNLOCK(object);
        return (addr);
}
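/*
 * Example (sketch): a physically contiguous 64KB buffer, page-aligned and
 * not crossing a 1MB physical boundary; the constraints are hypothetical,
 * chosen to exercise each parameter.
 *
 *      va = kmem_alloc_contig(kmem_arena, 64 * 1024, M_WAITOK, 0,
 *          ~(vm_paddr_t)0, PAGE_SIZE, 1024 * 1024, VM_MEMATTR_DEFAULT);
 *      if (va == 0)
 *              return (ENOMEM);
 */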
/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 *	superpage_align	Request that min is superpage aligned
 */
vm_map_t
kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
    vm_size_t size, boolean_t superpage_align)
{
        int ret;
        vm_map_t result;

        size = round_page(size);

        *min = vm_map_min(parent);
        ret = vm_map_find(parent, NULL, 0, min, size, 0, superpage_align ?
            VMFS_SUPER_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
            MAP_ACC_NO_CHARGE);
        if (ret != KERN_SUCCESS)
                panic("kmem_suballoc: bad status return of %d", ret);
        *max = *min + size;
        result = vm_map_create(vm_map_pmap(parent), *min, *max);
        if (result == NULL)
                panic("kmem_suballoc: cannot create submap");
        if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
                panic("kmem_suballoc: unable to change range to submap");
        return (result);
}
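/*
 * Example (sketch): submaps such as exec_map and pipe_map (declared above)
 * are carved out of kernel_map at boot roughly like this; "minaddr",
 * "maxaddr", and "sz" are hypothetical locals.
 *
 *      pipe_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, sz, FALSE);
 */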
/*
 *	kmem_malloc:
 *
 *	Allocate wired-down pages in the kernel's address space.
 */
vm_offset_t
kmem_malloc(struct vmem *vmem, vm_size_t size, int flags)
{
        vm_offset_t addr;
        int rv;

        size = round_page(size);
        if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
                return (0);

        rv = kmem_back((vmem == kmem_arena) ? kmem_object : kernel_object,
            addr, size, flags);
        if (rv != KERN_SUCCESS) {
                vmem_free(vmem, addr, size);
                return (0);
        }
        return (addr);
}

/*
 *	kmem_back:
 *
 *	Allocate physical pages for the specified virtual address range.
 */
int
kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
{
        vm_offset_t offset, i;
        vm_page_t m, mpred;
        int pflags;

        KASSERT(object == kmem_object || object == kernel_object,
            ("kmem_back: only supports kernel objects."));

        offset = addr - VM_MIN_KERNEL_ADDRESS;
        pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;

        i = 0;
retry:
        VM_OBJECT_WLOCK(object);
        mpred = vm_radix_lookup_le(&object->rtree, atop(offset + i));
        for (; i < size; i += PAGE_SIZE, mpred = m) {
                m = vm_page_alloc_after(object, atop(offset + i), pflags,
                    mpred);

                /*
                 * Ran out of space, free everything up and return.  Don't
                 * need to lock page queues here as we know that the pages we
                 * got aren't on any queues.
                 */
                if (m == NULL) {
                        VM_OBJECT_WUNLOCK(object);
                        if ((flags & M_NOWAIT) == 0) {
                                VM_WAIT;
                                goto retry;
                        }
                        kmem_unback(object, addr, i);
                        return (KERN_NO_SPACE);
                }
                if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
                        pmap_zero_page(m);
                KASSERT((m->oflags & VPO_UNMANAGED) != 0,
                    ("kmem_back: page %p is managed", m));
                m->valid = VM_PAGE_BITS_ALL;
                pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
                    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
        }
        VM_OBJECT_WUNLOCK(object);

        return (KERN_SUCCESS);
}

/*
 *	kmem_unback:
 *
 *	Unmap and free the physical pages underlying the specified virtual
 *	address range.
 *
 *	A physical page must exist within the specified object at each index
 *	that is being unmapped.
 */
void
kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
{
        vm_page_t m, next;
        vm_offset_t end, offset;

        KASSERT(object == kmem_object || object == kernel_object,
            ("kmem_unback: only supports kernel objects."));

        pmap_remove(kernel_pmap, addr, addr + size);
        offset = addr - VM_MIN_KERNEL_ADDRESS;
        end = offset + size;
        VM_OBJECT_WLOCK(object);
        for (m = vm_page_lookup(object, atop(offset)); offset < end;
            offset += PAGE_SIZE, m = next) {
                next = vm_page_next(m);
                vm_page_unwire(m, PQ_NONE);
                vm_page_free(m);
        }
        VM_OBJECT_WUNLOCK(object);
}

/*
 *	kmem_free:
 *
 *	Free memory allocated with kmem_malloc.  The size must match the
 *	original allocation.
 */
void
kmem_free(struct vmem *vmem, vm_offset_t addr, vm_size_t size)
{

        size = round_page(size);
        kmem_unback((vmem == kmem_arena) ? kmem_object : kernel_object,
            addr, size);
        vmem_free(vmem, addr, size);
}
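/*
 * Example (sketch): the kmem_malloc()/kmem_free() pairing as a back-end
 * allocator might use it, e.g. to obtain pages for malloc(9) or UMA.  The
 * caller must record the size itself, since kmem_free() requires the
 * original allocation size.
 *
 *      va = kmem_malloc(kmem_arena, size, M_WAITOK);
 *      if (va == 0)
 *              return (NULL);
 *      ...
 *      kmem_free(kmem_arena, va, size);
 */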
/*
 *	kmap_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the
 *	submap has no room, the caller sleeps waiting for more memory in the
 *	submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmap_alloc_wait(vm_map_t map, vm_size_t size)
{
        vm_offset_t addr;

        size = round_page(size);
        if (!swap_reserve(size))
                return (0);

        for (;;) {
                /*
                 * To make this work for more than one map, use the map's
                 * lock to lock out sleepers/wakers.
                 */
                vm_map_lock(map);
                if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
                        break;
                /* no space now; see if we can ever get space */
                if (vm_map_max(map) - vm_map_min(map) < size) {
                        vm_map_unlock(map);
                        swap_release(size);
                        return (0);
                }
                map->needs_wakeup = TRUE;
                vm_map_unlock_and_wait(map, 0);
        }
        vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL,
            VM_PROT_ALL, MAP_ACC_CHARGED);
        vm_map_unlock(map);
        return (addr);
}

/*
 *	kmap_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmap_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{

        vm_map_lock(map);
        (void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
        if (map->needs_wakeup) {
                map->needs_wakeup = FALSE;
                vm_map_wakeup(map);
        }
        vm_map_unlock(map);
}

void
kmem_init_zero_region(void)
{
        vm_offset_t addr, i;
        vm_page_t m;

        /*
         * Map a single physical page of zeros to a larger virtual range.
         * This requires less looping in places that want large amounts of
         * zeros, while not using much more physical resources.
         */
        addr = kva_alloc(ZERO_REGION_SIZE);
        m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
            VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
        if ((m->flags & PG_ZERO) == 0)
                pmap_zero_page(m);
        for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE)
                pmap_qenter(addr + i, &m, 1);
        pmap_protect(kernel_pmap, addr, addr + ZERO_REGION_SIZE, VM_PROT_READ);

        zero_region = (const void *)addr;
}
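/*
 * Example (sketch): zero_region spares callers a zero-fill loop.  Code that
 * must deliver "len" zero bytes to userspace can copy out of it in
 * ZERO_REGION_SIZE chunks, roughly as the /dev/zero driver does; "len",
 * "chunk", "error", and "uio" are hypothetical.
 *
 *      while (len > 0 && error == 0) {
 *              chunk = MIN(len, ZERO_REGION_SIZE);
 *              error = uiomove(__DECONST(void *, zero_region), chunk, uio);
 *              len -= chunk;
 *      }
 */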
/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as
 *	free.
 */
void
kmem_init(vm_offset_t start, vm_offset_t end)
{
        vm_map_t m;

        m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
        m->system_map = 1;
        vm_map_lock(m);
        /* N.B.: cannot use kgdb to debug, starting with this assignment ... */
        kernel_map = m;
        (void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
#ifdef __amd64__
            KERNBASE,
#else
            VM_MIN_KERNEL_ADDRESS,
#endif
            start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
        /* ... and ending with the completion of the above `insert' */
        vm_map_unlock(m);
}

#ifdef DIAGNOSTIC
/*
 * Allow userspace to directly trigger the VM drain routine for testing
 * purposes.
 */
static int
debug_vm_lowmem(SYSCTL_HANDLER_ARGS)
{
        int error, i;

        i = 0;
        error = sysctl_handle_int(oidp, &i, 0, req);
        if (error)
                return (error);
        if ((i & ~(VM_LOW_KMEM | VM_LOW_PAGES)) != 0)
                return (EINVAL);
        if (i != 0)
                EVENTHANDLER_INVOKE(vm_lowmem, i);
        return (0);
}

SYSCTL_PROC(_debug, OID_AUTO, vm_lowmem, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
    debug_vm_lowmem, "I", "set to trigger vm_lowmem event with given flags");
#endif
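/*
 * Example (sketch): on a kernel built with "options DIAGNOSTIC", the event
 * can be fired from a root shell with a flag mask, e.g.:
 *
 *      sysctl debug.vm_lowmem=1
 *
 * where the value is a combination of the VM_LOW_KMEM and VM_LOW_PAGES
 * flags accepted above.
 */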