vm_kern.c revision 288281
/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Kernel memory management.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_kern.c 288281 2015-09-26 22:57:10Z alc $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

vm_map_t kernel_map;
vm_map_t exec_map;
vm_map_t pipe_map;

const void *zero_region;
CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0);

SYSCTL_ULONG(_vm, OID_AUTO, min_kernel_address, CTLFLAG_RD,
    SYSCTL_NULL_ULONG_PTR, VM_MIN_KERNEL_ADDRESS, "Min kernel address");

SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
#if defined(__arm__) || defined(__sparc64__)
    &vm_max_kernel_address, 0,
#else
    SYSCTL_NULL_ULONG_PTR, VM_MAX_KERNEL_ADDRESS,
#endif
    "Max kernel address");

/*
 *	kva_alloc:
 *
 *	Allocate a virtual address range with no underlying object and
 *	no initial mapping to physical memory.  Any mapping from this
 *	range to physical memory must be explicitly created prior to
 *	its use, typically with pmap_qenter().  Any attempt to create
 *	a mapping on demand through vm_fault() will result in a panic.
 */
vm_offset_t
kva_alloc(size)
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);
	if (vmem_alloc(kernel_arena, size, M_BESTFIT | M_NOWAIT, &addr))
		return (0);

	return (addr);
}

/*
 *	kva_free:
 *
 *	Release a region of kernel virtual memory allocated with
 *	kva_alloc.  Any physical pages still mapped within the region
 *	are not freed by this routine; the caller must unmap and free
 *	them first.
 *
 *	This routine may not block.
 */
void
kva_free(addr, size)
	vm_offset_t addr;
	vm_size_t size;
{

	size = round_page(size);
	vmem_free(kernel_arena, addr, size);
}

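/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller that reserves KVA with kva_alloc(), maps an existing page
 * array into it with pmap_qenter(), and later tears the range down.
 * The helper names and the page array are assumptions for the example.
 */
#if 0
static vm_offset_t
example_map_pages(vm_page_t *pages, int npages)
{
	vm_offset_t va;

	va = kva_alloc(ptoa(npages));
	if (va == 0)
		return (0);
	/* Create wired mappings from the new range to the pages. */
	pmap_qenter(va, pages, npages);
	return (va);
}

static void
example_unmap_pages(vm_offset_t va, int npages)
{

	/* Remove the mappings before releasing the virtual addresses. */
	pmap_qremove(va, npages);
	kva_free(va, ptoa(npages));
}
#endif
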
/*
 *	Allocates a region from the kernel address map and physical pages
 *	within the specified address range to the kernel object.  Creates a
 *	wired mapping from this region to these pages, and returns the
 *	region's starting virtual address.  The allocated pages are not
 *	necessarily physically contiguous.  If M_ZERO is specified through the
 *	given flags, then the pages are zeroed before they are mapped.
 */
vm_offset_t
kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, vm_memattr_t memattr)
{
	vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
	vm_offset_t addr, i;
	vm_ooffset_t offset;
	vm_page_t m;
	int pflags, tries;

	size = round_page(size);
	if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		tries = 0;
retry:
		m = vm_page_alloc_contig(object, OFF_TO_IDX(offset + i),
		    pflags, 1, low, high, PAGE_SIZE, 0, memattr);
		if (m == NULL) {
			VM_OBJECT_WUNLOCK(object);
			if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
				vm_pageout_grow_cache(tries, low, high);
				VM_OBJECT_WLOCK(object);
				tries++;
				goto retry;
			}
			kmem_unback(object, addr, i);
			vmem_free(vmem, addr, size);
			return (0);
		}
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
	}
	VM_OBJECT_WUNLOCK(object);
	return (addr);
}

/*
 *	Allocates a region from the kernel address map and physically
 *	contiguous pages within the specified address range to the kernel
 *	object.  Creates a wired mapping from this region to these pages, and
 *	returns the region's starting virtual address.  If M_ZERO is specified
 *	through the given flags, then the pages are zeroed before they are
 *	mapped.
 */
vm_offset_t
kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr)
{
	vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
	vm_offset_t addr, tmp;
	vm_ooffset_t offset;
	vm_page_t end_m, m;
	int pflags, tries;

	size = round_page(size);
	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	VM_OBJECT_WLOCK(object);
	tries = 0;
retry:
	m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags,
	    atop(size), low, high, alignment, boundary, memattr);
	if (m == NULL) {
		VM_OBJECT_WUNLOCK(object);
		if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
			vm_pageout_grow_cache(tries, low, high);
			VM_OBJECT_WLOCK(object);
			tries++;
			goto retry;
		}
		vmem_free(vmem, addr, size);
		return (0);
	}
	end_m = m + atop(size);
	tmp = addr;
	for (; m < end_m; m++) {
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, tmp, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
		tmp += PAGE_SIZE;
	}
	VM_OBJECT_WUNLOCK(object);
	return (addr);
}

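/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller that uses kmem_alloc_contig() to obtain a zeroed, wired,
 * physically contiguous buffer below 4GB, e.g. for a device that can
 * only generate 32-bit DMA addresses.  Names and limits are assumptions.
 */
#if 0
static void *
example_contig_alloc(vm_size_t size)
{
	vm_offset_t va;

	va = kmem_alloc_contig(kernel_arena, size, M_WAITOK | M_ZERO,
	    0, 0xffffffffUL, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	return ((void *)va);
}

static void
example_contig_free(void *buf, vm_size_t size)
{

	kmem_free(kernel_arena, (vm_offset_t)buf, size);
}
#endif
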
/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 *	superpage_align	Request that min be superpage aligned
 */
vm_map_t
kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
    vm_size_t size, boolean_t superpage_align)
{
	int ret;
	vm_map_t result;

	size = round_page(size);

	*min = vm_map_min(parent);
	ret = vm_map_find(parent, NULL, 0, min, size, 0, superpage_align ?
	    VMFS_SUPER_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
	    MAP_ACC_NO_CHARGE);
	if (ret != KERN_SUCCESS)
		panic("kmem_suballoc: bad status return of %d", ret);
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}

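/*
 * Illustrative sketch, not part of the original file: during startup a
 * subsystem can carve a dedicated submap out of kernel_map, much as
 * exec_map and pipe_map are set up.  The map pointer, size, and helper
 * name below are assumptions for the example.
 */
#if 0
static vm_map_t example_submap;

static void
example_create_submap(void)
{
	vm_offset_t minaddr, maxaddr;

	/* Reserve 16MB of kernel VA and manage it with its own map. */
	example_submap = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    16 * 1024 * 1024, FALSE);
}
#endif
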
/*
 *	kmem_malloc:
 *
 *	Allocate wired-down pages in the kernel's address space.
 */
vm_offset_t
kmem_malloc(struct vmem *vmem, vm_size_t size, int flags)
{
	vm_offset_t addr;
	int rv;

	size = round_page(size);
	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
		return (0);

	rv = kmem_back((vmem == kmem_arena) ? kmem_object : kernel_object,
	    addr, size, flags);
	if (rv != KERN_SUCCESS) {
		vmem_free(vmem, addr, size);
		return (0);
	}
	return (addr);
}

/*
 *	kmem_back:
 *
 *	Allocate physical pages for the specified virtual address range.
 */
int
kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
{
	vm_offset_t offset, i;
	vm_page_t m;
	int pflags;

	KASSERT(object == kmem_object || object == kernel_object,
	    ("kmem_back: only supports kernel objects."));

	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;

	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
retry:
		m = vm_page_alloc(object, OFF_TO_IDX(offset + i), pflags);

		/*
		 * Ran out of space, free everything up and return. Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			VM_OBJECT_WUNLOCK(object);
			if ((flags & M_NOWAIT) == 0) {
				VM_WAIT;
				VM_OBJECT_WLOCK(object);
				goto retry;
			}
			kmem_unback(object, addr, i);
			return (KERN_NO_SPACE);
		}
		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
		    ("kmem_malloc: page %p is managed", m));
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
	}
	VM_OBJECT_WUNLOCK(object);

	return (KERN_SUCCESS);
}

/*
 *	kmem_unback:
 *
 *	Unmap and free the physical pages underlying the specified virtual
 *	address range.
 *
 *	A physical page must exist within the specified object at each index
 *	that is being unmapped.
 */
void
kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
{
	vm_page_t m;
	vm_offset_t i, offset;

	KASSERT(object == kmem_object || object == kernel_object,
	    ("kmem_unback: only supports kernel objects."));

	pmap_remove(kernel_pmap, addr, addr + size);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(object, OFF_TO_IDX(offset + i));
		vm_page_unwire(m, PQ_INACTIVE);
		vm_page_free(m);
	}
	VM_OBJECT_WUNLOCK(object);
}

/*
 *	kmem_free:
 *
 *	Free memory allocated with kmem_malloc.  The size must match the
 *	original allocation.
 */
void
kmem_free(struct vmem *vmem, vm_offset_t addr, vm_size_t size)
{

	size = round_page(size);
	kmem_unback((vmem == kmem_arena) ? kmem_object : kernel_object,
	    addr, size);
	vmem_free(vmem, addr, size);
}

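/*
 * Illustrative sketch, not part of the original file: kmem_malloc() and
 * kmem_free() are normally reached through malloc(9) and UMA rather than
 * called directly, but a hypothetical direct caller would pair them like
 * this, passing the same arena and size to both.
 */
#if 0
static vm_offset_t
example_wired_alloc(vm_size_t size)
{

	/* Sleep until memory is available and return zeroed pages. */
	return (kmem_malloc(kernel_arena, size, M_WAITOK | M_ZERO));
}

static void
example_wired_free(vm_offset_t addr, vm_size_t size)
{

	kmem_free(kernel_arena, addr, size);
}
#endif
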
/*
 *	kmap_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmap_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);
	if (!swap_reserve(size))
		return (0);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			swap_release(size);
			return (0);
		}
		map->needs_wakeup = TRUE;
		vm_map_unlock_and_wait(map, 0);
	}
	vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL,
	    VM_PROT_ALL, MAP_ACC_CHARGED);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmap_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmap_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{

	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	if (map->needs_wakeup) {
		map->needs_wakeup = FALSE;
		vm_map_wakeup(map);
	}
	vm_map_unlock(map);
}

void
kmem_init_zero_region(void)
{
	vm_offset_t addr, i;
	vm_page_t m;

	/*
	 * Map a single physical page of zeros to a larger virtual range.
	 * This requires less looping in places that want large amounts of
	 * zeros, while not using much more physical resources.
	 */
	addr = kva_alloc(ZERO_REGION_SIZE);
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);
	for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE)
		pmap_qenter(addr + i, &m, 1);
	pmap_protect(kernel_pmap, addr, addr + ZERO_REGION_SIZE, VM_PROT_READ);

	zero_region = (const void *)addr;
}

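/*
 * Illustrative sketch, not part of the original file: zero_region gives
 * consumers a large run of read-only zeroed KVA backed by a single
 * physical page, so bulk zero-filling (for instance, reads from
 * /dev/zero) can copy from it in ZERO_REGION_SIZE chunks instead of
 * zeroing a private buffer.  The simplified uio consumer below is a
 * hypothetical example and assumes <sys/uio.h> for struct uio and
 * uiomove().
 */
#if 0
static int
example_zero_read(struct uio *uio)
{
	ssize_t len;
	int error;

	error = 0;
	while (uio->uio_resid > 0 && error == 0) {
		len = uio->uio_resid;
		if (len > ZERO_REGION_SIZE)
			len = ZERO_REGION_SIZE;
		error = uiomove(__DECONST(void *, zero_region), len, uio);
	}
	return (error);
}
#endif
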
/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(start, end)
	vm_offset_t start, end;
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	m->system_map = 1;
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	(void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
#ifdef __amd64__
	    KERNBASE,
#else
	    VM_MIN_KERNEL_ADDRESS,
#endif
	    start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}

#ifdef DIAGNOSTIC
/*
 * Allow userspace to directly trigger the VM drain routine for testing
 * purposes.
 */
static int
debug_vm_lowmem(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = 0;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error)
		return (error);
	if (i)
		EVENTHANDLER_INVOKE(vm_lowmem, 0);
	return (0);
}

SYSCTL_PROC(_debug, OID_AUTO, vm_lowmem, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
    debug_vm_lowmem, "I", "set to trigger vm_lowmem event");
#endif
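/*
 * Illustrative sketch, not part of the original file: the vm_lowmem
 * event fired above (on a DIAGNOSTIC kernel, "sysctl debug.vm_lowmem=1"
 * triggers it by hand) is consumed by registering a handler through
 * EVENTHANDLER_REGISTER().  The handler name, its body, and the chosen
 * priority are assumptions for the example.
 */
#if 0
static void
example_lowmem_handler(void *arg __unused, int flags __unused)
{

	/* Trim subsystem caches to give memory back to the system. */
}

static void
example_register_lowmem(void)
{

	EVENTHANDLER_REGISTER(vm_lowmem, example_lowmem_handler, NULL,
	    EVENTHANDLER_PRI_ANY);
}
#endif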