vm_kern.c revision 139825
/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Kernel memory management.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_kern.c 139825 2005-01-07 02:29:27Z imp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

vm_map_t kernel_map=0;
vm_map_t kmem_map=0;
vm_map_t exec_map=0;
vm_map_t pipe_map;
vm_map_t buffer_map=0;

/*
 *	kmem_alloc_nofault:
 *
 *	Allocate a virtual address range with no underlying object and
 *	no initial mapping to physical memory.  Any mapping from this
 *	range to physical memory must be explicitly created prior to
 *	its use, typically with pmap_qenter().  Any attempt to create
 *	a mapping on demand through vm_fault() will result in a panic.
 */
vm_offset_t
kmem_alloc_nofault(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
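/*
 * Illustrative sketch (a hypothetical caller, not part of this file's
 * implementation): reserve a KVA range with kmem_alloc_nofault() and back
 * it explicitly with pmap_qenter(), since no mapping is created on demand.
 * The page array and NPAGES are assumed to exist in the caller.
 *
 *	vm_offset_t kva;
 *	vm_page_t pages[NPAGES];	(NPAGES wired pages, already allocated)
 *
 *	kva = kmem_alloc_nofault(kernel_map, NPAGES * PAGE_SIZE);
 *	if (kva == 0)
 *		return (ENOMEM);
 *	pmap_qenter(kva, pages, NPAGES);
 *	(... use the buffer mapped at kva ...)
 *	pmap_qremove(kva, NPAGES);
 *	kmem_free(kernel_map, kva, NPAGES * PAGE_SIZE);
 */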
/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	vm_offset_t offset;
	vm_offset_t i;

	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages.  Assume that no
	 * region of the kernel object is referenced more than once.
	 */

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_wire.  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 * kernel_object.  2) The kmsg zone is empty, and so we are
	 * kmem_allocing a new page for it.  3) vm_map_wire calls vm_fault;
	 * there is no page, but there is a pager, so we call
	 * pager_data_request.  But the kmsg zone is empty, so we must
	 * kmem_alloc.  4) goto 1  5) Even if the kmsg zone is not empty: when
	 * we get the data back from the pager, it will be (very stale)
	 * non-zero data.  kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent
	 * a race with page-out.  vm_map_wire will wire the pages.
	 */
	VM_OBJECT_LOCK(kernel_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
		    VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		mem->valid = VM_PAGE_BITS_ALL;
		vm_page_lock_queues();
		vm_page_unmanage(mem);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(kernel_object);

	/*
	 * And finally, mark the data as non-pageable.
	 */
	(void) vm_map_wire(map, addr, addr + size,
	    VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);

	return (addr);
}

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{

	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}
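/*
 * Illustrative sketch (a hypothetical caller): a boot-time allocation of a
 * wired, zero-filled buffer from kernel_map, paired with kmem_free().  The
 * size chosen here is arbitrary.
 *
 *	vm_offset_t buf;
 *	vm_size_t bufsize = 16 * PAGE_SIZE;
 *
 *	buf = kmem_alloc(kernel_map, bufsize);
 *	if (buf == 0)
 *		panic("cannot allocate wired buffer");
 *	(... use the zero-filled, wired memory at buf ...)
 *	kmem_free(kernel_map, buf, bufsize);
 */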
/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 */
vm_map_t
kmem_suballoc(parent, min, max, size)
	vm_map_t parent;
	vm_offset_t *min, *max;
	vm_size_t size;
{
	int ret;
	vm_map_t result;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
	    min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
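/*
 * Illustrative sketch: how a submap such as exec_map is typically carved
 * out of kernel_map during startup.  The size shown is an arbitrary
 * placeholder, not the value used by any particular platform.
 *
 *	vm_offset_t minaddr, maxaddr;
 *
 *	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 *	    16 * PAGE_SIZE);
 */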
/*
 *	kmem_malloc:
 *
 * 	Allocate wired-down memory in the kernel's address map for the higher
 * 	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 * 	kmem_alloc() because we may need to allocate memory at interrupt
 * 	level where we cannot block (canwait == FALSE).
 *
 * 	This routine has its own private kernel submap (kmem_map) and object
 * 	(kmem_object).  This, combined with the fact that only malloc uses
 * 	this routine, ensures that we will never block in map or object waits.
 *
 * 	Note that this still only works in a uni-processor environment and
 * 	when called at splhigh().
 *
 * 	We don't worry about expanding the map (adding entries) since entries
 * 	for wired maps are statically allocated.
 *
 *	NOTE:  This routine is not supposed to block if M_NOWAIT is set, but
 *	I have not verified that it actually does not block.
 *
 *	`map' is ONLY allowed to be kmem_map or one of the mbuf submaps to
 *	which we never free.
 */
vm_offset_t
kmem_malloc(map, size, flags)
	vm_map_t map;
	vm_size_t size;
	int flags;
{
	vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;
	int pflags;

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		if ((flags & M_NOWAIT) == 0)
			panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
			    (long)size, (long)map->size);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);

	/*
	 * Note: if M_NOWAIT is specified alone, allocate from the
	 * interrupt-safe queues only (just the free list).  If
	 * M_USE_RESERVE is also specified, we can also
	 * allocate from the cache.  Neither of the latter two
	 * flags may be specified from an interrupt since interrupts
	 * are not allowed to mess with the cache queue.
	 */

	if ((flags & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
	else
		pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;

	if (flags & M_ZERO)
		pflags |= VM_ALLOC_ZERO;

	VM_OBJECT_LOCK(kmem_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
retry:
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), pflags);

		/*
		 * Ran out of space, free everything up and return.  Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0) {
				VM_OBJECT_UNLOCK(kmem_object);
				vm_map_unlock(map);
				VM_WAIT;
				vm_map_lock(map);
				VM_OBJECT_LOCK(kmem_object);
				goto retry;
			}
			/*
			 * Free the pages before removing the map entry.
			 * They are already marked busy.  Calling
			 * vm_map_delete before the pages have been freed or
			 * unbusied will cause a deadlock.
			 */
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object,
				    OFF_TO_IDX(offset + i));
				vm_page_lock_queues();
				vm_page_unwire(m, 0);
				vm_page_free(m);
				vm_page_unlock_queues();
			}
			VM_OBJECT_UNLOCK(kmem_object);
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			return (0);
		}
		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_lock_queues();
		vm_page_unmanage(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(kmem_object);

	/*
	 * Mark map entry as non-pageable.  Assert: vm_map_insert() will never
	 * be able to extend the previous entry so there will be a new entry
	 * exactly corresponding to this address range and it will have
	 * wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count != 0)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count = 1;

	/*
	 * At this point, the kmem_object must be unlocked because
	 * vm_map_simplify_entry() calls vm_object_deallocate(), which
	 * locks the kmem_object.
	 */
	vm_map_simplify_entry(map, entry);

	/*
	 * Loop thru pages, entering them in the pmap.  (We cannot add them to
	 * the wired count without wrapping the vm_page_queue_lock in
	 * splimp...)
	 */
	VM_OBJECT_LOCK(kmem_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		/*
		 * Because this is kernel_pmap, this call will not block.
		 */
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
		vm_page_lock_queues();
		vm_page_flag_set(m, PG_WRITEABLE | PG_REFERENCED);
		vm_page_wakeup(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(kmem_object);
	vm_map_unlock(map);

	return (addr);
}
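/*
 * Illustrative sketch (the normal caller is the malloc(9) layer in
 * kern/kern_malloc.c; direct use elsewhere is uncommon): a non-sleeping,
 * zero-filled allocation from kmem_map.  `len' is a hypothetical caller
 * variable.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_malloc(kmem_map, round_page(len), M_NOWAIT | M_ZERO);
 *	if (va == 0)
 *		return (NULL);	(no memory, and blocking is not allowed)
 */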
/*
 *	kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmem_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			return (0);
		}
		map->needs_wakeup = TRUE;
		vm_map_unlock_and_wait(map, FALSE);
	}
	vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{

	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	if (map->needs_wakeup) {
		map->needs_wakeup = FALSE;
		vm_map_wakeup(map);
	}
	vm_map_unlock(map);
}
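/*
 * Illustrative sketch: pageable scratch space from a submap, in the style
 * of the exec argument buffers.  The caller may sleep in kmem_alloc_wait()
 * until someone else calls kmem_free_wakeup() on the same map; the size
 * used here is a placeholder.
 *
 *	vm_offset_t kva;
 *
 *	kva = kmem_alloc_wait(exec_map, ARG_MAX);
 *	(... copy argument strings into the pageable buffer at kva ...)
 *	kmem_free_wakeup(exec_map, kva, ARG_MAX);
 */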
/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(start, end)
	vm_offset_t start, end;
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	m->system_map = 1;
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	(void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
	    VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
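/*
 * Illustrative sketch: kmem_init() is called once during bootstrap from the
 * machine-independent VM startup code, with the range of kernel virtual
 * addresses still unused after early allocations (the variable names below
 * reflect common usage and are not defined in this file).
 *
 *	kmem_init(virtual_avail, virtual_end);
 */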