vm_kern.c revision 87157
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_kern.c 87157 2001-12-01 00:21:30Z luigi $
 */

/*
 *	Kernel memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

vm_map_t kernel_map=0;
vm_map_t kmem_map=0;
vm_map_t exec_map=0;
vm_map_t clean_map=0;
vm_map_t buffer_map=0;

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory to the kernel's address map.
 *	"map" must be kernel_map or a submap of kernel_map.
 */

vm_offset_t
kmem_alloc_pageable(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	GIANT_REQUIRED;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}

/*
 *	kmem_alloc_nofault:
 *
 *	Same as kmem_alloc_pageable, except that it creates a nofault entry.
 */

vm_offset_t
kmem_alloc_nofault(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	GIANT_REQUIRED;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}

/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	vm_offset_t offset;
	vm_offset_t i;

	GIANT_REQUIRED;

	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages. Assume that no
	 * region of the kernel object is referenced more than once.
	 */

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size,
		VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_pageable.  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 * kernel_object. 2) The kmsg zone is empty, and so we are
	 * kmem_allocing a new page for it. 3) vm_map_pageable calls vm_fault;
	 * there is no page, but there is a pager, so we call
	 * pager_data_request. But the kmsg zone is empty, so we must
	 * kmem_alloc. 4) goto 1 5) Even if the kmsg zone is not empty: when
	 * we get the data back from the pager, it will be (very stale)
	 * non-zero data. kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent a
	 * race with page-out.  vm_map_pageable will wire the pages.
	 */

	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
				VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		if ((mem->flags & PG_ZERO) == 0)
			vm_page_zero_fill(mem);
		mem->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(mem, PG_ZERO);
		vm_page_wakeup(mem);
	}

	/*
	 * And finally, mark the data as non-pageable.
	 */

	(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE);

	return (addr);
}

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
	GIANT_REQUIRED;

	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 */
vm_map_t
kmem_suballoc(parent, min, max, size)
	vm_map_t parent;
	vm_offset_t *min, *max;
	vm_size_t size;
{
	int ret;
	vm_map_t result;

	GIANT_REQUIRED;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
	    min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}

/*
 *	kmem_malloc:
 *
 *	Allocate wired-down memory in the kernel's address map for the higher
 *	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 *	kmem_alloc() because we may need to allocate memory at interrupt
 *	level where we cannot block (canwait == FALSE).
 *
 *	This routine has its own private kernel submap (kmem_map) and object
 *	(kmem_object).  This, combined with the fact that only malloc uses
 *	this routine, ensures that we will never block in map or object waits.
 *
 *	Note that this still only works in a uni-processor environment and
 *	when called at splhigh().
 *
 *	We don't worry about expanding the map (adding entries) since entries
 *	for wired maps are statically allocated.
 *
 *	NOTE:  This routine is not supposed to block if M_NOWAIT is set, but
 *	I have not verified that it actually does not block.
 *
 *	`map' is ONLY allowed to be kmem_map or one of the mbuf submaps to
 *	which we never free.
 */
vm_offset_t
kmem_malloc(map, size, flags)
	vm_map_t map;
	vm_size_t size;
	int flags;
{
	vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;

	GIANT_REQUIRED;

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		if (map != kmem_map) {
			static int last_report; /* when we did it (in ticks) */
			if (ticks < last_report ||
			    (ticks - last_report) >= hz) {
				last_report = ticks;
				printf("Out of mbuf address space!\n");
				printf("Consider increasing NMBCLUSTERS\n");
			}
			goto bad;
		}
		if ((flags & M_NOWAIT) == 0)
			panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
				(long)size, (long)map->size);
		goto bad;
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size,
		VM_PROT_ALL, VM_PROT_ALL, 0);

	for (i = 0; i < size; i += PAGE_SIZE) {
		/*
		 * Note: if M_NOWAIT specified alone, allocate from
		 * interrupt-safe queues only (just the free list).  If
		 * M_USE_RESERVE is also specified, we can also
		 * allocate from the cache.  Neither of the latter two
		 * flags may be specified from an interrupt since interrupts
		 * are not allowed to mess with the cache queue.
		 */
retry:
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i),
		    ((flags & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT) ?
			VM_ALLOC_INTERRUPT :
			VM_ALLOC_SYSTEM);

		/*
		 * Ran out of space, free everything up and return. Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0) {
				vm_map_unlock(map);
				VM_WAIT;
				vm_map_lock(map);
				goto retry;
			}
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			goto bad;
		}
		vm_page_flag_clear(m, PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Mark map entry as non-pageable. Assert: vm_map_insert() will never
	 * be able to extend the previous entry so there will be a new entry
	 * exactly corresponding to this address range and it will have
	 * wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
		entry->start != addr || entry->end != addr + size ||
		entry->wired_count != 0)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count = 1;

	vm_map_simplify_entry(map, entry);

	/*
	 * Loop thru pages, entering them in the pmap. (We cannot add them to
	 * the wired count without wrapping the vm_page_queue_lock in
	 * splimp...)
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		vm_page_wire(m);
		vm_page_wakeup(m);
		/*
		 * Because this is kernel_pmap, this call will not block.
		 */
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
	}
	vm_map_unlock(map);

	return (addr);

bad:
	return (0);
}

/*
 *	kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */

vm_offset_t
kmem_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	GIANT_REQUIRED;

	size = round_page(size);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, PVM, "kmaw", 0);
	}
	vm_map_insert(map, NULL, (vm_offset_t) 0, addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
	GIANT_REQUIRED;

	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	wakeup(map);
	vm_map_unlock(map);
}

/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */

void
kmem_init(start, end)
	vm_offset_t start, end;
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	kernel_map->system_map = 1;
	(void) vm_map_insert(m, NULL, (vm_offset_t) 0,
	    VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
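
/*
 * A minimal usage sketch (hypothetical, not part of this file): one
 * plausible way a client subsystem could use kmem_suballoc(),
 * kmem_alloc_wait() and kmem_free_wakeup() above, in the style in which
 * exec_map is set up and consumed elsewhere in the kernel.  The names
 * foo_map, foo_*() and FOO_MAP_SIZE are placeholders.
 */
#if 0	/* illustration only -- not compiled */
#define	FOO_MAP_SIZE	(256 * PAGE_SIZE)	/* hypothetical submap size */

static vm_map_t foo_map;

/*
 * Carve a pageable submap for "foo" allocations out of kernel_map.
 * minaddr/maxaddr receive the submap's endpoints.
 */
static void
foo_init(void)
{
	vm_offset_t minaddr, maxaddr;

	foo_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    FOO_MAP_SIZE);
}

/*
 * Allocate pageable space from the submap; kmem_alloc_wait() sleeps
 * until room is available, so this may block.
 */
static vm_offset_t
foo_alloc(vm_size_t size)
{
	return (kmem_alloc_wait(foo_map, size));
}

/*
 * Return a range to the submap and wake any threads sleeping in
 * foo_alloc() for space.
 */
static void
foo_free(vm_offset_t addr, vm_size_t size)
{
	kmem_free_wakeup(foo_map, addr, size);
}
#endif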