/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Kernel memory management.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_kern.c 125882 2004-02-16 18:41:58Z des $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

vm_map_t kernel_map=0;
vm_map_t kmem_map=0;
vm_map_t exec_map=0;
vm_map_t pipe_map;
vm_map_t buffer_map=0;

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory to the kernel's address map.
 *	"map" must be kernel_map or a submap of kernel_map.
 */
vm_offset_t
kmem_alloc_pageable(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
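
/*
 * Illustrative sketch, not part of the original file: a minimal caller
 * of kmem_alloc_pageable().  The function name and the four-page size
 * are hypothetical; the pattern (treat 0 as failure, release with
 * kmem_free()) follows the interfaces defined in this file.
 */
#if 0
static int
example_pageable(void)
{
	vm_offset_t va;

	va = kmem_alloc_pageable(kernel_map, 4 * PAGE_SIZE);
	if (va == 0)
		return (ENOMEM);	/* no address space available */
	/* ... use [va, va + 4 * PAGE_SIZE) ... */
	kmem_free(kernel_map, va, 4 * PAGE_SIZE);
	return (0);
}
#endif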
/*
 *	kmem_alloc_nofault:
 *
 *	Allocate a virtual address range with no underlying object and
 *	no initial mapping to physical memory.  Any mapping from this
 *	range to physical memory must be explicitly created prior to
 *	its use, typically with pmap_qenter().  Any attempt to create
 *	a mapping on demand through vm_fault() will result in a panic.
 */
vm_offset_t
kmem_alloc_nofault(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
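
/*
 * Illustrative sketch, not part of the original file: the explicit
 * mapping pattern the comment above describes.  The caller is assumed
 * to supply an array of already-allocated pages; the function name is
 * hypothetical.  pmap_qenter()/pmap_qremove() populate and tear down
 * the range without ever going through vm_fault().
 */
#if 0
static void
example_nofault_window(vm_page_t *pages, int npages)
{
	vm_offset_t va;

	va = kmem_alloc_nofault(kernel_map, npages * PAGE_SIZE);
	if (va == 0)
		return;
	/* Create the mappings explicitly; a vm_fault() here would panic. */
	pmap_qenter(va, pages, npages);
	/* ... use the window ... */
	pmap_qremove(va, npages);
	kmem_free(kernel_map, va, npages * PAGE_SIZE);
}
#endif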
/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	vm_offset_t offset;
	vm_offset_t i;

	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages.  Assume that no
	 * region of the kernel object is referenced more than once.
	 */

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size,
		VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_wire.  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 * kernel_object. 2) The kmsg zone is empty, and so we are
	 * kmem_allocing a new page for it. 3) vm_map_wire calls vm_fault;
	 * there is no page, but there is a pager, so we call
	 * pager_data_request.  But the kmsg zone is empty, so we must
	 * kmem_alloc. 4) goto 1 5) Even if the kmsg zone is not empty: when
	 * we get the data back from the pager, it will be (very stale)
	 * non-zero data.  kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent a
	 * race with page-out.  vm_map_wire will wire the pages.
	 */
	VM_OBJECT_LOCK(kernel_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
		    VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		if ((mem->flags & PG_ZERO) == 0)
			pmap_zero_page(mem);
		mem->valid = VM_PAGE_BITS_ALL;
		vm_page_lock_queues();
		vm_page_unmanage(mem);
		vm_page_wakeup(mem);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(kernel_object);

	/*
	 * And finally, mark the data as non-pageable.
	 */
	(void) vm_map_wire(map, addr, addr + size,
	    VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);

	return (addr);
}

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{

	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 */
vm_map_t
kmem_suballoc(parent, min, max, size)
	vm_map_t parent;
	vm_offset_t *min, *max;
	vm_size_t size;
{
	int ret;
	vm_map_t result;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
	    min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
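
/*
 * Illustrative sketch, not part of the original file: submaps such as
 * the exec_map and pipe_map declared above are carved out of kernel_map
 * at boot with kmem_suballoc(), along the lines below.  The sizes are
 * placeholders, not the values the system actually computes.
 */
#if 0
static void
example_create_submaps(void)
{
	vm_offset_t minaddr, maxaddr;

	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    16 * PAGE_SIZE);
	pipe_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    64 * PAGE_SIZE);
}
#endif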
/*
 *	kmem_malloc:
 *
 * 	Allocate wired-down memory in the kernel's address map for the higher
 * 	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 * 	kmem_alloc() because we may need to allocate memory at interrupt
 * 	level where we cannot block (canwait == FALSE).
 *
 * 	This routine has its own private kernel submap (kmem_map) and object
 * 	(kmem_object).  This, combined with the fact that only malloc uses
 * 	this routine, ensures that we will never block in map or object waits.
 *
 * 	Note that this still only works in a uni-processor environment and
 * 	when called at splhigh().
 *
 * 	We don't worry about expanding the map (adding entries) since entries
 * 	for wired maps are statically allocated.
 *
 *	NOTE:  This routine is not supposed to block if M_NOWAIT is set, but
 *	I have not verified that it actually does not block.
 *
 *	`map' is ONLY allowed to be kmem_map or one of the mbuf submaps to
 *	which we never free.
 */
vm_offset_t
kmem_malloc(map, size, flags)
	vm_map_t map;
	vm_size_t size;
	int flags;
{
	vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;
	int pflags;

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		if (map != kmem_map) {
			static int last_report; /* when we did it (in ticks) */
			if (ticks < last_report ||
			    (ticks - last_report) >= hz) {
				last_report = ticks;
				printf("Out of mbuf address space!\n");
				printf("Consider increasing NMBCLUSTERS\n");
			}
			return (0);
		}
#if 0
		if ((flags & M_NOWAIT) == 0)
			panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
				(long)size, (long)map->size);
#endif
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size,
		VM_PROT_ALL, VM_PROT_ALL, 0);

	/*
	 * Note: if M_NOWAIT specified alone, allocate from
	 * interrupt-safe queues only (just the free list).  If
	 * M_USE_RESERVE is also specified, we can also
	 * allocate from the cache.  Neither of the latter two
	 * flags may be specified from an interrupt since interrupts
	 * are not allowed to mess with the cache queue.
	 */
	if ((flags & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
	else
		pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;

	if (flags & M_ZERO)
		pflags |= VM_ALLOC_ZERO;

	VM_OBJECT_LOCK(kmem_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
retry:
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), pflags);

		/*
		 * Ran out of space, free everything up and return.  Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0) {
				VM_OBJECT_UNLOCK(kmem_object);
				vm_map_unlock(map);
				VM_WAIT;
				vm_map_lock(map);
				VM_OBJECT_LOCK(kmem_object);
				goto retry;
			}
			/*
			 * Free the pages before removing the map entry.
			 * They are already marked busy.  Calling
			 * vm_map_delete before the pages has been freed or
			 * unbusied will cause a deadlock.
			 */
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object,
						   OFF_TO_IDX(offset + i));
				vm_page_lock_queues();
				vm_page_unwire(m, 0);
				vm_page_free(m);
				vm_page_unlock_queues();
			}
			VM_OBJECT_UNLOCK(kmem_object);
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			return (0);
		}
		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_lock_queues();
		vm_page_unmanage(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(kmem_object);

	/*
	 * Mark map entry as non-pageable.  Assert: vm_map_insert() will never
	 * be able to extend the previous entry so there will be a new entry
	 * exactly corresponding to this address range and it will have
	 * wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count != 0)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count = 1;

	/*
	 * At this point, the kmem_object must be unlocked because
	 * vm_map_simplify_entry() calls vm_object_deallocate(), which
	 * locks the kmem_object.
	 */
	vm_map_simplify_entry(map, entry);

	/*
	 * Loop thru pages, entering them in the pmap.  (We cannot add them to
	 * the wired count without wrapping the vm_page_queue_lock in
	 * splimp...)
	 */
	VM_OBJECT_LOCK(kmem_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		/*
		 * Because this is kernel_pmap, this call will not block.
		 */
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
		vm_page_lock_queues();
		vm_page_flag_set(m, PG_WRITEABLE | PG_REFERENCED);
		vm_page_wakeup(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(kmem_object);
	vm_map_unlock(map);

	return (addr);
}
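
/*
 * Illustrative sketch, not part of the original file: roughly how a
 * back-end allocator (e.g. the machinery behind kern/kern_malloc.c)
 * might call kmem_malloc().  The wrapper name is hypothetical; `flags'
 * is a malloc(9) flag word such as M_NOWAIT or M_WAITOK, optionally
 * combined with M_ZERO.
 */
#if 0
static void *
example_backend_alloc(vm_size_t bytes, int flags)
{
	vm_offset_t va;

	va = kmem_malloc(kmem_map, round_page(bytes), flags);
	return ((void *)va);	/* NULL (0) on failure */
}
#endif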
/*
 *	kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmem_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			return (0);
		}
		map->needs_wakeup = TRUE;
		vm_map_unlock_and_wait(map, FALSE);
	}
	vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{

	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	if (map->needs_wakeup) {
		map->needs_wakeup = FALSE;
		vm_map_wakeup(map);
	}
	vm_map_unlock(map);
}
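
/*
 * Illustrative sketch, not part of the original file: kmem_alloc_wait()
 * and kmem_free_wakeup() are used as a pair on submaps (exec_map is one
 * such consumer), so that a thread blocked for address space is woken
 * when another thread returns some.  The size here is a placeholder.
 */
#if 0
static void
example_wait_pair(void)
{
	vm_offset_t va;

	va = kmem_alloc_wait(exec_map, 4 * PAGE_SIZE);	/* may sleep */
	if (va == 0)
		return;		/* request can never fit in this submap */
	/* ... use the region ... */
	kmem_free_wakeup(exec_map, va, 4 * PAGE_SIZE);
}
#endif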
/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(start, end)
	vm_offset_t start, end;
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	m->system_map = 1;
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	(void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
	    VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
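
/*
 * Illustrative sketch, not part of the original file: kmem_init() runs
 * once during startup (from the VM initialization code) with the kernel
 * virtual range the pmap layer reports as available.  The extern
 * declarations below are assumptions about the machine-dependent names.
 */
#if 0
extern vm_offset_t virtual_avail, virtual_end;

static void
example_startup(void)
{
	kmem_init(virtual_avail, virtual_end);
}
#endif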