vm_kern.c revision 128613
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Kernel memory management.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_kern.c 128613 2004-04-24 20:53:55Z alc $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

vm_map_t kernel_map=0;
vm_map_t kmem_map=0;
vm_map_t exec_map=0;
vm_map_t pipe_map;
vm_map_t buffer_map=0;

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory to the kernel's address map.
 *	"map" must be kernel_map or a submap of kernel_map.
 */
vm_offset_t
kmem_alloc_pageable(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
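/*
 * Example (editor's illustrative sketch, not part of the original file;
 * the name "example_pageable" is hypothetical): a minimal caller of
 * kmem_alloc_pageable().  The returned range is backed on demand by
 * vm_fault(), so it must not be touched from a context that cannot
 * tolerate a page fault.
 */
static void
example_pageable(void)
{
	vm_offset_t va;

	va = kmem_alloc_pageable(kernel_map, 2 * PAGE_SIZE);
	if (va == 0)
		return;			/* address space exhausted */
	/* ... use the range; faults fill it in on demand ... */
	kmem_free(kernel_map, va, 2 * PAGE_SIZE);
}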
/*
 *	kmem_alloc_nofault:
 *
 *	Allocate a virtual address range with no underlying object and
 *	no initial mapping to physical memory.  Any mapping from this
 *	range to physical memory must be explicitly created prior to
 *	its use, typically with pmap_qenter().  Any attempt to create
 *	a mapping on demand through vm_fault() will result in a panic.
 */
vm_offset_t
kmem_alloc_nofault(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
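/*
 * Example (editor's sketch; "example_map_pages" and its arguments are
 * hypothetical): backing a nofault range explicitly with pmap_qenter(),
 * as the comment above requires.  Error handling is reduced to the
 * failure return.
 */
static vm_offset_t
example_map_pages(vm_page_t *pages, int npages)
{
	vm_offset_t kva;

	kva = kmem_alloc_nofault(kernel_map, (vm_size_t)npages * PAGE_SIZE);
	if (kva == 0)
		return (0);
	/* Explicitly create the mappings; no vm_fault() may occur here. */
	pmap_qenter(kva, pages, npages);
	/* ... later: pmap_qremove(kva, npages) before releasing the KVA. */
	return (kva);
}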
/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	vm_offset_t offset;
	vm_offset_t i;

	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages. Assume that no
	 * region of the kernel object is referenced more than once.
	 */

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_wire.  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 * kernel_object.  2) The kmsg zone is empty, and so we are
	 * kmem_allocing a new page for it.  3) vm_map_wire calls vm_fault;
	 * there is no page, but there is a pager, so we call
	 * pager_data_request.  But the kmsg zone is empty, so we must
	 * kmem_alloc.  4) goto 1  5) Even if the kmsg zone is not empty: when
	 * we get the data back from the pager, it will be (very stale)
	 * non-zero data.  kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent a
	 * race with page-out.  vm_map_wire will wire the pages.
	 */
	VM_OBJECT_LOCK(kernel_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
		    VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		mem->valid = VM_PAGE_BITS_ALL;
		vm_page_lock_queues();
		vm_page_unmanage(mem);
		vm_page_wakeup(mem);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(kernel_object);

	/*
	 * And finally, mark the data as non-pageable.
	 */
	(void) vm_map_wire(map, addr, addr + size,
	    VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);

	return (addr);
}

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{

	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}
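/*
 * Example (editor's sketch; "example_wired" is a hypothetical name):
 * the kmem_alloc()/kmem_free() pairing.  The returned memory is wired
 * and zero-filled, so it may be used immediately without faulting.
 */
static void
example_wired(void)
{
	vm_offset_t va;
	vm_size_t size = 4 * PAGE_SIZE;

	va = kmem_alloc(kernel_map, size);
	if (va == 0)
		return;			/* no space in the map */
	/* Wired, zero-filled memory; usable at once. */
	kmem_free(kernel_map, va, size);
}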
/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 */
vm_map_t
kmem_suballoc(parent, min, max, size)
	vm_map_t parent;
	vm_offset_t *min, *max;
	vm_size_t size;
{
	int ret;
	vm_map_t result;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
	    min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
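/*
 * Example (editor's sketch; "example_map" is a hypothetical variable):
 * carving a private submap out of kernel_map, in the style used at
 * startup to create submaps such as exec_map and pipe_map.
 */
static vm_map_t example_map;

static void
example_submap(void)
{
	vm_offset_t minaddr, maxaddr;

	example_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    32 * PAGE_SIZE);
	/* Allocations in example_map now come from [minaddr, maxaddr). */
}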
/*
 *	kmem_malloc:
 *
 * 	Allocate wired-down memory in the kernel's address map for the higher
 * 	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 * 	kmem_alloc() because we may need to allocate memory at interrupt
 * 	level where we cannot block (canwait == FALSE).
 *
 * 	This routine has its own private kernel submap (kmem_map) and object
 * 	(kmem_object).  This, combined with the fact that only malloc uses
 * 	this routine, ensures that we will never block in map or object waits.
 *
 * 	Note that this still only works in a uni-processor environment and
 * 	when called at splhigh().
 *
 * 	We don't worry about expanding the map (adding entries) since entries
 * 	for wired maps are statically allocated.
 *
 *	NOTE:  This routine is not supposed to block if M_NOWAIT is set, but
 *	I have not verified that it actually does not block.
 *
 *	`map' is ONLY allowed to be kmem_map or one of the mbuf submaps to
 *	which we never free.
 */
vm_offset_t
kmem_malloc(map, size, flags)
	vm_map_t map;
	vm_size_t size;
	int flags;
{
	vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;
	int pflags;

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		if (map != kmem_map) {
			static int last_report; /* when we did it (in ticks) */
			if (ticks < last_report ||
			    (ticks - last_report) >= hz) {
				last_report = ticks;
				printf("Out of mbuf address space!\n");
				printf("Consider increasing NMBCLUSTERS\n");
			}
			return (0);
		}
		if ((flags & M_NOWAIT) == 0)
			panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
			    (long)size, (long)map->size);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);

	/*
	 * Note: if M_NOWAIT specified alone, allocate from
	 * interrupt-safe queues only (just the free list).  If
	 * M_USE_RESERVE is also specified, we can also
	 * allocate from the cache.  Neither of the latter two
	 * flags may be specified from an interrupt since interrupts
	 * are not allowed to mess with the cache queue.
	 */
	if ((flags & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
	else
		pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;

	if (flags & M_ZERO)
		pflags |= VM_ALLOC_ZERO;

	VM_OBJECT_LOCK(kmem_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
retry:
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), pflags);

		/*
		 * Ran out of space, free everything up and return. Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0) {
				VM_OBJECT_UNLOCK(kmem_object);
				vm_map_unlock(map);
				VM_WAIT;
				vm_map_lock(map);
				VM_OBJECT_LOCK(kmem_object);
				goto retry;
			}
			/*
			 * Free the pages before removing the map entry.
			 * They are already marked busy.  Calling
			 * vm_map_delete before the pages have been freed or
			 * unbusied will cause a deadlock.
			 */
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object,
				    OFF_TO_IDX(offset + i));
				vm_page_lock_queues();
				vm_page_unwire(m, 0);
				vm_page_free(m);
				vm_page_unlock_queues();
			}
			VM_OBJECT_UNLOCK(kmem_object);
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			return (0);
		}
		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_lock_queues();
		vm_page_unmanage(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(kmem_object);

	/*
	 * Mark map entry as non-pageable. Assert: vm_map_insert() will never
	 * be able to extend the previous entry so there will be a new entry
	 * exactly corresponding to this address range and it will have
	 * wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count != 0)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count = 1;

	/*
	 * At this point, the kmem_object must be unlocked because
	 * vm_map_simplify_entry() calls vm_object_deallocate(), which
	 * locks the kmem_object.
	 */
	vm_map_simplify_entry(map, entry);

	/*
	 * Loop through pages, entering them in the pmap.  (We cannot add
	 * them to the wired count without wrapping the vm_page_queue_lock in
	 * splimp...)
	 */
	VM_OBJECT_LOCK(kmem_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		/*
		 * Because this is kernel_pmap, this call will not block.
		 */
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
		vm_page_lock_queues();
		vm_page_flag_set(m, PG_WRITEABLE | PG_REFERENCED);
		vm_page_wakeup(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(kmem_object);
	vm_map_unlock(map);

	return (addr);
}
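/*
 * Example (editor's sketch; "example_nowait_pages" is a hypothetical
 * helper): how a caller in the malloc(9) back end might invoke
 * kmem_malloc().  M_NOWAIT selects the non-blocking allocation path
 * above (VM_ALLOC_INTERRUPT) and M_ZERO requests zeroed pages.
 */
static vm_offset_t
example_nowait_pages(vm_size_t bytes)
{

	return (kmem_malloc(kmem_map, round_page(bytes), M_NOWAIT | M_ZERO));
}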
/*
 *	kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmem_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			return (0);
		}
		map->needs_wakeup = TRUE;
		vm_map_unlock_and_wait(map, FALSE);
	}
	vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{

	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	if (map->needs_wakeup) {
		map->needs_wakeup = FALSE;
		vm_map_wakeup(map);
	}
	vm_map_unlock(map);
}
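/*
 * Example (editor's sketch): the blocking submap pattern.  A caller
 * sleeps in kmem_alloc_wait() until space appears, and every free must
 * go through kmem_free_wakeup() so that sleepers are awakened.  Using
 * exec_map here is illustrative; the pipe code uses pipe_map similarly.
 */
static void
example_wait(void)
{
	vm_offset_t va;

	va = kmem_alloc_wait(exec_map, PAGE_SIZE);	/* may sleep */
	if (va == 0)
		return;		/* request exceeds the entire submap */
	/* ... use the pageable range ... */
	kmem_free_wakeup(exec_map, va, PAGE_SIZE);
}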
/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(start, end)
	vm_offset_t start, end;
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	m->system_map = 1;
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	(void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
	    VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
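/*
 * Usage note (editor's addition, a sketch of the bootstrap call):
 * kmem_init() runs once during VM startup, before any of the
 * allocators above may be used, roughly as
 *
 *	kmem_init(virtual_avail, virtual_end);
 *
 * where virtual_avail and virtual_end are the machine-dependent bounds
 * of the kernel virtual address space still free after early startup
 * allocations.
 */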