vm_kern.c revision 49852
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_kern.c,v 1.56 1999/07/01 19:53:40 peter Exp $
 */

/*
 *	Kernel memory management.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

vm_map_t kernel_map=0;
vm_map_t kmem_map=0;
vm_map_t exec_map=0;
vm_map_t clean_map=0;
vm_map_t buffer_map=0;
vm_map_t mb_map=0;
int mb_map_full=0;
vm_map_t io_map=0;
vm_map_t phys_map=0;

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory to the kernel's address map.
 *	"map" must be kernel_map or a submap of kernel_map.
 */

vm_offset_t
kmem_alloc_pageable(map, size)
	vm_map_t map;
	register vm_size_t size;
{
	vm_offset_t addr;
	register int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}

/*
 *	kmem_alloc_nofault:
 *
 *	Same as kmem_alloc_pageable, except that it creates a nofault entry.
 */

vm_offset_t
kmem_alloc_nofault(map, size)
	vm_map_t map;
	register vm_size_t size;
{
	vm_offset_t addr;
	register int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
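
/*
 * Usage sketch (illustrative only; the four-page size and the ENOMEM
 * handling below are hypothetical, not taken from a real caller): a
 * client reserves pageable kernel VA with kmem_alloc_pageable() and
 * later releases it with kmem_free():
 *
 *	vm_offset_t kva;
 *
 *	kva = kmem_alloc_pageable(kernel_map, 4 * PAGE_SIZE);
 *	if (kva == 0)
 *		return (ENOMEM);	(no space left in the map)
 *	...
 *	kmem_free(kernel_map, kva, 4 * PAGE_SIZE);
 *
 * kmem_alloc_nofault() is called the same way; the MAP_NOFAULT entry it
 * creates is intended for callers that manage the backing pages
 * themselves and do not want the range serviced by the fault path.
 */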

/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc(map, size)
	register vm_map_t map;
	register vm_size_t size;
{
	vm_offset_t addr;
	register vm_offset_t offset;
	vm_offset_t i;

	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages. Assume that no
	 * region of the kernel object is referenced more than once.
	 */

	/*
	 * Locate sufficient space in the map. This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_pageable.  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_allocing a new page
	 *    for it.
	 * 3) vm_map_pageable calls vm_fault; there is no page, but there is
	 *    a pager, so we call pager_data_request.  But the kmsg zone is
	 *    empty, so we must kmem_alloc.
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data back
	 *    from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent
	 * a race with page-out.  vm_map_pageable will wire the pages.
	 */

	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
		    VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		if ((mem->flags & PG_ZERO) == 0)
			vm_page_zero_fill(mem);
		mem->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(mem, PG_ZERO);
		vm_page_wakeup(mem);
	}

	/*
	 * And finally, mark the data as non-pageable.
	 */

	(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE);

	return (addr);
}

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(map, addr, size)
	vm_map_t map;
	register vm_offset_t addr;
	vm_size_t size;
{
	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 */
vm_map_t
kmem_suballoc(parent, min, max, size)
	register vm_map_t parent;
	vm_offset_t *min, *max;
	register vm_size_t size;
{
	register int ret;
	vm_map_t result;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
	    min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
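
/*
 * Usage sketch (illustrative; the size is arbitrary): the submaps
 * declared at the top of this file are carved out of kernel_map at
 * boot time with calls of this shape:
 *
 *	vm_offset_t minaddr, maxaddr;
 *	vm_map_t map;
 *
 *	map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 *	    32 * PAGE_SIZE);
 *
 * On return, [minaddr, maxaddr) is reserved in the parent map and is
 * managed only through the returned submap.  Note that kmem_suballoc()
 * panics on failure rather than returning NULL, so callers need no
 * error check.
 */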

/*
 *	kmem_malloc:
 *
 *	Allocate wired-down memory in the kernel's address map for the higher
 *	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 *	kmem_alloc() because we may need to allocate memory at interrupt
 *	level where we cannot block (canwait == FALSE).
 *
 *	This routine has its own private kernel submap (kmem_map) and object
 *	(kmem_object).  This, combined with the fact that only malloc uses
 *	this routine, ensures that we will never block in map or object waits.
 *
 *	Note that this still only works in a uni-processor environment and
 *	when called at splhigh().
 *
 *	We don't worry about expanding the map (adding entries) since entries
 *	for wired maps are statically allocated.
 *
 *	NOTE:  This routine is not supposed to block if M_NOWAIT is set, but
 *	I have not verified that it actually does not block.
 */
vm_offset_t
kmem_malloc(map, size, flags)
	register vm_map_t map;
	register vm_size_t size;
	int flags;
{
	register vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;

	if (map != kmem_map && map != mb_map)
		panic("kmem_malloc: map != {kmem,mb}_map");

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		if (map == mb_map) {
			mb_map_full = TRUE;
			printf("Out of mbuf clusters - adjust NMBCLUSTERS or increase maxusers!\n");
			return (0);
		}
		if ((flags & M_NOWAIT) == 0)
			panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
			    (long)size, (long)map->size);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);

	for (i = 0; i < size; i += PAGE_SIZE) {
		/*
		 * Note: if M_NOWAIT is specified alone, allocate from
		 * interrupt-safe queues only (just the free list).  If
		 * M_ASLEEP or M_USE_RESERVE is also specified, we can also
		 * allocate from the cache.  Neither of the latter two
		 * flags may be specified from an interrupt since interrupts
		 * are not allowed to mess with the cache queue.
		 */
retry:
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i),
		    ((flags & (M_NOWAIT|M_ASLEEP|M_USE_RESERVE)) == M_NOWAIT) ?
		    VM_ALLOC_INTERRUPT :
		    VM_ALLOC_SYSTEM);

		/*
		 * Ran out of space, free everything up and return.  Don't
		 * need to lock page queues here as we know that the pages we
		 * got aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0) {
				vm_map_unlock(map);
				VM_WAIT;
				vm_map_lock(map);
				goto retry;
			}
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			if (flags & M_ASLEEP) {
				VM_AWAIT;
			}
			return (0);
		}
		vm_page_flag_clear(m, PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Mark map entry as non-pageable.  Assert: vm_map_insert() will
	 * never be able to extend the previous entry so there will be a new
	 * entry exactly corresponding to this address range and it will have
	 * wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count != 0)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count = 1;

	vm_map_simplify_entry(map, entry);

	/*
	 * Loop thru pages, entering them in the pmap.  (We cannot add them
	 * to the wired count without wrapping the vm_page_queue_lock in
	 * splimp...)
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		vm_page_wire(m);
		vm_page_wakeup(m);
		/*
		 * Because this is kernel_pmap, this call will not block.
		 */
		pmap_enter(kernel_pmap, addr + i, VM_PAGE_TO_PHYS(m),
		    VM_PROT_ALL, 1);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
	}
	vm_map_unlock(map);

	return (addr);
}
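
/*
 * Usage sketch (illustrative): only the kernel malloc layer is expected
 * to call kmem_malloc(), and only on kmem_map or mb_map.  The flag
 * cases below restate the behavior implemented above:
 *
 *	addr = kmem_malloc(kmem_map, size, 0);
 *		blocking allocation; sleeps in VM_WAIT for free pages
 *		and panics if kmem_map itself is exhausted
 *	addr = kmem_malloc(kmem_map, size, M_NOWAIT);
 *		may return 0; draws from the free list only
 *		(VM_ALLOC_INTERRUPT), so it is safe at interrupt time
 *	addr = kmem_malloc(kmem_map, size, M_NOWAIT | M_USE_RESERVE);
 *		may return 0; may also draw from the cache queue
 *		(VM_ALLOC_SYSTEM), so it must not be used from an
 *		interrupt
 */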

/*
 *	kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */

vm_offset_t
kmem_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, PVM, "kmaw", 0);
	}
	vm_map_insert(map, NULL, (vm_offset_t) 0, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	wakeup(map);
	vm_map_unlock(map);
}

/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */

void
kmem_init(start, end)
	vm_offset_t start, end;
{
	register vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	kernel_map->system_map = 1;
	(void) vm_map_insert(m, NULL, (vm_offset_t) 0,
	    VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
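
/*
 * Usage sketch (illustrative; "sz" stands in for a caller-supplied
 * size): kmem_alloc_wait() and kmem_free_wakeup() are meant to be used
 * as a pair on a contended submap such as exec_map, so that one
 * consumer's release wakes the next waiter:
 *
 *	addr = kmem_alloc_wait(exec_map, sz);
 *	...
 *	kmem_free_wakeup(exec_map, addr, sz);
 *
 * kmem_alloc_wait() returns 0 only when the request can never succeed
 * (it is larger than the whole map); otherwise it tsleep()s on the map
 * until kmem_free_wakeup() announces that space was released.
 */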