vm_kern.c revision 127961
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Kernel memory management.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_kern.c 127961 2004-04-06 20:15:37Z imp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

vm_map_t kernel_map=0;
vm_map_t kmem_map=0;
vm_map_t exec_map=0;
vm_map_t pipe_map;
vm_map_t buffer_map=0;

/*
 * kmem_alloc_pageable:
 *
 *	Allocate pageable memory to the kernel's address map.
 *	"map" must be kernel_map or a submap of kernel_map.
 */
vm_offset_t
kmem_alloc_pageable(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}

/*
 * kmem_alloc_nofault:
 *
 *	Allocate a virtual address range with no underlying object and
 *	no initial mapping to physical memory.  Any mapping from this
 *	range to physical memory must be explicitly created prior to
 *	its use, typically with pmap_qenter().  Any attempt to create
 *	a mapping on demand through vm_fault() will result in a panic.
 */
vm_offset_t
kmem_alloc_nofault(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}

/*
 * kmem_alloc:
 *
 *	Allocate wired-down, zero-filled memory in the kernel's
 *	address map or a submap.
 */
vm_offset_t
kmem_alloc(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	vm_offset_t offset;
	vm_offset_t i;

	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages.  Assume that no
	 * region of the kernel object is referenced more than once.
	 */

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_wire.  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_allocing a new page
	 *    for it.
	 * 3) vm_map_wire calls vm_fault; there is no page, but there is a
	 *    pager, so we call pager_data_request.  But the kmsg zone is
	 *    empty, so we must kmem_alloc.
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data back
	 *    from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent
	 * a race with page-out.  vm_map_wire will wire the pages.
	 */
	VM_OBJECT_LOCK(kernel_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
		    VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		if ((mem->flags & PG_ZERO) == 0)
			pmap_zero_page(mem);
		mem->valid = VM_PAGE_BITS_ALL;
		vm_page_lock_queues();
		vm_page_unmanage(mem);
		vm_page_wakeup(mem);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(kernel_object);

	/*
	 * And finally, mark the data as non-pageable.
	 */
	(void) vm_map_wire(map, addr, addr + size,
	    VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);

	return (addr);
}
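
/*
 * Illustrative sketch, kept under #if 0 so it is never compiled: how a
 * kernel subsystem might pair kmem_alloc() with kmem_free() on
 * kernel_map.  The function example_wired_buffer() is hypothetical;
 * only kernel_map, kmem_alloc(), and kmem_free() are the interfaces
 * defined in this file.
 */
#if 0
static void
example_wired_buffer(void)
{
	vm_offset_t va;
	vm_size_t len;

	len = 4 * PAGE_SIZE;
	/* Returns zero-filled, wired pages, or 0 on failure. */
	va = kmem_alloc(kernel_map, len);
	if (va == 0)
		return;
	/* ... use the buffer at va ... */
	/* Unmap the range and free the underlying physical pages. */
	kmem_free(kernel_map, va, len);
}
#endif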

/*
 * kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{

	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

/*
 * kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 */
vm_map_t
kmem_suballoc(parent, min, max, size)
	vm_map_t parent;
	vm_offset_t *min, *max;
	vm_size_t size;
{
	int ret;
	vm_map_t result;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
	    min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
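
/*
 * Illustrative sketch, kept under #if 0 so it is never compiled:
 * carving a private submap out of kernel_map with kmem_suballoc(), in
 * the style the kernel uses at boot to create exec_map and pipe_map.
 * The names example_map, example_min, and example_max are
 * hypothetical.
 */
#if 0
static vm_map_t example_map;

static void
example_submap_init(void)
{
	vm_offset_t example_min, example_max;

	/* Reserve 16 pages of KVA and wrap them in a submap. */
	example_map = kmem_suballoc(kernel_map, &example_min, &example_max,
	    16 * PAGE_SIZE);
	/* kmem_suballoc() panics rather than returning NULL on failure. */
}
#endif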

/*
 * kmem_malloc:
 *
 *	Allocate wired-down memory in the kernel's address map for the higher
 *	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 *	kmem_alloc() because we may need to allocate memory at interrupt
 *	level where we cannot block (canwait == FALSE).
 *
 *	This routine has its own private kernel submap (kmem_map) and object
 *	(kmem_object).  This, combined with the fact that only malloc uses
 *	this routine, ensures that we will never block in map or object waits.
 *
 *	Note that this still only works in a uni-processor environment and
 *	when called at splhigh().
 *
 *	We don't worry about expanding the map (adding entries) since entries
 *	for wired maps are statically allocated.
 *
 *	NOTE:  This routine is not supposed to block if M_NOWAIT is set, but
 *	I have not verified that it actually does not block.
 *
 *	`map' is ONLY allowed to be kmem_map or one of the mbuf submaps,
 *	from which we never free.
 */
vm_offset_t
kmem_malloc(map, size, flags)
	vm_map_t map;
	vm_size_t size;
	int flags;
{
	vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;
	int pflags;

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		if (map != kmem_map) {
			static int last_report; /* when we did it (in ticks) */
			if (ticks < last_report ||
			    (ticks - last_report) >= hz) {
				last_report = ticks;
				printf("Out of mbuf address space!\n");
				printf("Consider increasing NMBCLUSTERS\n");
			}
			return (0);
		}
		if ((flags & M_NOWAIT) == 0)
			panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
			    (long)size, (long)map->size);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);

	/*
	 * Note: if M_NOWAIT is specified alone, allocate from the
	 * interrupt-safe queues only (just the free list).  If
	 * M_USE_RESERVE is also specified, we can also
	 * allocate from the cache.  Neither of the latter two
	 * flags may be specified from an interrupt since interrupts
	 * are not allowed to mess with the cache queue.
	 */
	if ((flags & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
	else
		pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;

	if (flags & M_ZERO)
		pflags |= VM_ALLOC_ZERO;

	VM_OBJECT_LOCK(kmem_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
retry:
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), pflags);

		/*
		 * Ran out of space, free everything up and return.  Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0) {
				VM_OBJECT_UNLOCK(kmem_object);
				vm_map_unlock(map);
				VM_WAIT;
				vm_map_lock(map);
				VM_OBJECT_LOCK(kmem_object);
				goto retry;
			}
			/*
			 * Free the pages before removing the map entry.
			 * They are already marked busy.  Calling
			 * vm_map_delete before the pages have been freed or
			 * unbusied will cause a deadlock.
			 */
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object,
				    OFF_TO_IDX(offset + i));
				vm_page_lock_queues();
				vm_page_unwire(m, 0);
				vm_page_free(m);
				vm_page_unlock_queues();
			}
			VM_OBJECT_UNLOCK(kmem_object);
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			return (0);
		}
		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_lock_queues();
		vm_page_unmanage(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(kmem_object);

	/*
	 * Mark map entry as non-pageable.  Assert: vm_map_insert() will never
	 * be able to extend the previous entry so there will be a new entry
	 * exactly corresponding to this address range and it will have
	 * wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count != 0)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count = 1;

	/*
	 * At this point, the kmem_object must be unlocked because
	 * vm_map_simplify_entry() calls vm_object_deallocate(), which
	 * locks the kmem_object.
	 */
	vm_map_simplify_entry(map, entry);

	/*
	 * Loop through the pages, entering them in the pmap.  (We cannot add
	 * them to the wired count without wrapping the vm_page_queue_lock in
	 * splimp...)
	 */
	VM_OBJECT_LOCK(kmem_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		/*
		 * Because this is kernel_pmap, this call will not block.
		 */
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
		vm_page_lock_queues();
		vm_page_flag_set(m, PG_WRITEABLE | PG_REFERENCED);
		vm_page_wakeup(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(kmem_object);
	vm_map_unlock(map);

	return (addr);
}
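
/*
 * Illustrative sketch, kept under #if 0 so it is never compiled: the
 * two interesting flag cases for kmem_malloc().  In practice only the
 * malloc(9) layer in kern/kern_malloc.c calls this routine;
 * example_get_page() is hypothetical.
 */
#if 0
static vm_offset_t
example_get_page(int canblock)
{
	vm_offset_t va;

	if (canblock)
		/* May sleep in VM_WAIT until pages become available. */
		va = kmem_malloc(kmem_map, PAGE_SIZE, M_WAITOK | M_ZERO);
	else
		/* Interrupt-safe: returns 0 instead of sleeping. */
		va = kmem_malloc(kmem_map, PAGE_SIZE, M_NOWAIT);
	return (va);
}
#endif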

/*
 * kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmem_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			return (0);
		}
		map->needs_wakeup = TRUE;
		vm_map_unlock_and_wait(map, FALSE);
	}
	vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL,
	    VM_PROT_ALL, 0);
	vm_map_unlock(map);
	return (addr);
}

/*
 * kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{

	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	if (map->needs_wakeup) {
		map->needs_wakeup = FALSE;
		vm_map_wakeup(map);
	}
	vm_map_unlock(map);
}

/*
 * kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(start, end)
	vm_offset_t start, end;
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	m->system_map = 1;
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	(void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
	    VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
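
/*
 * Illustrative sketch, kept under #if 0 so it is never compiled: the
 * sleep/wakeup pairing on a submap, in the way the exec path uses
 * exec_map for argument buffers.  example_argument_buffer() is
 * hypothetical.
 */
#if 0
static void
example_argument_buffer(void)
{
	vm_offset_t va;

	/* Sleeps until exec_map has room; fails only if it never can. */
	va = kmem_alloc_wait(exec_map, round_page(ARG_MAX));
	if (va == 0)
		return;
	/* ... copy argument strings into the buffer at va ... */
	/* Free the range and wake any thread blocked in kmem_alloc_wait(). */
	kmem_free_wakeup(exec_map, va, round_page(ARG_MAX));
}
#endif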