vm_kern.c revision 124195
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Kernel memory management.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_kern.c 124195 2004-01-06 20:52:55Z alc $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

vm_map_t kernel_map=0;
vm_map_t kmem_map=0;
vm_map_t exec_map=0;
vm_map_t pipe_map;
vm_map_t buffer_map=0;

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory in the kernel's address map.
 *	"map" must be kernel_map or a submap of kernel_map.
 */
vm_offset_t
kmem_alloc_pageable(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}

/*
 *	kmem_alloc_nofault:
 *
 *	Allocate a virtual address range with no underlying object and
 *	no initial mapping to physical memory.  Any mapping from this
 *	range to physical memory must be explicitly created prior to
 *	its use, typically with pmap_qenter().  Any attempt to create
 *	a mapping on demand through vm_fault() will result in a panic.
 */
vm_offset_t
kmem_alloc_nofault(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
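/*
 * Illustrative sketch, not part of this file: a typical consumer of
 * kmem_alloc_nofault() first reserves kernel virtual address space and
 * only then backs it with physical pages via pmap_qenter(), as the
 * comment above describes.  The function and variable names below
 * (example_map_pages, pages, npages) are hypothetical.
 */
#if 0
static vm_offset_t
example_map_pages(vm_page_t *pages, int npages)
{
	vm_offset_t kva;

	/* Reserve VA only; touching it before pmap_qenter() would panic. */
	kva = kmem_alloc_nofault(kernel_map, npages * PAGE_SIZE);
	if (kva == 0)
		return (0);
	/* Create the physical mappings explicitly. */
	pmap_qenter(kva, pages, npages);
	return (kva);
}
#endif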
/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	vm_offset_t offset;
	vm_offset_t i;

	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages.  Assume that no
	 * region of the kernel object is referenced more than once.
	 */

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_wire.  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_alloc'ing a new
	 *    page for it.
	 * 3) vm_map_wire calls vm_fault; there is no page, but there is
	 *    a pager, so we call pager_data_request.  But the kmsg zone
	 *    is empty, so we must kmem_alloc.
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data
	 *    back from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent
	 * a race with page-out.  vm_map_wire will wire the pages.
	 */
	VM_OBJECT_LOCK(kernel_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
		    VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		if ((mem->flags & PG_ZERO) == 0)
			pmap_zero_page(mem);
		mem->valid = VM_PAGE_BITS_ALL;
		vm_page_lock_queues();
		vm_page_wakeup(mem);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(kernel_object);

	/*
	 * And finally, mark the data as non-pageable.
	 */
	(void) vm_map_wire(map, addr, addr + size,
	    VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);

	return (addr);
}

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{

	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 */
vm_map_t
kmem_suballoc(parent, min, max, size)
	vm_map_t parent;
	vm_offset_t *min, *max;
	vm_size_t size;
{
	int ret;
	vm_map_t result;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
	    min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
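/*
 * Illustrative sketch, not part of this file: submaps such as exec_map
 * and pipe_map are carved out of kernel_map at boot time with
 * kmem_suballoc().  The submap size below is a made-up example value,
 * and example_create_submap is a hypothetical name.
 */
#if 0
static void
example_create_submap(void)
{
	vm_offset_t minaddr, maxaddr;
	vm_map_t example_map;

	/* Reserve 16 pages of KVA and wrap them in their own map. */
	example_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    16 * PAGE_SIZE);
	/* Allocations in the submap now come from [minaddr, maxaddr). */
}
#endif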
/*
 *	kmem_malloc:
 *
 *	Allocate wired-down memory in the kernel's address map for the higher
 *	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 *	kmem_alloc() because we may need to allocate memory at interrupt
 *	level where we cannot block (canwait == FALSE).
 *
 *	This routine has its own private kernel submap (kmem_map) and object
 *	(kmem_object).  This, combined with the fact that only malloc uses
 *	this routine, ensures that we will never block in map or object waits.
 *
 *	Note that this still only works in a uni-processor environment and
 *	when called at splhigh().
 *
 *	We don't worry about expanding the map (adding entries) since entries
 *	for wired maps are statically allocated.
 *
 *	NOTE: This routine is not supposed to block if M_NOWAIT is set, but
 *	I have not verified that it actually does not block.
 *
 *	`map' is ONLY allowed to be kmem_map or one of the mbuf submaps to
 *	which we never free.
 */
vm_offset_t
kmem_malloc(map, size, flags)
	vm_map_t map;
	vm_size_t size;
	int flags;
{
	vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;
	int pflags;

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		if (map != kmem_map) {
			static int last_report; /* when we did it (in ticks) */
			if (ticks < last_report ||
			    (ticks - last_report) >= hz) {
				last_report = ticks;
				printf("Out of mbuf address space!\n");
				printf("Consider increasing NMBCLUSTERS\n");
			}
			return (0);
		}
		if ((flags & M_NOWAIT) == 0)
			panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
			    (long)size, (long)map->size);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);

	/*
	 * Note: if M_NOWAIT is specified alone, allocate from
	 * interrupt-safe queues only (just the free list).  If
	 * M_USE_RESERVE is also specified, we can also allocate
	 * from the cache.  Neither of the latter two flags may be
	 * specified from an interrupt since interrupts are not
	 * allowed to mess with the cache queue.
	 */
	if ((flags & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
	else
		pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;

	if (flags & M_ZERO)
		pflags |= VM_ALLOC_ZERO;

	VM_OBJECT_LOCK(kmem_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
retry:
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), pflags);

		/*
		 * Ran out of space, free everything up and return.  Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0) {
				VM_OBJECT_UNLOCK(kmem_object);
				vm_map_unlock(map);
				VM_WAIT;
				vm_map_lock(map);
				VM_OBJECT_LOCK(kmem_object);
				goto retry;
			}
			/*
			 * Free the pages before removing the map entry.
			 * They are already marked busy.  Calling
			 * vm_map_delete before the pages have been freed or
			 * unbusied will cause a deadlock.
			 */
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object,
				    OFF_TO_IDX(offset + i));
				vm_page_lock_queues();
				vm_page_unwire(m, 0);
				vm_page_free(m);
				vm_page_unlock_queues();
			}
			VM_OBJECT_UNLOCK(kmem_object);
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			return (0);
		}
		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_lock_queues();
		vm_page_unmanage(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(kmem_object);

	/*
	 * Mark map entry as non-pageable.  Assert: vm_map_insert() will never
	 * be able to extend the previous entry so there will be a new entry
	 * exactly corresponding to this address range and it will have
	 * wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count != 0)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count = 1;

	/*
	 * At this point, the kmem_object must be unlocked because
	 * vm_map_simplify_entry() calls vm_object_deallocate(), which
	 * locks the kmem_object.
	 */
	vm_map_simplify_entry(map, entry);

	/*
	 * Loop through the pages, entering them in the pmap.  (We cannot add
	 * them to the wired count without wrapping the vm_page_queue_lock in
	 * splimp...)
	 */
	VM_OBJECT_LOCK(kmem_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		/*
		 * Because this is kernel_pmap, this call will not block.
		 */
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
		vm_page_lock_queues();
		vm_page_flag_set(m, PG_WRITEABLE | PG_REFERENCED);
		vm_page_wakeup(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(kmem_object);
	vm_map_unlock(map);

	return (addr);
}
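/*
 * Illustrative sketch, not part of this file: how the flag handling
 * above plays out for a hypothetical direct caller (in practice only
 * kern_malloc.c calls this routine).  With M_NOWAIT the call may fail
 * and return 0; otherwise kmem_malloc() sleeps in VM_WAIT until pages
 * are available, and panics only if kmem_map itself is exhausted.
 * example_wired_alloc and can_sleep are made-up names.
 */
#if 0
static void *
example_wired_alloc(vm_size_t len, int can_sleep)
{
	vm_offset_t va;

	va = kmem_malloc(kmem_map, len, can_sleep ? M_WAITOK : M_NOWAIT);
	return ((void *)va);	/* NULL on failure with M_NOWAIT */
}
#endif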
/*
 *	kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmem_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			return (0);
		}
		map->needs_wakeup = TRUE;
		vm_map_unlock_and_wait(map, FALSE);
	}
	vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{

	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	if (map->needs_wakeup) {
		map->needs_wakeup = FALSE;
		vm_map_wakeup(map);
	}
	vm_map_unlock(map);
}

/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(start, end)
	vm_offset_t start, end;
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	m->system_map = 1;
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	(void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
	    VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
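/*
 * Illustrative sketch, not part of this file: kmem_alloc_wait() and
 * kmem_free_wakeup() pair up as a blocking allocator over a fixed-size
 * submap; the exec argument buffers use exec_map this way.  The buffer
 * size below and example_submap_cycle are hypothetical.
 */
#if 0
static void
example_submap_cycle(void)
{
	vm_offset_t buf;
	vm_size_t len = 4 * PAGE_SIZE;

	/* Sleeps on the map until another thread frees enough space. */
	buf = kmem_alloc_wait(exec_map, len);
	if (buf == 0)
		return;		/* request larger than the whole submap */
	/* ... use [buf, buf + len) ... */
	kmem_free_wakeup(exec_map, buf, len);	/* wakes any waiters */
}
#endif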