vm_kern.c revision 115997
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_kern.c 115997 2003-06-07 23:24:10Z alc $
 */

/*
 *	Kernel memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

vm_map_t kernel_map=0;
vm_map_t kmem_map=0;
vm_map_t exec_map=0;
vm_map_t clean_map=0;
vm_map_t buffer_map=0;

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory to the kernel's address map.
 *	"map" must be kernel_map or a submap of kernel_map.
 */
vm_offset_t
kmem_alloc_pageable(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}

/*
 *	kmem_alloc_nofault:
 *
 *	Same as kmem_alloc_pageable, except that it creates a nofault entry.
 */
vm_offset_t
kmem_alloc_nofault(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
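/*
 * Example (illustrative sketch only, not part of this file): reserving
 * pageable kernel virtual address space.  The size is hypothetical, and
 * the range is released with kmem_free(), defined below.
 *
 *	vm_offset_t kva;
 *
 *	kva = kmem_alloc_pageable(kernel_map, 4 * PAGE_SIZE);
 *	if (kva == 0)
 *		return (ENOMEM);
 *	...
 *	kmem_free(kernel_map, kva, 4 * PAGE_SIZE);
 */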
/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	vm_offset_t offset;
	vm_offset_t i;

	GIANT_REQUIRED;

	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages.  Assume that no
	 * region of the kernel object is referenced more than once.
	 */

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_wire.  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 * kernel_object.  2) The kmsg zone is empty, and so we are
	 * kmem_allocing a new page for it.  3) vm_map_wire calls vm_fault;
	 * there is no page, but there is a pager, so we call
	 * pager_data_request.  But the kmsg zone is empty, so we must
	 * kmem_alloc.  4) goto 1  5) Even if the kmsg zone is not empty: when
	 * we get the data back from the pager, it will be (very stale)
	 * non-zero data.  kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent
	 * a race with page-out.  vm_map_wire will wire the pages.
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		VM_OBJECT_LOCK(kernel_object);
		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
		    VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		VM_OBJECT_UNLOCK(kernel_object);
		if ((mem->flags & PG_ZERO) == 0)
			pmap_zero_page(mem);
		vm_page_lock_queues();
		mem->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(mem, PG_ZERO);
		vm_page_wakeup(mem);
		vm_page_unlock_queues();
	}

	/*
	 * And finally, mark the data as non-pageable.
	 */
	(void) vm_map_wire(map, addr, addr + size, FALSE);

	return (addr);
}

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{

	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}
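/*
 * Example (illustrative sketch only, not part of this file): allocating
 * and freeing a wired buffer; `len' is a hypothetical caller-supplied
 * length.  kmem_alloc() returns 0 on failure and hands back memory that
 * is already zero-filled and wired.
 *
 *	vm_size_t bytes = round_page(len);
 *	vm_offset_t va;
 *
 *	va = kmem_alloc(kernel_map, bytes);
 *	if (va == 0)
 *		return (ENOMEM);
 *	...
 *	kmem_free(kernel_map, va, bytes);
 */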
/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 */
vm_map_t
kmem_suballoc(parent, min, max, size)
	vm_map_t parent;
	vm_offset_t *min, *max;
	vm_size_t size;
{
	int ret;
	vm_map_t result;

	GIANT_REQUIRED;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
	    min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
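/*
 * Example (illustrative sketch only; the size is hypothetical, loosely
 * after the boot-time setup in vm/vm_init.c): carving a submap out of
 * kernel_map so that a subsystem can manage its own KVA range.
 *
 *	vm_offset_t minaddr, maxaddr;
 *
 *	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 *	    16 * PAGE_SIZE);
 */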
/*
 *	kmem_malloc:
 *
 *	Allocate wired-down memory in the kernel's address map for the higher
 *	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 *	kmem_alloc() because we may need to allocate memory at interrupt
 *	level where we cannot block (canwait == FALSE).
 *
 *	This routine has its own private kernel submap (kmem_map) and object
 *	(kmem_object).  This, combined with the fact that only malloc uses
 *	this routine, ensures that we will never block in map or object waits.
 *
 *	Note that this still only works in a uni-processor environment and
 *	when called at splhigh().
 *
 *	We don't worry about expanding the map (adding entries) since entries
 *	for wired maps are statically allocated.
 *
 *	NOTE:  This routine is not supposed to block if M_NOWAIT is set, but
 *	I have not verified that it actually does not block.
 *
 *	`map' is ONLY allowed to be kmem_map or one of the mbuf submaps to
 *	which we never free.
 */
vm_offset_t
kmem_malloc(map, size, flags)
	vm_map_t map;
	vm_size_t size;
	int flags;
{
	vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;
	int pflags;

	if ((flags & M_NOWAIT) == 0)
		GIANT_REQUIRED;

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		if (map != kmem_map) {
			static int last_report; /* when we did it (in ticks) */
			if (ticks < last_report ||
			    (ticks - last_report) >= hz) {
				last_report = ticks;
				printf("Out of mbuf address space!\n");
				printf("Consider increasing NMBCLUSTERS\n");
			}
			return (0);
		}
		if ((flags & M_NOWAIT) == 0)
			panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
				(long)size, (long)map->size);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);

	/*
	 * Note: if M_NOWAIT is specified alone, allocate from the
	 * interrupt-safe queues only (just the free list).  If
	 * M_USE_RESERVE is also specified, we can also allocate from the
	 * cache.  Neither of the latter two flags may be specified from an
	 * interrupt since interrupts are not allowed to mess with the
	 * cache queue.
	 */

	if ((flags & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
	else
		pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;

	if (flags & M_ZERO)
		pflags |= VM_ALLOC_ZERO;

	VM_OBJECT_LOCK(kmem_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
retry:
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), pflags);

		/*
		 * Ran out of space, free everything up and return.  Don't
		 * need to lock page queues here as we know that the pages we
		 * got aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0) {
				VM_OBJECT_UNLOCK(kmem_object);
				vm_map_unlock(map);
				VM_WAIT;
				vm_map_lock(map);
				VM_OBJECT_LOCK(kmem_object);
				goto retry;
			}
			/*
			 * Free the pages before removing the map entry.
			 * They are already marked busy.  Calling
			 * vm_map_delete before the pages have been freed or
			 * unbusied will cause a deadlock.
			 */
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object,
				    OFF_TO_IDX(offset + i));
				vm_page_lock_queues();
				vm_page_unwire(m, 0);
				vm_page_free(m);
				vm_page_unlock_queues();
			}
			VM_OBJECT_UNLOCK(kmem_object);
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			return (0);
		}
		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		vm_page_lock_queues();
		vm_page_flag_clear(m, PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(kmem_object);

	/*
	 * Mark map entry as non-pageable.  Assert: vm_map_insert() will
	 * never be able to extend the previous entry so there will be a new
	 * entry exactly corresponding to this address range and it will
	 * have wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count != 0)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count = 1;

	vm_map_simplify_entry(map, entry);

	/*
	 * Loop through pages, entering them in the pmap.  (We cannot add
	 * them to the wired count without wrapping the vm_page_queue_lock in
	 * splimp...)
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		VM_OBJECT_LOCK(kmem_object);
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		VM_OBJECT_UNLOCK(kmem_object);
		/*
		 * Because this is kernel_pmap, this call will not block.
		 */
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
		vm_page_lock_queues();
		vm_page_flag_set(m, PG_WRITEABLE | PG_REFERENCED);
		vm_page_wakeup(m);
		vm_page_unlock_queues();
	}
	vm_map_unlock(map);

	return (addr);
}
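/*
 * Example (illustrative sketch only; the wrapper name is hypothetical,
 * loosely after the page-level back end in kern/kern_malloc.c): the
 * kernel malloc layer obtains wired pages from kmem_map, passing the
 * caller's wait/zero semantics through in `flags'.
 *
 *	static void *
 *	page_alloc_sketch(vm_size_t bytes, int flags)
 *	{
 *
 *		return ((void *) kmem_malloc(kmem_map, bytes, flags));
 *	}
 */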
/*
 *	kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmem_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			return (0);
		}
		map->needs_wakeup = TRUE;
		vm_map_unlock_and_wait(map, FALSE);
	}
	vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{

	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	if (map->needs_wakeup) {
		map->needs_wakeup = FALSE;
		vm_map_wakeup(map);
	}
	vm_map_unlock(map);
}

/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(start, end)
	vm_offset_t start, end;
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	m->system_map = 1;
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	(void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
	    VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
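/*
 * Example (illustrative sketch only; the submap and `len' are
 * hypothetical): the kmem_alloc_wait()/kmem_free_wakeup() pair above
 * gives a submap blocking-allocator semantics.  A caller may sleep in
 * kmem_alloc_wait() until another thread returns space to the same map
 * via kmem_free_wakeup().
 *
 *	vm_offset_t kva;
 *
 *	kva = kmem_alloc_wait(exec_map, round_page(len));
 *	if (kva != 0) {
 *		...
 *		kmem_free_wakeup(exec_map, kva, round_page(len));
 *	}
 */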