vm_kern.c revision 118096
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Kernel memory management.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_kern.c 118096 2003-07-27 18:31:32Z alc $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

vm_map_t kernel_map=0;
vm_map_t kmem_map=0;
vm_map_t exec_map=0;
vm_map_t clean_map=0;
vm_map_t buffer_map=0;

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory to the kernel's address map.
 *	"map" must be kernel_map or a submap of kernel_map.
 */
vm_offset_t
kmem_alloc_pageable(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}

/*
 *	kmem_alloc_nofault:
 *
 *	Same as kmem_alloc_pageable, except that it creates a nofault entry.
 */
vm_offset_t
kmem_alloc_nofault(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
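/*
 * Usage sketch (illustrative only, not part of this file): a nofault
 * entry is useful where a deliberately unmapped guard region is wanted,
 * because a fault within the entry panics instead of paging.  Kernel
 * stack allocation does roughly this (compare vm_glue.c):
 *
 *	vm_offset_t ks;
 *
 *	ks = kmem_alloc_nofault(kernel_map,
 *	    (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE);
 *	if (ks == 0)
 *		panic("could not reserve kstack + guard pages");
 */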
/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	vm_offset_t offset;
	vm_offset_t i;

	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages. Assume that no
	 * region of the kernel object is referenced more than once.
	 */

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_wire.  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_allocing a new page
	 *    for it.
	 * 3) vm_map_wire calls vm_fault; there is no page, but there is a
	 *    pager, so we call pager_data_request.  But the kmsg zone is
	 *    empty, so we must kmem_alloc.
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data back
	 *    from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent
	 * a race with page-out.  vm_map_wire will wire the pages.
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		VM_OBJECT_LOCK(kernel_object);
		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
		    VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		VM_OBJECT_UNLOCK(kernel_object);
		if ((mem->flags & PG_ZERO) == 0)
			pmap_zero_page(mem);
		vm_page_lock_queues();
		mem->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(mem, PG_ZERO);
		vm_page_wakeup(mem);
		vm_page_unlock_queues();
	}

	/*
	 * And finally, mark the data as non-pageable.
	 */
	(void) vm_map_wire(map, addr, addr + size, FALSE);

	return (addr);
}

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{

	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 */
vm_map_t
kmem_suballoc(parent, min, max, size)
	vm_map_t parent;
	vm_offset_t *min, *max;
	vm_size_t size;
{
	int ret;
	vm_map_t result;

	GIANT_REQUIRED;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
	    min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
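/*
 * Usage sketch (hypothetical size, for illustration only): this is how
 * the standard submaps declared at the top of this file, e.g. exec_map,
 * are carved out of kernel_map during startup (compare vm/vm_init.c):
 *
 *	vm_offset_t minaddr, maxaddr;
 *
 *	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 *	    16 * PAGE_SIZE);
 *
 * On return, [minaddr, maxaddr) is reserved in kernel_map and managed
 * exclusively through the returned submap.
 */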
/*
 *	kmem_malloc:
 *
 * 	Allocate wired-down memory in the kernel's address map for the higher
 * 	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 * 	kmem_alloc() because we may need to allocate memory at interrupt
 * 	level where we cannot block (canwait == FALSE).
 *
 * 	This routine has its own private kernel submap (kmem_map) and object
 * 	(kmem_object).  This, combined with the fact that only malloc uses
 * 	this routine, ensures that we will never block in map or object waits.
 *
 * 	Note that this still only works in a uni-processor environment and
 * 	when called at splhigh().
 *
 * 	We don't worry about expanding the map (adding entries) since entries
 * 	for wired maps are statically allocated.
 *
 *	NOTE:  This routine is not supposed to block if M_NOWAIT is set, but
 *	I have not verified that it actually does not block.
 *
 *	`map' is ONLY allowed to be kmem_map or one of the mbuf submaps to
 *	which we never free.
 */
vm_offset_t
kmem_malloc(map, size, flags)
	vm_map_t map;
	vm_size_t size;
	int flags;
{
	vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;
	int pflags;

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		if (map != kmem_map) {
			static int last_report; /* when we did it (in ticks) */
			if (ticks < last_report ||
			    (ticks - last_report) >= hz) {
				last_report = ticks;
				printf("Out of mbuf address space!\n");
				printf("Consider increasing NMBCLUSTERS\n");
			}
			return (0);
		}
		if ((flags & M_NOWAIT) == 0)
			panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
			    (long)size, (long)map->size);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);

	/*
	 * Note: if M_NOWAIT is specified alone, allocate from the
	 * interrupt-safe queues only (just the free list).  If
	 * M_USE_RESERVE is also specified, we can also allocate from the
	 * cache.  Neither of the latter two flags may be specified from an
	 * interrupt since interrupts are not allowed to mess with the cache
	 * queue.
	 */
	if ((flags & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
	else
		pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;

	if (flags & M_ZERO)
		pflags |= VM_ALLOC_ZERO;

	VM_OBJECT_LOCK(kmem_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
retry:
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), pflags);

		/*
		 * Ran out of space, free everything up and return. Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0) {
				VM_OBJECT_UNLOCK(kmem_object);
				vm_map_unlock(map);
				VM_WAIT;
				vm_map_lock(map);
				VM_OBJECT_LOCK(kmem_object);
				goto retry;
			}
			/*
			 * Free the pages before removing the map entry.
			 * They are already marked busy.  Calling
			 * vm_map_delete before the pages have been freed or
			 * unbusied will cause a deadlock.
			 */
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object,
				    OFF_TO_IDX(offset + i));
				vm_page_lock_queues();
				vm_page_unwire(m, 0);
				vm_page_free(m);
				vm_page_unlock_queues();
			}
			VM_OBJECT_UNLOCK(kmem_object);
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			return (0);
		}
		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		vm_page_lock_queues();
		vm_page_flag_clear(m, PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(kmem_object);

	/*
	 * Mark map entry as non-pageable. Assert: vm_map_insert() will never
	 * be able to extend the previous entry so there will be a new entry
	 * exactly corresponding to this address range and it will have
	 * wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count != 0)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count = 1;

	vm_map_simplify_entry(map, entry);

	/*
	 * Loop thru pages, entering them in the pmap.  (We cannot add them to
	 * the wired count without wrapping the vm_page_queue_lock in
	 * splimp...)
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		VM_OBJECT_LOCK(kmem_object);
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		VM_OBJECT_UNLOCK(kmem_object);
		/*
		 * Because this is kernel_pmap, this call will not block.
		 */
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
		vm_page_lock_queues();
		vm_page_flag_set(m, PG_WRITEABLE | PG_REFERENCED);
		vm_page_wakeup(m);
		vm_page_unlock_queues();
	}
	vm_map_unlock(map);

	return (addr);
}
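/*
 * Illustrative only (hypothetical call site; the real consumer is the
 * malloc(9) back end in kern/kern_malloc.c, per the comment above):
 *
 *	vm_offset_t va;
 *
 *	va = kmem_malloc(kmem_map, round_page(len), M_NOWAIT | M_ZERO);
 *	if (va == 0)
 *		return (0);	(M_NOWAIT permits failure instead of sleeping)
 *
 * With M_WAITOK instead, the VM_WAIT path above may sleep for pages, so
 * that variant must never be used from interrupt context.
 */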
/*
 *	kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmem_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			return (0);
		}
		map->needs_wakeup = TRUE;
		vm_map_unlock_and_wait(map, FALSE);
	}
	vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{

	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	if (map->needs_wakeup) {
		map->needs_wakeup = FALSE;
		vm_map_wakeup(map);
	}
	vm_map_unlock(map);
}

/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(start, end)
	vm_offset_t start, end;
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	m->system_map = 1;
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	(void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
	    VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
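/*
 * Usage note (for orientation; the call lives in vm/vm_init.c, not
 * here): kmem_init() runs exactly once during bootstrap, before any of
 * the allocators in this file may be used, roughly as:
 *
 *	kmem_init(virtual_avail, virtual_end);
 *
 * where [virtual_avail, virtual_end) is the kernel virtual range still
 * free after early startup allocations.
 */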