vm_kern.c revision 81399
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_kern.c 81399 2001-08-10 06:56:12Z jhb $
 */

/*
 *	Kernel memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

vm_map_t kernel_map=0;
vm_map_t kmem_map=0;
vm_map_t exec_map=0;
vm_map_t clean_map=0;
vm_map_t buffer_map=0;

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory to the kernel's address map.
 *	"map" must be kernel_map or a submap of kernel_map.
 */

vm_offset_t
kmem_alloc_pageable(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	GIANT_REQUIRED;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}

/*
 *	kmem_alloc_nofault:
 *
 *	Same as kmem_alloc_pageable, except that it creates a nofault entry.
 */

vm_offset_t
kmem_alloc_nofault(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	GIANT_REQUIRED;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
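/*
 * Example (a sketch, not part of the original file): a caller that wants
 * pageable kernel VA simply passes kernel_map (or a submap of it) and a
 * byte count; a return of 0 means the map had no room.  The function name
 * example_pageable_va() and the KMEM_EXAMPLES guard are hypothetical.
 */
#ifdef KMEM_EXAMPLES
static vm_offset_t
example_pageable_va(vm_size_t len)
{
	vm_offset_t va;

	/* len is rounded up to a page boundary internally */
	va = kmem_alloc_pageable(kernel_map, len);
	if (va == 0)
		printf("example: no pageable KVA for %ld bytes\n", (long)len);
	return (va);
}
#endif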
/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	vm_offset_t offset;
	vm_offset_t i;

	GIANT_REQUIRED;

	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages.  Assume that no
	 * region of the kernel object is referenced more than once.
	 */

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_pageable.  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_allocing a new page
	 *    for it.
	 * 3) vm_map_pageable calls vm_fault; there is no page, but there is
	 *    a pager, so we call pager_data_request.  But the kmsg zone is
	 *    empty, so we must kmem_alloc.
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data back
	 *    from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent
	 * a race with page-out.  vm_map_pageable will wire the pages.
	 */

	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
		    VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		if ((mem->flags & PG_ZERO) == 0)
			vm_page_zero_fill(mem);
		mem->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(mem, PG_ZERO);
		vm_page_wakeup(mem);
	}

	/*
	 * And finally, mark the data as non-pageable.
	 */

	(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE);

	return (addr);
}

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
	GIANT_REQUIRED;

	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}
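/*
 * Example (illustrative, not from this file): kmem_alloc() and kmem_free()
 * are used as a pair on the same map, passing back the original size so
 * the whole region is removed.  example_wired_buffer() is a hypothetical
 * name; the KMEM_EXAMPLES guard keeps this out of a real build.
 */
#ifdef KMEM_EXAMPLES
static void
example_wired_buffer(void)
{
	vm_offset_t va;
	vm_size_t len = 4 * PAGE_SIZE;

	va = kmem_alloc(kernel_map, len);	/* wired, zero-filled */
	if (va != 0) {
		/* ... use the buffer ... */
		kmem_free(kernel_map, va, len);
	}
}
#endif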
/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 */
vm_map_t
kmem_suballoc(parent, min, max, size)
	vm_map_t parent;
	vm_offset_t *min, *max;
	vm_size_t size;
{
	int ret;
	vm_map_t result;

	GIANT_REQUIRED;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
	    min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
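/*
 * Example (a sketch, not from this file): a subsystem can carve a private
 * submap out of kernel_map at boot, much as clean_map and buffer_map are
 * set up elsewhere.  No error check is needed here because kmem_suballoc()
 * panics on failure.  example_map, example_min, and example_max are
 * hypothetical names.
 */
#ifdef KMEM_EXAMPLES
static vm_map_t example_map;

static void
example_create_submap(vm_size_t len)
{
	vm_offset_t example_min, example_max;

	example_map = kmem_suballoc(kernel_map, &example_min,
	    &example_max, len);
}
#endif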
/*
 *	kmem_malloc:
 *
 * 	Allocate wired-down memory in the kernel's address map for the higher
 * 	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 * 	kmem_alloc() because we may need to allocate memory at interrupt
 * 	level where we cannot block (canwait == FALSE).
 *
 * 	This routine has its own private kernel submap (kmem_map) and object
 * 	(kmem_object).  This, combined with the fact that only malloc uses
 * 	this routine, ensures that we will never block in map or object waits.
 *
 * 	Note that this still only works in a uni-processor environment and
 * 	when called at splhigh().
 *
 * 	We don't worry about expanding the map (adding entries) since entries
 * 	for wired maps are statically allocated.
 *
 *	NOTE:  This routine is not supposed to block if M_NOWAIT is set, but
 *	I have not verified that it actually does not block.
 *
 *	`map' is ONLY allowed to be kmem_map or one of the mbuf submaps,
 *	to which we never free.
 */
vm_offset_t
kmem_malloc(map, size, flags)
	vm_map_t map;
	vm_size_t size;
	int flags;
{
	vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;

	GIANT_REQUIRED;

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		if (map != kmem_map) {
			printf("Out of mbuf address space!\n");
			printf("Consider increasing NMBCLUSTERS\n");
			goto bad;
		}
		if ((flags & M_NOWAIT) == 0)
			panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
			    (long)size, (long)map->size);
		goto bad;
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);

	for (i = 0; i < size; i += PAGE_SIZE) {
		/*
		 * Note: if M_NOWAIT is specified alone, allocate from
		 * interrupt-safe queues only (just the free list).  If
		 * M_USE_RESERVE is also specified, we can also allocate
		 * from the cache.  Neither of the latter two flags may be
		 * specified from an interrupt, since interrupts are not
		 * allowed to mess with the cache queue.
		 */
retry:
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i),
		    ((flags & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT) ?
			VM_ALLOC_INTERRUPT :
			VM_ALLOC_SYSTEM);

		/*
		 * Ran out of space; free everything up and return.  We don't
		 * need to lock the page queues here, as we know that the
		 * pages we got aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0) {
				vm_map_unlock(map);
				VM_WAIT;
				vm_map_lock(map);
				goto retry;
			}
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			goto bad;
		}
		vm_page_flag_clear(m, PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Mark the map entry as non-pageable.  Assert: vm_map_insert() will
	 * never be able to extend the previous entry, so there will be a new
	 * entry exactly corresponding to this address range and it will have
	 * wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count != 0)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count = 1;

	vm_map_simplify_entry(map, entry);

	/*
	 * Loop through the pages, entering them in the pmap.  (We cannot add
	 * them to the wired count without wrapping the vm_page_queue_lock in
	 * splimp...)
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		vm_page_wire(m);
		vm_page_wakeup(m);
		/*
		 * Because this is kernel_pmap, this call will not block.
		 */
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
	}
	vm_map_unlock(map);

	return (addr);

bad:
	return (0);
}

/*
 *	kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */

vm_offset_t
kmem_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	GIANT_REQUIRED;

	size = round_page(size);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, PVM, "kmaw", 0);
	}
	vm_map_insert(map, NULL, (vm_offset_t) 0, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
	GIANT_REQUIRED;

	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	wakeup(map);
	vm_map_unlock(map);
}
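/*
 * Example (illustrative only): kmem_alloc_wait() and kmem_free_wakeup()
 * are used as a pair on the same submap, so an allocator sleeping in
 * kmem_alloc_wait() is woken when another thread returns space.  The pipe
 * code uses this pattern; the names below are hypothetical.
 */
#ifdef KMEM_EXAMPLES
static vm_offset_t
example_wait_alloc(vm_map_t submap, vm_size_t len)
{
	/* May sleep until a concurrent kmem_free_wakeup() makes room. */
	return (kmem_alloc_wait(submap, len));
}

static void
example_wait_free(vm_map_t submap, vm_offset_t va, vm_size_t len)
{
	kmem_free_wakeup(submap, va, len);
}
#endif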
/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */

void
kmem_init(start, end)
	vm_offset_t start, end;
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	kernel_map->system_map = 1;
	(void) vm_map_insert(m, NULL, (vm_offset_t) 0,
	    VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
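/*
 * Example (a sketch of boot-time ordering under assumed startup code, not
 * part of this file): kmem_init() runs once to create kernel_map, after
 * which submaps such as kmem_map are carved from it with kmem_suballoc().
 * example_vm_bootstrap() and the 32MB size are hypothetical.
 */
#ifdef KMEM_EXAMPLES
static void
example_vm_bootstrap(vm_offset_t virtual_avail, vm_offset_t virtual_end)
{
	vm_offset_t minaddr, maxaddr;

	kmem_init(virtual_avail, virtual_end);
	kmem_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    32 * 1024 * 1024);	/* size chosen only for illustration */
}
#endif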