vm_glue.c revision 132898
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_glue.c 132898 2004-07-30 20:31:02Z alc $");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <sys/user.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL)

#ifndef NO_SWAPPING
static void swapout(struct proc *);
static void vm_proc_swapin(struct proc *p);
static void vm_proc_swapout(struct proc *p);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
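 *
 * Illustrative usage sketch (not part of this file): because the map can
 * change between the check and the access, callers still pair this check
 * with a fault-tolerant copy primitive, e.g.
 *
 *	if (useracc(uaddr, len, VM_PROT_READ) &&
 *	    copyin(uaddr, kbuf, len) == 0)
 *		...operate on kbuf...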
 */
int
useracc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}

int
vslock(void *addr, size_t len)
{
	vm_offset_t end, last, start;
	vm_size_t npages;
	int error;

	last = (vm_offset_t)addr + len;
	start = trunc_page((vm_offset_t)addr);
	end = round_page(last);
	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
	PROC_LOCK(curproc);
	if (ptoa(npages +
	    pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))) >
	    lim_cur(curproc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(curproc);
		return (ENOMEM);
	}
	PROC_UNLOCK(curproc);
#if 0
	/*
	 * XXX - not yet
	 *
	 * The limit for transient usage of wired pages should be
	 * larger than for "permanent" wired pages (mlock()).
	 *
	 * Also, the sysctl code, which is the only present user
	 * of vslock(), does a hard loop on EAGAIN.
	 */
	if (npages + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#endif
	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (error == KERN_SUCCESS ? 0 : EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

	/* Rely on the parameter sanity checks performed by vslock(). */
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}

/*
 * Create the U area for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
vm_proc_new(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	u_int i;

	/*
	 * Get a kernel virtual address for the U area for this process.
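	 * kmem_alloc_nofault() only reserves virtual address space in
	 * kernel_map; no backing pages are mapped here.  The pages are
	 * grabbed from upobj below and entered with pmap_qenter().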
	 */
	up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
	if (up == 0)
		panic("vm_proc_new: upage allocation failed");
	p->p_uarea = (struct user *)up;

	/*
	 * Allocate object and page(s) for the U area.
	 */
	upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
	p->p_upages_obj = upobj;
	VM_OBJECT_LOCK(upobj);
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;

		vm_page_lock_queues();
		vm_page_wakeup(m);
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(upobj);

	/*
	 * Enter the pages into the kernel address space.
	 */
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Dispose the U area for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 * XXX proc_zone is marked UMA_ZONE_NOFREE, so this should never be called.
 */
void
vm_proc_dispose(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_dispose: incorrect number of pages in upobj");
	vm_page_lock_queues();
	while ((m = TAILQ_FIRST(&upobj->memq)) != NULL) {
		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
	kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
	vm_object_deallocate(upobj);
}

#ifndef NO_SWAPPING
/*
 * Allow the U area for a process to be prejudicially paged out.
 */
static void
vm_proc_swapout(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapout: incorrect number of pages in upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
}

/*
 * Bring the U area for a specified process back in.
 */
static void
vm_proc_swapin(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	int rv;
	int i;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(upobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_proc_swapin: cannot get upage");
		}
		ma[i] = m;
	}
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapin: lost pages from upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_wire(m);
		vm_page_wakeup(m);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Swap in the UAREAs of all processes swapped out to the given device.
 * The pages in the UAREA are marked dirty and their swap metadata is freed.
 */
void
vm_proc_swapin_all(struct swdevt *devidx)
{
	struct proc *p;
	vm_object_t object;
	vm_page_t m;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		object = p->p_upages_obj;
		if (object != NULL) {
			VM_OBJECT_LOCK(object);
			if (swap_pager_isswapped(object, devidx)) {
				VM_OBJECT_UNLOCK(object);
				sx_sunlock(&allproc_lock);
				faultin(p);
				PROC_UNLOCK(p);
				VM_OBJECT_LOCK(object);
				vm_page_lock_queues();
				TAILQ_FOREACH(m, &object->memq, listq)
					vm_page_dirty(m);
				vm_page_unlock_queues();
				swap_pager_freespace(object, 0,
				    object->un_pager.swp.swp_bcount);
				VM_OBJECT_UNLOCK(object);
				goto retry;
			}
			VM_OBJECT_UNLOCK(object);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
}
#endif

#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the fork perf for a process and
 * creation performance for a thread.
 */
void
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i;

	/* Bounds check */
	if (pages <= 1)
		pages = KSTACK_PAGES;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;
	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
	td->td_kstack_obj = ksobj;
	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
	ks = kmem_alloc_nofault(kernel_map,
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
	if (ks == 0)
		panic("vm_thread_new: kstack allocation failed");
	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack = ks;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
	/*
	 * For the length of the stack, link in a real page of ram for each
	 * page of stack.
	 */
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		/*
		 * Get a kernel stack page.
		 */
		m = vm_page_grab(ksobj, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;
		vm_page_lock_queues();
		vm_page_wakeup(m);
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m;
	int i, pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	pmap_qremove(ks, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_dispose: kstack already missing?");
		vm_page_lock_queues();
		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Allow a thread's kernel stack to be paged out.
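 *
 * The kernel mappings are removed first (pmap_qremove()), and each stack
 * page is marked dirty before being unwired so that the pagedaemon will
 * write it to swap rather than discard its contents.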
 */
void
vm_thread_swapout(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m;
	int i, pages;

	cpu_thread_swapout(td);
	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	pmap_qremove(td->td_kstack, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_swapout: kstack already missing?");
		vm_page_lock_queues();
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
void
vm_thread_swapin(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i, pages, rv;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_thread_swapin: cannot get kstack for proc: %d",
				    td->td_proc->p_pid);
			m = vm_page_lookup(ksobj, i);
			m->valid = VM_PAGE_BITS_ALL;
		}
		ma[i] = m;
		vm_page_lock_queues();
		vm_page_wire(m);
		vm_page_wakeup(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(td->td_kstack, ma, pages);
	cpu_thread_swapin(td);
}

/*
 * Set up a variable-sized alternate kstack.
 */
void
vm_thread_new_altkstack(struct thread *td, int pages)
{

	td->td_altkstack = td->td_kstack;
	td->td_altkstack_obj = td->td_kstack_obj;
	td->td_altkstack_pages = td->td_kstack_pages;

	vm_thread_new(td, pages);
}

/*
 * Restore the original kstack.
 */
void
vm_thread_dispose_altkstack(struct thread *td)
{

	vm_thread_dispose(td);

	td->td_kstack = td->td_altkstack;
	td->td_kstack_obj = td->td_altkstack_obj;
	td->td_kstack_pages = td->td_altkstack_pages;
	td->td_altkstack = 0;
	td->td_altkstack_obj = NULL;
	td->td_altkstack_pages = 0;
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_forkproc(td, p2, td2, flags)
	struct thread *td;
	struct proc *p2;
	struct thread *td2;
	int flags;
{
	struct proc *p1 = td->td_proc;

	GIANT_REQUIRED;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared; essentially
		 * this changes shared memory amongst threads into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return;
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);
		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/*
	 * p_stats currently points at fields in the user struct.
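	 * The child's U area must therefore already exist (vm_proc_new())
	 * before p_stats can be pointed into it below.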
	 * Copy parts of p_stats; zero the rest of p_stats (statistics).
	 */
#define	RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

	p2->p_stats = &p2->p_uarea->u_stats;
	bzero(&p2->p_stats->pstat_startzero,
	    (unsigned) RANGEOF(struct pstats, pstat_startzero, pstat_endzero));
	bcopy(&p1->p_stats->pstat_startcopy, &p2->p_stats->pstat_startcopy,
	    (unsigned) RANGEOF(struct pstats, pstat_startcopy, pstat_endcopy));
#undef RANGEOF

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
}

/*
 * Called after process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
	struct proc *p;
{

	vmspace_exitfree(p);		/* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	struct proc *p = udata;
	struct plimit *limp;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM. Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	limp = p->p_limit;
	limp->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	limp->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the limit to no less than 2MB */
	rss_limit = max(cnt.v_free_count, 512);
	limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

void
faultin(p)
	struct proc *p;
{
#ifdef NO_SWAPPING

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_sflag & PS_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
	struct thread *td;

	GIANT_REQUIRED;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_sflag & PS_SWAPPINGIN)
		msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
	else if ((p->p_sflag & PS_INMEM) == 0) {
		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		mtx_lock_spin(&sched_lock);
		p->p_sflag |= PS_SWAPPINGIN;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);

		vm_proc_swapin(p);
		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td);

		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		p->p_sflag &= ~PS_SWAPPINGIN;
		p->p_sflag |= PS_INMEM;
		FOREACH_THREAD_IN_PROC(p, td) {
			TD_CLR_SWAPPED(td);
			if (TD_CAN_RUN(td))
				setrunnable(td);
		}
		mtx_unlock_spin(&sched_lock);

		wakeup(&p->p_sflag);

		/* Allow other threads to swap p out now. */
		--p->p_lock;
	}
#endif /* NO_SWAPPING */
}

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
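 *
 * The selection below computes, for each swapped-out candidate, roughly
 *
 *	pri = p_swtime + kg_slptime - 8 * nice
 *
 * (the nice term is skipped when a swapin was explicitly requested),
 * so the process that has been swapped out and asleep the longest wins.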
 *
 * XXXKSE - process with the thread with highest priority counts..
 *
 * Giant is still held at this point, to be released in tsleep.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	struct proc *p;
	struct thread *td;
	int pri;
	struct proc *pp;
	int ppri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	/* GIANT_REQUIRED */

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct ksegrp *kg;
		if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
			continue;
		}
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			if (td->td_inhibitors == TDI_SWAPPED) {
				kg = td->td_ksegrp;
				pri = p->p_swtime + kg->kg_slptime;
				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
					pri -= p->p_nice * 8;
				}

				/*
				 * if this ksegrp is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
		}
		mtx_unlock_spin(&sched_lock);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
		goto loop;
	}
	PROC_LOCK(p);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
		PROC_UNLOCK(p);
		goto loop;
	}

	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPINREQ;
	mtx_unlock_spin(&sched_lock);

	/*
	 * We would like to bring someone in. (only if there is space).
	 * [What checks the space? ]
	 */
	faultin(p);
	PROC_UNLOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_swtime = 0;
	mtx_unlock_spin(&sched_lock);
	goto loop;
}

#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
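 *
 * The "action" argument selects the policy: VM_SWAP_NORMAL swaps under
 * memory pressure, while VM_SWAP_IDLE also swaps out processes all of
 * whose ksegrps have been idle longer than swap_idle_threshold2 seconds.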
 */
void
swapout_procs(action)
int action;
{
	struct proc *p;
	struct thread *td;
	struct ksegrp *kg;
	int didswap = 0;

	GIANT_REQUIRED;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;

		/*
		 * Watch out for a process in
		 * creation.  It may have no
		 * address space or lock yet.
		 */
		mtx_lock_spin(&sched_lock);
		if (p->p_state == PRS_NEW) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}
		mtx_unlock_spin(&sched_lock);

		/*
		 * An aio daemon switches its
		 * address space while running.
		 * Perform a quick check whether
		 * a process has P_SYSTEM.
		 */
		if ((p->p_flag & P_SYSTEM) != 0)
			continue;

		/*
		 * Do not swapout a process that
		 * is waiting for VM data
		 * structures as there is a possible
		 * deadlock.  Test this first as
		 * this may block.
		 *
		 * Lock the map until swapout
		 * finishes, or a thread of this
		 * process may attempt to alter
		 * the map.
		 */
		PROC_LOCK(p);
		vm = p->p_vmspace;
		KASSERT(vm != NULL,
		    ("swapout_procs: a process has no address space"));
		atomic_add_int(&vm->vm_refcnt, 1);
		PROC_UNLOCK(p);
		if (!vm_map_trylock(&vm->vm_map))
			goto nextproc1;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
		    ) != 0) {
			goto nextproc2;
		}
		/*
		 * only aiod changes vmspace, however it will be
		 * skipped because of the if statement above checking
		 * for P_SYSTEM
		 */
		if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) != PS_INMEM)
			goto nextproc2;

		switch (p->p_state) {
		default:
			/* Don't swap out processes in any sort
			 * of 'special' state. */
			break;

		case PRS_NORMAL:
			mtx_lock_spin(&sched_lock);
			/*
			 * do not swapout a realtime process
			 * Check all the thread groups..
			 */
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				if (PRI_IS_REALTIME(kg->kg_pri_class))
					goto nextproc;

				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (kg->kg_slptime < swap_idle_threshold1)
					goto nextproc;

				/*
				 * Do not swapout a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				FOREACH_THREAD_IN_GROUP(kg, td) {
					if ((td->td_priority) < PSOCK ||
					    !thread_safetoswapout(td))
						goto nextproc;
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (kg->kg_slptime < swap_idle_threshold2)))
					goto nextproc;

				if (minslptime > kg->kg_slptime)
					minslptime = kg->kg_slptime;
			}

			/*
			 * If the pageout daemon didn't free enough pages,
			 * or if this process is idle and the system is
			 * configured to swap proactively, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
			    ((action & VM_SWAP_IDLE) &&
			    (minslptime > swap_idle_threshold2))) {
				swapout(p);
				didswap++;
				mtx_unlock_spin(&sched_lock);
				PROC_UNLOCK(p);
				vm_map_unlock(&vm->vm_map);
				vmspace_free(vm);
				sx_sunlock(&allproc_lock);
				goto retry;
			}
nextproc:
			mtx_unlock_spin(&sched_lock);
		}
nextproc2:
		PROC_UNLOCK(p);
		vm_map_unlock(&vm->vm_map);
nextproc1:
		vmspace_free(vm);
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

static void
swapout(p)
	struct proc *p;
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) == PS_INMEM,
	    ("swapout: lost a swapout race?"));

#if defined(INVARIANTS)
	/*
	 * Make sure that all threads are safe to be swapped out.
	 *
	 * Alternatively, we could swap out only safe threads.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(thread_safetoswapout(td),
		    ("swapout: there is a thread not safe for swapout"));
	}
#endif /* INVARIANTS */

	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	p->p_sflag &= ~PS_INMEM;
	p->p_sflag |= PS_SWAPPINGOUT;
	PROC_UNLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td)
		TD_SET_SWAPPED(td);
	mtx_unlock_spin(&sched_lock);

	vm_proc_swapout(p);
	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPPINGOUT;
	p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */