vm_glue.c revision 177253
/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_glue.c 177253 2008-03-16 10:58:09Z rwatson $");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0);

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL);

#ifndef NO_SWAPPING
static int swapout(struct proc *);
static void swapclear(struct proc *);
#endif


static volatile int proc0_rescan;


/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));

	if ((vm_offset_t)addr + len > kernel_map->max_offset ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr)
		return (FALSE);

	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}
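
#if 0
/*
 * Usage sketch only -- example_fill_user_buffer() is a hypothetical caller,
 * not an existing interface.  It shows the intended pattern: useracc() can
 * reject an obviously bad user range up front, but because it only inspects
 * vm_map_entry protections, the later copyout() can still fail and its
 * error must still be checked.
 */
static int
example_fill_user_buffer(void *udata, size_t len)
{
	char kbuf[64];

	if (len > sizeof(kbuf) || !useracc(udata, len, VM_PROT_WRITE))
		return (EFAULT);
	bzero(kbuf, sizeof(kbuf));
	/* ... fill kbuf with the data to be returned ... */
	return (copyout(kbuf, udata, len));
}
#endif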

int
vslock(void *addr, size_t len)
{
	vm_offset_t end, last, start;
	vm_size_t npages;
	int error;

	last = (vm_offset_t)addr + len;
	start = trunc_page((vm_offset_t)addr);
	end = round_page(last);
	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
	PROC_LOCK(curproc);
	if (ptoa(npages +
	    pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))) >
	    lim_cur(curproc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(curproc);
		return (ENOMEM);
	}
	PROC_UNLOCK(curproc);
#if 0
	/*
	 * XXX - not yet
	 *
	 * The limit for transient usage of wired pages should be
	 * larger than for "permanent" wired pages (mlock()).
	 *
	 * Also, the sysctl code, which is the only present user
	 * of vslock(), does a hard loop on EAGAIN.
	 */
	if (npages + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#endif
	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (error == KERN_SUCCESS ? 0 : EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

	/* Rely on the parameter sanity checks performed by vslock(). */
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}
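
#if 0
/*
 * Usage sketch only -- example_copyout_wired() is a hypothetical helper.
 * It shows the wire/copy/unwire pattern used by vslock()'s existing caller,
 * the sysctl "old" buffer code.  Because vslock() returns EFAULT rather
 * than ENOMEM on failure, its error can be handed straight back to a
 * copyout()-style caller.
 */
static int
example_copyout_wired(const void *kaddr, void *uaddr, size_t len)
{
	int error;

	error = vslock(uaddr, len);
	if (error != 0)
		return (error);
	error = copyout(kaddr, uaddr, len);
	vsunlock(uaddr, len);
	return (error);
}
#endif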

/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m, ma[1];
	vm_pindex_t pindex;
	int rv;

	VM_OBJECT_LOCK(object);
	pindex = OFF_TO_IDX(offset);
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
		ma[0] = m;
		rv = vm_pager_get_pages(object, ma, 1, 0);
		m = vm_page_lookup(object, pindex);
		if (m == NULL)
			goto out;
		if (m->valid == 0 || rv != VM_PAGER_OK) {
			vm_page_lock_queues();
			vm_page_free(m);
			vm_page_unlock_queues();
			m = NULL;
			goto out;
		}
	}
	vm_page_lock_queues();
	vm_page_hold(m);
	vm_page_unlock_queues();
	vm_page_wakeup(m);
out:
	VM_OBJECT_UNLOCK(object);
	return (m);
}

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
struct sf_buf *
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;

	m = vm_imgact_hold_page(object, offset);
	if (m == NULL)
		return (NULL);
	sched_pin();
	return (sf_buf_alloc(m, SFB_CPUPRIVATE));
}

/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
void
vm_imgact_unmap_page(struct sf_buf *sf)
{
	vm_page_t m;

	m = sf_buf_page(sf);
	sf_buf_free(sf);
	sched_unpin();
	vm_page_lock_queues();
	vm_page_unhold(m);
	vm_page_unlock_queues();
}
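
#if 0
/*
 * Usage sketch only -- example_copy_image_page() is a hypothetical helper.
 * It shows how an image activator might copy one page out of an
 * executable's VM object using the two helpers above.  sf_buf_kva() gives
 * the kernel virtual address of the CPU-private mapping; the mapping is
 * torn down on the same CPU, which is why vm_imgact_map_page() pins the
 * thread via sched_pin().
 */
static int
example_copy_image_page(vm_object_t object, vm_ooffset_t offset, void *dst)
{
	struct sf_buf *sf;

	sf = vm_imgact_map_page(object, offset);
	if (sf == NULL)
		return (EIO);
	bcopy((void *)sf_buf_kva(sf), dst, PAGE_SIZE);
	vm_imgact_unmap_page(sf);
	return (0);
}
#endif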

#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the fork perf for a process and
 * create performance for a thread.
 */
int
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i;

	/* Bounds check */
	if (pages <= 1)
		pages = KSTACK_PAGES;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;
	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
	ks = kmem_alloc_nofault(kernel_map,
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
	if (ks == 0) {
		printf("vm_thread_new: kstack allocation failed\n");
		vm_object_deallocate(ksobj);
		return (0);
	}

	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack_obj = ksobj;
	td->td_kstack = ks;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
	/*
	 * For the length of the stack, link in a real page of ram for each
	 * page of stack.
	 */
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		/*
		 * Get a kernel stack page.
		 */
		m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;
		m->valid = VM_PAGE_BITS_ALL;
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
	return (1);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m;
	int i, pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	pmap_qremove(ks, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_dispose: kstack already missing?");
		vm_page_lock_queues();
		vm_page_unwire(m, 0);
		vm_page_free(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
	td->td_kstack = 0;
}

/*
 * Allow a thread's kernel stack to be paged out.
 */
void
vm_thread_swapout(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m;
	int i, pages;

	cpu_thread_swapout(td);
	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	pmap_qremove(td->td_kstack, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_swapout: kstack already missing?");
		vm_page_lock_queues();
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
void
vm_thread_swapin(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i, pages, rv;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_thread_swapin: cannot get kstack for proc: %d",
				    td->td_proc->p_pid);
			m = vm_page_lookup(ksobj, i);
			m->valid = VM_PAGE_BITS_ALL;
		}
		ma[i] = m;
		vm_page_lock_queues();
		vm_page_wire(m);
		vm_page_unlock_queues();
		vm_page_wakeup(m);
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(td->td_kstack, ma, pages);
	cpu_thread_swapin(td);
}

/*
 * Set up a variable-sized alternate kstack.
 */
int
vm_thread_new_altkstack(struct thread *td, int pages)
{

	td->td_altkstack = td->td_kstack;
	td->td_altkstack_obj = td->td_kstack_obj;
	td->td_altkstack_pages = td->td_kstack_pages;

	return (vm_thread_new(td, pages));
}

/*
 * Restore the original kstack.
 */
void
vm_thread_dispose_altkstack(struct thread *td)
{

	vm_thread_dispose(td);

	td->td_kstack = td->td_altkstack;
	td->td_kstack_obj = td->td_altkstack_obj;
	td->td_kstack_pages = td->td_altkstack_pages;
	td->td_altkstack = 0;
	td->td_altkstack_obj = NULL;
	td->td_altkstack_pages = 0;
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
int
vm_forkproc(td, p2, td2, vm2, flags)
	struct thread *td;
	struct proc *p2;
	struct thread *td2;
	struct vmspace *vm2;
	int flags;
{
	struct proc *p1 = td->td_proc;
	int error;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared, essentially
		 * this changes shared memory amongst threads, into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				error = vmspace_unshare(p1);
				if (error)
					return (error);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return (0);
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vm2;
		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
	return (0);
}
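
/*
 * Illustrative summary of how vm_forkproc() above handles the RF* flag
 * combinations, assuming the caller supplies a pre-copied vmspace in vm2
 * whenever a private copy is wanted:
 *
 *	!RFPROC, !RFMEM	unshare p1's vmspace if it is shared, then
 *			cpu_fork() only (new thread in the same process)
 *	 RFPROC,  RFMEM	child shares p1's vmspace (vm_refcnt is bumped)
 *	 RFPROC, !RFMEM	child gets the supplied vmspace vm2; SysV shared
 *			memory segments are inherited via shmfork()
 */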

/*
 * Called after process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
	struct proc *p;
{

	vmspace_exitfree(p);		/* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	struct proc *p = udata;
	struct plimit *limp;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM.  Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	limp = p->p_limit;
	limp->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	limp->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the limit to no less than 2MB */
	rss_limit = max(cnt.v_free_count, 512);
	limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

void
faultin(p)
	struct proc *p;
{
#ifdef NO_SWAPPING

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_flag & P_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_flag & P_SWAPPINGIN) {
		while (p->p_flag & P_SWAPPINGIN)
			msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
		return;
	}
	if ((p->p_flag & P_INMEM) == 0) {
		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		p->p_flag |= P_SWAPPINGIN;
		PROC_UNLOCK(p);

		/*
		 * We hold no lock here because the list of threads
		 * can not change while all threads in the process are
		 * swapped out.
		 */
		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td);
		PROC_LOCK(p);
		PROC_SLOCK(p);
		swapclear(p);
		p->p_swtick = ticks;
		PROC_SUNLOCK(p);

		wakeup(&p->p_flag);

		/* Allow other threads to swap p out now. */
		--p->p_lock;
	}
#endif /* NO_SWAPPING */
}

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * Giant is held on entry.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	struct proc *p;
	struct thread *td;
	struct proc *pp;
	int slptime;
	int swtime;
	int ppri;
	int pri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	mtx_unlock(&Giant);

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		thread_lock(&thread0);
		proc0_rescan = 0;
		thread_unlock(&thread0);
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_flag & (P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) {
			PROC_UNLOCK(p);
			continue;
		}
		swtime = (ticks - p->p_swtick) / hz;
		PROC_SLOCK(p);
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 *
			 */
			thread_lock(td);
			if (td->td_inhibitors == TDI_SWAPPED) {
				slptime = (ticks - td->td_slptick) / hz;
				pri = swtime + slptime;
				if ((td->td_flags & TDF_SWAPINREQ) == 0)
					pri -= p->p_nice * 8;
				/*
				 * if this thread is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
			thread_unlock(td);
		}
		PROC_SUNLOCK(p);
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		thread_lock(&thread0);
		if (!proc0_rescan) {
			TD_SET_IWAIT(&thread0);
			mi_switch(SW_VOL, NULL);
		}
		proc0_rescan = 0;
		thread_unlock(&thread0);
		goto loop;
	}
	PROC_LOCK(p);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
		PROC_UNLOCK(p);
		thread_lock(&thread0);
		proc0_rescan = 0;
		thread_unlock(&thread0);
		goto loop;
	}

	/*
	 * We would like to bring someone in. (only if there is space).
	 * [What checks the space? ]
	 */
	faultin(p);
	PROC_UNLOCK(p);
	thread_lock(&thread0);
	proc0_rescan = 0;
	thread_unlock(&thread0);
	goto loop;
}
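
/*
 * Worked example (illustrative) of the swap-in priority computed in the
 * loop above: a process that has been swapped out for 30 seconds, with a
 * thread asleep for 10 seconds and no pending TDF_SWAPINREQ, scores
 *
 *	pri = 30 + 10 - (p_nice * 8)
 *
 * i.e. 0 at nice +5 but 80 at nice -5, so the least nice (highest
 * priority) candidates are faulted back in first.
 */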

void kick_proc0(void)
{
	struct thread *td = &thread0;

	/* XXX  This will probably cause a LOR in some cases */
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR2(KTR_INTR, "%s: sched_add %d", __func__, 0);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		proc0_rescan = 1;
		CTR2(KTR_INTR, "%s: state %d",
		    __func__, td->td_state);
	}
	thread_unlock(td);

}


#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and swap out their stacks.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
int action;
{
	struct proc *p;
	struct thread *td;
	int didswap = 0;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;
		int slptime;

		/*
		 * Watch out for a process in
		 * creation.  It may have no
		 * address space or lock yet.
		 */
		if (p->p_state == PRS_NEW)
			continue;
		/*
		 * An aio daemon switches its
		 * address space while running.
		 * Perform a quick check whether
		 * a process has P_SYSTEM.
		 */
		if ((p->p_flag & P_SYSTEM) != 0)
			continue;
		/*
		 * Do not swapout a process that
		 * is waiting for VM data
		 * structures as there is a possible
		 * deadlock.  Test this first as
		 * this may block.
		 *
		 * Lock the map until swapout
		 * finishes, or a thread of this
		 * process may attempt to alter
		 * the map.
		 */
		vm = vmspace_acquire_ref(p);
		if (vm == NULL)
			continue;
		if (!vm_map_trylock(&vm->vm_map))
			goto nextproc1;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
		    ) != 0) {
			goto nextproc2;
		}
		/*
		 * only aiod changes vmspace, however it will be
		 * skipped because of the if statement above checking
		 * for P_SYSTEM
		 */
		if ((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) != P_INMEM)
			goto nextproc2;

		switch (p->p_state) {
		default:
			/* Don't swap out processes in any sort
			 * of 'special' state. */
			break;

		case PRS_NORMAL:
			PROC_SLOCK(p);
			/*
			 * do not swapout a realtime process
			 * Check all the thread groups..
			 */
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (PRI_IS_REALTIME(td->td_pri_class)) {
					thread_unlock(td);
					goto nextproc;
				}
				slptime = (ticks - td->td_slptick) / hz;
				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (slptime < swap_idle_threshold1) {
					thread_unlock(td);
					goto nextproc;
				}

				/*
				 * Do not swapout a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				if (!thread_safetoswapout(td)) {
					thread_unlock(td);
					goto nextproc;
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (slptime < swap_idle_threshold2))) {
					thread_unlock(td);
					goto nextproc;
				}

				if (minslptime > slptime)
					minslptime = slptime;
				thread_unlock(td);
			}

			/*
			 * If the pageout daemon didn't free enough pages,
			 * or if this process is idle and the system is
			 * configured to swap proactively, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
			    ((action & VM_SWAP_IDLE) &&
			    (minslptime > swap_idle_threshold2))) {
				if (swapout(p) == 0)
					didswap++;
				PROC_SUNLOCK(p);
				PROC_UNLOCK(p);
				vm_map_unlock(&vm->vm_map);
				vmspace_free(vm);
				sx_sunlock(&allproc_lock);
				goto retry;
			}
nextproc:
			PROC_SUNLOCK(p);
		}
nextproc2:
		PROC_UNLOCK(p);
		vm_map_unlock(&vm->vm_map);
nextproc1:
		vmspace_free(vm);
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

static void
swapclear(p)
	struct proc *p;
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);

	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		td->td_flags |= TDF_INMEM;
		td->td_flags &= ~TDF_SWAPINREQ;
		TD_CLR_SWAPPED(td);
		if (TD_CAN_RUN(td))
			setrunnable(td);
		thread_unlock(td);
	}
	p->p_flag &= ~(P_SWAPPINGIN|P_SWAPPINGOUT);
	p->p_flag |= P_INMEM;
}

static int
swapout(p)
	struct proc *p;
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) == P_INMEM,
	    ("swapout: lost a swapout race?"));

	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
	/*
	 * Check and mark all threads before we proceed.
	 */
	p->p_flag &= ~P_INMEM;
	p->p_flag |= P_SWAPPINGOUT;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		if (!thread_safetoswapout(td)) {
			thread_unlock(td);
			swapclear(p);
			return (EBUSY);
		}
		td->td_flags &= ~TDF_INMEM;
		TD_SET_SWAPPED(td);
		thread_unlock(td);
	}
	td = FIRST_THREAD_IN_PROC(p);
	++td->td_ru.ru_nswap;
	PROC_SUNLOCK(p);
	PROC_UNLOCK(p);

	/*
	 * This list is stable because all threads are now prevented from
	 * running.  The list is only modified in the context of a running
	 * thread in this process.
	 */
	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	PROC_LOCK(p);
	p->p_flag &= ~P_SWAPPINGOUT;
	PROC_SLOCK(p);
	p->p_swtick = ticks;
	return (0);
}
#endif /* !NO_SWAPPING */