vm_glue.c revision 126253
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_glue.c 126253 2004-02-26 00:27:04Z truckman $");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <sys/user.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL)

#ifndef NO_SWAPPING
static void swapout(struct proc *);
static void vm_proc_swapin(struct proc *p);
static void vm_proc_swapout(struct proc *p);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}
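
/*
 * A minimal usage sketch (hypothetical caller; "uaddr", "len" and "kbuf"
 * are stand-in names): useracc() is typically paired with a transfer
 * primitive such as copyin(), as the comment above suggests.
 *
 *	if (!useracc(uaddr, len, VM_PROT_READ))
 *		return (EFAULT);
 *	error = copyin(uaddr, kbuf, len);
 *
 * Since only the map entries are checked, copyin() may still fail, so
 * its return value must be checked in any case.
 */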

/*
 * Create the U area for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
vm_proc_new(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	u_int i;

	/*
	 * Get a kernel virtual address for the U area for this process.
	 */
	up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
	if (up == 0)
		panic("vm_proc_new: upage allocation failed");
	p->p_uarea = (struct user *)up;

	/*
	 * Allocate object and page(s) for the U area.
	 */
	upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
	p->p_upages_obj = upobj;
	VM_OBJECT_LOCK(upobj);
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;

		vm_page_lock_queues();
		vm_page_wakeup(m);
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(upobj);

	/*
	 * Enter the pages into the kernel address space.
	 */
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Dispose of the U area for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 * XXX proc_zone is marked UMA_ZONE_NOFREE, so this should never be called.
 */
void
vm_proc_dispose(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_dispose: incorrect number of pages in upobj");
	vm_page_lock_queues();
	while ((m = TAILQ_FIRST(&upobj->memq)) != NULL) {
		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
	kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
	vm_object_deallocate(upobj);
}

#ifndef NO_SWAPPING
/*
 * Allow the U area for a process to be prejudicially paged out.
 */
static void
vm_proc_swapout(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapout: incorrect number of pages in upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
}
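
/*
 * Note that vm_proc_swapout() writes nothing to swap itself: dirtying
 * each page and dropping its wiring merely makes the U area pageable,
 * and the pageout daemon (the driver of swapout_procs() below) is what
 * actually launders the pages out.  vm_proc_swapin() reverses this,
 * pulling any non-resident pages back through the pager and re-wiring
 * them.
 */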

/*
 * Bring the U area for a specified process back in.
 */
static void
vm_proc_swapin(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	int rv;
	int i;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(upobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_proc_swapin: cannot get upage");
		}
		ma[i] = m;
	}
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapin: lost pages from upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_wire(m);
		vm_page_wakeup(m);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Swap in the UAREAs of all processes swapped out to the given device.
 * The pages in the UAREA are marked dirty and their swap metadata is freed.
 */
void
vm_proc_swapin_all(struct swdevt *devidx)
{
	struct proc *p;
	vm_object_t object;
	vm_page_t m;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		object = p->p_upages_obj;
		if (object != NULL) {
			VM_OBJECT_LOCK(object);
			if (swap_pager_isswapped(object, devidx)) {
				VM_OBJECT_UNLOCK(object);
				sx_sunlock(&allproc_lock);
				faultin(p);
				PROC_UNLOCK(p);
				VM_OBJECT_LOCK(object);
				vm_page_lock_queues();
				TAILQ_FOREACH(m, &object->memq, listq)
					vm_page_dirty(m);
				vm_page_unlock_queues();
				swap_pager_freespace(object, 0,
				    object->un_pager.swp.swp_bcount);
				VM_OBJECT_UNLOCK(object);
				goto retry;
			}
			VM_OBJECT_UNLOCK(object);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
}
#endif

#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif
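
/*
 * A sketch of the kstack KVA layout built by vm_thread_new() below,
 * assuming KSTACK_GUARD_PAGES > 0 on the platform: the guard sits at
 * the low end so that a (downward-growing) stack overflow runs off the
 * mapped region and faults instead of silently corrupting adjacent
 * memory.
 *
 *	ks - KSTACK_GUARD_PAGES * PAGE_SIZE:	guard page(s), unmapped
 *	ks ... ks + pages * PAGE_SIZE - 1:	wired kstack pages
 *
 * td_kstack records "ks", i.e. the first address above the guard.
 */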

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the fork perf for a process and
 * creation performance for a thread.
 */
void
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i;

	/* Bounds check */
	if (pages <= 1)
		pages = KSTACK_PAGES;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;
	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
	td->td_kstack_obj = ksobj;
	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
	ks = kmem_alloc_nofault(kernel_map,
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
	if (ks == 0)
		panic("vm_thread_new: kstack allocation failed");
	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack = ks;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
	/*
	 * For the length of the stack, link in a real page of ram for each
	 * page of stack.
	 */
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		/*
		 * Get a kernel stack page.
		 */
		m = vm_page_grab(ksobj, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;
		vm_page_lock_queues();
		vm_page_wakeup(m);
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m;
	int i, pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	pmap_qremove(ks, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_dispose: kstack already missing?");
		vm_page_lock_queues();
		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Allow a thread's kernel stack to be paged out.
 */
void
vm_thread_swapout(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m;
	int i, pages;

	cpu_thread_swapout(td);
	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	pmap_qremove(td->td_kstack, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_swapout: kstack already missing?");
		vm_page_lock_queues();
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
void
vm_thread_swapin(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i, pages, rv;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_thread_swapin: cannot get kstack for proc: %d",
				    td->td_proc->p_pid);
			m = vm_page_lookup(ksobj, i);
			m->valid = VM_PAGE_BITS_ALL;
		}
		ma[i] = m;
		vm_page_lock_queues();
		vm_page_wire(m);
		vm_page_wakeup(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(td->td_kstack, ma, pages);
	cpu_thread_swapin(td);
}

/*
 * Set up a variable-sized alternate kstack.
 */
void
vm_thread_new_altkstack(struct thread *td, int pages)
{

	td->td_altkstack = td->td_kstack;
	td->td_altkstack_obj = td->td_kstack_obj;
	td->td_altkstack_pages = td->td_kstack_pages;

	vm_thread_new(td, pages);
}

/*
 * Restore the original kstack.
 */
void
vm_thread_dispose_altkstack(struct thread *td)
{

	vm_thread_dispose(td);

	td->td_kstack = td->td_altkstack;
	td->td_kstack_obj = td->td_altkstack_obj;
	td->td_kstack_pages = td->td_altkstack_pages;
	td->td_altkstack = 0;
	td->td_altkstack_obj = NULL;
	td->td_altkstack_pages = 0;
}
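
/*
 * The two alternate-kstack routines above are intended to bracket a
 * region where a thread temporarily needs a different-sized stack; a
 * minimal sketch of the pairing (the real call sites live elsewhere in
 * the kernel):
 *
 *	vm_thread_new_altkstack(td, pages);	stash old stack, make new
 *	...					run on the new stack
 *	vm_thread_dispose_altkstack(td);	free new, restore old
 *
 * vm_thread_dispose_altkstack() frees the *current* kstack via
 * vm_thread_dispose() and only then copies the saved fields back.
 */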

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_forkproc(td, p2, td2, flags)
	struct thread *td;
	struct proc *p2;
	struct thread *td2;
	int flags;
{
	struct proc *p1 = td->td_proc;
	struct user *up;

	GIANT_REQUIRED;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory if it is shared; essentially
		 * this changes shared memory amongst threads into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return;
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		p1->p_vmspace->vm_refcnt++;
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);

		pmap_pinit2(vmspace_pmap(p2->p_vmspace));

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/* XXXKSE this is unsatisfactory but should be adequate */
	up = p2->p_uarea;
	MPASS(p2->p_sigacts != NULL);

	/*
	 * p_stats currently points at fields in the user struct
	 * but not at &u, instead at p_addr.  Copy parts of
	 * p_stats; zero the rest of p_stats (statistics).
	 */
	p2->p_stats = &up->u_stats;
	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
	    (caddr_t) &up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t) &up->u_stats.pstat_endcopy -
	    (caddr_t) &up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
}

/*
 * Called after a process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
	struct proc *p;
{

	GIANT_REQUIRED;
	vmspace_exitfree(p);		/* and clean-out the vmspace */
}
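
/*
 * How the rfork(2)-style flags drive the address space above, in brief
 * (a summary of the code, not an exhaustive flag list):
 *
 *	RFPROC clear:		no new process; if RFMEM is also clear,
 *				a shared vmspace is first un-shared (COW).
 *	RFPROC | RFMEM:		the child shares the parent's vmspace and
 *				the reference count is simply bumped.
 *	RFPROC, RFMEM clear:	the child gets a COW copy via
 *				vmspace_fork(), plus shmfork() for any
 *				SysV shared memory segments.
 */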

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	struct proc *p = udata;
	struct plimit *limp;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM.  Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	limp = p->p_limit;
	limp->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	limp->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the limit to no less than 2MB */
	rss_limit = max(cnt.v_free_count, 512);
	limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

void
faultin(p)
	struct proc *p;
{
#ifdef NO_SWAPPING

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_sflag & PS_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
	struct thread *td;

	GIANT_REQUIRED;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_sflag & PS_SWAPPINGIN)
		msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
	else if ((p->p_sflag & PS_INMEM) == 0) {
		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		mtx_lock_spin(&sched_lock);
		p->p_sflag |= PS_SWAPPINGIN;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);

		vm_proc_swapin(p);
		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td);

		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		p->p_sflag &= ~PS_SWAPPINGIN;
		p->p_sflag |= PS_INMEM;
		FOREACH_THREAD_IN_PROC(p, td) {
			TD_CLR_SWAPPED(td);
			if (TD_CAN_RUN(td))
				setrunnable(td);
		}
		mtx_unlock_spin(&sched_lock);

		wakeup(&p->p_sflag);

		/* Allow other threads to swap p out now. */
		--p->p_lock;
	}
#endif /* NO_SWAPPING */
}
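
/*
 * Caller contract for faultin(), per the asserts above: the proc lock
 * must be held on entry and is still held on return, but it may be
 * dropped and reacquired internally.  A minimal sketch of a call site
 * ("p" being any locked process):
 *
 *	PROC_LOCK(p);
 *	faultin(p);
 *	PROC_UNLOCK(p);
 *
 * Because the lock can be dropped, callers such as scheduler() below
 * revalidate p_sflag under the proc lock before depending on it.
 */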

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * XXXKSE - process with the thread with highest priority counts..
 *
 * Giant is still held at this point, to be released in tsleep.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	struct proc *p;
	struct thread *td;
	int pri;
	struct proc *pp;
	int ppri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	/* GIANT_REQUIRED */

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct ksegrp *kg;
		if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
			continue;
		}
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			if (td->td_inhibitors == TDI_SWAPPED) {
				kg = td->td_ksegrp;
				pri = p->p_swtime + kg->kg_slptime;
				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
					pri -= kg->kg_nice * 8;
				}

				/*
				 * If this ksegrp is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
		}
		mtx_unlock_spin(&sched_lock);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
		goto loop;
	}
	PROC_LOCK(p);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
		PROC_UNLOCK(p);
		goto loop;
	}

	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPINREQ;
	mtx_unlock_spin(&sched_lock);

	/*
	 * We would like to bring someone in. (only if there is space).
	 * [What checks the space? ]
	 */
	faultin(p);
	PROC_UNLOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_swtime = 0;
	mtx_unlock_spin(&sched_lock);
	goto loop;
}

#ifndef NO_SWAPPING

/*
 * swap_idle_threshold1 is the guaranteed swapped-in time for a process.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");
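
/*
 * Both thresholds are exported read-write under the "vm" sysctl tree,
 * so they can be tuned at runtime, e.g.:
 *
 *	sysctl vm.swap_idle_threshold2=30
 *
 * The values are compared against kg_slptime, which (assuming the stock
 * schedcpu behaviour) advances once per second, so they are effectively
 * in seconds.  Idle swapping is only attempted when swapout_procs() is
 * called with VM_SWAP_IDLE set; see the action checks below.
 */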

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
	int action;
{
	struct proc *p;
	struct thread *td;
	struct ksegrp *kg;
	int didswap = 0;

	GIANT_REQUIRED;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;

		/*
		 * Watch out for a process in
		 * creation.  It may have no
		 * address space or lock yet.
		 */
		mtx_lock_spin(&sched_lock);
		if (p->p_state == PRS_NEW) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}
		mtx_unlock_spin(&sched_lock);

		/*
		 * An aio daemon switches its
		 * address space while running.
		 * Perform a quick check whether
		 * a process has P_SYSTEM.
		 */
		if ((p->p_flag & P_SYSTEM) != 0)
			continue;

		/*
		 * Do not swapout a process that
		 * is waiting for VM data
		 * structures as there is a possible
		 * deadlock.  Test this first as
		 * this may block.
		 *
		 * Lock the map until swapout
		 * finishes, or a thread of this
		 * process may attempt to alter
		 * the map.
		 */
		PROC_LOCK(p);
		vm = p->p_vmspace;
		KASSERT(vm != NULL,
		    ("swapout_procs: a process has no address space"));
		++vm->vm_refcnt;
		PROC_UNLOCK(p);
		if (!vm_map_trylock(&vm->vm_map))
			goto nextproc1;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
		    ) != 0) {
			goto nextproc2;
		}
		/*
		 * Only aiod changes vmspace; however, it will be
		 * skipped because of the if statement above checking
		 * for P_SYSTEM.
		 */
		if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) != PS_INMEM)
			goto nextproc2;

		switch (p->p_state) {
		default:
			/* Don't swap out processes in any sort
			 * of 'special' state. */
			break;

		case PRS_NORMAL:
			mtx_lock_spin(&sched_lock);
			/*
			 * Do not swapout a realtime process.
			 * Check all the thread groups..
			 */
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				if (PRI_IS_REALTIME(kg->kg_pri_class))
					goto nextproc;

				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (kg->kg_slptime < swap_idle_threshold1)
					goto nextproc;

				/*
				 * Do not swapout a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				FOREACH_THREAD_IN_GROUP(kg, td) {
					if ((td->td_priority) < PSOCK ||
					    !thread_safetoswapout(td))
						goto nextproc;
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (kg->kg_slptime < swap_idle_threshold2)))
					goto nextproc;

				if (minslptime > kg->kg_slptime)
					minslptime = kg->kg_slptime;
			}

			/*
			 * If the process has been asleep for awhile and had
			 * most of its pages taken away already, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
			    ((action & VM_SWAP_IDLE) &&
			    (minslptime > swap_idle_threshold2))) {
				swapout(p);
				didswap++;
				mtx_unlock_spin(&sched_lock);
				PROC_UNLOCK(p);
				vm_map_unlock(&vm->vm_map);
				vmspace_free(vm);
				sx_sunlock(&allproc_lock);
				goto retry;
			}
nextproc:
			mtx_unlock_spin(&sched_lock);
		}
nextproc2:
		PROC_UNLOCK(p);
		vm_map_unlock(&vm->vm_map);
nextproc1:
		vmspace_free(vm);
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}
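
/*
 * Locking sketch for swapout(), as implied by its asserts and by the
 * success path of swapout_procs() above: it is entered with both the
 * proc lock and sched_lock held, drops them around the actual unwiring
 * of the U area and kstacks, and returns with both reacquired; the
 * caller then releases them in reverse order.
 */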
static void
swapout(p)
	struct proc *p;
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) == PS_INMEM,
	    ("swapout: lost a swapout race?"));

#if defined(INVARIANTS)
	/*
	 * Make sure that all threads are safe to be swapped out.
	 *
	 * Alternatively, we could swap out only safe threads.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(thread_safetoswapout(td),
		    ("swapout: there is a thread not safe for swapout"));
	}
#endif /* INVARIANTS */

	++p->p_stats->p_ru.ru_nswap;
	/*
	 * Remember the process resident count.
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	p->p_sflag &= ~PS_INMEM;
	p->p_sflag |= PS_SWAPPINGOUT;
	PROC_UNLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td)
		TD_SET_SWAPPED(td);
	mtx_unlock_spin(&sched_lock);

	vm_proc_swapout(p);
	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPPINGOUT;
	p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */