vm_glue.c revision 118390
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_glue.c 118390 2003-08-03 13:35:31Z phk $");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <sys/user.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)

#ifndef NO_SWAPPING
static void swapout(struct proc *);
static void vm_proc_swapin(struct proc *p);
static void vm_proc_swapout(struct proc *p);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	return (rv == TRUE);
}
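
/*
 * A minimal usage sketch (hypothetical caller and buffer names, not taken
 * from this file): a caller wishing to verify a kernel range before
 * reading it might do
 *
 *	if (!kernacc(buf, len, VM_PROT_READ))
 *		return (EFAULT);
 *
 * Per the warning above, this only validates the vm_map_entry protection
 * covering the range, not the current contents of the pages.
 */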

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	return (rv == TRUE);
}

/*
 * MPSAFE
 */
void
vslock(addr, len)
	void *addr;
	u_int len;
{

	vm_map_wire(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), FALSE);
}

/*
 * MPSAFE
 */
void
vsunlock(addr, len)
	void *addr;
	u_int len;
{

	vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), FALSE);
}

/*
 * Create the U area for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
vm_proc_new(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	u_int i;

	/*
	 * Allocate object for the upage.
	 */
	upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
	p->p_upages_obj = upobj;

	/*
	 * Get a kernel virtual address for the U area for this process.
	 */
	up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
	if (up == 0)
		panic("vm_proc_new: upage allocation failed");
	p->p_uarea = (struct user *)up;

	for (i = 0; i < UAREA_PAGES; i++) {
		/*
		 * Get a uarea page.
		 */
		m = vm_page_grab(upobj, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;

		vm_page_lock_queues();
		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_unlock_queues();
	}

	/*
	 * Enter the pages into the kernel address space.
	 */
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Dispose of the U area for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 * XXX proc_zone is marked UMA_ZONE_NOFREE, so this should never be called.
 */
void
vm_proc_dispose(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_dispose: incorrect number of pages in upobj");
	vm_page_lock_queues();
	while ((m = TAILQ_FIRST(&upobj->memq)) != NULL) {
		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
	kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
	vm_object_deallocate(upobj);
}
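
/*
 * Taken together, the routines above and below give the U area a simple
 * life cycle: vm_proc_new() wires and maps it when a struct proc is first
 * set up, vm_proc_swapout() and vm_proc_swapin() (below) let the swapper
 * push it out and bring it back, and vm_proc_dispose() tears it down
 * again -- although, as the XXX above notes, proc_zone is marked
 * UMA_ZONE_NOFREE, so the dispose path is not expected to run in practice.
 */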

#ifndef NO_SWAPPING
/*
 * Allow the U area for a process to be prejudicially paged out.
 */
static void
vm_proc_swapout(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapout: incorrect number of pages in upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
}

/*
 * Bring the U area for a specified process back in.
 */
static void
vm_proc_swapin(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	int rv;
	int i;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(upobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_proc_swapin: cannot get upage");
		}
		ma[i] = m;
	}
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapin: lost pages from upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_wire(m);
		vm_page_wakeup(m);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Swap in the UAREAs of all processes swapped out to the given device.
 * The pages in the UAREA are marked dirty and their swap metadata is freed.
 */
void
vm_proc_swapin_all(struct swdevt *devidx)
{
	struct proc *p;
	vm_object_t object;
	vm_page_t m;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		object = p->p_upages_obj;
		if (object != NULL) {
			VM_OBJECT_LOCK(object);
			if (swap_pager_isswapped(object, devidx)) {
				VM_OBJECT_UNLOCK(object);
				sx_sunlock(&allproc_lock);
				faultin(p);
				PROC_UNLOCK(p);
				VM_OBJECT_LOCK(object);
				vm_page_lock_queues();
				TAILQ_FOREACH(m, &object->memq, listq)
					vm_page_dirty(m);
				vm_page_unlock_queues();
				swap_pager_freespace(object, 0,
				    object->un_pager.swp.swp_bcount);
				VM_OBJECT_UNLOCK(object);
				goto retry;
			}
			VM_OBJECT_UNLOCK(object);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
}
#endif

#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif
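
/*
 * A rough sketch of the kernel stack layout built by vm_thread_new()
 * below, assuming KSTACK_GUARD_PAGES is non-zero:
 *
 *	ks - KSTACK_GUARD_PAGES * PAGE_SIZE	guard range, left unmapped so
 *						that a stack overflow faults
 *	ks = td->td_kstack			lowest usable stack address
 *	ks + pages * PAGE_SIZE			top of the stack (and the pcb,
 *						on i386)
 *
 * where "pages" defaults to KSTACK_PAGES and is capped at KSTACK_MAX_PAGES.
 */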

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the fork perf for a process and
 * the creation performance for a thread.
 */
void
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i;

	/* Bounds check */
	if (pages <= 1)
		pages = KSTACK_PAGES;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;
	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
	td->td_kstack_obj = ksobj;
	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
	ks = kmem_alloc_nofault(kernel_map,
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
	if (ks == 0)
		panic("vm_thread_new: kstack allocation failed");
	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack = ks;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
	/*
	 * For the length of the stack, link in a real page of ram for each
	 * page of stack.
	 */
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		/*
		 * Get a kernel stack page.
		 */
		m = vm_page_grab(ksobj, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;
		vm_page_lock_queues();
		vm_page_wakeup(m);
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m;
	int i, pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	pmap_qremove(ks, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_dispose: kstack already missing?");
		vm_page_lock_queues();
		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Allow a thread's kernel stack to be paged out.
 */
void
vm_thread_swapout(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m;
	int i, pages;

#ifdef __alpha__
	/*
	 * Make sure we aren't fpcurthread.
	 */
	alpha_fpstate_save(td, 1);
#endif
	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	pmap_qremove(td->td_kstack, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_swapout: kstack already missing?");
		vm_page_lock_queues();
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
}
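
/*
 * Note that vm_thread_swapout() above only dirties and unwires the kstack
 * pages and removes their kernel mappings; actually writing them to swap
 * is left to the pageout daemon.  vm_thread_swapin() below reverses this,
 * asking the pager for any page that was reclaimed in the meantime before
 * wiring and remapping the stack.
 */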

/*
 * Bring the kernel stack for a specified thread back in.
 */
void
vm_thread_swapin(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i, pages, rv;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_thread_swapin: cannot get kstack for proc: %d", td->td_proc->p_pid);
			m = vm_page_lookup(ksobj, i);
			m->valid = VM_PAGE_BITS_ALL;
		}
		ma[i] = m;
		vm_page_lock_queues();
		vm_page_wire(m);
		vm_page_wakeup(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(td->td_kstack, ma, pages);
#ifdef __alpha__
	/*
	 * The pcb may be at a different physical address now so cache the
	 * new address.
	 */
	td->td_md.md_pcbpaddr = (void *)vtophys((vm_offset_t)td->td_pcb);
#endif
}

/*
 * Set up a variable-sized alternate kstack.
 */
void
vm_thread_new_altkstack(struct thread *td, int pages)
{

	td->td_altkstack = td->td_kstack;
	td->td_altkstack_obj = td->td_kstack_obj;
	td->td_altkstack_pages = td->td_kstack_pages;

	vm_thread_new(td, pages);
}

/*
 * Restore the original kstack.
 */
void
vm_thread_dispose_altkstack(struct thread *td)
{

	vm_thread_dispose(td);

	td->td_kstack = td->td_altkstack;
	td->td_kstack_obj = td->td_altkstack_obj;
	td->td_kstack_pages = td->td_altkstack_pages;
	td->td_altkstack = 0;
	td->td_altkstack_obj = NULL;
	td->td_altkstack_pages = 0;
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_forkproc(td, p2, td2, flags)
	struct thread *td;
	struct proc *p2;
	struct thread *td2;
	int flags;
{
	struct proc *p1 = td->td_proc;
	struct user *up;

	GIANT_REQUIRED;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory if it is shared; essentially
		 * this changes shared memory amongst threads into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return;
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		p1->p_vmspace->vm_refcnt++;
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);

		pmap_pinit2(vmspace_pmap(p2->p_vmspace));

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/* XXXKSE this is unsatisfactory but should be adequate */
	up = p2->p_uarea;
	MPASS(p2->p_sigacts != NULL);

	/*
	 * p_stats currently points at fields in the user struct
	 * but not at &u, instead at p_addr.  Copy parts of
	 * p_stats; zero the rest of p_stats (statistics).
	 */
	p2->p_stats = &up->u_stats;
	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
	    (caddr_t) &up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t) &up->u_stats.pstat_endcopy -
	    (caddr_t) &up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
}

/*
 * Called after process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
	struct proc *p;
{

	GIANT_REQUIRED;
	vmspace_exitfree(p);		/* and clean-out the vmspace */
}
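
/*
 * The flags handled by vm_forkproc() above mirror rfork(2): a plain
 * fork(2) passes RFPROC without RFMEM, so the child receives a
 * copy-on-write duplicate of the parent's vmspace via vmspace_fork();
 * a hypothetical caller doing
 *
 *	rfork(RFPROC | RFMEM);
 *
 * shares the parent's vmspace outright (vm_refcnt is bumped), while a
 * call with RFPROC clear merely unshares memory in place for the
 * current process.
 */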

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	struct proc *p = udata;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM.  Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the limit to no less than 2MB */
	rss_limit = max(cnt.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

void
faultin(p)
	struct proc *p;
{
#ifdef NO_SWAPPING

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_sflag & PS_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
	struct thread *td;

	GIANT_REQUIRED;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_sflag & PS_SWAPPINGIN)
		msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
	else if ((p->p_sflag & PS_INMEM) == 0) {
		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		mtx_lock_spin(&sched_lock);
		p->p_sflag |= PS_SWAPPINGIN;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);

		vm_proc_swapin(p);
		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td);

		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		p->p_sflag &= ~PS_SWAPPINGIN;
		p->p_sflag |= PS_INMEM;
		FOREACH_THREAD_IN_PROC(p, td) {
			TD_CLR_SWAPPED(td);
			if (TD_CAN_RUN(td))
				setrunnable(td);
		}
		mtx_unlock_spin(&sched_lock);

		wakeup(&p->p_sflag);

		/* Allow other threads to swap p out now. */
		--p->p_lock;
	}
#endif /* NO_SWAPPING */
}
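
/*
 * faultin() above is used both by the swapin scheduler below and by
 * vm_proc_swapin_all().  The PS_SWAPPINGIN flag makes concurrent callers
 * sleep on &p->p_sflag until the first one finishes, and the p_lock hold
 * count keeps swapout_procs() from pushing the process back out while its
 * U area and kernel stacks are still being paged in.
 */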

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * XXXKSE - process with the thread with highest priority counts..
 *
 * Giant is still held at this point, to be released in tsleep.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	struct proc *p;
	struct thread *td;
	int pri;
	struct proc *pp;
	int ppri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	/* GIANT_REQUIRED */

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct ksegrp *kg;
		if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
			continue;
		}
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			if (td->td_inhibitors == TDI_SWAPPED) {
				kg = td->td_ksegrp;
				pri = p->p_swtime + kg->kg_slptime;
				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
					pri -= kg->kg_nice * 8;
				}

				/*
				 * if this ksegrp is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
		}
		mtx_unlock_spin(&sched_lock);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
		goto loop;
	}
	PROC_LOCK(p);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
		PROC_UNLOCK(p);
		goto loop;
	}

	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPINREQ;
	mtx_unlock_spin(&sched_lock);

	/*
	 * We would like to bring someone in. (only if there is space).
	 * [What checks the space? ]
	 */
	faultin(p);
	PROC_UNLOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_swtime = 0;
	mtx_unlock_spin(&sched_lock);
	goto loop;
}

#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");
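
/*
 * Both thresholds are exported read-write, so they can be tuned at run
 * time, e.g. (hypothetical values):
 *
 *	sysctl vm.swap_idle_threshold1=5
 *	sysctl vm.swap_idle_threshold2=30
 *
 * They are compared against kg_slptime in swapout_procs() below, and
 * swap_idle_threshold2 only matters when idle swapping (VM_SWAP_IDLE)
 * has been requested by the pageout daemon.
 */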

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
int action;
{
	struct proc *p;
	struct thread *td;
	struct ksegrp *kg;
	int didswap = 0;

	GIANT_REQUIRED;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;

		/*
		 * Watch out for a process in
		 * creation.  It may have no
		 * address space or lock yet.
		 */
		mtx_lock_spin(&sched_lock);
		if (p->p_state == PRS_NEW) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}
		mtx_unlock_spin(&sched_lock);

		/*
		 * An aio daemon switches its
		 * address space while running.
		 * Perform a quick check whether
		 * a process has P_SYSTEM.
		 */
		if ((p->p_flag & P_SYSTEM) != 0)
			continue;

		/*
		 * Do not swapout a process that
		 * is waiting for VM data
		 * structures as there is a possible
		 * deadlock.  Test this first as
		 * this may block.
		 *
		 * Lock the map until swapout
		 * finishes, or a thread of this
		 * process may attempt to alter
		 * the map.
		 */
		PROC_LOCK(p);
		vm = p->p_vmspace;
		KASSERT(vm != NULL,
		    ("swapout_procs: a process has no address space"));
		++vm->vm_refcnt;
		PROC_UNLOCK(p);
		if (!vm_map_trylock(&vm->vm_map))
			goto nextproc1;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
		    ) != 0) {
			goto nextproc2;
		}
		/*
		 * only aiod changes vmspace, however it will be
		 * skipped because of the if statement above checking
		 * for P_SYSTEM
		 */
		if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) != PS_INMEM)
			goto nextproc2;

		switch (p->p_state) {
		default:
			/* Don't swap out processes in any sort
			 * of 'special' state. */
			break;

		case PRS_NORMAL:
			mtx_lock_spin(&sched_lock);
			/*
			 * do not swapout a realtime process
			 * Check all the thread groups..
			 */
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				if (PRI_IS_REALTIME(kg->kg_pri_class))
					goto nextproc;

				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (kg->kg_slptime < swap_idle_threshold1)
					goto nextproc;

				/*
				 * Do not swapout a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				FOREACH_THREAD_IN_GROUP(kg, td) {
					if ((td->td_priority) < PSOCK ||
					    !thread_safetoswapout(td))
						goto nextproc;
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (kg->kg_slptime < swap_idle_threshold2)))
					goto nextproc;

				if (minslptime > kg->kg_slptime)
					minslptime = kg->kg_slptime;
			}

			/*
			 * If the process has been asleep for awhile and had
			 * most of its pages taken away already, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
			    ((action & VM_SWAP_IDLE) &&
			    (minslptime > swap_idle_threshold2))) {
				swapout(p);
				didswap++;
				mtx_unlock_spin(&sched_lock);
				PROC_UNLOCK(p);
				vm_map_unlock(&vm->vm_map);
				vmspace_free(vm);
				sx_sunlock(&allproc_lock);
				goto retry;
			}
nextproc:
			mtx_unlock_spin(&sched_lock);
		}
nextproc2:
		PROC_UNLOCK(p);
		vm_map_unlock(&vm->vm_map);
nextproc1:
		vmspace_free(vm);
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}
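
/*
 * swapout() below is entered from swapout_procs() with both the process
 * lock and sched_lock held.  It drops them while it pushes out the U area
 * and the kernel stacks (via vm_proc_swapout() and vm_thread_swapout())
 * and reacquires both before returning, which is why swapout_procs()
 * unlocks them again right after the call.
 */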

static void
swapout(p)
	struct proc *p;
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) == PS_INMEM,
	    ("swapout: lost a swapout race?"));

#if defined(INVARIANTS)
	/*
	 * Make sure that all threads are safe to be swapped out.
	 *
	 * Alternatively, we could swap out only safe threads.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(thread_safetoswapout(td),
		    ("swapout: there is a thread not safe for swapout"));
	}
#endif /* INVARIANTS */

	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	p->p_sflag &= ~PS_INMEM;
	p->p_sflag |= PS_SWAPPINGOUT;
	PROC_UNLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td)
		TD_SET_SWAPPED(td);
	mtx_unlock_spin(&sched_lock);

	vm_proc_swapout(p);
	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPPINGOUT;
	p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */