vm_glue.c revision 118771
1/* 2 * Copyright (c) 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * The Mach Operating System project at Carnegie-Mellon University. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the University of 19 * California, Berkeley and its contributors. 20 * 4. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * from: @(#)vm_glue.c 8.6 (Berkeley) 1/5/94 37 * 38 * 39 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 40 * All rights reserved. 41 * 42 * Permission to use, copy, modify and distribute this software and 43 * its documentation is hereby granted, provided that both the copyright 44 * notice and this permission notice appear in all copies of the 45 * software, derivative works or modified versions, and any portions 46 * thereof, and that both notices appear in supporting documentation. 47 * 48 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 49 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 50 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 51 * 52 * Carnegie Mellon requests users of this software to return to 53 * 54 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 55 * School of Computer Science 56 * Carnegie Mellon University 57 * Pittsburgh PA 15213-3890 58 * 59 * any improvements or extensions that they make and grant Carnegie the 60 * rights to redistribute these changes. 61 */ 62 63#include <sys/cdefs.h> 64__FBSDID("$FreeBSD: head/sys/vm/vm_glue.c 118771 2003-08-11 07:14:08Z bms $"); 65 66#include "opt_vm.h" 67#include "opt_kstack_pages.h" 68#include "opt_kstack_max_pages.h" 69 70#include <sys/param.h> 71#include <sys/systm.h> 72#include <sys/limits.h> 73#include <sys/lock.h> 74#include <sys/mutex.h> 75#include <sys/proc.h> 76#include <sys/resourcevar.h> 77#include <sys/shm.h> 78#include <sys/vmmeter.h> 79#include <sys/sx.h> 80#include <sys/sysctl.h> 81 82#include <sys/kernel.h> 83#include <sys/ktr.h> 84#include <sys/unistd.h> 85 86#include <vm/vm.h> 87#include <vm/vm_param.h> 88#include <vm/pmap.h> 89#include <vm/vm_map.h> 90#include <vm/vm_page.h> 91#include <vm/vm_pageout.h> 92#include <vm/vm_object.h> 93#include <vm/vm_kern.h> 94#include <vm/vm_extern.h> 95#include <vm/vm_pager.h> 96#include <vm/swap_pager.h> 97 98#include <sys/user.h> 99 100extern int maxslp; 101 102/* 103 * System initialization 104 * 105 * Note: proc0 from proc.h 106 */ 107static void vm_init_limits(void *); 108SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0) 109 110/* 111 * THIS MUST BE THE LAST INITIALIZATION ITEM!!! 112 * 113 * Note: run scheduling should be divorced from the vm system. 114 */ 115static void scheduler(void *); 116SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL) 117 118#ifndef NO_SWAPPING 119static void swapout(struct proc *); 120static void vm_proc_swapin(struct proc *p); 121static void vm_proc_swapout(struct proc *p); 122#endif 123 124/* 125 * MPSAFE 126 * 127 * WARNING! This code calls vm_map_check_protection() which only checks 128 * the associated vm_map_entry range. It does not determine whether the 129 * contents of the memory is actually readable or writable. In most cases 130 * just checking the vm_map_entry is sufficient within the kernel's address 131 * space. 132 */ 133int 134kernacc(addr, len, rw) 135 void *addr; 136 int len, rw; 137{ 138 boolean_t rv; 139 vm_offset_t saddr, eaddr; 140 vm_prot_t prot; 141 142 KASSERT((rw & ~VM_PROT_ALL) == 0, 143 ("illegal ``rw'' argument to kernacc (%x)\n", rw)); 144 prot = rw; 145 saddr = trunc_page((vm_offset_t)addr); 146 eaddr = round_page((vm_offset_t)addr + len); 147 rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot); 148 return (rv == TRUE); 149} 150 151/* 152 * MPSAFE 153 * 154 * WARNING! This code calls vm_map_check_protection() which only checks 155 * the associated vm_map_entry range. It does not determine whether the 156 * contents of the memory is actually readable or writable. vmapbuf(), 157 * vm_fault_quick(), or copyin()/copout()/su*()/fu*() functions should be 158 * used in conjuction with this call. 159 */ 160int 161useracc(addr, len, rw) 162 void *addr; 163 int len, rw; 164{ 165 boolean_t rv; 166 vm_prot_t prot; 167 vm_map_t map; 168 169 KASSERT((rw & ~VM_PROT_ALL) == 0, 170 ("illegal ``rw'' argument to useracc (%x)\n", rw)); 171 prot = rw; 172 map = &curproc->p_vmspace->vm_map; 173 if ((vm_offset_t)addr + len > vm_map_max(map) || 174 (vm_offset_t)addr + len < (vm_offset_t)addr) { 175 return (FALSE); 176 } 177 rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr), 178 round_page((vm_offset_t)addr + len), prot); 179 return (rv == TRUE); 180} 181 182/* 183 * MPSAFE 184 */ 185void 186vslock(addr, len) 187 void *addr; 188 u_int len; 189{ 190 191 vm_map_wire(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr), 192 round_page((vm_offset_t)addr + len), 193 VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES); 194} 195 196/* 197 * MPSAFE 198 */ 199void 200vsunlock(addr, len) 201 void *addr; 202 u_int len; 203{ 204 205 vm_map_unwire(&curproc->p_vmspace->vm_map, 206 trunc_page((vm_offset_t)addr), 207 round_page((vm_offset_t)addr + len), 208 VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES); 209} 210 211/* 212 * Create the U area for a new process. 213 * This routine directly affects the fork perf for a process. 214 */ 215void 216vm_proc_new(struct proc *p) 217{ 218 vm_page_t ma[UAREA_PAGES]; 219 vm_object_t upobj; 220 vm_offset_t up; 221 vm_page_t m; 222 u_int i; 223 224 /* 225 * Allocate object for the upage. 226 */ 227 upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES); 228 p->p_upages_obj = upobj; 229 230 /* 231 * Get a kernel virtual address for the U area for this process. 232 */ 233 up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE); 234 if (up == 0) 235 panic("vm_proc_new: upage allocation failed"); 236 p->p_uarea = (struct user *)up; 237 238 for (i = 0; i < UAREA_PAGES; i++) { 239 /* 240 * Get a uarea page. 241 */ 242 m = vm_page_grab(upobj, i, 243 VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED); 244 ma[i] = m; 245 246 vm_page_lock_queues(); 247 vm_page_wakeup(m); 248 vm_page_flag_clear(m, PG_ZERO); 249 m->valid = VM_PAGE_BITS_ALL; 250 vm_page_unlock_queues(); 251 } 252 253 /* 254 * Enter the pages into the kernel address space. 255 */ 256 pmap_qenter(up, ma, UAREA_PAGES); 257} 258 259/* 260 * Dispose the U area for a process that has exited. 261 * This routine directly impacts the exit perf of a process. 262 * XXX proc_zone is marked UMA_ZONE_NOFREE, so this should never be called. 263 */ 264void 265vm_proc_dispose(struct proc *p) 266{ 267 vm_object_t upobj; 268 vm_offset_t up; 269 vm_page_t m; 270 271 upobj = p->p_upages_obj; 272 VM_OBJECT_LOCK(upobj); 273 if (upobj->resident_page_count != UAREA_PAGES) 274 panic("vm_proc_dispose: incorrect number of pages in upobj"); 275 vm_page_lock_queues(); 276 while ((m = TAILQ_FIRST(&upobj->memq)) != NULL) { 277 vm_page_busy(m); 278 vm_page_unwire(m, 0); 279 vm_page_free(m); 280 } 281 vm_page_unlock_queues(); 282 VM_OBJECT_UNLOCK(upobj); 283 up = (vm_offset_t)p->p_uarea; 284 pmap_qremove(up, UAREA_PAGES); 285 kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE); 286 vm_object_deallocate(upobj); 287} 288 289#ifndef NO_SWAPPING 290/* 291 * Allow the U area for a process to be prejudicially paged out. 292 */ 293static void 294vm_proc_swapout(struct proc *p) 295{ 296 vm_object_t upobj; 297 vm_offset_t up; 298 vm_page_t m; 299 300 upobj = p->p_upages_obj; 301 VM_OBJECT_LOCK(upobj); 302 if (upobj->resident_page_count != UAREA_PAGES) 303 panic("vm_proc_dispose: incorrect number of pages in upobj"); 304 vm_page_lock_queues(); 305 TAILQ_FOREACH(m, &upobj->memq, listq) { 306 vm_page_dirty(m); 307 vm_page_unwire(m, 0); 308 } 309 vm_page_unlock_queues(); 310 VM_OBJECT_UNLOCK(upobj); 311 up = (vm_offset_t)p->p_uarea; 312 pmap_qremove(up, UAREA_PAGES); 313} 314 315/* 316 * Bring the U area for a specified process back in. 317 */ 318static void 319vm_proc_swapin(struct proc *p) 320{ 321 vm_page_t ma[UAREA_PAGES]; 322 vm_object_t upobj; 323 vm_offset_t up; 324 vm_page_t m; 325 int rv; 326 int i; 327 328 upobj = p->p_upages_obj; 329 VM_OBJECT_LOCK(upobj); 330 for (i = 0; i < UAREA_PAGES; i++) { 331 m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); 332 if (m->valid != VM_PAGE_BITS_ALL) { 333 rv = vm_pager_get_pages(upobj, &m, 1, 0); 334 if (rv != VM_PAGER_OK) 335 panic("vm_proc_swapin: cannot get upage"); 336 } 337 ma[i] = m; 338 } 339 if (upobj->resident_page_count != UAREA_PAGES) 340 panic("vm_proc_swapin: lost pages from upobj"); 341 vm_page_lock_queues(); 342 TAILQ_FOREACH(m, &upobj->memq, listq) { 343 m->valid = VM_PAGE_BITS_ALL; 344 vm_page_wire(m); 345 vm_page_wakeup(m); 346 } 347 vm_page_unlock_queues(); 348 VM_OBJECT_UNLOCK(upobj); 349 up = (vm_offset_t)p->p_uarea; 350 pmap_qenter(up, ma, UAREA_PAGES); 351} 352 353/* 354 * Swap in the UAREAs of all processes swapped out to the given device. 355 * The pages in the UAREA are marked dirty and their swap metadata is freed. 356 */ 357void 358vm_proc_swapin_all(struct swdevt *devidx) 359{ 360 struct proc *p; 361 vm_object_t object; 362 vm_page_t m; 363 364retry: 365 sx_slock(&allproc_lock); 366 FOREACH_PROC_IN_SYSTEM(p) { 367 PROC_LOCK(p); 368 object = p->p_upages_obj; 369 if (object != NULL) { 370 VM_OBJECT_LOCK(object); 371 if (swap_pager_isswapped(object, devidx)) { 372 VM_OBJECT_UNLOCK(object); 373 sx_sunlock(&allproc_lock); 374 faultin(p); 375 PROC_UNLOCK(p); 376 VM_OBJECT_LOCK(object); 377 vm_page_lock_queues(); 378 TAILQ_FOREACH(m, &object->memq, listq) 379 vm_page_dirty(m); 380 vm_page_unlock_queues(); 381 swap_pager_freespace(object, 0, 382 object->un_pager.swp.swp_bcount); 383 VM_OBJECT_UNLOCK(object); 384 goto retry; 385 } 386 VM_OBJECT_UNLOCK(object); 387 } 388 PROC_UNLOCK(p); 389 } 390 sx_sunlock(&allproc_lock); 391} 392#endif 393 394#ifndef KSTACK_MAX_PAGES 395#define KSTACK_MAX_PAGES 32 396#endif 397 398/* 399 * Create the kernel stack (including pcb for i386) for a new thread. 400 * This routine directly affects the fork perf for a process and 401 * create performance for a thread. 402 */ 403void 404vm_thread_new(struct thread *td, int pages) 405{ 406 vm_object_t ksobj; 407 vm_offset_t ks; 408 vm_page_t m, ma[KSTACK_MAX_PAGES]; 409 int i; 410 411 /* Bounds check */ 412 if (pages <= 1) 413 pages = KSTACK_PAGES; 414 else if (pages > KSTACK_MAX_PAGES) 415 pages = KSTACK_MAX_PAGES; 416 /* 417 * Allocate an object for the kstack. 418 */ 419 ksobj = vm_object_allocate(OBJT_DEFAULT, pages); 420 td->td_kstack_obj = ksobj; 421 /* 422 * Get a kernel virtual address for this thread's kstack. 423 */ 424 ks = kmem_alloc_nofault(kernel_map, 425 (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE); 426 if (ks == 0) 427 panic("vm_thread_new: kstack allocation failed"); 428 if (KSTACK_GUARD_PAGES != 0) { 429 pmap_qremove(ks, KSTACK_GUARD_PAGES); 430 ks += KSTACK_GUARD_PAGES * PAGE_SIZE; 431 } 432 td->td_kstack = ks; 433 /* 434 * Knowing the number of pages allocated is useful when you 435 * want to deallocate them. 436 */ 437 td->td_kstack_pages = pages; 438 /* 439 * For the length of the stack, link in a real page of ram for each 440 * page of stack. 441 */ 442 VM_OBJECT_LOCK(ksobj); 443 for (i = 0; i < pages; i++) { 444 /* 445 * Get a kernel stack page. 446 */ 447 m = vm_page_grab(ksobj, i, 448 VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED); 449 ma[i] = m; 450 vm_page_lock_queues(); 451 vm_page_wakeup(m); 452 m->valid = VM_PAGE_BITS_ALL; 453 vm_page_unlock_queues(); 454 } 455 VM_OBJECT_UNLOCK(ksobj); 456 pmap_qenter(ks, ma, pages); 457} 458 459/* 460 * Dispose of a thread's kernel stack. 461 */ 462void 463vm_thread_dispose(struct thread *td) 464{ 465 vm_object_t ksobj; 466 vm_offset_t ks; 467 vm_page_t m; 468 int i, pages; 469 470 pages = td->td_kstack_pages; 471 ksobj = td->td_kstack_obj; 472 ks = td->td_kstack; 473 pmap_qremove(ks, pages); 474 VM_OBJECT_LOCK(ksobj); 475 for (i = 0; i < pages; i++) { 476 m = vm_page_lookup(ksobj, i); 477 if (m == NULL) 478 panic("vm_thread_dispose: kstack already missing?"); 479 vm_page_lock_queues(); 480 vm_page_busy(m); 481 vm_page_unwire(m, 0); 482 vm_page_free(m); 483 vm_page_unlock_queues(); 484 } 485 VM_OBJECT_UNLOCK(ksobj); 486 vm_object_deallocate(ksobj); 487 kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE), 488 (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE); 489} 490 491/* 492 * Allow a thread's kernel stack to be paged out. 493 */ 494void 495vm_thread_swapout(struct thread *td) 496{ 497 vm_object_t ksobj; 498 vm_page_t m; 499 int i, pages; 500 501#ifdef __alpha__ 502 /* 503 * Make sure we aren't fpcurthread. 504 */ 505 alpha_fpstate_save(td, 1); 506#endif 507 pages = td->td_kstack_pages; 508 ksobj = td->td_kstack_obj; 509 pmap_qremove(td->td_kstack, pages); 510 VM_OBJECT_LOCK(ksobj); 511 for (i = 0; i < pages; i++) { 512 m = vm_page_lookup(ksobj, i); 513 if (m == NULL) 514 panic("vm_thread_swapout: kstack already missing?"); 515 vm_page_lock_queues(); 516 vm_page_dirty(m); 517 vm_page_unwire(m, 0); 518 vm_page_unlock_queues(); 519 } 520 VM_OBJECT_UNLOCK(ksobj); 521} 522 523/* 524 * Bring the kernel stack for a specified thread back in. 525 */ 526void 527vm_thread_swapin(struct thread *td) 528{ 529 vm_object_t ksobj; 530 vm_page_t m, ma[KSTACK_MAX_PAGES]; 531 int i, pages, rv; 532 533 pages = td->td_kstack_pages; 534 ksobj = td->td_kstack_obj; 535 VM_OBJECT_LOCK(ksobj); 536 for (i = 0; i < pages; i++) { 537 m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); 538 if (m->valid != VM_PAGE_BITS_ALL) { 539 rv = vm_pager_get_pages(ksobj, &m, 1, 0); 540 if (rv != VM_PAGER_OK) 541 panic("vm_thread_swapin: cannot get kstack for proc: %d", td->td_proc->p_pid); 542 m = vm_page_lookup(ksobj, i); 543 m->valid = VM_PAGE_BITS_ALL; 544 } 545 ma[i] = m; 546 vm_page_lock_queues(); 547 vm_page_wire(m); 548 vm_page_wakeup(m); 549 vm_page_unlock_queues(); 550 } 551 VM_OBJECT_UNLOCK(ksobj); 552 pmap_qenter(td->td_kstack, ma, pages); 553#ifdef __alpha__ 554 /* 555 * The pcb may be at a different physical address now so cache the 556 * new address. 557 */ 558 td->td_md.md_pcbpaddr = (void *)vtophys((vm_offset_t)td->td_pcb); 559#endif 560} 561 562/* 563 * Set up a variable-sized alternate kstack. 564 */ 565void 566vm_thread_new_altkstack(struct thread *td, int pages) 567{ 568 569 td->td_altkstack = td->td_kstack; 570 td->td_altkstack_obj = td->td_kstack_obj; 571 td->td_altkstack_pages = td->td_kstack_pages; 572 573 vm_thread_new(td, pages); 574} 575 576/* 577 * Restore the original kstack. 578 */ 579void 580vm_thread_dispose_altkstack(struct thread *td) 581{ 582 583 vm_thread_dispose(td); 584 585 td->td_kstack = td->td_altkstack; 586 td->td_kstack_obj = td->td_altkstack_obj; 587 td->td_kstack_pages = td->td_altkstack_pages; 588 td->td_altkstack = 0; 589 td->td_altkstack_obj = NULL; 590 td->td_altkstack_pages = 0; 591} 592 593/* 594 * Implement fork's actions on an address space. 595 * Here we arrange for the address space to be copied or referenced, 596 * allocate a user struct (pcb and kernel stack), then call the 597 * machine-dependent layer to fill those in and make the new process 598 * ready to run. The new process is set up so that it returns directly 599 * to user mode to avoid stack copying and relocation problems. 600 */ 601void 602vm_forkproc(td, p2, td2, flags) 603 struct thread *td; 604 struct proc *p2; 605 struct thread *td2; 606 int flags; 607{ 608 struct proc *p1 = td->td_proc; 609 struct user *up; 610 611 GIANT_REQUIRED; 612 613 if ((flags & RFPROC) == 0) { 614 /* 615 * Divorce the memory, if it is shared, essentially 616 * this changes shared memory amongst threads, into 617 * COW locally. 618 */ 619 if ((flags & RFMEM) == 0) { 620 if (p1->p_vmspace->vm_refcnt > 1) { 621 vmspace_unshare(p1); 622 } 623 } 624 cpu_fork(td, p2, td2, flags); 625 return; 626 } 627 628 if (flags & RFMEM) { 629 p2->p_vmspace = p1->p_vmspace; 630 p1->p_vmspace->vm_refcnt++; 631 } 632 633 while (vm_page_count_severe()) { 634 VM_WAIT; 635 } 636 637 if ((flags & RFMEM) == 0) { 638 p2->p_vmspace = vmspace_fork(p1->p_vmspace); 639 640 pmap_pinit2(vmspace_pmap(p2->p_vmspace)); 641 642 if (p1->p_vmspace->vm_shm) 643 shmfork(p1, p2); 644 } 645 646 /* XXXKSE this is unsatisfactory but should be adequate */ 647 up = p2->p_uarea; 648 MPASS(p2->p_sigacts != NULL); 649 650 /* 651 * p_stats currently points at fields in the user struct 652 * but not at &u, instead at p_addr. Copy parts of 653 * p_stats; zero the rest of p_stats (statistics). 654 */ 655 p2->p_stats = &up->u_stats; 656 bzero(&up->u_stats.pstat_startzero, 657 (unsigned) ((caddr_t) &up->u_stats.pstat_endzero - 658 (caddr_t) &up->u_stats.pstat_startzero)); 659 bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy, 660 ((caddr_t) &up->u_stats.pstat_endcopy - 661 (caddr_t) &up->u_stats.pstat_startcopy)); 662 663 /* 664 * cpu_fork will copy and update the pcb, set up the kernel stack, 665 * and make the child ready to run. 666 */ 667 cpu_fork(td, p2, td2, flags); 668} 669 670/* 671 * Called after process has been wait(2)'ed apon and is being reaped. 672 * The idea is to reclaim resources that we could not reclaim while 673 * the process was still executing. 674 */ 675void 676vm_waitproc(p) 677 struct proc *p; 678{ 679 680 GIANT_REQUIRED; 681 vmspace_exitfree(p); /* and clean-out the vmspace */ 682} 683 684/* 685 * Set default limits for VM system. 686 * Called for proc 0, and then inherited by all others. 687 * 688 * XXX should probably act directly on proc0. 689 */ 690static void 691vm_init_limits(udata) 692 void *udata; 693{ 694 struct proc *p = udata; 695 int rss_limit; 696 697 /* 698 * Set up the initial limits on process VM. Set the maximum resident 699 * set size to be half of (reasonably) available memory. Since this 700 * is a soft limit, it comes into effect only when the system is out 701 * of memory - half of main memory helps to favor smaller processes, 702 * and reduces thrashing of the object cache. 703 */ 704 p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz; 705 p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz; 706 p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz; 707 p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz; 708 /* limit the limit to no less than 2MB */ 709 rss_limit = max(cnt.v_free_count, 512); 710 p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit); 711 p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY; 712} 713 714void 715faultin(p) 716 struct proc *p; 717{ 718#ifdef NO_SWAPPING 719 720 PROC_LOCK_ASSERT(p, MA_OWNED); 721 if ((p->p_sflag & PS_INMEM) == 0) 722 panic("faultin: proc swapped out with NO_SWAPPING!"); 723#else /* !NO_SWAPPING */ 724 struct thread *td; 725 726 GIANT_REQUIRED; 727 PROC_LOCK_ASSERT(p, MA_OWNED); 728 /* 729 * If another process is swapping in this process, 730 * just wait until it finishes. 731 */ 732 if (p->p_sflag & PS_SWAPPINGIN) 733 msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0); 734 else if ((p->p_sflag & PS_INMEM) == 0) { 735 /* 736 * Don't let another thread swap process p out while we are 737 * busy swapping it in. 738 */ 739 ++p->p_lock; 740 mtx_lock_spin(&sched_lock); 741 p->p_sflag |= PS_SWAPPINGIN; 742 mtx_unlock_spin(&sched_lock); 743 PROC_UNLOCK(p); 744 745 vm_proc_swapin(p); 746 FOREACH_THREAD_IN_PROC(p, td) 747 vm_thread_swapin(td); 748 749 PROC_LOCK(p); 750 mtx_lock_spin(&sched_lock); 751 p->p_sflag &= ~PS_SWAPPINGIN; 752 p->p_sflag |= PS_INMEM; 753 FOREACH_THREAD_IN_PROC(p, td) { 754 TD_CLR_SWAPPED(td); 755 if (TD_CAN_RUN(td)) 756 setrunnable(td); 757 } 758 mtx_unlock_spin(&sched_lock); 759 760 wakeup(&p->p_sflag); 761 762 /* Allow other threads to swap p out now. */ 763 --p->p_lock; 764 } 765#endif /* NO_SWAPPING */ 766} 767 768/* 769 * This swapin algorithm attempts to swap-in processes only if there 770 * is enough space for them. Of course, if a process waits for a long 771 * time, it will be swapped in anyway. 772 * 773 * XXXKSE - process with the thread with highest priority counts.. 774 * 775 * Giant is still held at this point, to be released in tsleep. 776 */ 777/* ARGSUSED*/ 778static void 779scheduler(dummy) 780 void *dummy; 781{ 782 struct proc *p; 783 struct thread *td; 784 int pri; 785 struct proc *pp; 786 int ppri; 787 788 mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED); 789 /* GIANT_REQUIRED */ 790 791loop: 792 if (vm_page_count_min()) { 793 VM_WAIT; 794 goto loop; 795 } 796 797 pp = NULL; 798 ppri = INT_MIN; 799 sx_slock(&allproc_lock); 800 FOREACH_PROC_IN_SYSTEM(p) { 801 struct ksegrp *kg; 802 if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) { 803 continue; 804 } 805 mtx_lock_spin(&sched_lock); 806 FOREACH_THREAD_IN_PROC(p, td) { 807 /* 808 * An otherwise runnable thread of a process 809 * swapped out has only the TDI_SWAPPED bit set. 810 * 811 */ 812 if (td->td_inhibitors == TDI_SWAPPED) { 813 kg = td->td_ksegrp; 814 pri = p->p_swtime + kg->kg_slptime; 815 if ((p->p_sflag & PS_SWAPINREQ) == 0) { 816 pri -= kg->kg_nice * 8; 817 } 818 819 /* 820 * if this ksegrp is higher priority 821 * and there is enough space, then select 822 * this process instead of the previous 823 * selection. 824 */ 825 if (pri > ppri) { 826 pp = p; 827 ppri = pri; 828 } 829 } 830 } 831 mtx_unlock_spin(&sched_lock); 832 } 833 sx_sunlock(&allproc_lock); 834 835 /* 836 * Nothing to do, back to sleep. 837 */ 838 if ((p = pp) == NULL) { 839 tsleep(&proc0, PVM, "sched", maxslp * hz / 2); 840 goto loop; 841 } 842 PROC_LOCK(p); 843 844 /* 845 * Another process may be bringing or may have already 846 * brought this process in while we traverse all threads. 847 * Or, this process may even be being swapped out again. 848 */ 849 if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) { 850 PROC_UNLOCK(p); 851 goto loop; 852 } 853 854 mtx_lock_spin(&sched_lock); 855 p->p_sflag &= ~PS_SWAPINREQ; 856 mtx_unlock_spin(&sched_lock); 857 858 /* 859 * We would like to bring someone in. (only if there is space). 860 * [What checks the space? ] 861 */ 862 faultin(p); 863 PROC_UNLOCK(p); 864 mtx_lock_spin(&sched_lock); 865 p->p_swtime = 0; 866 mtx_unlock_spin(&sched_lock); 867 goto loop; 868} 869 870#ifndef NO_SWAPPING 871 872/* 873 * Swap_idle_threshold1 is the guaranteed swapped in time for a process 874 */ 875static int swap_idle_threshold1 = 2; 876SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW, 877 &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process"); 878 879/* 880 * Swap_idle_threshold2 is the time that a process can be idle before 881 * it will be swapped out, if idle swapping is enabled. 882 */ 883static int swap_idle_threshold2 = 10; 884SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW, 885 &swap_idle_threshold2, 0, "Time before a process will be swapped out"); 886 887/* 888 * Swapout is driven by the pageout daemon. Very simple, we find eligible 889 * procs and unwire their u-areas. We try to always "swap" at least one 890 * process in case we need the room for a swapin. 891 * If any procs have been sleeping/stopped for at least maxslp seconds, 892 * they are swapped. Else, we swap the longest-sleeping or stopped process, 893 * if any, otherwise the longest-resident process. 894 */ 895void 896swapout_procs(action) 897int action; 898{ 899 struct proc *p; 900 struct thread *td; 901 struct ksegrp *kg; 902 int didswap = 0; 903 904 GIANT_REQUIRED; 905 906retry: 907 sx_slock(&allproc_lock); 908 FOREACH_PROC_IN_SYSTEM(p) { 909 struct vmspace *vm; 910 int minslptime = 100000; 911 912 /* 913 * Watch out for a process in 914 * creation. It may have no 915 * address space or lock yet. 916 */ 917 mtx_lock_spin(&sched_lock); 918 if (p->p_state == PRS_NEW) { 919 mtx_unlock_spin(&sched_lock); 920 continue; 921 } 922 mtx_unlock_spin(&sched_lock); 923 924 /* 925 * An aio daemon switches its 926 * address space while running. 927 * Perform a quick check whether 928 * a process has P_SYSTEM. 929 */ 930 if ((p->p_flag & P_SYSTEM) != 0) 931 continue; 932 933 /* 934 * Do not swapout a process that 935 * is waiting for VM data 936 * structures as there is a possible 937 * deadlock. Test this first as 938 * this may block. 939 * 940 * Lock the map until swapout 941 * finishes, or a thread of this 942 * process may attempt to alter 943 * the map. 944 */ 945 PROC_LOCK(p); 946 vm = p->p_vmspace; 947 KASSERT(vm != NULL, 948 ("swapout_procs: a process has no address space")); 949 ++vm->vm_refcnt; 950 PROC_UNLOCK(p); 951 if (!vm_map_trylock(&vm->vm_map)) 952 goto nextproc1; 953 954 PROC_LOCK(p); 955 if (p->p_lock != 0 || 956 (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT) 957 ) != 0) { 958 goto nextproc2; 959 } 960 /* 961 * only aiod changes vmspace, however it will be 962 * skipped because of the if statement above checking 963 * for P_SYSTEM 964 */ 965 if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) != PS_INMEM) 966 goto nextproc2; 967 968 switch (p->p_state) { 969 default: 970 /* Don't swap out processes in any sort 971 * of 'special' state. */ 972 break; 973 974 case PRS_NORMAL: 975 mtx_lock_spin(&sched_lock); 976 /* 977 * do not swapout a realtime process 978 * Check all the thread groups.. 979 */ 980 FOREACH_KSEGRP_IN_PROC(p, kg) { 981 if (PRI_IS_REALTIME(kg->kg_pri_class)) 982 goto nextproc; 983 984 /* 985 * Guarantee swap_idle_threshold1 986 * time in memory. 987 */ 988 if (kg->kg_slptime < swap_idle_threshold1) 989 goto nextproc; 990 991 /* 992 * Do not swapout a process if it is 993 * waiting on a critical event of some 994 * kind or there is a thread whose 995 * pageable memory may be accessed. 996 * 997 * This could be refined to support 998 * swapping out a thread. 999 */ 1000 FOREACH_THREAD_IN_GROUP(kg, td) { 1001 if ((td->td_priority) < PSOCK || 1002 !thread_safetoswapout(td)) 1003 goto nextproc; 1004 } 1005 /* 1006 * If the system is under memory stress, 1007 * or if we are swapping 1008 * idle processes >= swap_idle_threshold2, 1009 * then swap the process out. 1010 */ 1011 if (((action & VM_SWAP_NORMAL) == 0) && 1012 (((action & VM_SWAP_IDLE) == 0) || 1013 (kg->kg_slptime < swap_idle_threshold2))) 1014 goto nextproc; 1015 1016 if (minslptime > kg->kg_slptime) 1017 minslptime = kg->kg_slptime; 1018 } 1019 1020 /* 1021 * If the process has been asleep for awhile and had 1022 * most of its pages taken away already, swap it out. 1023 */ 1024 if ((action & VM_SWAP_NORMAL) || 1025 ((action & VM_SWAP_IDLE) && 1026 (minslptime > swap_idle_threshold2))) { 1027 swapout(p); 1028 didswap++; 1029 mtx_unlock_spin(&sched_lock); 1030 PROC_UNLOCK(p); 1031 vm_map_unlock(&vm->vm_map); 1032 vmspace_free(vm); 1033 sx_sunlock(&allproc_lock); 1034 goto retry; 1035 } 1036nextproc: 1037 mtx_unlock_spin(&sched_lock); 1038 } 1039nextproc2: 1040 PROC_UNLOCK(p); 1041 vm_map_unlock(&vm->vm_map); 1042nextproc1: 1043 vmspace_free(vm); 1044 continue; 1045 } 1046 sx_sunlock(&allproc_lock); 1047 /* 1048 * If we swapped something out, and another process needed memory, 1049 * then wakeup the sched process. 1050 */ 1051 if (didswap) 1052 wakeup(&proc0); 1053} 1054 1055static void 1056swapout(p) 1057 struct proc *p; 1058{ 1059 struct thread *td; 1060 1061 PROC_LOCK_ASSERT(p, MA_OWNED); 1062 mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED); 1063#if defined(SWAP_DEBUG) 1064 printf("swapping out %d\n", p->p_pid); 1065#endif 1066 1067 /* 1068 * The states of this process and its threads may have changed 1069 * by now. Assuming that there is only one pageout daemon thread, 1070 * this process should still be in memory. 1071 */ 1072 KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) == PS_INMEM, 1073 ("swapout: lost a swapout race?")); 1074 1075#if defined(INVARIANTS) 1076 /* 1077 * Make sure that all threads are safe to be swapped out. 1078 * 1079 * Alternatively, we could swap out only safe threads. 1080 */ 1081 FOREACH_THREAD_IN_PROC(p, td) { 1082 KASSERT(thread_safetoswapout(td), 1083 ("swapout: there is a thread not safe for swapout")); 1084 } 1085#endif /* INVARIANTS */ 1086 1087 ++p->p_stats->p_ru.ru_nswap; 1088 /* 1089 * remember the process resident count 1090 */ 1091 p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace); 1092 1093 p->p_sflag &= ~PS_INMEM; 1094 p->p_sflag |= PS_SWAPPINGOUT; 1095 PROC_UNLOCK(p); 1096 FOREACH_THREAD_IN_PROC(p, td) 1097 TD_SET_SWAPPED(td); 1098 mtx_unlock_spin(&sched_lock); 1099 1100 vm_proc_swapout(p); 1101 FOREACH_THREAD_IN_PROC(p, td) 1102 vm_thread_swapout(td); 1103 1104 PROC_LOCK(p); 1105 mtx_lock_spin(&sched_lock); 1106 p->p_sflag &= ~PS_SWAPPINGOUT; 1107 p->p_swtime = 0; 1108} 1109#endif /* !NO_SWAPPING */ 1110