vm_glue.c revision 127007
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_glue.c 127007 2004-03-15 06:42:40Z truckman $");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <sys/user.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL)

#ifndef NO_SWAPPING
static void swapout(struct proc *);
static void vm_proc_swapin(struct proc *p);
static void vm_proc_swapout(struct proc *p);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
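 *
 * A typical (hypothetical) caller would probe the range first and only
 * then move the data, e.g.:
 *
 *	if (!useracc(uaddr, len, VM_PROT_READ))
 *		return (EFAULT);
 *	error = copyin(uaddr, kbuf, len);
 *
 * where "uaddr", "kbuf", and "len" are illustrative names only; note
 * that copyin() can still fail even after useracc() has succeeded.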
 */
int
useracc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}

/*
 * MPSAFE
 */
int
vslock(addr, len)
	void *addr;
	size_t len;
{
	vm_offset_t start, end;
	int error, npages;

	start = trunc_page((vm_offset_t)addr);
	end = round_page((vm_offset_t)addr + len);

	/* disable wrap around */
	if (end <= start)
		return (EINVAL);

	npages = atop(end - start);

	if (npages > vm_page_max_wired)
		return (ENOMEM);

	PROC_LOCK(curproc);
	if (npages + pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map)) >
	    atop(lim_cur(curproc, RLIMIT_MEMLOCK))) {
		PROC_UNLOCK(curproc);
		return (ENOMEM);
	}
	PROC_UNLOCK(curproc);

#if 0
	/*
	 * XXX - not yet
	 *
	 * The limit for transient usage of wired pages should be
	 * larger than for "permanent" wired pages (mlock()).
	 *
	 * Also, the sysctl code, which is the only present user
	 * of vslock(), does a hard loop on EAGAIN.
	 */
	if (npages + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#endif

	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);

	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (error == KERN_SUCCESS ? 0 : EFAULT);
}

/*
 * MPSAFE
 */
void
vsunlock(addr, len)
	void *addr;
	size_t len;
{

	/* Rely on the parameter sanity checks performed by vslock(). */
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);
}

/*
 * Create the U area for a new process.
 * This routine directly affects fork performance for a process.
 */
void
vm_proc_new(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	u_int i;

	/*
	 * Get a kernel virtual address for the U area for this process.
	 */
	up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
	if (up == 0)
		panic("vm_proc_new: upage allocation failed");
	p->p_uarea = (struct user *)up;

	/*
	 * Allocate object and page(s) for the U area.
	 */
	upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
	p->p_upages_obj = upobj;
	VM_OBJECT_LOCK(upobj);
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;

		vm_page_lock_queues();
		vm_page_wakeup(m);
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(upobj);

	/*
	 * Enter the pages into the kernel address space.
	 */
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Dispose of the U area for a process that has exited.
 * This routine directly impacts the exit performance of a process.
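 * It undoes vm_proc_new(): the wired U-area pages are unwired and
 * freed, the kernel mapping is removed, and the KVA range and backing
 * object are released.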
 * XXX proc_zone is marked UMA_ZONE_NOFREE, so this should never be called.
 */
void
vm_proc_dispose(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_dispose: incorrect number of pages in upobj");
	vm_page_lock_queues();
	while ((m = TAILQ_FIRST(&upobj->memq)) != NULL) {
		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
	kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
	vm_object_deallocate(upobj);
}

#ifndef NO_SWAPPING
/*
 * Allow the U area for a process to be prejudicially paged out.
 */
static void
vm_proc_swapout(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapout: incorrect number of pages in upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
}

/*
 * Bring the U area for a specified process back in.
 */
static void
vm_proc_swapin(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	int rv;
	int i;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(upobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_proc_swapin: cannot get upage");
		}
		ma[i] = m;
	}
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapin: lost pages from upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_wire(m);
		vm_page_wakeup(m);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Swap in the UAREAs of all processes swapped out to the given device.
 * The pages in the UAREA are marked dirty and their swap metadata is freed.
 */
void
vm_proc_swapin_all(struct swdevt *devidx)
{
	struct proc *p;
	vm_object_t object;
	vm_page_t m;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		object = p->p_upages_obj;
		if (object != NULL) {
			VM_OBJECT_LOCK(object);
			if (swap_pager_isswapped(object, devidx)) {
				VM_OBJECT_UNLOCK(object);
				sx_sunlock(&allproc_lock);
				faultin(p);
				PROC_UNLOCK(p);
				VM_OBJECT_LOCK(object);
				vm_page_lock_queues();
				TAILQ_FOREACH(m, &object->memq, listq)
					vm_page_dirty(m);
				vm_page_unlock_queues();
				swap_pager_freespace(object, 0,
				    object->un_pager.swp.swp_bcount);
				VM_OBJECT_UNLOCK(object);
				goto retry;
			}
			VM_OBJECT_UNLOCK(object);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
}
#endif

#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
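 * The stack is backed by its own VM object; KSTACK_GUARD_PAGES of
 * unmapped KVA below it catch overflows before they reach adjacent
 * kernel memory.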
 * This routine directly affects fork performance for a process and
 * creation performance for a thread.
 */
void
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i;

	/* Bounds check */
	if (pages <= 1)
		pages = KSTACK_PAGES;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;
	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
	td->td_kstack_obj = ksobj;
	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
	ks = kmem_alloc_nofault(kernel_map,
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
	if (ks == 0)
		panic("vm_thread_new: kstack allocation failed");
	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack = ks;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
	/*
	 * For the length of the stack, link in a real page of ram for each
	 * page of stack.
	 */
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		/*
		 * Get a kernel stack page.
		 */
		m = vm_page_grab(ksobj, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;
		vm_page_lock_queues();
		vm_page_wakeup(m);
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m;
	int i, pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	pmap_qremove(ks, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_dispose: kstack already missing?");
		vm_page_lock_queues();
		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Allow a thread's kernel stack to be paged out.
 */
void
vm_thread_swapout(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m;
	int i, pages;

	cpu_thread_swapout(td);
	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	pmap_qremove(td->td_kstack, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_swapout: kstack already missing?");
		vm_page_lock_queues();
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
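 * Pages that are no longer fully valid are read back through the
 * pager; every page is then re-wired and mapped back into the
 * thread's kstack KVA.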
 */
void
vm_thread_swapin(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i, pages, rv;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_thread_swapin: cannot get kstack for proc: %d",
				    td->td_proc->p_pid);
			m = vm_page_lookup(ksobj, i);
			m->valid = VM_PAGE_BITS_ALL;
		}
		ma[i] = m;
		vm_page_lock_queues();
		vm_page_wire(m);
		vm_page_wakeup(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(td->td_kstack, ma, pages);
	cpu_thread_swapin(td);
}

/*
 * Set up a variable-sized alternate kstack.
 */
void
vm_thread_new_altkstack(struct thread *td, int pages)
{

	td->td_altkstack = td->td_kstack;
	td->td_altkstack_obj = td->td_kstack_obj;
	td->td_altkstack_pages = td->td_kstack_pages;

	vm_thread_new(td, pages);
}

/*
 * Restore the original kstack.
 */
void
vm_thread_dispose_altkstack(struct thread *td)
{

	vm_thread_dispose(td);

	td->td_kstack = td->td_altkstack;
	td->td_kstack_obj = td->td_altkstack_obj;
	td->td_kstack_pages = td->td_altkstack_pages;
	td->td_altkstack = 0;
	td->td_altkstack_obj = NULL;
	td->td_altkstack_pages = 0;
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_forkproc(td, p2, td2, flags)
	struct thread *td;
	struct proc *p2;
	struct thread *td2;
	int flags;
{
	struct proc *p1 = td->td_proc;
	struct user *up;

	GIANT_REQUIRED;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared; essentially
		 * this changes shared memory amongst threads into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return;
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		p1->p_vmspace->vm_refcnt++;
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);
		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/* XXXKSE this is unsatisfactory but should be adequate */
	up = p2->p_uarea;
	MPASS(p2->p_sigacts != NULL);

	/*
	 * p_stats currently points at fields in the user struct
	 * but not at &u, instead at p_addr.  Copy parts of
	 * p_stats; zero the rest of p_stats (statistics).
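	 * The pstat_startzero/pstat_endzero and pstat_startcopy/
	 * pstat_endcopy marker fields in struct pstats delimit the
	 * regions cleared and inherited by the bzero() and bcopy()
	 * below.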
	 */
	p2->p_stats = &up->u_stats;
	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
	    (caddr_t) &up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t) &up->u_stats.pstat_endcopy -
	    (caddr_t) &up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
}

/*
 * Called after process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
	struct proc *p;
{

	GIANT_REQUIRED;
	vmspace_exitfree(p);		/* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	struct proc *p = udata;
	struct plimit *limp;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM.  Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	limp = p->p_limit;
	limp->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	limp->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the limit to no less than 2MB */
	rss_limit = max(cnt.v_free_count, 512);
	limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

void
faultin(p)
	struct proc *p;
{
#ifdef NO_SWAPPING

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_sflag & PS_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
	struct thread *td;

	GIANT_REQUIRED;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_sflag & PS_SWAPPINGIN)
		msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
	else if ((p->p_sflag & PS_INMEM) == 0) {
		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		mtx_lock_spin(&sched_lock);
		p->p_sflag |= PS_SWAPPINGIN;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);

		vm_proc_swapin(p);
		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td);

		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		p->p_sflag &= ~PS_SWAPPINGIN;
		p->p_sflag |= PS_INMEM;
		FOREACH_THREAD_IN_PROC(p, td) {
			TD_CLR_SWAPPED(td);
			if (TD_CAN_RUN(td))
				setrunnable(td);
		}
		mtx_unlock_spin(&sched_lock);

		wakeup(&p->p_sflag);

		/* Allow other threads to swap p out now. */
		--p->p_lock;
	}
#endif /* NO_SWAPPING */
}

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * XXXKSE - process with the thread with highest priority counts..
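 *
 * A candidate's score is its swapped-out time plus its sleep time;
 * unless a swapin was explicitly requested (PS_SWAPINREQ), the nice
 * value is subtracted (pri -= kg_nice * 8), so "nicer" processes are
 * brought back in later.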
 *
 * Giant is still held at this point, to be released in tsleep.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	struct proc *p;
	struct thread *td;
	int pri;
	struct proc *pp;
	int ppri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	/* GIANT_REQUIRED */

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct ksegrp *kg;
		if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
			continue;
		}
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			if (td->td_inhibitors == TDI_SWAPPED) {
				kg = td->td_ksegrp;
				pri = p->p_swtime + kg->kg_slptime;
				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
					pri -= kg->kg_nice * 8;
				}

				/*
				 * if this ksegrp is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
		}
		mtx_unlock_spin(&sched_lock);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
		goto loop;
	}
	PROC_LOCK(p);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
		PROC_UNLOCK(p);
		goto loop;
	}

	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPINREQ;
	mtx_unlock_spin(&sched_lock);

	/*
	 * We would like to bring someone in. (only if there is space).
	 * [What checks the space? ]
	 */
	faultin(p);
	PROC_UNLOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_swtime = 0;
	mtx_unlock_spin(&sched_lock);
	goto loop;
}

#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
int action;
{
	struct proc *p;
	struct thread *td;
	struct ksegrp *kg;
	int didswap = 0;

	GIANT_REQUIRED;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;

		/*
		 * Watch out for a process in
		 * creation.  It may have no
		 * address space or lock yet.
		 */
		mtx_lock_spin(&sched_lock);
		if (p->p_state == PRS_NEW) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}
		mtx_unlock_spin(&sched_lock);

		/*
		 * An aio daemon switches its
		 * address space while running.
		 * Perform a quick check whether
		 * a process has P_SYSTEM.
		 */
		if ((p->p_flag & P_SYSTEM) != 0)
			continue;

		/*
		 * Do not swapout a process that
		 * is waiting for VM data
		 * structures as there is a possible
		 * deadlock.  Test this first as
		 * this may block.
		 *
		 * Lock the map until swapout
		 * finishes, or a thread of this
		 * process may attempt to alter
		 * the map.
		 */
		PROC_LOCK(p);
		vm = p->p_vmspace;
		KASSERT(vm != NULL,
		    ("swapout_procs: a process has no address space"));
		++vm->vm_refcnt;
		PROC_UNLOCK(p);
		if (!vm_map_trylock(&vm->vm_map))
			goto nextproc1;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
		    ) != 0) {
			goto nextproc2;
		}
		/*
		 * only aiod changes vmspace, however it will be
		 * skipped because of the if statement above checking
		 * for P_SYSTEM
		 */
		if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) != PS_INMEM)
			goto nextproc2;

		switch (p->p_state) {
		default:
			/* Don't swap out processes in any sort
			 * of 'special' state. */
			break;

		case PRS_NORMAL:
			mtx_lock_spin(&sched_lock);
			/*
			 * do not swapout a realtime process
			 * Check all the thread groups..
			 */
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				if (PRI_IS_REALTIME(kg->kg_pri_class))
					goto nextproc;

				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (kg->kg_slptime < swap_idle_threshold1)
					goto nextproc;

				/*
				 * Do not swapout a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				FOREACH_THREAD_IN_GROUP(kg, td) {
					if ((td->td_priority) < PSOCK ||
					    !thread_safetoswapout(td))
						goto nextproc;
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (kg->kg_slptime < swap_idle_threshold2)))
					goto nextproc;

				if (minslptime > kg->kg_slptime)
					minslptime = kg->kg_slptime;
			}

			/*
			 * If the process has been asleep for awhile and had
			 * most of its pages taken away already, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
			    ((action & VM_SWAP_IDLE) &&
			    (minslptime > swap_idle_threshold2))) {
				swapout(p);
				didswap++;
				mtx_unlock_spin(&sched_lock);
				PROC_UNLOCK(p);
				vm_map_unlock(&vm->vm_map);
				vmspace_free(vm);
				sx_sunlock(&allproc_lock);
				goto retry;
			}
nextproc:
			mtx_unlock_spin(&sched_lock);
		}
nextproc2:
		PROC_UNLOCK(p);
		vm_map_unlock(&vm->vm_map);
nextproc1:
		vmspace_free(vm);
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
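	 * (scheduler() sleeps on &proc0, which is why &proc0 is used as
	 * the wakeup channel here.)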
	 */
	if (didswap)
		wakeup(&proc0);
}

static void
swapout(p)
	struct proc *p;
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) == PS_INMEM,
	    ("swapout: lost a swapout race?"));

#if defined(INVARIANTS)
	/*
	 * Make sure that all threads are safe to be swapped out.
	 *
	 * Alternatively, we could swap out only safe threads.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(thread_safetoswapout(td),
		    ("swapout: there is a thread not safe for swapout"));
	}
#endif /* INVARIANTS */

	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	p->p_sflag &= ~PS_INMEM;
	p->p_sflag |= PS_SWAPPINGOUT;
	PROC_UNLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td)
		TD_SET_SWAPPED(td);
	mtx_unlock_spin(&sched_lock);

	vm_proc_swapout(p);
	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPPINGOUT;
	p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */