vm_glue.c revision 126668
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_glue.c 126668 2004-03-05 22:03:11Z truckman $");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <sys/user.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL)

#ifndef NO_SWAPPING
static void swapout(struct proc *);
static void vm_proc_swapin(struct proc *p);
static void vm_proc_swapout(struct proc *p);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(addr, len, rw)
        void *addr;
        int len, rw;
{
        boolean_t rv;
        vm_offset_t saddr, eaddr;
        vm_prot_t prot;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to kernacc (%x)\n", rw));
        prot = rw;
        saddr = trunc_page((vm_offset_t)addr);
        eaddr = round_page((vm_offset_t)addr + len);
        vm_map_lock_read(kernel_map);
        rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
        vm_map_unlock_read(kernel_map);
        return (rv == TRUE);
}
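/*
 * Illustrative use only: a caller wanting to verify that a kernel
 * buffer is mapped read/write before touching it might do
 *
 *	if (!kernacc(buf, len, VM_PROT_READ | VM_PROT_WRITE))
 *		return (EFAULT);
 *
 * remembering that only the map entry protections are checked.
 */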
/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(addr, len, rw)
        void *addr;
        int len, rw;
{
        boolean_t rv;
        vm_prot_t prot;
        vm_map_t map;

        KASSERT((rw & ~VM_PROT_ALL) == 0,
            ("illegal ``rw'' argument to useracc (%x)\n", rw));
        prot = rw;
        map = &curproc->p_vmspace->vm_map;
        if ((vm_offset_t)addr + len > vm_map_max(map) ||
            (vm_offset_t)addr + len < (vm_offset_t)addr) {
                return (FALSE);
        }
        vm_map_lock_read(map);
        rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
            round_page((vm_offset_t)addr + len), prot);
        vm_map_unlock_read(map);
        return (rv == TRUE);
}

/*
 * MPSAFE
 */
int
vslock(td, addr, size)
        struct thread *td;
        vm_offset_t addr;
        vm_size_t size;
{
        vm_offset_t start, end;
        struct proc *proc = td->td_proc;
        int error, npages;

        start = trunc_page(addr);
        end = round_page(addr + size);

        /* disable wrap around */
        if (end <= start)
                return (EINVAL);

        npages = atop(end - start);

        if (npages > vm_page_max_wired)
                return (ENOMEM);

        PROC_LOCK(proc);
        if (npages + pmap_wired_count(vm_map_pmap(&proc->p_vmspace->vm_map)) >
            atop(lim_cur(proc, RLIMIT_MEMLOCK))) {
                PROC_UNLOCK(proc);
                return (ENOMEM);
        }
        PROC_UNLOCK(proc);

#if 0
        /*
         * XXX - not yet
         *
         * The limit for transient usage of wired pages should be
         * larger than for "permanent" wired pages (mlock()).
         *
         * Also, the sysctl code, which is the only present user
         * of vslock(), does a hard loop on EAGAIN.
         */
        if (npages + cnt.v_wire_count > vm_page_max_wired)
                return (EAGAIN);
#endif

        error = vm_map_wire(&proc->p_vmspace->vm_map, start, end,
            VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);

        /* EINVAL is probably a better error to return than ENOMEM */
        return (error == KERN_SUCCESS ? 0 : EINVAL);
}

/*
 * MPSAFE
 */
int
vsunlock(td, addr, size)
        struct thread *td;
        vm_offset_t addr;
        vm_size_t size;
{
        vm_offset_t start, end;
        int error;

        start = trunc_page(addr);
        end = round_page(addr + size);

        /* disable wrap around */
        if (end <= start)
                return (EINVAL);

        error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
            VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);
        return (error == KERN_SUCCESS ? 0 : EINVAL);
}

/*
 * Create the U area for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
vm_proc_new(struct proc *p)
{
        vm_page_t ma[UAREA_PAGES];
        vm_object_t upobj;
        vm_offset_t up;
        vm_page_t m;
        u_int i;

        /*
         * Get a kernel virtual address for the U area for this process.
         */
        up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
        if (up == 0)
                panic("vm_proc_new: upage allocation failed");
        p->p_uarea = (struct user *)up;

        /*
         * Allocate object and page(s) for the U area.
         */
        upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
        p->p_upages_obj = upobj;
        VM_OBJECT_LOCK(upobj);
        for (i = 0; i < UAREA_PAGES; i++) {
                m = vm_page_grab(upobj, i,
                    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
                ma[i] = m;

                vm_page_lock_queues();
                vm_page_wakeup(m);
                m->valid = VM_PAGE_BITS_ALL;
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(upobj);

        /*
         * Enter the pages into the kernel address space.
         */
        pmap_qenter(up, ma, UAREA_PAGES);
}
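/*
 * The U area pages are allocated wired (VM_ALLOC_WIRED) and mapped with
 * pmap_qenter(), so the kernel may touch the u-area without faulting;
 * vm_proc_swapout() below is what later unwires and unmaps them.
 */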
/*
 * Dispose of the U area for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 * XXX proc_zone is marked UMA_ZONE_NOFREE, so this should never be called.
 */
void
vm_proc_dispose(struct proc *p)
{
        vm_object_t upobj;
        vm_offset_t up;
        vm_page_t m;

        upobj = p->p_upages_obj;
        VM_OBJECT_LOCK(upobj);
        if (upobj->resident_page_count != UAREA_PAGES)
                panic("vm_proc_dispose: incorrect number of pages in upobj");
        vm_page_lock_queues();
        while ((m = TAILQ_FIRST(&upobj->memq)) != NULL) {
                vm_page_busy(m);
                vm_page_unwire(m, 0);
                vm_page_free(m);
        }
        vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(upobj);
        up = (vm_offset_t)p->p_uarea;
        pmap_qremove(up, UAREA_PAGES);
        kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
        vm_object_deallocate(upobj);
}

#ifndef NO_SWAPPING
/*
 * Allow the U area for a process to be prejudicially paged out.
 */
static void
vm_proc_swapout(struct proc *p)
{
        vm_object_t upobj;
        vm_offset_t up;
        vm_page_t m;

        upobj = p->p_upages_obj;
        VM_OBJECT_LOCK(upobj);
        if (upobj->resident_page_count != UAREA_PAGES)
                panic("vm_proc_swapout: incorrect number of pages in upobj");
        vm_page_lock_queues();
        TAILQ_FOREACH(m, &upobj->memq, listq) {
                vm_page_dirty(m);
                vm_page_unwire(m, 0);
        }
        vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(upobj);
        up = (vm_offset_t)p->p_uarea;
        pmap_qremove(up, UAREA_PAGES);
}

/*
 * Bring the U area for a specified process back in.
 */
static void
vm_proc_swapin(struct proc *p)
{
        vm_page_t ma[UAREA_PAGES];
        vm_object_t upobj;
        vm_offset_t up;
        vm_page_t m;
        int rv;
        int i;

        upobj = p->p_upages_obj;
        VM_OBJECT_LOCK(upobj);
        for (i = 0; i < UAREA_PAGES; i++) {
                m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
                if (m->valid != VM_PAGE_BITS_ALL) {
                        rv = vm_pager_get_pages(upobj, &m, 1, 0);
                        if (rv != VM_PAGER_OK)
                                panic("vm_proc_swapin: cannot get upage");
                }
                ma[i] = m;
        }
        if (upobj->resident_page_count != UAREA_PAGES)
                panic("vm_proc_swapin: lost pages from upobj");
        vm_page_lock_queues();
        TAILQ_FOREACH(m, &upobj->memq, listq) {
                m->valid = VM_PAGE_BITS_ALL;
                vm_page_wire(m);
                vm_page_wakeup(m);
        }
        vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(upobj);
        up = (vm_offset_t)p->p_uarea;
        pmap_qenter(up, ma, UAREA_PAGES);
}
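/*
 * vm_proc_swapout() and vm_proc_swapin() form a pair: swapout dirties
 * and unwires the U area pages, making them eligible for pageout, and
 * tears down their kernel mappings; swapin grabs the pages back (paging
 * them in from the default pager if necessary), re-wires them, and
 * re-enters the mappings with pmap_qenter().
 */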
/*
 * Swap in the UAREAs of all processes swapped out to the given device.
 * The pages in the UAREA are marked dirty and their swap metadata is freed.
 */
void
vm_proc_swapin_all(struct swdevt *devidx)
{
        struct proc *p;
        vm_object_t object;
        vm_page_t m;

retry:
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                PROC_LOCK(p);
                object = p->p_upages_obj;
                if (object != NULL) {
                        VM_OBJECT_LOCK(object);
                        if (swap_pager_isswapped(object, devidx)) {
                                VM_OBJECT_UNLOCK(object);
                                sx_sunlock(&allproc_lock);
                                faultin(p);
                                PROC_UNLOCK(p);
                                VM_OBJECT_LOCK(object);
                                vm_page_lock_queues();
                                TAILQ_FOREACH(m, &object->memq, listq)
                                        vm_page_dirty(m);
                                vm_page_unlock_queues();
                                swap_pager_freespace(object, 0,
                                    object->un_pager.swp.swp_bcount);
                                VM_OBJECT_UNLOCK(object);
                                goto retry;
                        }
                        VM_OBJECT_UNLOCK(object);
                }
                PROC_UNLOCK(p);
        }
        sx_sunlock(&allproc_lock);
}
#endif

#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the fork perf for a process and the
 * create perf for a thread.
 */
void
vm_thread_new(struct thread *td, int pages)
{
        vm_object_t ksobj;
        vm_offset_t ks;
        vm_page_t m, ma[KSTACK_MAX_PAGES];
        int i;

        /* Bounds check */
        if (pages <= 1)
                pages = KSTACK_PAGES;
        else if (pages > KSTACK_MAX_PAGES)
                pages = KSTACK_MAX_PAGES;
        /*
         * Allocate an object for the kstack.
         */
        ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
        td->td_kstack_obj = ksobj;
        /*
         * Get a kernel virtual address for this thread's kstack.
         */
        ks = kmem_alloc_nofault(kernel_map,
            (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
        if (ks == 0)
                panic("vm_thread_new: kstack allocation failed");
        if (KSTACK_GUARD_PAGES != 0) {
                pmap_qremove(ks, KSTACK_GUARD_PAGES);
                ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
        }
        td->td_kstack = ks;
        /*
         * Knowing the number of pages allocated is useful when you
         * want to deallocate them.
         */
        td->td_kstack_pages = pages;
        /*
         * For the length of the stack, link in a real page of ram for each
         * page of stack.
         */
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                /*
                 * Get a kernel stack page.
                 */
                m = vm_page_grab(ksobj, i,
                    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
                ma[i] = m;
                vm_page_lock_queues();
                vm_page_wakeup(m);
                m->valid = VM_PAGE_BITS_ALL;
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(ksobj);
        pmap_qenter(ks, ma, pages);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
        vm_object_t ksobj;
        vm_offset_t ks;
        vm_page_t m;
        int i, pages;

        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        ks = td->td_kstack;
        pmap_qremove(ks, pages);
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(ksobj, i);
                if (m == NULL)
                        panic("vm_thread_dispose: kstack already missing?");
                vm_page_lock_queues();
                vm_page_busy(m);
                vm_page_unwire(m, 0);
                vm_page_free(m);
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(ksobj);
        vm_object_deallocate(ksobj);
        kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
            (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}
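/*
 * Note: vm_thread_new() leaves the KSTACK_GUARD_PAGES below td_kstack
 * unmapped, so a kernel stack overflow faults immediately instead of
 * silently corrupting adjacent kernel memory; vm_thread_dispose() above
 * accounts for the guard pages again when freeing the KVA range.
 */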
/*
 * Allow a thread's kernel stack to be paged out.
 */
void
vm_thread_swapout(struct thread *td)
{
        vm_object_t ksobj;
        vm_page_t m;
        int i, pages;

        cpu_thread_swapout(td);
        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        pmap_qremove(td->td_kstack, pages);
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_lookup(ksobj, i);
                if (m == NULL)
                        panic("vm_thread_swapout: kstack already missing?");
                vm_page_lock_queues();
                vm_page_dirty(m);
                vm_page_unwire(m, 0);
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
void
vm_thread_swapin(struct thread *td)
{
        vm_object_t ksobj;
        vm_page_t m, ma[KSTACK_MAX_PAGES];
        int i, pages, rv;

        pages = td->td_kstack_pages;
        ksobj = td->td_kstack_obj;
        VM_OBJECT_LOCK(ksobj);
        for (i = 0; i < pages; i++) {
                m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
                if (m->valid != VM_PAGE_BITS_ALL) {
                        rv = vm_pager_get_pages(ksobj, &m, 1, 0);
                        if (rv != VM_PAGER_OK)
                                panic("vm_thread_swapin: cannot get kstack for proc: %d",
                                    td->td_proc->p_pid);
                        m = vm_page_lookup(ksobj, i);
                        m->valid = VM_PAGE_BITS_ALL;
                }
                ma[i] = m;
                vm_page_lock_queues();
                vm_page_wire(m);
                vm_page_wakeup(m);
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(ksobj);
        pmap_qenter(td->td_kstack, ma, pages);
        cpu_thread_swapin(td);
}

/*
 * Set up a variable-sized alternate kstack.
 */
void
vm_thread_new_altkstack(struct thread *td, int pages)
{

        td->td_altkstack = td->td_kstack;
        td->td_altkstack_obj = td->td_kstack_obj;
        td->td_altkstack_pages = td->td_kstack_pages;

        vm_thread_new(td, pages);
}

/*
 * Restore the original kstack.
 */
void
vm_thread_dispose_altkstack(struct thread *td)
{

        vm_thread_dispose(td);

        td->td_kstack = td->td_altkstack;
        td->td_kstack_obj = td->td_altkstack_obj;
        td->td_kstack_pages = td->td_altkstack_pages;
        td->td_altkstack = 0;
        td->td_altkstack_obj = NULL;
        td->td_altkstack_pages = 0;
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_forkproc(td, p2, td2, flags)
        struct thread *td;
        struct proc *p2;
        struct thread *td2;
        int flags;
{
        struct proc *p1 = td->td_proc;
        struct user *up;

        GIANT_REQUIRED;

        if ((flags & RFPROC) == 0) {
                /*
                 * Divorce the memory, if it is shared, essentially
                 * this changes shared memory amongst threads, into
                 * COW locally.
                 */
                if ((flags & RFMEM) == 0) {
                        if (p1->p_vmspace->vm_refcnt > 1) {
                                vmspace_unshare(p1);
                        }
                }
                cpu_fork(td, p2, td2, flags);
                return;
        }
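        /*
         * With RFMEM the child shares the parent's vmspace by reference;
         * without it, vmspace_fork() below builds a copy-on-write copy.
         */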
        if (flags & RFMEM) {
                p2->p_vmspace = p1->p_vmspace;
                p1->p_vmspace->vm_refcnt++;
        }

        while (vm_page_count_severe()) {
                VM_WAIT;
        }

        if ((flags & RFMEM) == 0) {
                p2->p_vmspace = vmspace_fork(p1->p_vmspace);

                pmap_pinit2(vmspace_pmap(p2->p_vmspace));

                if (p1->p_vmspace->vm_shm)
                        shmfork(p1, p2);
        }

        /* XXXKSE this is unsatisfactory but should be adequate */
        up = p2->p_uarea;
        MPASS(p2->p_sigacts != NULL);

        /*
         * p_stats currently points at fields in the user struct
         * but not at &u, instead at p_addr.  Copy parts of
         * p_stats; zero the rest of p_stats (statistics).
         */
        p2->p_stats = &up->u_stats;
        bzero(&up->u_stats.pstat_startzero,
            (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
            (caddr_t) &up->u_stats.pstat_startzero));
        bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
            ((caddr_t) &up->u_stats.pstat_endcopy -
            (caddr_t) &up->u_stats.pstat_startcopy));

        /*
         * cpu_fork will copy and update the pcb, set up the kernel stack,
         * and make the child ready to run.
         */
        cpu_fork(td, p2, td2, flags);
}

/*
 * Called after process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
        struct proc *p;
{

        GIANT_REQUIRED;
        vmspace_exitfree(p);            /* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
        void *udata;
{
        struct proc *p = udata;
        struct plimit *limp;
        int rss_limit;

        /*
         * Set up the initial limits on process VM.  Set the maximum resident
         * set size to be half of (reasonably) available memory.  Since this
         * is a soft limit, it comes into effect only when the system is out
         * of memory - half of main memory helps to favor smaller processes,
         * and reduces thrashing of the object cache.
         */
        limp = p->p_limit;
        limp->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
        limp->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
        limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
        limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
        /* limit the limit to no less than 2MB */
        rss_limit = max(cnt.v_free_count, 512);
        limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
        limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

void
faultin(p)
        struct proc *p;
{
#ifdef NO_SWAPPING

        PROC_LOCK_ASSERT(p, MA_OWNED);
        if ((p->p_sflag & PS_INMEM) == 0)
                panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
        struct thread *td;

        GIANT_REQUIRED;
        PROC_LOCK_ASSERT(p, MA_OWNED);
        /*
         * If another process is swapping in this process,
         * just wait until it finishes.
         */
        if (p->p_sflag & PS_SWAPPINGIN)
                msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
        else if ((p->p_sflag & PS_INMEM) == 0) {
                /*
                 * Don't let another thread swap process p out while we are
                 * busy swapping it in.
                 */
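                /*
                 * p_lock is a hold count: while it is nonzero,
                 * swapout_procs() will not swap this process out, and
                 * PS_SWAPPINGIN makes concurrent faultin() callers sleep
                 * on p_sflag until the wakeup() below.
                 */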
                ++p->p_lock;
                mtx_lock_spin(&sched_lock);
                p->p_sflag |= PS_SWAPPINGIN;
                mtx_unlock_spin(&sched_lock);
                PROC_UNLOCK(p);

                vm_proc_swapin(p);
                FOREACH_THREAD_IN_PROC(p, td)
                        vm_thread_swapin(td);

                PROC_LOCK(p);
                mtx_lock_spin(&sched_lock);
                p->p_sflag &= ~PS_SWAPPINGIN;
                p->p_sflag |= PS_INMEM;
                FOREACH_THREAD_IN_PROC(p, td) {
                        TD_CLR_SWAPPED(td);
                        if (TD_CAN_RUN(td))
                                setrunnable(td);
                }
                mtx_unlock_spin(&sched_lock);

                wakeup(&p->p_sflag);

                /* Allow other threads to swap p out now. */
                --p->p_lock;
        }
#endif /* NO_SWAPPING */
}

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * XXXKSE - process with the thread with highest priority counts..
 *
 * Giant is still held at this point, to be released in tsleep.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
        void *dummy;
{
        struct proc *p;
        struct thread *td;
        int pri;
        struct proc *pp;
        int ppri;

        mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
        /* GIANT_REQUIRED */

loop:
        if (vm_page_count_min()) {
                VM_WAIT;
                goto loop;
        }

        pp = NULL;
        ppri = INT_MIN;
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                struct ksegrp *kg;
                if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
                        continue;
                }
                mtx_lock_spin(&sched_lock);
                FOREACH_THREAD_IN_PROC(p, td) {
                        /*
                         * An otherwise runnable thread of a process
                         * swapped out has only the TDI_SWAPPED bit set.
                         */
                        if (td->td_inhibitors == TDI_SWAPPED) {
                                kg = td->td_ksegrp;
                                pri = p->p_swtime + kg->kg_slptime;
                                if ((p->p_sflag & PS_SWAPINREQ) == 0) {
                                        pri -= kg->kg_nice * 8;
                                }

                                /*
                                 * if this ksegrp is higher priority
                                 * and there is enough space, then select
                                 * this process instead of the previous
                                 * selection.
                                 */
                                if (pri > ppri) {
                                        pp = p;
                                        ppri = pri;
                                }
                        }
                }
                mtx_unlock_spin(&sched_lock);
        }
        sx_sunlock(&allproc_lock);

        /*
         * Nothing to do, back to sleep.
         */
        if ((p = pp) == NULL) {
                tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
                goto loop;
        }
        PROC_LOCK(p);

        /*
         * Another process may be bringing or may have already
         * brought this process in while we traverse all threads.
         * Or, this process may even be being swapped out again.
         */
        if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
                PROC_UNLOCK(p);
                goto loop;
        }

        mtx_lock_spin(&sched_lock);
        p->p_sflag &= ~PS_SWAPINREQ;
        mtx_unlock_spin(&sched_lock);

        /*
         * We would like to bring someone in.  (only if there is space).
         * [What checks the space? ]
         */
        faultin(p);
        PROC_UNLOCK(p);
        mtx_lock_spin(&sched_lock);
        p->p_swtime = 0;
        mtx_unlock_spin(&sched_lock);
        goto loop;
}

#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");
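/*
 * Both thresholds are exported read-write, so they can be tuned at
 * run time, e.g. (illustrative values only):
 *
 *	sysctl vm.swap_idle_threshold1=5
 *	sysctl vm.swap_idle_threshold2=20
 */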
/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
int action;
{
        struct proc *p;
        struct thread *td;
        struct ksegrp *kg;
        int didswap = 0;

        GIANT_REQUIRED;

retry:
        sx_slock(&allproc_lock);
        FOREACH_PROC_IN_SYSTEM(p) {
                struct vmspace *vm;
                int minslptime = 100000;

                /*
                 * Watch out for a process in
                 * creation.  It may have no
                 * address space or lock yet.
                 */
                mtx_lock_spin(&sched_lock);
                if (p->p_state == PRS_NEW) {
                        mtx_unlock_spin(&sched_lock);
                        continue;
                }
                mtx_unlock_spin(&sched_lock);

                /*
                 * An aio daemon switches its
                 * address space while running.
                 * Perform a quick check whether
                 * a process has P_SYSTEM.
                 */
                if ((p->p_flag & P_SYSTEM) != 0)
                        continue;

                /*
                 * Do not swapout a process that
                 * is waiting for VM data
                 * structures as there is a possible
                 * deadlock.  Test this first as
                 * this may block.
                 *
                 * Lock the map until swapout
                 * finishes, or a thread of this
                 * process may attempt to alter
                 * the map.
                 */
                PROC_LOCK(p);
                vm = p->p_vmspace;
                KASSERT(vm != NULL,
                    ("swapout_procs: a process has no address space"));
                ++vm->vm_refcnt;
                PROC_UNLOCK(p);
                if (!vm_map_trylock(&vm->vm_map))
                        goto nextproc1;

                PROC_LOCK(p);
                if (p->p_lock != 0 ||
                    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
                    ) != 0) {
                        goto nextproc2;
                }
                /*
                 * only aiod changes vmspace, however it will be
                 * skipped because of the if statement above checking
                 * for P_SYSTEM
                 */
                if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) != PS_INMEM)
                        goto nextproc2;

                switch (p->p_state) {
                default:
                        /* Don't swap out processes in any sort
                         * of 'special' state. */
                        break;

                case PRS_NORMAL:
                        mtx_lock_spin(&sched_lock);
                        /*
                         * do not swapout a realtime process
                         * Check all the thread groups..
                         */
                        FOREACH_KSEGRP_IN_PROC(p, kg) {
                                if (PRI_IS_REALTIME(kg->kg_pri_class))
                                        goto nextproc;

                                /*
                                 * Guarantee swap_idle_threshold1
                                 * time in memory.
                                 */
                                if (kg->kg_slptime < swap_idle_threshold1)
                                        goto nextproc;

                                /*
                                 * Do not swapout a process if it is
                                 * waiting on a critical event of some
                                 * kind or there is a thread whose
                                 * pageable memory may be accessed.
                                 *
                                 * This could be refined to support
                                 * swapping out a thread.
                                 */
                                FOREACH_THREAD_IN_GROUP(kg, td) {
                                        if ((td->td_priority) < PSOCK ||
                                            !thread_safetoswapout(td))
                                                goto nextproc;
                                }
                                /*
                                 * If the system is under memory stress,
                                 * or if we are swapping
                                 * idle processes >= swap_idle_threshold2,
                                 * then swap the process out.
                                 */
                                if (((action & VM_SWAP_NORMAL) == 0) &&
                                    (((action & VM_SWAP_IDLE) == 0) ||
                                    (kg->kg_slptime < swap_idle_threshold2)))
                                        goto nextproc;

                                if (minslptime > kg->kg_slptime)
                                        minslptime = kg->kg_slptime;
                        }
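                        /*
                         * minslptime now holds the shortest sleep time of
                         * any ksegrp in the process; idle swapping only
                         * proceeds if even the most recently active group
                         * has been asleep past swap_idle_threshold2.
                         */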
                        /*
                         * If the process has been asleep for awhile and had
                         * most of its pages taken away already, swap it out.
                         */
                        if ((action & VM_SWAP_NORMAL) ||
                            ((action & VM_SWAP_IDLE) &&
                            (minslptime > swap_idle_threshold2))) {
                                swapout(p);
                                didswap++;
                                mtx_unlock_spin(&sched_lock);
                                PROC_UNLOCK(p);
                                vm_map_unlock(&vm->vm_map);
                                vmspace_free(vm);
                                sx_sunlock(&allproc_lock);
                                goto retry;
                        }
nextproc:
                        mtx_unlock_spin(&sched_lock);
                }
nextproc2:
                PROC_UNLOCK(p);
                vm_map_unlock(&vm->vm_map);
nextproc1:
                vmspace_free(vm);
                continue;
        }
        sx_sunlock(&allproc_lock);
        /*
         * If we swapped something out, and another process needed memory,
         * then wakeup the sched process.
         */
        if (didswap)
                wakeup(&proc0);
}

static void
swapout(p)
        struct proc *p;
{
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
        printf("swapping out %d\n", p->p_pid);
#endif

        /*
         * The states of this process and its threads may have changed
         * by now.  Assuming that there is only one pageout daemon thread,
         * this process should still be in memory.
         */
        KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) == PS_INMEM,
            ("swapout: lost a swapout race?"));

#if defined(INVARIANTS)
        /*
         * Make sure that all threads are safe to be swapped out.
         *
         * Alternatively, we could swap out only safe threads.
         */
        FOREACH_THREAD_IN_PROC(p, td) {
                KASSERT(thread_safetoswapout(td),
                    ("swapout: there is a thread not safe for swapout"));
        }
#endif /* INVARIANTS */

        ++p->p_stats->p_ru.ru_nswap;
        /*
         * remember the process resident count
         */
        p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

        p->p_sflag &= ~PS_INMEM;
        p->p_sflag |= PS_SWAPPINGOUT;
        PROC_UNLOCK(p);
        FOREACH_THREAD_IN_PROC(p, td)
                TD_SET_SWAPPED(td);
        mtx_unlock_spin(&sched_lock);

        vm_proc_swapout(p);
        FOREACH_THREAD_IN_PROC(p, td)
                vm_thread_swapout(td);

        PROC_LOCK(p);
        mtx_lock_spin(&sched_lock);
        p->p_sflag &= ~PS_SWAPPINGOUT;
        p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */