vm_glue.c revision 136923
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_glue.c 136923 2004-10-24 18:46:32Z alc $");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <sys/user.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL)

#ifndef NO_SWAPPING
static void swapout(struct proc *);
static void vm_proc_swapin(struct proc *p);
static void vm_proc_swapout(struct proc *p);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}

int
vslock(void *addr, size_t len)
{
	vm_offset_t end, last, start;
	vm_size_t npages;
	int error;

	last = (vm_offset_t)addr + len;
	start = trunc_page((vm_offset_t)addr);
	end = round_page(last);
	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
	PROC_LOCK(curproc);
	if (ptoa(npages +
	    pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))) >
	    lim_cur(curproc, RLIMIT_MEMLOCK)) {
		PROC_UNLOCK(curproc);
		return (ENOMEM);
	}
	PROC_UNLOCK(curproc);
#if 0
	/*
	 * XXX - not yet
	 *
	 * The limit for transient usage of wired pages should be
	 * larger than for "permanent" wired pages (mlock()).
	 *
	 * Also, the sysctl code, which is the only present user
	 * of vslock(), does a hard loop on EAGAIN.
	 */
	if (npages + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#endif
	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (error == KERN_SUCCESS ? 0 : EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

	/* Rely on the parameter sanity checks performed by vslock(). */
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}

/*
 * Create the U area for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
vm_proc_new(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	u_int i;

	/*
	 * Get a kernel virtual address for the U area for this process.
	 */
	up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
	if (up == 0)
		panic("vm_proc_new: upage allocation failed");
	p->p_uarea = (struct user *)up;

	/*
	 * Allocate object and page(s) for the U area.
	 */
	upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
	p->p_upages_obj = upobj;
	VM_OBJECT_LOCK(upobj);
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i, VM_ALLOC_NOBUSY |
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;
		m->valid = VM_PAGE_BITS_ALL;
	}
	VM_OBJECT_UNLOCK(upobj);

	/*
	 * Enter the pages into the kernel address space.
	 */
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Dispose the U area for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 *
 * XXX UNUSED
 * U areas of free proc structures are no longer freed and are never
 * swapped out.
 * Ideally we would free U areas lazily, when low on memory.
 */
void
vm_proc_dispose(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_dispose: incorrect number of pages in upobj");
	vm_page_lock_queues();
	while ((m = TAILQ_FIRST(&upobj->memq)) != NULL) {
		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
	kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
	vm_object_deallocate(upobj);
}

#ifndef NO_SWAPPING
/*
 * Allow the U area for a process to be prejudicially paged out.
 */
static void
vm_proc_swapout(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapout: incorrect number of pages in upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
}

/*
 * Bring the U area for a specified process back in.
 */
static void
vm_proc_swapin(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	int rv;
	int i;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(upobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_proc_swapin: cannot get upage");
		}
		ma[i] = m;
	}
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapin: lost pages from upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_wire(m);
		vm_page_wakeup(m);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Swap in the UAREAs of all processes swapped out to the given device.
 * The pages in the UAREA are marked dirty and their swap metadata is freed.
 */
void
vm_proc_swapin_all(struct swdevt *devidx)
{
	struct proc *p;
	vm_object_t object;
	vm_page_t m;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		object = p->p_upages_obj;
		if (object != NULL) {
			VM_OBJECT_LOCK(object);
			if (swap_pager_isswapped(object, devidx)) {
				VM_OBJECT_UNLOCK(object);
				sx_sunlock(&allproc_lock);
				faultin(p);
				PROC_UNLOCK(p);
				VM_OBJECT_LOCK(object);
				vm_page_lock_queues();
				TAILQ_FOREACH(m, &object->memq, listq)
					vm_page_dirty(m);
				vm_page_unlock_queues();
				swap_pager_freespace(object, 0,
				    object->un_pager.swp.swp_bcount);
				VM_OBJECT_UNLOCK(object);
				goto retry;
			}
			VM_OBJECT_UNLOCK(object);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
}
#endif

#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the fork perf for a process and
 * creation performance for a thread.
 */
void
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i;

	/* Bounds check */
	if (pages <= 1)
		pages = KSTACK_PAGES;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;
	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
	td->td_kstack_obj = ksobj;
	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
	ks = kmem_alloc_nofault(kernel_map,
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
	if (ks == 0)
		panic("vm_thread_new: kstack allocation failed");
	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack = ks;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
	/*
	 * For the length of the stack, link in a real page of ram for each
	 * page of stack.
	 */
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		/*
		 * Get a kernel stack page.
		 */
		m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;
		m->valid = VM_PAGE_BITS_ALL;
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m;
	int i, pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	pmap_qremove(ks, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_dispose: kstack already missing?");
		vm_page_lock_queues();
		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Allow a thread's kernel stack to be paged out.
 */
void
vm_thread_swapout(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m;
	int i, pages;

	cpu_thread_swapout(td);
	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	pmap_qremove(td->td_kstack, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_swapout: kstack already missing?");
		vm_page_lock_queues();
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
void
vm_thread_swapin(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i, pages, rv;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_thread_swapin: cannot get kstack for proc: %d", td->td_proc->p_pid);
			m = vm_page_lookup(ksobj, i);
			m->valid = VM_PAGE_BITS_ALL;
		}
		ma[i] = m;
		vm_page_lock_queues();
		vm_page_wire(m);
		vm_page_wakeup(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(td->td_kstack, ma, pages);
	cpu_thread_swapin(td);
}

/*
 * Set up a variable-sized alternate kstack.
 */
void
vm_thread_new_altkstack(struct thread *td, int pages)
{

	td->td_altkstack = td->td_kstack;
	td->td_altkstack_obj = td->td_kstack_obj;
	td->td_altkstack_pages = td->td_kstack_pages;

	vm_thread_new(td, pages);
}

/*
 * Restore the original kstack.
 */
void
vm_thread_dispose_altkstack(struct thread *td)
{

	vm_thread_dispose(td);

	td->td_kstack = td->td_altkstack;
	td->td_kstack_obj = td->td_altkstack_obj;
	td->td_kstack_pages = td->td_altkstack_pages;
	td->td_altkstack = 0;
	td->td_altkstack_obj = NULL;
	td->td_altkstack_pages = 0;
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_forkproc(td, p2, td2, flags)
	struct thread *td;
	struct proc *p2;
	struct thread *td2;
	int flags;
{
	struct proc *p1 = td->td_proc;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared, essentially
		 * this changes shared memory amongst threads, into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return;
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);
		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/*
	 * p_stats currently points at fields in the user struct.
	 * Copy parts of p_stats; zero the rest of p_stats (statistics).
	 */
#define	RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

	p2->p_stats = &p2->p_uarea->u_stats;
	bzero(&p2->p_stats->pstat_startzero,
	    (unsigned) RANGEOF(struct pstats, pstat_startzero, pstat_endzero));
	bcopy(&p1->p_stats->pstat_startcopy, &p2->p_stats->pstat_startcopy,
	    (unsigned) RANGEOF(struct pstats, pstat_startcopy, pstat_endcopy));
#undef RANGEOF

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
}

/*
 * Called after process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
	struct proc *p;
{

	vmspace_exitfree(p);		/* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	struct proc *p = udata;
	struct plimit *limp;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM. Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	limp = p->p_limit;
	limp->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	limp->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the limit to no less than 2MB */
	rss_limit = max(cnt.v_free_count, 512);
	limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

void
faultin(p)
	struct proc *p;
{
#ifdef NO_SWAPPING

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_sflag & PS_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
	struct thread *td;

	GIANT_REQUIRED;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_sflag & PS_SWAPPINGIN)
		msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
	else if ((p->p_sflag & PS_INMEM) == 0) {
		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		mtx_lock_spin(&sched_lock);
		p->p_sflag |= PS_SWAPPINGIN;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);

		vm_proc_swapin(p);
		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td);

		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		p->p_sflag &= ~PS_SWAPPINGIN;
		p->p_sflag |= PS_INMEM;
		FOREACH_THREAD_IN_PROC(p, td) {
			TD_CLR_SWAPPED(td);
			if (TD_CAN_RUN(td))
				setrunnable(td);
		}
		mtx_unlock_spin(&sched_lock);

		wakeup(&p->p_sflag);

		/* Allow other threads to swap p out now. */
		--p->p_lock;
	}
#endif /* NO_SWAPPING */
}

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * XXXKSE - process with the thread with highest priority counts..
 *
 * Giant is still held at this point, to be released in tsleep.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	struct proc *p;
	struct thread *td;
	int pri;
	struct proc *pp;
	int ppri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	/* GIANT_REQUIRED */

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct ksegrp *kg;
		if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
			continue;
		}
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 *
			 */
			if (td->td_inhibitors == TDI_SWAPPED) {
				kg = td->td_ksegrp;
				pri = p->p_swtime + kg->kg_slptime;
				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
					pri -= p->p_nice * 8;
				}

				/*
				 * if this ksegrp is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
		}
		mtx_unlock_spin(&sched_lock);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
		goto loop;
	}
	PROC_LOCK(p);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
		PROC_UNLOCK(p);
		goto loop;
	}

	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPINREQ;
	mtx_unlock_spin(&sched_lock);

	/*
	 * We would like to bring someone in. (only if there is space).
	 * [What checks the space? ]
	 */
	faultin(p);
	PROC_UNLOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_swtime = 0;
	mtx_unlock_spin(&sched_lock);
	goto loop;
}

#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
int action;
{
	struct proc *p;
	struct thread *td;
	struct ksegrp *kg;
	int didswap = 0;

	GIANT_REQUIRED;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;

		/*
		 * Watch out for a process in
		 * creation.  It may have no
		 * address space or lock yet.
		 */
		mtx_lock_spin(&sched_lock);
		if (p->p_state == PRS_NEW) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}
		mtx_unlock_spin(&sched_lock);

		/*
		 * An aio daemon switches its
		 * address space while running.
		 * Perform a quick check whether
		 * a process has P_SYSTEM.
		 */
		if ((p->p_flag & P_SYSTEM) != 0)
			continue;

		/*
		 * Do not swapout a process that
		 * is waiting for VM data
		 * structures as there is a possible
		 * deadlock.  Test this first as
		 * this may block.
		 *
		 * Lock the map until swapout
		 * finishes, or a thread of this
		 * process may attempt to alter
		 * the map.
		 */
		PROC_LOCK(p);
		vm = p->p_vmspace;
		KASSERT(vm != NULL,
		    ("swapout_procs: a process has no address space"));
		atomic_add_int(&vm->vm_refcnt, 1);
		PROC_UNLOCK(p);
		if (!vm_map_trylock(&vm->vm_map))
			goto nextproc1;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
		    ) != 0) {
			goto nextproc2;
		}
		/*
		 * only aiod changes vmspace, however it will be
		 * skipped because of the if statement above checking
		 * for P_SYSTEM
		 */
		if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) != PS_INMEM)
			goto nextproc2;

		switch (p->p_state) {
		default:
			/* Don't swap out processes in any sort
			 * of 'special' state. */
			break;

		case PRS_NORMAL:
			mtx_lock_spin(&sched_lock);
			/*
			 * do not swapout a realtime process
			 * Check all the thread groups..
			 */
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				if (PRI_IS_REALTIME(kg->kg_pri_class))
					goto nextproc;

				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (kg->kg_slptime < swap_idle_threshold1)
					goto nextproc;

				/*
				 * Do not swapout a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				FOREACH_THREAD_IN_GROUP(kg, td) {
					if ((td->td_priority) < PSOCK ||
					    !thread_safetoswapout(td))
						goto nextproc;
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (kg->kg_slptime < swap_idle_threshold2)))
					goto nextproc;

				if (minslptime > kg->kg_slptime)
					minslptime = kg->kg_slptime;
			}

			/*
			 * If the pageout daemon didn't free enough pages,
			 * or if this process is idle and the system is
			 * configured to swap proactively, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
			    ((action & VM_SWAP_IDLE) &&
			    (minslptime > swap_idle_threshold2))) {
				swapout(p);
				didswap++;
				mtx_unlock_spin(&sched_lock);
				PROC_UNLOCK(p);
				vm_map_unlock(&vm->vm_map);
				vmspace_free(vm);
				sx_sunlock(&allproc_lock);
				goto retry;
			}
nextproc:
			mtx_unlock_spin(&sched_lock);
		}
nextproc2:
		PROC_UNLOCK(p);
		vm_map_unlock(&vm->vm_map);
nextproc1:
		vmspace_free(vm);
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

static void
swapout(p)
	struct proc *p;
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) == PS_INMEM,
	    ("swapout: lost a swapout race?"));

#if defined(INVARIANTS)
	/*
	 * Make sure that all threads are safe to be swapped out.
	 *
	 * Alternatively, we could swap out only safe threads.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(thread_safetoswapout(td),
		    ("swapout: there is a thread not safe for swapout"));
	}
#endif /* INVARIANTS */

	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	p->p_sflag &= ~PS_INMEM;
	p->p_sflag |= PS_SWAPPINGOUT;
	PROC_UNLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td)
		TD_SET_SWAPPED(td);
	mtx_unlock_spin(&sched_lock);

	vm_proc_swapout(p);
	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPPINGOUT;
	p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */
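
Editor's note: the disabled block in vslock() above observes that the sysctl code is the only present caller of vslock(). The sketch below, which is not part of vm_glue.c and uses the hypothetical helper name copy_out_wired(), illustrates that usage pattern under the kernel APIs this file relies on: wire the user pages, perform the copy while it cannot stall on a page-out, then unwire.

/*
 * Minimal usage sketch (assumption: a caller in the style of the sysctl
 * code described in vslock()'s comment; copy_out_wired() is hypothetical).
 */
static int
copy_out_wired(const void *kaddr, void *uaddr, size_t len)
{
	int error;

	/* Wire the user pages; vslock() returns EINVAL, ENOMEM, or EFAULT. */
	error = vslock(uaddr, len);
	if (error != 0)
		return (error);
	/* The copy cannot be delayed by a page-out while the range is wired. */
	error = copyout(kaddr, uaddr, len);
	/* Unwire; vsunlock() relies on the checks vslock() already performed. */
	vsunlock(uaddr, len);
	return (error);
}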