vm_glue.c revision 127008
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_glue.c 127008 2004-03-15 06:43:51Z truckman $");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <sys/user.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */
static void vm_init_limits(void *);
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler(void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL)

#ifndef NO_SWAPPING
static void swapout(struct proc *);
static void vm_proc_swapin(struct proc *p);
static void vm_proc_swapout(struct proc *p);
#endif

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));
	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory is actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(addr, len, rw)
	void *addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}

int
vslock(void *addr, size_t len)
{
	vm_offset_t end, start;
	int error, npages;

	start = trunc_page((vm_offset_t)addr);
	end = round_page((vm_offset_t)addr + len);
	if (end <= start)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_wired)
		return (ENOMEM);
	PROC_LOCK(curproc);
	if (npages + pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map)) >
	    atop(lim_cur(curproc, RLIMIT_MEMLOCK))) {
		PROC_UNLOCK(curproc);
		return (ENOMEM);
	}
	PROC_UNLOCK(curproc);
#if 0
	/*
	 * XXX - not yet
	 *
	 * The limit for transient usage of wired pages should be
	 * larger than for "permanent" wired pages (mlock()).
	 *
	 * Also, the sysctl code, which is the only present user
	 * of vslock(), does a hard loop on EAGAIN.
	 */
	if (npages + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#endif
	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (error == KERN_SUCCESS ? 0 : EFAULT);
}

void
vsunlock(void *addr, size_t len)
{

	/* Rely on the parameter sanity checks performed by vslock(). */
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}
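
/*
 * Usage sketch (illustrative only, not part of the original source): the
 * sysctl code, noted above as the only present caller of vslock(), wires a
 * user buffer around the copy so the copy cannot fault on pageable memory,
 * then unwires it.  The names "uaddr", "kbuf" and "len" below are made up
 * for the example:
 *
 *	error = vslock(uaddr, len);
 *	if (error == 0) {
 *		error = copyout(kbuf, uaddr, len);
 *		vsunlock(uaddr, len);
 *	}
 */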

/*
 * Create the U area for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
vm_proc_new(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	u_int i;

	/*
	 * Get a kernel virtual address for the U area for this process.
	 */
	up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
	if (up == 0)
		panic("vm_proc_new: upage allocation failed");
	p->p_uarea = (struct user *)up;

	/*
	 * Allocate object and page(s) for the U area.
	 */
	upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
	p->p_upages_obj = upobj;
	VM_OBJECT_LOCK(upobj);
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;

		vm_page_lock_queues();
		vm_page_wakeup(m);
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(upobj);

	/*
	 * Enter the pages into the kernel address space.
	 */
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Dispose the U area for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 * XXX proc_zone is marked UMA_ZONE_NOFREE, so this should never be called.
 */
void
vm_proc_dispose(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_dispose: incorrect number of pages in upobj");
	vm_page_lock_queues();
	while ((m = TAILQ_FIRST(&upobj->memq)) != NULL) {
		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
	kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
	vm_object_deallocate(upobj);
}

#ifndef NO_SWAPPING
/*
 * Allow the U area for a process to be prejudicially paged out.
 */
static void
vm_proc_swapout(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapout: incorrect number of pages in upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qremove(up, UAREA_PAGES);
}

/*
 * Bring the U area for a specified process back in.
 */
static void
vm_proc_swapin(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	int rv;
	int i;

	upobj = p->p_upages_obj;
	VM_OBJECT_LOCK(upobj);
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(upobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_proc_swapin: cannot get upage");
		}
		ma[i] = m;
	}
	if (upobj->resident_page_count != UAREA_PAGES)
		panic("vm_proc_swapin: lost pages from upobj");
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &upobj->memq, listq) {
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_wire(m);
		vm_page_wakeup(m);
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(upobj);
	up = (vm_offset_t)p->p_uarea;
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Swap in the UAREAs of all processes swapped out to the given device.
 * The pages in the UAREA are marked dirty and their swap metadata is freed.
 */
void
vm_proc_swapin_all(struct swdevt *devidx)
{
	struct proc *p;
	vm_object_t object;
	vm_page_t m;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		object = p->p_upages_obj;
		if (object != NULL) {
			VM_OBJECT_LOCK(object);
			if (swap_pager_isswapped(object, devidx)) {
				VM_OBJECT_UNLOCK(object);
				sx_sunlock(&allproc_lock);
				faultin(p);
				PROC_UNLOCK(p);
				VM_OBJECT_LOCK(object);
				vm_page_lock_queues();
				TAILQ_FOREACH(m, &object->memq, listq)
					vm_page_dirty(m);
				vm_page_unlock_queues();
				swap_pager_freespace(object, 0,
				    object->un_pager.swp.swp_bcount);
				VM_OBJECT_UNLOCK(object);
				goto retry;
			}
			VM_OBJECT_UNLOCK(object);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
}
#endif

#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif
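
/*
 * Layout sketch for the kernel stacks created below (an explanatory aid,
 * not a binding description): kmem_alloc_nofault() returns
 * (pages + KSTACK_GUARD_PAGES) pages of kernel virtual address space; the
 * low KSTACK_GUARD_PAGES pages are left unmapped as a guard region, and
 * td_kstack points just above them at the first of "pages" wired stack
 * pages entered with pmap_qenter().  A stack overflow therefore faults in
 * the guard region instead of silently corrupting adjacent kernel memory.
 */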

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the fork performance of a process and
 * the creation performance of a thread.
 */
void
vm_thread_new(struct thread *td, int pages)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i;

	/* Bounds check */
	if (pages <= 1)
		pages = KSTACK_PAGES;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;
	/*
	 * Allocate an object for the kstack.
	 */
	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
	td->td_kstack_obj = ksobj;
	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
	ks = kmem_alloc_nofault(kernel_map,
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
	if (ks == 0)
		panic("vm_thread_new: kstack allocation failed");
	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack = ks;
	/*
	 * Knowing the number of pages allocated is useful when you
	 * want to deallocate them.
	 */
	td->td_kstack_pages = pages;
	/*
	 * For the length of the stack, link in a real page of ram for each
	 * page of stack.
	 */
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		/*
		 * Get a kernel stack page.
		 */
		m = vm_page_grab(ksobj, i,
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
		ma[i] = m;
		vm_page_lock_queues();
		vm_page_wakeup(m);
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(ks, ma, pages);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m;
	int i, pages;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	pmap_qremove(ks, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_dispose: kstack already missing?");
		vm_page_lock_queues();
		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	vm_object_deallocate(ksobj);
	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Allow a thread's kernel stack to be paged out.
 */
void
vm_thread_swapout(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m;
	int i, pages;

	cpu_thread_swapout(td);
	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	pmap_qremove(td->td_kstack, pages);
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("vm_thread_swapout: kstack already missing?");
		vm_page_lock_queues();
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
void
vm_thread_swapin(struct thread *td)
{
	vm_object_t ksobj;
	vm_page_t m, ma[KSTACK_MAX_PAGES];
	int i, pages, rv;

	pages = td->td_kstack_pages;
	ksobj = td->td_kstack_obj;
	VM_OBJECT_LOCK(ksobj);
	for (i = 0; i < pages; i++) {
		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_thread_swapin: cannot get kstack for proc: %d", td->td_proc->p_pid);
			m = vm_page_lookup(ksobj, i);
			m->valid = VM_PAGE_BITS_ALL;
		}
		ma[i] = m;
		vm_page_lock_queues();
		vm_page_wire(m);
		vm_page_wakeup(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(ksobj);
	pmap_qenter(td->td_kstack, ma, pages);
	cpu_thread_swapin(td);
}

/*
 * Set up a variable-sized alternate kstack.
 */
void
vm_thread_new_altkstack(struct thread *td, int pages)
{

	td->td_altkstack = td->td_kstack;
	td->td_altkstack_obj = td->td_kstack_obj;
	td->td_altkstack_pages = td->td_kstack_pages;

	vm_thread_new(td, pages);
}

/*
 * Restore the original kstack.
 */
void
vm_thread_dispose_altkstack(struct thread *td)
{

	vm_thread_dispose(td);

	td->td_kstack = td->td_altkstack;
	td->td_kstack_obj = td->td_altkstack_obj;
	td->td_kstack_pages = td->td_altkstack_pages;
	td->td_altkstack = 0;
	td->td_altkstack_obj = NULL;
	td->td_altkstack_pages = 0;
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_forkproc(td, p2, td2, flags)
	struct thread *td;
	struct proc *p2;
	struct thread *td2;
	int flags;
{
	struct proc *p1 = td->td_proc;
	struct user *up;

	GIANT_REQUIRED;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared, essentially
		 * this changes shared memory amongst threads, into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(td, p2, td2, flags);
		return;
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		p1->p_vmspace->vm_refcnt++;
	}

	while (vm_page_count_severe()) {
		VM_WAIT;
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);
		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/* XXXKSE this is unsatisfactory but should be adequate */
	up = p2->p_uarea;
	MPASS(p2->p_sigacts != NULL);

	/*
	 * p_stats currently points at fields in the user struct
	 * but not at &u, instead at p_addr.  Copy parts of
	 * p_stats; zero the rest of p_stats (statistics).
	 */
	p2->p_stats = &up->u_stats;
	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
	    (caddr_t) &up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t) &up->u_stats.pstat_endcopy -
	    (caddr_t) &up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
}

/*
 * Called after process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(p)
	struct proc *p;
{

	GIANT_REQUIRED;
	vmspace_exitfree(p);		/* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	struct proc *p = udata;
	struct plimit *limp;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM.  Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	limp = p->p_limit;
	limp->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	limp->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the limit to no less than 2MB */
	rss_limit = max(cnt.v_free_count, 512);
	limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

void
faultin(p)
	struct proc *p;
{
#ifdef NO_SWAPPING

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((p->p_sflag & PS_INMEM) == 0)
		panic("faultin: proc swapped out with NO_SWAPPING!");
#else /* !NO_SWAPPING */
	struct thread *td;

	GIANT_REQUIRED;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_sflag & PS_SWAPPINGIN)
		msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
	else if ((p->p_sflag & PS_INMEM) == 0) {
		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		mtx_lock_spin(&sched_lock);
		p->p_sflag |= PS_SWAPPINGIN;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);

		vm_proc_swapin(p);
		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td);

		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		p->p_sflag &= ~PS_SWAPPINGIN;
		p->p_sflag |= PS_INMEM;
		FOREACH_THREAD_IN_PROC(p, td) {
			TD_CLR_SWAPPED(td);
			if (TD_CAN_RUN(td))
				setrunnable(td);
		}
		mtx_unlock_spin(&sched_lock);

		wakeup(&p->p_sflag);

		/* Allow other threads to swap p out now. */
		--p->p_lock;
	}
#endif /* NO_SWAPPING */
}
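
/*
 * Summary of the swap states used by faultin() above and by scheduler()
 * and swapout_procs() below (a reading aid only; the mechanism is entirely
 * in the surrounding code): a resident process has PS_INMEM set; swapout()
 * clears PS_INMEM and sets PS_SWAPPINGOUT while the U area and kernel
 * stacks are unwired; faultin() sets PS_SWAPPINGIN while they are paged
 * back in, then restores PS_INMEM and makes the threads runnable again.
 * PS_SWAPINREQ marks a swapped-out process for which a swapin has been
 * requested.
 */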

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * XXXKSE - process with the thread with highest priority counts..
 *
 * Giant is still held at this point, to be released in tsleep.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	struct proc *p;
	struct thread *td;
	int pri;
	struct proc *pp;
	int ppri;

	mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
	/* GIANT_REQUIRED */

loop:
	if (vm_page_count_min()) {
		VM_WAIT;
		goto loop;
	}

	pp = NULL;
	ppri = INT_MIN;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct ksegrp *kg;
		if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
			continue;
		}
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			if (td->td_inhibitors == TDI_SWAPPED) {
				kg = td->td_ksegrp;
				pri = p->p_swtime + kg->kg_slptime;
				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
					pri -= kg->kg_nice * 8;
				}

				/*
				 * if this ksegrp is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					pp = p;
					ppri = pri;
				}
			}
		}
		mtx_unlock_spin(&sched_lock);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
		goto loop;
	}
	PROC_LOCK(p);

	/*
	 * Another process may be bringing or may have already
	 * brought this process in while we traverse all threads.
	 * Or, this process may even be being swapped out again.
	 */
	if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
		PROC_UNLOCK(p);
		goto loop;
	}

	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPINREQ;
	mtx_unlock_spin(&sched_lock);

	/*
	 * We would like to bring someone in. (only if there is space).
	 * [What checks the space? ]
	 */
	faultin(p);
	PROC_UNLOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_swtime = 0;
	mtx_unlock_spin(&sched_lock);
	goto loop;
}

#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0, "Time before a process will be swapped out");

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
int action;
{
	struct proc *p;
	struct thread *td;
	struct ksegrp *kg;
	int didswap = 0;

	GIANT_REQUIRED;

retry:
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		struct vmspace *vm;
		int minslptime = 100000;

		/*
		 * Watch out for a process in
		 * creation.
		 * It may have no
		 * address space or lock yet.
		 */
		mtx_lock_spin(&sched_lock);
		if (p->p_state == PRS_NEW) {
			mtx_unlock_spin(&sched_lock);
			continue;
		}
		mtx_unlock_spin(&sched_lock);

		/*
		 * An aio daemon switches its
		 * address space while running.
		 * Perform a quick check whether
		 * a process has P_SYSTEM.
		 */
		if ((p->p_flag & P_SYSTEM) != 0)
			continue;

		/*
		 * Do not swapout a process that
		 * is waiting for VM data
		 * structures as there is a possible
		 * deadlock.  Test this first as
		 * this may block.
		 *
		 * Lock the map until swapout
		 * finishes, or a thread of this
		 * process may attempt to alter
		 * the map.
		 */
		PROC_LOCK(p);
		vm = p->p_vmspace;
		KASSERT(vm != NULL,
		    ("swapout_procs: a process has no address space"));
		++vm->vm_refcnt;
		PROC_UNLOCK(p);
		if (!vm_map_trylock(&vm->vm_map))
			goto nextproc1;

		PROC_LOCK(p);
		if (p->p_lock != 0 ||
		    (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
		    ) != 0) {
			goto nextproc2;
		}
		/*
		 * only aiod changes vmspace, however it will be
		 * skipped because of the if statement above checking
		 * for P_SYSTEM
		 */
		if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) != PS_INMEM)
			goto nextproc2;

		switch (p->p_state) {
		default:
			/* Don't swap out processes in any sort
			 * of 'special' state. */
			break;

		case PRS_NORMAL:
			mtx_lock_spin(&sched_lock);
			/*
			 * do not swapout a realtime process
			 * Check all the thread groups..
			 */
			FOREACH_KSEGRP_IN_PROC(p, kg) {
				if (PRI_IS_REALTIME(kg->kg_pri_class))
					goto nextproc;

				/*
				 * Guarantee swap_idle_threshold1
				 * time in memory.
				 */
				if (kg->kg_slptime < swap_idle_threshold1)
					goto nextproc;

				/*
				 * Do not swapout a process if it is
				 * waiting on a critical event of some
				 * kind or there is a thread whose
				 * pageable memory may be accessed.
				 *
				 * This could be refined to support
				 * swapping out a thread.
				 */
				FOREACH_THREAD_IN_GROUP(kg, td) {
					if ((td->td_priority) < PSOCK ||
					    !thread_safetoswapout(td))
						goto nextproc;
				}
				/*
				 * If the system is under memory stress,
				 * or if we are swapping
				 * idle processes >= swap_idle_threshold2,
				 * then swap the process out.
				 */
				if (((action & VM_SWAP_NORMAL) == 0) &&
				    (((action & VM_SWAP_IDLE) == 0) ||
				    (kg->kg_slptime < swap_idle_threshold2)))
					goto nextproc;

				if (minslptime > kg->kg_slptime)
					minslptime = kg->kg_slptime;
			}

			/*
			 * If the process has been asleep for awhile and had
			 * most of its pages taken away already, swap it out.
			 */
			if ((action & VM_SWAP_NORMAL) ||
			    ((action & VM_SWAP_IDLE) &&
			    (minslptime > swap_idle_threshold2))) {
				swapout(p);
				didswap++;
				mtx_unlock_spin(&sched_lock);
				PROC_UNLOCK(p);
				vm_map_unlock(&vm->vm_map);
				vmspace_free(vm);
				sx_sunlock(&allproc_lock);
				goto retry;
			}
nextproc:
			mtx_unlock_spin(&sched_lock);
		}
nextproc2:
		PROC_UNLOCK(p);
		vm_map_unlock(&vm->vm_map);
nextproc1:
		vmspace_free(vm);
		continue;
	}
	sx_sunlock(&allproc_lock);
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

static void
swapout(p)
	struct proc *p;
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
#if defined(SWAP_DEBUG)
	printf("swapping out %d\n", p->p_pid);
#endif

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) == PS_INMEM,
	    ("swapout: lost a swapout race?"));

#if defined(INVARIANTS)
	/*
	 * Make sure that all threads are safe to be swapped out.
	 *
	 * Alternatively, we could swap out only safe threads.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		KASSERT(thread_safetoswapout(td),
		    ("swapout: there is a thread not safe for swapout"));
	}
#endif /* INVARIANTS */

	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	p->p_sflag &= ~PS_INMEM;
	p->p_sflag |= PS_SWAPPINGOUT;
	PROC_UNLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td)
		TD_SET_SWAPPED(td);
	mtx_unlock_spin(&sched_lock);

	vm_proc_swapout(p);
	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	p->p_sflag &= ~PS_SWAPPINGOUT;
	p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */