1/*- 2 * Copyright (c) 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * The Mach Operating System project at Carnegie-Mellon University. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 4. Neither the name of the University nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 * from: @(#)vm_glue.c 8.6 (Berkeley) 1/5/94 33 * 34 * 35 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 36 * All rights reserved. 37 * 38 * Permission to use, copy, modify and distribute this software and 39 * its documentation is hereby granted, provided that both the copyright 40 * notice and this permission notice appear in all copies of the 41 * software, derivative works or modified versions, and any portions 42 * thereof, and that both notices appear in supporting documentation. 43 * 44 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 45 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 46 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 47 * 48 * Carnegie Mellon requests users of this software to return to 49 * 50 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 51 * School of Computer Science 52 * Carnegie Mellon University 53 * Pittsburgh PA 15213-3890 54 * 55 * any improvements or extensions that they make and grant Carnegie the 56 * rights to redistribute these changes. 57 */ 58 59#include <sys/cdefs.h>
|
60__FBSDID("$FreeBSD: head/sys/vm/vm_glue.c 170307 2007-06-05 00:00:57Z jeff $");
| 60__FBSDID("$FreeBSD: head/sys/vm/vm_glue.c 172207 2007-09-17 05:31:39Z jeff $");
|
61 62#include "opt_vm.h" 63#include "opt_kstack_pages.h" 64#include "opt_kstack_max_pages.h" 65 66#include <sys/param.h> 67#include <sys/systm.h> 68#include <sys/limits.h> 69#include <sys/lock.h> 70#include <sys/mutex.h> 71#include <sys/proc.h> 72#include <sys/resourcevar.h> 73#include <sys/sched.h> 74#include <sys/sf_buf.h> 75#include <sys/shm.h> 76#include <sys/vmmeter.h> 77#include <sys/sx.h> 78#include <sys/sysctl.h> 79 80#include <sys/kernel.h> 81#include <sys/ktr.h> 82#include <sys/unistd.h> 83 84#include <vm/vm.h> 85#include <vm/vm_param.h> 86#include <vm/pmap.h> 87#include <vm/vm_map.h> 88#include <vm/vm_page.h> 89#include <vm/vm_pageout.h> 90#include <vm/vm_object.h> 91#include <vm/vm_kern.h> 92#include <vm/vm_extern.h> 93#include <vm/vm_pager.h> 94#include <vm/swap_pager.h> 95 96extern int maxslp; 97 98/* 99 * System initialization 100 * 101 * Note: proc0 from proc.h 102 */ 103static void vm_init_limits(void *); 104SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0) 105 106/* 107 * THIS MUST BE THE LAST INITIALIZATION ITEM!!! 108 * 109 * Note: run scheduling should be divorced from the vm system. 110 */ 111static void scheduler(void *); 112SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL) 113 114#ifndef NO_SWAPPING
|
115static void swapout(struct proc *);
| 115 static int swapout(struct proc *);
| 116 static void swapclear(struct proc *);
|
116#endif 117 118 119static volatile int proc0_rescan; 120 121 122/* 123 * MPSAFE 124 * 125 * WARNING! This code calls vm_map_check_protection() which only checks 126 * the associated vm_map_entry range. It does not determine whether the 127 * contents of the memory is actually readable or writable. In most cases 128 * just checking the vm_map_entry is sufficient within the kernel's address 129 * space. 130 */ 131int 132kernacc(addr, len, rw) 133 void *addr; 134 int len, rw; 135{ 136 boolean_t rv; 137 vm_offset_t saddr, eaddr; 138 vm_prot_t prot; 139 140 KASSERT((rw & ~VM_PROT_ALL) == 0, 141 ("illegal ``rw'' argument to kernacc (%x)\n", rw)); 142 143 if ((vm_offset_t)addr + len > kernel_map->max_offset || 144 (vm_offset_t)addr + len < (vm_offset_t)addr) 145 return (FALSE); 146 147 prot = rw; 148 saddr = trunc_page((vm_offset_t)addr); 149 eaddr = round_page((vm_offset_t)addr + len); 150 vm_map_lock_read(kernel_map); 151 rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot); 152 vm_map_unlock_read(kernel_map); 153 return (rv == TRUE); 154} 155 156/* 157 * MPSAFE 158 * 159 * WARNING! This code calls vm_map_check_protection() which only checks 160 * the associated vm_map_entry range. It does not determine whether the 161 * contents of the memory is actually readable or writable. vmapbuf(), 162 * vm_fault_quick(), or copyin()/copout()/su*()/fu*() functions should be 163 * used in conjuction with this call. 164 */ 165int 166useracc(addr, len, rw) 167 void *addr; 168 int len, rw; 169{ 170 boolean_t rv; 171 vm_prot_t prot; 172 vm_map_t map; 173 174 KASSERT((rw & ~VM_PROT_ALL) == 0, 175 ("illegal ``rw'' argument to useracc (%x)\n", rw)); 176 prot = rw; 177 map = &curproc->p_vmspace->vm_map; 178 if ((vm_offset_t)addr + len > vm_map_max(map) || 179 (vm_offset_t)addr + len < (vm_offset_t)addr) { 180 return (FALSE); 181 } 182 vm_map_lock_read(map); 183 rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr), 184 round_page((vm_offset_t)addr + len), prot); 185 vm_map_unlock_read(map); 186 return (rv == TRUE); 187} 188 189int 190vslock(void *addr, size_t len) 191{ 192 vm_offset_t end, last, start; 193 vm_size_t npages; 194 int error; 195 196 last = (vm_offset_t)addr + len; 197 start = trunc_page((vm_offset_t)addr); 198 end = round_page(last); 199 if (last < (vm_offset_t)addr || end < (vm_offset_t)addr) 200 return (EINVAL); 201 npages = atop(end - start); 202 if (npages > vm_page_max_wired) 203 return (ENOMEM); 204 PROC_LOCK(curproc); 205 if (ptoa(npages + 206 pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))) > 207 lim_cur(curproc, RLIMIT_MEMLOCK)) { 208 PROC_UNLOCK(curproc); 209 return (ENOMEM); 210 } 211 PROC_UNLOCK(curproc); 212#if 0 213 /* 214 * XXX - not yet 215 * 216 * The limit for transient usage of wired pages should be 217 * larger than for "permanent" wired pages (mlock()). 218 * 219 * Also, the sysctl code, which is the only present user 220 * of vslock(), does a hard loop on EAGAIN. 221 */ 222 if (npages + cnt.v_wire_count > vm_page_max_wired) 223 return (EAGAIN); 224#endif 225 error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end, 226 VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES); 227 /* 228 * Return EFAULT on error to match copy{in,out}() behaviour 229 * rather than returning ENOMEM like mlock() would. 230 */ 231 return (error == KERN_SUCCESS ? 0 : EFAULT); 232} 233 234void 235vsunlock(void *addr, size_t len) 236{ 237 238 /* Rely on the parameter sanity checks performed by vslock(). 
*/ 239 (void)vm_map_unwire(&curproc->p_vmspace->vm_map, 240 trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len), 241 VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES); 242} 243 244/* 245 * Pin the page contained within the given object at the given offset. If the 246 * page is not resident, allocate and load it using the given object's pager. 247 * Return the pinned page if successful; otherwise, return NULL. 248 */ 249static vm_page_t 250vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset) 251{ 252 vm_page_t m, ma[1]; 253 vm_pindex_t pindex; 254 int rv; 255 256 VM_OBJECT_LOCK(object); 257 pindex = OFF_TO_IDX(offset); 258 m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); 259 if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) { 260 ma[0] = m; 261 rv = vm_pager_get_pages(object, ma, 1, 0); 262 m = vm_page_lookup(object, pindex); 263 if (m == NULL) 264 goto out; 265 if (m->valid == 0 || rv != VM_PAGER_OK) { 266 vm_page_lock_queues(); 267 vm_page_free(m); 268 vm_page_unlock_queues(); 269 m = NULL; 270 goto out; 271 } 272 } 273 vm_page_lock_queues(); 274 vm_page_hold(m); 275 vm_page_unlock_queues(); 276 vm_page_wakeup(m); 277out: 278 VM_OBJECT_UNLOCK(object); 279 return (m); 280} 281 282/* 283 * Return a CPU private mapping to the page at the given offset within the 284 * given object. The page is pinned before it is mapped. 285 */ 286struct sf_buf * 287vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset) 288{ 289 vm_page_t m; 290 291 m = vm_imgact_hold_page(object, offset); 292 if (m == NULL) 293 return (NULL); 294 sched_pin(); 295 return (sf_buf_alloc(m, SFB_CPUPRIVATE)); 296} 297 298/* 299 * Destroy the given CPU private mapping and unpin the page that it mapped. 300 */ 301void 302vm_imgact_unmap_page(struct sf_buf *sf) 303{ 304 vm_page_t m; 305 306 m = sf_buf_page(sf); 307 sf_buf_free(sf); 308 sched_unpin(); 309 vm_page_lock_queues(); 310 vm_page_unhold(m); 311 vm_page_unlock_queues(); 312} 313 314#ifndef KSTACK_MAX_PAGES 315#define KSTACK_MAX_PAGES 32 316#endif 317 318/* 319 * Create the kernel stack (including pcb for i386) for a new thread. 320 * This routine directly affects the fork perf for a process and 321 * create performance for a thread. 322 */ 323void 324vm_thread_new(struct thread *td, int pages) 325{ 326 vm_object_t ksobj; 327 vm_offset_t ks; 328 vm_page_t m, ma[KSTACK_MAX_PAGES]; 329 int i; 330 331 /* Bounds check */ 332 if (pages <= 1) 333 pages = KSTACK_PAGES; 334 else if (pages > KSTACK_MAX_PAGES) 335 pages = KSTACK_MAX_PAGES; 336 /* 337 * Allocate an object for the kstack. 338 */ 339 ksobj = vm_object_allocate(OBJT_DEFAULT, pages); 340 td->td_kstack_obj = ksobj; 341 /* 342 * Get a kernel virtual address for this thread's kstack. 343 */ 344 ks = kmem_alloc_nofault(kernel_map, 345 (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE); 346 if (ks == 0) 347 panic("vm_thread_new: kstack allocation failed"); 348 if (KSTACK_GUARD_PAGES != 0) { 349 pmap_qremove(ks, KSTACK_GUARD_PAGES); 350 ks += KSTACK_GUARD_PAGES * PAGE_SIZE; 351 } 352 td->td_kstack = ks; 353 /* 354 * Knowing the number of pages allocated is useful when you 355 * want to deallocate them. 356 */ 357 td->td_kstack_pages = pages; 358 /* 359 * For the length of the stack, link in a real page of ram for each 360 * page of stack. 361 */ 362 VM_OBJECT_LOCK(ksobj); 363 for (i = 0; i < pages; i++) { 364 /* 365 * Get a kernel stack page. 
366 */ 367 m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY | 368 VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED); 369 ma[i] = m; 370 m->valid = VM_PAGE_BITS_ALL; 371 } 372 VM_OBJECT_UNLOCK(ksobj); 373 pmap_qenter(ks, ma, pages); 374} 375 376/* 377 * Dispose of a thread's kernel stack. 378 */ 379void 380vm_thread_dispose(struct thread *td) 381{ 382 vm_object_t ksobj; 383 vm_offset_t ks; 384 vm_page_t m; 385 int i, pages; 386 387 pages = td->td_kstack_pages; 388 ksobj = td->td_kstack_obj; 389 ks = td->td_kstack; 390 pmap_qremove(ks, pages); 391 VM_OBJECT_LOCK(ksobj); 392 for (i = 0; i < pages; i++) { 393 m = vm_page_lookup(ksobj, i); 394 if (m == NULL) 395 panic("vm_thread_dispose: kstack already missing?"); 396 vm_page_lock_queues(); 397 vm_page_unwire(m, 0); 398 vm_page_free(m); 399 vm_page_unlock_queues(); 400 } 401 VM_OBJECT_UNLOCK(ksobj); 402 vm_object_deallocate(ksobj); 403 kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE), 404 (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE); 405} 406 407/* 408 * Allow a thread's kernel stack to be paged out. 409 */ 410void 411vm_thread_swapout(struct thread *td) 412{ 413 vm_object_t ksobj; 414 vm_page_t m; 415 int i, pages; 416 417 cpu_thread_swapout(td); 418 pages = td->td_kstack_pages; 419 ksobj = td->td_kstack_obj; 420 pmap_qremove(td->td_kstack, pages); 421 VM_OBJECT_LOCK(ksobj); 422 for (i = 0; i < pages; i++) { 423 m = vm_page_lookup(ksobj, i); 424 if (m == NULL) 425 panic("vm_thread_swapout: kstack already missing?"); 426 vm_page_lock_queues(); 427 vm_page_dirty(m); 428 vm_page_unwire(m, 0); 429 vm_page_unlock_queues(); 430 } 431 VM_OBJECT_UNLOCK(ksobj); 432} 433 434/* 435 * Bring the kernel stack for a specified thread back in. 436 */ 437void 438vm_thread_swapin(struct thread *td) 439{ 440 vm_object_t ksobj; 441 vm_page_t m, ma[KSTACK_MAX_PAGES]; 442 int i, pages, rv; 443 444 pages = td->td_kstack_pages; 445 ksobj = td->td_kstack_obj; 446 VM_OBJECT_LOCK(ksobj); 447 for (i = 0; i < pages; i++) { 448 m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); 449 if (m->valid != VM_PAGE_BITS_ALL) { 450 rv = vm_pager_get_pages(ksobj, &m, 1, 0); 451 if (rv != VM_PAGER_OK) 452 panic("vm_thread_swapin: cannot get kstack for proc: %d", td->td_proc->p_pid); 453 m = vm_page_lookup(ksobj, i); 454 m->valid = VM_PAGE_BITS_ALL; 455 } 456 ma[i] = m; 457 vm_page_lock_queues(); 458 vm_page_wire(m); 459 vm_page_unlock_queues(); 460 vm_page_wakeup(m); 461 } 462 VM_OBJECT_UNLOCK(ksobj); 463 pmap_qenter(td->td_kstack, ma, pages); 464 cpu_thread_swapin(td); 465} 466 467/* 468 * Set up a variable-sized alternate kstack. 469 */ 470void 471vm_thread_new_altkstack(struct thread *td, int pages) 472{ 473 474 td->td_altkstack = td->td_kstack; 475 td->td_altkstack_obj = td->td_kstack_obj; 476 td->td_altkstack_pages = td->td_kstack_pages; 477 478 vm_thread_new(td, pages); 479} 480 481/* 482 * Restore the original kstack. 483 */ 484void 485vm_thread_dispose_altkstack(struct thread *td) 486{ 487 488 vm_thread_dispose(td); 489 490 td->td_kstack = td->td_altkstack; 491 td->td_kstack_obj = td->td_altkstack_obj; 492 td->td_kstack_pages = td->td_altkstack_pages; 493 td->td_altkstack = 0; 494 td->td_altkstack_obj = NULL; 495 td->td_altkstack_pages = 0; 496} 497 498/* 499 * Implement fork's actions on an address space. 500 * Here we arrange for the address space to be copied or referenced, 501 * allocate a user struct (pcb and kernel stack), then call the 502 * machine-dependent layer to fill those in and make the new process 503 * ready to run. 
The new process is set up so that it returns directly 504 * to user mode to avoid stack copying and relocation problems. 505 */ 506void 507vm_forkproc(td, p2, td2, flags) 508 struct thread *td; 509 struct proc *p2; 510 struct thread *td2; 511 int flags; 512{ 513 struct proc *p1 = td->td_proc; 514 515 if ((flags & RFPROC) == 0) { 516 /* 517 * Divorce the memory, if it is shared, essentially 518 * this changes shared memory amongst threads, into 519 * COW locally. 520 */ 521 if ((flags & RFMEM) == 0) { 522 if (p1->p_vmspace->vm_refcnt > 1) { 523 vmspace_unshare(p1); 524 } 525 } 526 cpu_fork(td, p2, td2, flags); 527 return; 528 } 529 530 if (flags & RFMEM) { 531 p2->p_vmspace = p1->p_vmspace; 532 atomic_add_int(&p1->p_vmspace->vm_refcnt, 1); 533 } 534 535 while (vm_page_count_severe()) { 536 VM_WAIT; 537 } 538 539 if ((flags & RFMEM) == 0) { 540 p2->p_vmspace = vmspace_fork(p1->p_vmspace); 541 if (p1->p_vmspace->vm_shm) 542 shmfork(p1, p2); 543 } 544 545 /* 546 * cpu_fork will copy and update the pcb, set up the kernel stack, 547 * and make the child ready to run. 548 */ 549 cpu_fork(td, p2, td2, flags); 550} 551 552/* 553 * Called after process has been wait(2)'ed apon and is being reaped. 554 * The idea is to reclaim resources that we could not reclaim while 555 * the process was still executing. 556 */ 557void 558vm_waitproc(p) 559 struct proc *p; 560{ 561 562 vmspace_exitfree(p); /* and clean-out the vmspace */ 563} 564 565/* 566 * Set default limits for VM system. 567 * Called for proc 0, and then inherited by all others. 568 * 569 * XXX should probably act directly on proc0. 570 */ 571static void 572vm_init_limits(udata) 573 void *udata; 574{ 575 struct proc *p = udata; 576 struct plimit *limp; 577 int rss_limit; 578 579 /* 580 * Set up the initial limits on process VM. Set the maximum resident 581 * set size to be half of (reasonably) available memory. Since this 582 * is a soft limit, it comes into effect only when the system is out 583 * of memory - half of main memory helps to favor smaller processes, 584 * and reduces thrashing of the object cache. 585 */ 586 limp = p->p_limit; 587 limp->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz; 588 limp->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz; 589 limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz; 590 limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz; 591 /* limit the limit to no less than 2MB */ 592 rss_limit = max(cnt.v_free_count, 512); 593 limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit); 594 limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY; 595} 596 597void 598faultin(p) 599 struct proc *p; 600{ 601#ifdef NO_SWAPPING 602 603 PROC_LOCK_ASSERT(p, MA_OWNED);
|
604 if ((p->p_sflag & PS_INMEM) == 0)
| 605 if ((p->p_flag & P_INMEM) == 0)
|
605 panic("faultin: proc swapped out with NO_SWAPPING!"); 606#else /* !NO_SWAPPING */ 607 struct thread *td; 608 609 PROC_LOCK_ASSERT(p, MA_OWNED); 610 /* 611 * If another process is swapping in this process, 612 * just wait until it finishes. 613 */
| 606 panic("faultin: proc swapped out with NO_SWAPPING!"); 607#else /* !NO_SWAPPING */ 608 struct thread *td; 609 610 PROC_LOCK_ASSERT(p, MA_OWNED); 611 /* 612 * If another process is swapping in this process, 613 * just wait until it finishes. 614 */
|
614	if (p->p_sflag & PS_SWAPPINGIN)
615		msleep(&p->p_sflag, &p->p_mtx, PVM, "faultin", 0);
616	else if ((p->p_sflag & PS_INMEM) == 0) {
| 615	if (p->p_flag & P_SWAPPINGIN) {
| 616		while (p->p_flag & P_SWAPPINGIN)
| 617			msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
| 618		return;
| 619	}
| 620	if ((p->p_flag & P_INMEM) == 0) {
|
617 /* 618 * Don't let another thread swap process p out while we are 619 * busy swapping it in. 620 */ 621 ++p->p_lock;
| 621 /* 622 * Don't let another thread swap process p out while we are 623 * busy swapping it in. 624 */ 625 ++p->p_lock;
|
622	PROC_SLOCK(p);
623	p->p_sflag |= PS_SWAPPINGIN;
624	PROC_SUNLOCK(p);
| 626 p->p_flag |= P_SWAPPINGIN;
|
625 PROC_UNLOCK(p); 626
| 627 PROC_UNLOCK(p); 628
|
| 629	/*
| 630	 * We hold no lock here because the list of threads
| 631	 * can not change while all threads in the process are
| 632	 * swapped out.
| 633	 */
|
627 FOREACH_THREAD_IN_PROC(p, td) 628 vm_thread_swapin(td);
| 634 FOREACH_THREAD_IN_PROC(p, td) 635 vm_thread_swapin(td);
|
629
| |
630 PROC_LOCK(p); 631 PROC_SLOCK(p);
| 636 PROC_LOCK(p); 637 PROC_SLOCK(p);
|
632	p->p_sflag &= ~PS_SWAPPINGIN;
633	p->p_sflag |= PS_INMEM;
634	FOREACH_THREAD_IN_PROC(p, td) {
635		thread_lock(td);
636		TD_CLR_SWAPPED(td);
637		if (TD_CAN_RUN(td))
638			setrunnable(td);
639		thread_unlock(td);
640	}
| 638	swapclear(p);
| 639	p->p_swtime = 0;
|
641 PROC_SUNLOCK(p); 642
| 640 PROC_SUNLOCK(p); 641
|
643 wakeup(&p->p_sflag);
| 642 wakeup(&p->p_flag);
|
644 645 /* Allow other threads to swap p out now. */ 646 --p->p_lock; 647 } 648#endif /* NO_SWAPPING */ 649} 650 651/* 652 * This swapin algorithm attempts to swap-in processes only if there 653 * is enough space for them. Of course, if a process waits for a long 654 * time, it will be swapped in anyway. 655 * 656 * XXXKSE - process with the thread with highest priority counts.. 657 * 658 * Giant is held on entry. 659 */ 660/* ARGSUSED*/ 661static void 662scheduler(dummy) 663 void *dummy; 664{ 665 struct proc *p; 666 struct thread *td; 667 int pri; 668 struct proc *pp; 669 int ppri; 670 671 mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED); 672 mtx_unlock(&Giant); 673 674loop: 675 if (vm_page_count_min()) { 676 VM_WAIT; 677 thread_lock(&thread0); 678 proc0_rescan = 0; 679 thread_unlock(&thread0); 680 goto loop; 681 } 682 683 pp = NULL; 684 ppri = INT_MIN; 685 sx_slock(&allproc_lock); 686 FOREACH_PROC_IN_SYSTEM(p) {
|
687 if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
| 686	PROC_LOCK(p);
| 687	if (p->p_flag & (P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) {
| 688		PROC_UNLOCK(p);
|
688 continue; 689 } 690 PROC_SLOCK(p); 691 FOREACH_THREAD_IN_PROC(p, td) { 692 /* 693 * An otherwise runnable thread of a process 694 * swapped out has only the TDI_SWAPPED bit set. 695 * 696 */ 697 thread_lock(td); 698 if (td->td_inhibitors == TDI_SWAPPED) { 699 pri = p->p_swtime + td->td_slptime;
|
700 if ((p->p_sflag & PS_SWAPINREQ) == 0) {
| 701 if ((td->td_flags & TDF_SWAPINREQ) == 0)
|
701 pri -= p->p_nice * 8;
| 702 pri -= p->p_nice * 8;
|
702 } 703
| |
704 /* 705 * if this thread is higher priority 706 * and there is enough space, then select 707 * this process instead of the previous 708 * selection. 709 */ 710 if (pri > ppri) { 711 pp = p; 712 ppri = pri; 713 } 714 } 715 thread_unlock(td); 716 } 717 PROC_SUNLOCK(p);
|
| 717 PROC_UNLOCK(p);
|
718 } 719 sx_sunlock(&allproc_lock); 720 721 /* 722 * Nothing to do, back to sleep. 723 */ 724 if ((p = pp) == NULL) { 725 thread_lock(&thread0); 726 if (!proc0_rescan) { 727 TD_SET_IWAIT(&thread0); 728 mi_switch(SW_VOL, NULL); 729 } 730 proc0_rescan = 0; 731 thread_unlock(&thread0); 732 goto loop; 733 } 734 PROC_LOCK(p); 735 736 /* 737 * Another process may be bringing or may have already 738 * brought this process in while we traverse all threads. 739 * Or, this process may even be being swapped out again. 740 */
|
741 if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
| 741 if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
|
742 PROC_UNLOCK(p); 743 thread_lock(&thread0); 744 proc0_rescan = 0; 745 thread_unlock(&thread0); 746 goto loop; 747 } 748
| 742 PROC_UNLOCK(p); 743 thread_lock(&thread0); 744 proc0_rescan = 0; 745 thread_unlock(&thread0); 746 goto loop; 747 } 748
|
749	PROC_SLOCK(p);
750	p->p_sflag &= ~PS_SWAPINREQ;
751	PROC_SUNLOCK(p);
752
| |
753 /* 754 * We would like to bring someone in. (only if there is space). 755 * [What checks the space? ] 756 */ 757 faultin(p); 758 PROC_UNLOCK(p);
| 749 /* 750 * We would like to bring someone in. (only if there is space). 751 * [What checks the space? ] 752 */ 753 faultin(p); 754 PROC_UNLOCK(p);
|
759	PROC_SLOCK(p);
760	p->p_swtime = 0;
761	PROC_SUNLOCK(p);
| |
762 thread_lock(&thread0); 763 proc0_rescan = 0; 764 thread_unlock(&thread0); 765 goto loop; 766} 767 768void kick_proc0(void) 769{ 770 struct thread *td = &thread0; 771 772 /* XXX This will probably cause a LOR in some cases */ 773 thread_lock(td); 774 if (TD_AWAITING_INTR(td)) { 775 CTR2(KTR_INTR, "%s: sched_add %d", __func__, 0); 776 TD_CLR_IWAIT(td); 777 sched_add(td, SRQ_INTR); 778 } else { 779 proc0_rescan = 1; 780 CTR2(KTR_INTR, "%s: state %d", 781 __func__, td->td_state); 782 } 783 thread_unlock(td); 784 785} 786 787 788#ifndef NO_SWAPPING 789 790/* 791 * Swap_idle_threshold1 is the guaranteed swapped in time for a process 792 */ 793static int swap_idle_threshold1 = 2; 794SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW, 795 &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process"); 796 797/* 798 * Swap_idle_threshold2 is the time that a process can be idle before 799 * it will be swapped out, if idle swapping is enabled. 800 */ 801static int swap_idle_threshold2 = 10; 802SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW, 803 &swap_idle_threshold2, 0, "Time before a process will be swapped out"); 804 805/* 806 * Swapout is driven by the pageout daemon. Very simple, we find eligible
|
807 * procs and unwire their u-areas. We try to always "swap" at least one
| 800 * procs and swap out their stacks. We try to always "swap" at least one
|
808 * process in case we need the room for a swapin. 809 * If any procs have been sleeping/stopped for at least maxslp seconds, 810 * they are swapped. Else, we swap the longest-sleeping or stopped process, 811 * if any, otherwise the longest-resident process. 812 */ 813void 814swapout_procs(action) 815int action; 816{ 817 struct proc *p; 818 struct thread *td; 819 int didswap = 0; 820 821retry: 822 sx_slock(&allproc_lock); 823 FOREACH_PROC_IN_SYSTEM(p) { 824 struct vmspace *vm; 825 int minslptime = 100000; 826 827 /* 828 * Watch out for a process in 829 * creation. It may have no 830 * address space or lock yet. 831 */
|
832	PROC_SLOCK(p);
833	if (p->p_state == PRS_NEW) {
834		PROC_SUNLOCK(p);
| 825 if (p->p_state == PRS_NEW)
|
835 continue;
| 826 continue;
|
836	}
837	PROC_SUNLOCK(p);
838
| |
839 /* 840 * An aio daemon switches its 841 * address space while running. 842 * Perform a quick check whether 843 * a process has P_SYSTEM. 844 */ 845 if ((p->p_flag & P_SYSTEM) != 0) 846 continue;
| 827 /* 828 * An aio daemon switches its 829 * address space while running. 830 * Perform a quick check whether 831 * a process has P_SYSTEM. 832 */ 833 if ((p->p_flag & P_SYSTEM) != 0) 834 continue;
|
847
| |
848 /* 849 * Do not swapout a process that 850 * is waiting for VM data 851 * structures as there is a possible 852 * deadlock. Test this first as 853 * this may block. 854 * 855 * Lock the map until swapout 856 * finishes, or a thread of this 857 * process may attempt to alter 858 * the map. 859 */ 860 vm = vmspace_acquire_ref(p); 861 if (vm == NULL) 862 continue; 863 if (!vm_map_trylock(&vm->vm_map)) 864 goto nextproc1; 865 866 PROC_LOCK(p); 867 if (p->p_lock != 0 || 868 (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT) 869 ) != 0) { 870 goto nextproc2; 871 } 872 /* 873 * only aiod changes vmspace, however it will be 874 * skipped because of the if statement above checking 875 * for P_SYSTEM 876 */
|
877 if ((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) != PS_INMEM)
| 864 if ((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) != P_INMEM)
|
878 goto nextproc2; 879 880 switch (p->p_state) { 881 default: 882 /* Don't swap out processes in any sort 883 * of 'special' state. */ 884 break; 885 886 case PRS_NORMAL: 887 PROC_SLOCK(p); 888 /* 889 * do not swapout a realtime process 890 * Check all the thread groups.. 891 */ 892 FOREACH_THREAD_IN_PROC(p, td) {
|
893 if (PRI_IS_REALTIME(td->td_pri_class))
| 880	thread_lock(td);
| 881	if (PRI_IS_REALTIME(td->td_pri_class)) {
| 882		thread_unlock(td);
|
894 goto nextproc;
| 883 goto nextproc;
|
| 884 }
|
895 896 /* 897 * Guarantee swap_idle_threshold1 898 * time in memory. 899 */
| 885 886 /* 887 * Guarantee swap_idle_threshold1 888 * time in memory. 889 */
|
900 if (td->td_slptime < swap_idle_threshold1)
| 890	if (td->td_slptime < swap_idle_threshold1) {
| 891		thread_unlock(td);
|
901 goto nextproc;
| 892 goto nextproc;
|
| 893 }
|
902 903 /* 904 * Do not swapout a process if it is 905 * waiting on a critical event of some 906 * kind or there is a thread whose 907 * pageable memory may be accessed. 908 * 909 * This could be refined to support 910 * swapping out a thread. 911 */ 912 if ((td->td_priority) < PSOCK ||
| 894 895 /* 896 * Do not swapout a process if it is 897 * waiting on a critical event of some 898 * kind or there is a thread whose 899 * pageable memory may be accessed. 900 * 901 * This could be refined to support 902 * swapping out a thread. 903 */ 904 if ((td->td_priority) < PSOCK ||
|
913 !thread_safetoswapout(td))
| 905	    !thread_safetoswapout(td)) {
| 906		thread_unlock(td);
|
914 goto nextproc;
| 907 goto nextproc;
|
| 908 }
|
915 /* 916 * If the system is under memory stress, 917 * or if we are swapping 918 * idle processes >= swap_idle_threshold2, 919 * then swap the process out. 920 */ 921 if (((action & VM_SWAP_NORMAL) == 0) && 922 (((action & VM_SWAP_IDLE) == 0) ||
| 909 /* 910 * If the system is under memory stress, 911 * or if we are swapping 912 * idle processes >= swap_idle_threshold2, 913 * then swap the process out. 914 */ 915 if (((action & VM_SWAP_NORMAL) == 0) && 916 (((action & VM_SWAP_IDLE) == 0) ||
|
923 (td->td_slptime < swap_idle_threshold2)))
| 917	    (td->td_slptime < swap_idle_threshold2))) {
| 918		thread_unlock(td);
|
924 goto nextproc;
| 919 goto nextproc;
|
| 920 }
|
925 926 if (minslptime > td->td_slptime) 927 minslptime = td->td_slptime;
| 921 922 if (minslptime > td->td_slptime) 923 minslptime = td->td_slptime;
|
| 924 thread_unlock(td);
|
928 } 929 930 /* 931 * If the pageout daemon didn't free enough pages, 932 * or if this process is idle and the system is 933 * configured to swap proactively, swap it out. 934 */ 935 if ((action & VM_SWAP_NORMAL) || 936 ((action & VM_SWAP_IDLE) && 937 (minslptime > swap_idle_threshold2))) {
| 925 } 926 927 /* 928 * If the pageout daemon didn't free enough pages, 929 * or if this process is idle and the system is 930 * configured to swap proactively, swap it out. 931 */ 932 if ((action & VM_SWAP_NORMAL) || 933 ((action & VM_SWAP_IDLE) && 934 (minslptime > swap_idle_threshold2))) {
|
938	swapout(p);
939	didswap++;
| 935	if (swapout(p) == 0)
| 936		didswap++;
|
940 PROC_SUNLOCK(p); 941 PROC_UNLOCK(p); 942 vm_map_unlock(&vm->vm_map); 943 vmspace_free(vm); 944 sx_sunlock(&allproc_lock); 945 goto retry; 946 } 947nextproc: 948 PROC_SUNLOCK(p); 949 } 950nextproc2: 951 PROC_UNLOCK(p); 952 vm_map_unlock(&vm->vm_map); 953nextproc1: 954 vmspace_free(vm); 955 continue; 956 } 957 sx_sunlock(&allproc_lock); 958 /* 959 * If we swapped something out, and another process needed memory, 960 * then wakeup the sched process. 961 */ 962 if (didswap) 963 wakeup(&proc0); 964} 965 966static void
|
| 964 swapclear(p)
| 965	struct proc *p;
| 966 {
| 967	struct thread *td;
| 968
| 969	PROC_LOCK_ASSERT(p, MA_OWNED);
| 970	PROC_SLOCK_ASSERT(p, MA_OWNED);
| 971
| 972	FOREACH_THREAD_IN_PROC(p, td) {
| 973		thread_lock(td);
| 974		td->td_flags |= TDF_INMEM;
| 975		td->td_flags &= ~TDF_SWAPINREQ;
| 976		TD_CLR_SWAPPED(td);
| 977		if (TD_CAN_RUN(td))
| 978			setrunnable(td);
| 979		thread_unlock(td);
| 980	}
| 981	p->p_flag &= ~(P_SWAPPINGIN|P_SWAPPINGOUT);
| 982	p->p_flag |= P_INMEM;
| 983 }
| 984
| 985 static int
|
967swapout(p) 968 struct proc *p; 969{ 970 struct thread *td; 971 972 PROC_LOCK_ASSERT(p, MA_OWNED);
| 986swapout(p) 987 struct proc *p; 988{ 989 struct thread *td; 990 991 PROC_LOCK_ASSERT(p, MA_OWNED);
|
973 mtx_assert(&p->p_slock, MA_OWNED | MA_NOTRECURSED);
| 992 PROC_SLOCK_ASSERT(p, MA_OWNED | MA_NOTRECURSED);
|
974#if defined(SWAP_DEBUG) 975 printf("swapping out %d\n", p->p_pid); 976#endif 977 978 /* 979 * The states of this process and its threads may have changed 980 * by now. Assuming that there is only one pageout daemon thread, 981 * this process should still be in memory. 982 */
| 993#if defined(SWAP_DEBUG) 994 printf("swapping out %d\n", p->p_pid); 995#endif 996 997 /* 998 * The states of this process and its threads may have changed 999 * by now. Assuming that there is only one pageout daemon thread, 1000 * this process should still be in memory. 1001 */
|
983 KASSERT((p->p_sflag & (PS_INMEM|PS_SWAPPINGOUT|PS_SWAPPINGIN)) == PS_INMEM,
| 1002 KASSERT((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) == P_INMEM,
|
984 ("swapout: lost a swapout race?")); 985
| 1003 ("swapout: lost a swapout race?")); 1004
|
986#if defined(INVARIANTS)
| |
987 /*
| 1005 /*
|
988	 * Make sure that all threads are safe to be swapped out.
989	 *
990	 * Alternatively, we could swap out only safe threads.
991	 */
992	FOREACH_THREAD_IN_PROC(p, td) {
993		KASSERT(thread_safetoswapout(td),
994		    ("swapout: there is a thread not safe for swapout"));
995	}
996 #endif /* INVARIANTS */
997	td = FIRST_THREAD_IN_PROC(p);
998	++td->td_ru.ru_nswap;
999	/*
| |
1000 * remember the process resident count 1001 */ 1002 p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
| 1006 * remember the process resident count 1007 */ 1008 p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
|
1003
1004	p->p_sflag &= ~PS_INMEM;
1005	p->p_sflag |= PS_SWAPPINGOUT;
1006	PROC_UNLOCK(p);
| 1009	/*
| 1010	 * Check and mark all threads before we proceed.
| 1011	 */
| 1012	p->p_flag &= ~P_INMEM;
| 1013	p->p_flag |= P_SWAPPINGOUT;
|
1007 FOREACH_THREAD_IN_PROC(p, td) { 1008 thread_lock(td);
| 1014 FOREACH_THREAD_IN_PROC(p, td) { 1015 thread_lock(td);
|
| 1016		if (!thread_safetoswapout(td)) {
| 1017			thread_unlock(td);
| 1018			swapclear(p);
| 1019			return (EBUSY);
| 1020		}
| 1021		td->td_flags &= ~TDF_INMEM;
|
1009 TD_SET_SWAPPED(td); 1010 thread_unlock(td); 1011 }
| 1022 TD_SET_SWAPPED(td); 1023 thread_unlock(td); 1024 }
|
| 1025	td = FIRST_THREAD_IN_PROC(p);
| 1026	++td->td_ru.ru_nswap;
|
1012 PROC_SUNLOCK(p);
| 1027 PROC_SUNLOCK(p);
|
| 1028 PROC_UNLOCK(p);
|
1013
| 1029
|
| 1030	/*
| 1031	 * This list is stable because all threads are now prevented from
| 1032	 * running. The list is only modified in the context of a running
| 1033	 * thread in this process.
| 1034	 */
|
1014 FOREACH_THREAD_IN_PROC(p, td) 1015 vm_thread_swapout(td); 1016 1017 PROC_LOCK(p);
| 1035 FOREACH_THREAD_IN_PROC(p, td) 1036 vm_thread_swapout(td); 1037 1038 PROC_LOCK(p);
|
| 1039 p->p_flag &= ~P_SWAPPINGOUT;
|
1018 PROC_SLOCK(p);
| 1040 PROC_SLOCK(p);
|
1019 p->p_sflag &= ~PS_SWAPPINGOUT;
| |
1020 p->p_swtime = 0;
| 1041 p->p_swtime = 0;
|
| 1042 return (0);
|
1021} 1022#endif /* !NO_SWAPPING */
| 1043} 1044#endif /* !NO_SWAPPING */
|