vm_glue.c revision 76778
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_glue.c 76778 2001-05-18 00:08:38Z jhb $
 */

#include "opt_rlimit.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

extern int maxslp;

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */

static void vm_init_limits __P((void *));
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler __P((void *));
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)


static void swapout __P((struct proc *));

int
kernacc(addr, len, rw)
        caddr_t addr;
        int len, rw;
{
        boolean_t rv;
        vm_offset_t saddr, eaddr;
        vm_prot_t prot;

        KASSERT((rw & (~VM_PROT_ALL)) == 0,
            ("illegal ``rw'' argument to kernacc (%x)\n", rw));
        prot = rw;
        saddr = trunc_page((vm_offset_t)addr);
        eaddr = round_page((vm_offset_t)addr + len);
        vm_map_lock_read(kernel_map);
        rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
        vm_map_unlock_read(kernel_map);
        return (rv == TRUE);
}

int
useracc(addr, len, rw)
        caddr_t addr;
        int len, rw;
{
        boolean_t rv;
        vm_prot_t prot;
        vm_map_t map;
        vm_map_entry_t save_hint;

        KASSERT((rw & (~VM_PROT_ALL)) == 0,
            ("illegal ``rw'' argument to useracc (%x)\n", rw));
        prot = rw;
        /*
         * XXX - check separately to disallow access to user area and user
         * page tables - they are in the map.
         *
         * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  It was once
         * only used (as an end address) in trap.c.  Use it as an end address
         * here too.  This bogusness has spread.  I just fixed where it was
         * used as a max in vm_mmap.c.
         */
        if ((vm_offset_t) addr + len > /* XXX */ VM_MAXUSER_ADDRESS
            || (vm_offset_t) addr + len < (vm_offset_t) addr) {
                return (FALSE);
        }
        map = &curproc->p_vmspace->vm_map;
        vm_map_lock_read(map);
        /*
         * We save the map hint, and restore it.  Useracc appears to distort
         * the map hint unnecessarily.
         */
        save_hint = map->hint;
        rv = vm_map_check_protection(map,
            trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len), prot);
        map->hint = save_hint;
        vm_map_unlock_read(map);

        return (rv == TRUE);
}

void
vslock(addr, len)
        caddr_t addr;
        u_int len;
{
        vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
            round_page((vm_offset_t)addr + len), FALSE);
}

void
vsunlock(addr, len)
        caddr_t addr;
        u_int len;
{
        vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page((vm_offset_t)addr),
            round_page((vm_offset_t)addr + len), TRUE);
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
void
vm_fork(p1, p2, flags)
        register struct proc *p1, *p2;
        int flags;
{
        register struct user *up;

        if ((flags & RFPROC) == 0) {
                /*
                 * Divorce the memory, if it is shared; essentially
                 * this changes shared memory amongst threads into
                 * COW locally.
                 */
                if ((flags & RFMEM) == 0) {
                        if (p1->p_vmspace->vm_refcnt > 1) {
                                vmspace_unshare(p1);
                        }
                }
                cpu_fork(p1, p2, flags);
                return;
        }

        if (flags & RFMEM) {
                p2->p_vmspace = p1->p_vmspace;
                p1->p_vmspace->vm_refcnt++;
        }

        while (vm_page_count_severe()) {
                VM_WAIT;
        }

        if ((flags & RFMEM) == 0) {
                p2->p_vmspace = vmspace_fork(p1->p_vmspace);

                pmap_pinit2(vmspace_pmap(p2->p_vmspace));

                if (p1->p_vmspace->vm_shm)
                        shmfork(p1, p2);
        }

        pmap_new_proc(p2);

        up = p2->p_addr;

        /*
         * p_stats currently points at fields in the user struct
         * but not at &u, instead at p_addr.  Copy parts of
         * p_stats; zero the rest of p_stats (statistics).
         *
         * If procsig->ps_refcnt is 1 and p2->p_sigacts is NULL we don't need
         * to share sigacts, so we use up->u_sigacts.
         */
        p2->p_stats = &up->u_stats;
        if (p2->p_sigacts == NULL) {
                if (p2->p_procsig->ps_refcnt != 1)
                        printf ("PID:%d NULL sigacts with refcnt not 1!\n",p2->p_pid);
                p2->p_sigacts = &up->u_sigacts;
                up->u_sigacts = *p1->p_sigacts;
        }

        bzero(&up->u_stats.pstat_startzero,
            (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
                (caddr_t) &up->u_stats.pstat_startzero));
        bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
            ((caddr_t) &up->u_stats.pstat_endcopy -
                (caddr_t) &up->u_stats.pstat_startcopy));


        /*
         * cpu_fork will copy and update the pcb, set up the kernel stack,
         * and make the child ready to run.
         */
        cpu_fork(p1, p2, flags);
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
        void *udata;
{
        register struct proc *p = udata;
        int rss_limit;

        /*
         * Set up the initial limits on process VM.  Set the maximum resident
         * set size to be half of (reasonably) available memory.  Since this
         * is a soft limit, it comes into effect only when the system is out
         * of memory - half of main memory helps to favor smaller processes,
         * and reduces thrashing of the object cache.
         */
        p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
        p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
        p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
        p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
        /* limit the limit to no less than 2MB */
        rss_limit = max(cnt.v_free_count, 512);
        p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
        p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

/*
 * Must be called with the proc struct mutex held.
 */
void
faultin(p)
        struct proc *p;
{

        PROC_LOCK_ASSERT(p, MA_OWNED);
        mtx_lock_spin(&sched_lock);
        if ((p->p_sflag & PS_INMEM) == 0) {

                ++p->p_lock;
                mtx_unlock_spin(&sched_lock);
                PROC_UNLOCK(p);

                mtx_assert(&Giant, MA_OWNED);
                pmap_swapin_proc(p);

                PROC_LOCK(p);
                mtx_lock_spin(&sched_lock);
                if (p->p_stat == SRUN) {
                        setrunqueue(p);
                }

                p->p_sflag |= PS_INMEM;

                /* undo the effect of setting SLOCK above */
                --p->p_lock;
        }
        mtx_unlock_spin(&sched_lock);
}

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 *
 * Giant is still held at this point, to be released in tsleep.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
        void *dummy;
{
        register struct proc *p;
        register int pri;
        struct proc *pp;
        int ppri;

        mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);

loop:
        if (vm_page_count_min()) {
                VM_WAIT;
                goto loop;
        }

        mtx_unlock(&Giant);
        pp = NULL;
        ppri = INT_MIN;
        sx_slock(&allproc_lock);
        LIST_FOREACH(p, &allproc, p_list) {
                mtx_lock_spin(&sched_lock);
                if (p->p_stat == SRUN &&
                    (p->p_sflag & (PS_INMEM | PS_SWAPPING)) == 0) {

                        pri = p->p_swtime + p->p_slptime;
                        if ((p->p_sflag & PS_SWAPINREQ) == 0) {
                                pri -= p->p_nice * 8;
                        }

                        /*
                         * if this process is higher priority and there is
                         * enough space, then select this process instead of
                         * the previous selection.
                         */
                        if (pri > ppri) {
                                pp = p;
                                ppri = pri;
                        }
                }
                mtx_unlock_spin(&sched_lock);
        }
        sx_sunlock(&allproc_lock);

        /*
         * Nothing to do, back to sleep.
         */
        if ((p = pp) == NULL) {
                tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
                mtx_lock(&Giant);
                goto loop;
        }
        mtx_lock_spin(&sched_lock);
        p->p_sflag &= ~PS_SWAPINREQ;
        mtx_unlock_spin(&sched_lock);

        /*
         * We would like to bring someone in. (only if there is space).
         */
        mtx_lock(&Giant);
        PROC_LOCK(p);
        faultin(p);
        PROC_UNLOCK(p);
        mtx_lock_spin(&sched_lock);
        p->p_swtime = 0;
        mtx_unlock_spin(&sched_lock);
        goto loop;
}

#ifndef NO_SWAPPING

/*
 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1,
        CTLFLAG_RW, &swap_idle_threshold1, 0, "");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2,
        CTLFLAG_RW, &swap_idle_threshold2, 0, "");

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs(action)
int action;
{
        register struct proc *p;
        struct proc *outp, *outp2;
        int outpri, outpri2;
        int didswap = 0;

        outp = outp2 = NULL;
        outpri = outpri2 = INT_MIN;
        sx_slock(&allproc_lock);
retry:
        LIST_FOREACH(p, &allproc, p_list) {
                struct vmspace *vm;

                PROC_LOCK(p);
                if (p->p_lock != 0 ||
                    (p->p_flag & (P_TRACED|P_SYSTEM|P_WEXIT)) != 0) {
                        PROC_UNLOCK(p);
                        continue;
                }
                vm = p->p_vmspace;
                mtx_lock_spin(&sched_lock);
                if ((p->p_sflag & (PS_INMEM|PS_SWAPPING)) != PS_INMEM) {
                        mtx_unlock_spin(&sched_lock);
                        PROC_UNLOCK(p);
                        continue;
                }

                switch (p->p_stat) {
                default:
                        mtx_unlock_spin(&sched_lock);
                        PROC_UNLOCK(p);
                        continue;

                case SSLEEP:
                case SSTOP:
                        /*
                         * do not swap out a realtime process
                         */
                        if (PRI_IS_REALTIME(p->p_pri.pri_class)) {
                                mtx_unlock_spin(&sched_lock);
                                PROC_UNLOCK(p);
                                continue;
                        }

                        /*
                         * Do not swap out a process waiting on a critical
                         * event of some kind.  Also guarantee swap_idle_threshold1
                         * time in memory.
                         */
                        if (((p->p_pri.pri_level) < PSOCK) ||
                            (p->p_slptime < swap_idle_threshold1)) {
                                mtx_unlock_spin(&sched_lock);
                                PROC_UNLOCK(p);
                                continue;
                        }

                        /*
                         * If the system is under memory stress, or if we are swapping
                         * idle processes >= swap_idle_threshold2, then swap the process
                         * out.
                         */
                        if (((action & VM_SWAP_NORMAL) == 0) &&
                            (((action & VM_SWAP_IDLE) == 0) ||
                            (p->p_slptime < swap_idle_threshold2))) {
                                mtx_unlock_spin(&sched_lock);
                                PROC_UNLOCK(p);
                                continue;
                        }
                        mtx_unlock_spin(&sched_lock);

#if 0
                        /*
                         * XXX: This is broken.  We release the lock we
                         * acquire before calling swapout, so we could
                         * still deadlock if another CPU locks this process'
                         * VM data structures after we release the lock but
                         * before we call swapout().
                         */
                        ++vm->vm_refcnt;
                        /*
                         * do not swap out a process that is waiting for VM
                         * data structures; there is a possible deadlock.
                         */
                        if (lockmgr(&vm->vm_map.lock,
                                LK_EXCLUSIVE | LK_NOWAIT,
                                (void *)0, curproc)) {
                                vmspace_free(vm);
                                PROC_UNLOCK(p);
                                continue;
                        }
                        vm_map_unlock(&vm->vm_map);
#endif
                        /*
                         * If the process has been asleep for a while and had
                         * most of its pages taken away already, swap it out.
                         */
                        if ((action & VM_SWAP_NORMAL) ||
                            ((action & VM_SWAP_IDLE) &&
                            (p->p_slptime > swap_idle_threshold2))) {
                                swapout(p);
                                vmspace_free(vm);
                                didswap++;
                                goto retry;
                        }
                        PROC_UNLOCK(p);
                }
        }
        sx_sunlock(&allproc_lock);
        /*
         * If we swapped something out, and another process needed memory,
         * then wakeup the sched process.
 */
        if (didswap)
                wakeup(&proc0);
}

static void
swapout(p)
        register struct proc *p;
{

        PROC_LOCK_ASSERT(p, MA_OWNED);
#if defined(SWAP_DEBUG)
        printf("swapping out %d\n", p->p_pid);
#endif
        ++p->p_stats->p_ru.ru_nswap;
        /*
         * remember the process resident count
         */
        p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

        mtx_lock_spin(&sched_lock);
        p->p_sflag &= ~PS_INMEM;
        p->p_sflag |= PS_SWAPPING;
        PROC_UNLOCK_NOSWITCH(p);
        if (p->p_stat == SRUN)
                remrunqueue(p);
        mtx_unlock_spin(&sched_lock);

        pmap_swapout_proc(p);

        mtx_lock_spin(&sched_lock);
        p->p_sflag &= ~PS_SWAPPING;
        p->p_swtime = 0;
        mtx_unlock_spin(&sched_lock);
}
#endif /* !NO_SWAPPING */
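
For reference, a typical consumer of the user-memory helpers defined above (useracc(), vslock(), vsunlock()) brackets its copy of a user buffer roughly as sketched below. This fragment is illustrative only and is not part of vm_glue.c: the function copy_user_buffer and its parameters are hypothetical, and it assumes a caller running in process context (so curproc is valid) together with the standard kernel copyin() routine.

/*
 * Hypothetical example -- not part of vm_glue.c.  Shows the usual
 * check / wire / copy / unwire pattern around a user-space buffer.
 */
static int
copy_user_buffer(caddr_t uaddr, u_int len, void *kbuf)
{
        int error;

        /* Verify that the user range is readable before touching it. */
        if (!useracc(uaddr, len, VM_PROT_READ))
                return (EFAULT);
        /* Wire the pages so they stay resident for the duration of the copy. */
        vslock(uaddr, len);
        error = copyin(uaddr, kbuf, len);
        /* Unwire the pages again. */
        vsunlock(uaddr, len);
        return (error);
}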