vm_glue.c revision 3449
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_glue.c,v 1.7 1994/09/12 15:06:12 davidg Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/user.h>

#include <sys/kernel.h>
#include <sys/dkstat.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>

#include <machine/stdarg.h>
#include <machine/cpu.h>

extern char kstack[];
int avefree = 0;		/* XXX */
int readbuffers = 0;		/* XXX allow kgdb to read kernel buffer pool */
/* vm_map_t upages_map; */

int
kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page(addr);
	eaddr = round_page(addr + len);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	return (rv == TRUE);
}

int
useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	/*
	 * XXX - check separately to disallow access to the user area and
	 * user page tables - they are in the map.
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  It was
	 * once only used (as an end address) in trap.c.  Use it as an end
	 * address here too.  This bogusness has spread.  I just fixed
	 * where it was used as a max in vm_mmap.c.
	 */
	if ((vm_offset_t) addr + len > /* XXX */ VM_MAXUSER_ADDRESS
	    || (vm_offset_t) addr + len < (vm_offset_t) addr) {
		return (FALSE);
	}

	rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
	    trunc_page(addr), round_page(addr + len), prot);
	return (rv == TRUE);
}

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so a debugger can plant a breakpoint).
 * All addresses are assumed to reside in the Sysmap.
 */
chgkprot(addr, len, rw)
	register caddr_t addr;
	int len, rw;
{
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	vm_map_protect(kernel_map, trunc_page(addr),
	    round_page(addr + len), prot, FALSE);
}
#endif

void
vslock(addr, len)
	caddr_t addr;
	u_int len;
{
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
	    round_page(addr + len), FALSE);
}

void
vsunlock(addr, len, dirtied)
	caddr_t addr;
	u_int len;
	int dirtied;
{
#ifdef lint
	dirtied++;
#endif /* lint */
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
	    round_page(addr + len), TRUE);
}
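
/*
 * Usage sketch (illustrative only, not part of this file): physio-style
 * raw I/O wires the user's buffer around a transfer so it cannot be
 * paged out while the device is busy with it.  `base' and `len' below
 * are hypothetical driver variables:
 *
 *	if (!useracc(base, len, B_WRITE))	(B_READ or B_WRITE,
 *		return (EFAULT);		 per transfer direction)
 *	vslock(base, len);
 *	... start the device transfer, tsleep() until it completes ...
 *	vsunlock(base, len, 1);			(1 == pages were dirtied)
 */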
/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.
 * NOTE: the kernel stack may be at a different location in the child
 * process, and thus addresses of automatic variables may be invalid
 * after cpu_fork returns in the child process.  We do nothing here
 * after cpu_fork returns.
 */
int
vm_fork(p1, p2, isvfork)
	register struct proc *p1, *p2;
	int isvfork;
{
	register struct user *up;
	vm_offset_t addr, ptaddr;
	int i;
	struct vm_map *vp;

	while (cnt.v_free_count < cnt.v_free_min)
		VM_WAIT;

	/*
	 * Avoid copying any of the parent's pagetables or other per-process
	 * objects that reside in the map by marking all of them
	 * non-inheritable.
	 */
	(void) vm_map_inherit(&p1->p_vmspace->vm_map,
	    UPT_MIN_ADDRESS - UPAGES * NBPG, VM_MAX_ADDRESS, VM_INHERIT_NONE);
	p2->p_vmspace = vmspace_fork(p1->p_vmspace);

#ifdef SYSVSHM
	if (p1->p_vmspace->vm_shm)
		shmfork(p1, p2, isvfork);
#endif

	/*
	 * Allocate a wired-down (for now) pcb and kernel stack for the
	 * process.
	 */
	addr = (vm_offset_t) kstack;

	vp = &p2->p_vmspace->vm_map;

	/* ream out old pagetables and kernel stack */
	(void) vm_deallocate(vp, addr, UPT_MAX_ADDRESS - addr);

	/* get new pagetables and kernel stack */
	(void) vm_allocate(vp, &addr, UPT_MAX_ADDRESS - addr, FALSE);

	/* force in the page table encompassing the UPAGES */
	ptaddr = trunc_page((u_int) vtopte(addr));
	vm_map_pageable(vp, ptaddr, ptaddr + NBPG, FALSE);

	/* and force in (demand-zero) the UPAGES */
	vm_map_pageable(vp, addr, addr + UPAGES * NBPG, FALSE);

	/* get a kernel virtual address for the UPAGES for this proc */
	up = (struct user *) kmem_alloc_pageable(kernel_map, UPAGES * NBPG);

	/* and force-map the upages into the kernel pmap */
	for (i = 0; i < UPAGES; i++)
		pmap_enter(vm_map_pmap(kernel_map),
		    ((vm_offset_t) up) + NBPG * i,
		    pmap_extract(vp->pmap, addr + NBPG * i),
		    VM_PROT_READ | VM_PROT_WRITE, 1);

	/*
	 * Allow the UPAGES page table entry to be paged (at the vm system
	 * level).
	 */
	vm_map_pageable(vp, ptaddr, ptaddr + NBPG, TRUE);

	p2->p_addr = up;

	/*
	 * p_stats and p_sigacts currently point at fields in the user
	 * struct, reached via p_addr rather than &u.  Copy p_sigacts and
	 * parts of p_stats; zero the rest of p_stats (statistics).
	 */
	p2->p_stats = &up->u_stats;
	p2->p_sigacts = &up->u_sigacts;
	up->u_sigacts = *p1->p_sigacts;
	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
		(caddr_t) &up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t) &up->u_stats.pstat_endcopy -
		(caddr_t) &up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork will copy and update the kernel stack and pcb, and make
	 * the child ready to run.  It marks the child so that it can return
	 * differently than the parent.  It returns twice, once in the
	 * parent process and once in the child.
	 */
	return (cpu_fork(p1, p2));
}
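
/*
 * Worked example of the u-area double mapping above (illustrative;
 * assumes the common i386 values NBPG == 4096 and UPAGES == 2): the two
 * wired pages at `kstack' in the child's map are located with
 * pmap_extract() and aliased by pmap_enter() at `up' in the kernel
 * pmap, so the same 8K of physical memory is reachable both at the
 * fixed kstack address through the process page tables and at
 * p2->p_addr through kernel_map.
 */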
/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 */
void
vm_init_limits(p)
	register struct proc *p;
{
	int rss_limit;

	/*
	 * Set up the initial limits on process VM.  Set the maximum
	 * resident set size to be half of (reasonably) available memory.
	 * Since this is a soft limit, it comes into effect only when the
	 * system is out of memory - half of main memory helps to favor
	 * smaller processes, and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
	/* limit the limit to no less than 128K */
	rss_limit = max(cnt.v_free_count / 2, 32);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}
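
/*
 * Worked example of the RSS default above (illustrative; assumes 4K
 * pages, so ptoa(n) == n * 4096): with 4096 pages (16MB) free at boot,
 * rss_limit = max(4096 / 2, 32) = 2048 pages, giving an 8MB soft limit;
 * with fewer than 64 free pages, the floor of 32 pages keeps the soft
 * limit at 128K, matching the comment above.
 */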
#ifdef DEBUG
int enableswap = 1;
int swapdebug = 0;
#define SDB_FOLLOW	1
#define SDB_SWAPIN	2
#define SDB_SWAPOUT	4
#endif

void
faultin(p)
	struct proc *p;
{
	vm_offset_t i;
	vm_offset_t ptaddr;
	int s;

	if ((p->p_flag & P_INMEM) == 0) {
		vm_map_t map;

		++p->p_lock;

		map = &p->p_vmspace->vm_map;
		/* force in the page table encompassing the kernel stack (upages) */
		ptaddr = trunc_page((u_int) vtopte(kstack));
		vm_map_pageable(map, ptaddr, ptaddr + NBPG, FALSE);

		/* wire in the UPAGES */
		vm_map_pageable(map, (vm_offset_t) kstack,
		    (vm_offset_t) kstack + UPAGES * NBPG, FALSE);

		/* and map them nicely into the kernel pmap */
		for (i = 0; i < UPAGES; i++) {
			vm_offset_t off = i * NBPG;
			vm_offset_t pa = (vm_offset_t)
			    pmap_extract(&p->p_vmspace->vm_pmap,
				(vm_offset_t) kstack + off);

			pmap_enter(vm_map_pmap(kernel_map),
			    ((vm_offset_t) p->p_addr) + off,
			    pa, VM_PROT_READ | VM_PROT_WRITE, 1);
		}

		/* and let the page table pages go (at least above the pmap level) */
		vm_map_pageable(map, ptaddr, ptaddr + NBPG, TRUE);

		s = splhigh();

		if (p->p_stat == SRUN)
			setrunqueue(p);

		p->p_flag |= P_INMEM;

		/* release the hold (p_lock) taken above */
		--p->p_lock;
		splx(s);
	}
}

int swapinreq;
int percentactive;

/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */
void
scheduler()
{
	register struct proc *p;
	register int pri;
	struct proc *pp;
	int ppri;
	int lastidle, lastrun;
	int curidle, currun;
	int forceload;
	int percent;
	int ntries;

	lastidle = 0;
	lastrun = 0;

loop:
	ntries = 0;

	curidle = cp_time[CP_IDLE];
	currun = cp_time[CP_USER] + cp_time[CP_SYS] + cp_time[CP_NICE];
	percent = (100 * (currun - lastrun)) /
	    (1 + (currun - lastrun) + (curidle - lastidle));
	lastrun = currun;
	lastidle = curidle;
	if (percent > 100)
		percent = 100;
	percentactive = percent;

	if (percentactive < 25)
		forceload = 1;
	else
		forceload = 0;

loop1:
	pp = NULL;
	ppri = INT_MIN;
	for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
		if (p->p_stat == SRUN && (p->p_flag & P_INMEM) == 0) {
			int mempri;

			pri = p->p_swtime + p->p_slptime - p->p_nice * 8;
			mempri = pri > 0 ? pri : 0;
			/*
			 * If this process is higher priority and there is
			 * enough space, then select this process instead
			 * of the previous selection.
			 */
			if (pri > ppri &&
			    ((cnt.v_free_count +
				(mempri * (4 * PAGE_SIZE) / PAGE_SIZE) >=
				p->p_vmspace->vm_swrss) ||
			     (ntries > 0 && forceload))) {
				pp = p;
				ppri = pri;
			}
		}
	}

	if ((pp == NULL) && (ntries == 0) && forceload) {
		++ntries;
		goto loop1;
	}

	/*
	 * Nothing to do, back to sleep.
	 */
	if ((p = pp) == NULL) {
		tsleep((caddr_t) &proc0, PVM, "sched", 0);
		goto loop;
	}

	/*
	 * We would like to bring someone in (only if there is space).
	 */
/*
	printf("swapin: %d, free: %d, res: %d, min: %d\n",
	    p->p_pid, cnt.v_free_count, cnt.v_free_reserved, cnt.v_free_min);
*/
	(void) splhigh();
	if ((forceload && (cnt.v_free_count > (cnt.v_free_reserved + UPAGES + 1))) ||
	    (cnt.v_free_count >= cnt.v_free_min)) {
		spl0();
		faultin(p);
		p->p_swtime = 0;
		goto loop;
	}
	/*
	 * Log the memory shortage.
	 */
	swapinreq += p->p_vmspace->vm_swrss;
	/*
	 * Not enough memory, jab the pageout daemon and wait until the
	 * coast is clear.
	 */
	if (cnt.v_free_count < cnt.v_free_min) {
		VM_WAIT;
	} else {
		tsleep((caddr_t) &proc0, PVM, "sched", 0);
	}
	(void) spl0();
	goto loop;
}
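
/*
 * Illustration of the swapin priority used by scheduler() above
 * (numbers are hypothetical): a candidate with p_swtime 20, p_slptime 5
 * and p_nice 0 gets pri = 20 + 5 - 0 * 8 = 25, while the same process
 * niced to 4 gets 25 - 32 = -7, so long-swapped-out and favorably niced
 * processes are brought back in first.
 */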
#define	swappable(p) \
	(((p)->p_lock == 0) && \
	    ((p)->p_flag & (P_TRACED|P_NOSWAP|P_SYSTEM|P_INMEM|P_WEXIT|P_PHYSIO)) == P_INMEM)

extern int vm_pageout_free_min;

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_threads()
{
	register struct proc *p;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int tpri;
	int didswap = 0;
	int swapneeded = swapinreq;
	extern int maxslp;
	int runnablenow;

	runnablenow = 0;
	outp = outp2 = NULL;
	outpri = outpri2 = INT_MIN;
	for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
		if (!swappable(p))
			continue;
		switch (p->p_stat) {
		case SRUN:
			/*
			 * count the process as being in a runnable state
			 */
			++runnablenow;
			if ((tpri = p->p_swtime + p->p_nice * 8) > outpri2) {
				outp2 = p;
				outpri2 = tpri;
			}
			continue;

		case SSLEEP:
		case SSTOP:
			/*
			 * Do not swap out a process that is waiting for VM
			 * data structures; there is a possible deadlock.
			 */
			if (!lock_try_write(&p->p_vmspace->vm_map.lock)) {
				continue;
			}
			vm_map_unlock(&p->p_vmspace->vm_map);
			/*
			 * If the process has been asleep for awhile and had
			 * most of its pages taken away already, swap it out.
			 */
			if ((p->p_slptime > maxslp) &&
			    (p->p_vmspace->vm_pmap.pm_stats.resident_count <= 6)) {
				swapout(p);
				didswap++;
			} else if ((tpri = p->p_slptime + p->p_nice * 8) > outpri) {
				outp = p;
				outpri = tpri;
			}
			continue;
		}
	}
	/*
	 * We swap out only if there are more than two runnable processes
	 * or if another process needs some space to swap in.
	 */
	if ((swapinreq || ((percentactive > 90) && (runnablenow > 2))) &&
	    (((cnt.v_free_count + cnt.v_inactive_count) <=
		(cnt.v_free_target + cnt.v_inactive_target)) ||
	     (cnt.v_free_count < cnt.v_free_min))) {
		if ((p = outp) == 0) {
			p = outp2;
		}

		/*
		 * Only swap out processes that have already had most of
		 * their pages taken away.
		 */
		if (p && (p->p_vmspace->vm_pmap.pm_stats.resident_count <= 6)) {
			swapout(p);
			didswap = 1;
		}
	}

	/*
	 * If we previously had found a process to swap out, and we need
	 * to swap out more, then try again.
	 */
#if 0
	if (p && swapinreq)
		goto swapmore;
#endif

	/*
	 * If we swapped something out, and another process needed memory,
	 * then wake up the sched process.
	 */
	if (didswap) {
		if (swapneeded)
			wakeup((caddr_t) &proc0);
		swapinreq = 0;
	}
}

void
swapout(p)
	register struct proc *p;
{
	vm_map_t map = &p->p_vmspace->vm_map;

	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss =
	    p->p_vmspace->vm_pmap.pm_stats.resident_count;
	/*
	 * and decrement the amount of needed space
	 */
	swapinreq -= min(swapinreq,
	    p->p_vmspace->vm_pmap.pm_stats.resident_count);

	(void) splhigh();
	p->p_flag &= ~P_INMEM;
	if (p->p_stat == SRUN)
		remrq(p);
	(void) spl0();

	++p->p_lock;
	/* let the upages be paged */
	pmap_remove(vm_map_pmap(kernel_map),
	    (vm_offset_t) p->p_addr, ((vm_offset_t) p->p_addr) + UPAGES * NBPG);

	vm_map_pageable(map, (vm_offset_t) kstack,
	    (vm_offset_t) kstack + UPAGES * NBPG, TRUE);

	--p->p_lock;
	p->p_swtime = 0;
}

/*
 * The rest of these routines fake thread handling.
 */

#ifndef assert_wait
void
assert_wait(event, ruptible)
	int event;
	boolean_t ruptible;
{
#ifdef lint
	ruptible++;
#endif
	curproc->p_thread = event;
}
#endif

void
thread_block(char *msg)
{
	if (curproc->p_thread)
		tsleep((caddr_t) curproc->p_thread, PVM, msg, 0);
}

void
thread_sleep_(event, lock, wmesg)
	int event;
	simple_lock_t lock;
	char *wmesg;
{
	curproc->p_thread = event;
	simple_unlock(lock);
	if (curproc->p_thread) {
		tsleep((caddr_t) event, PVM, wmesg, 0);
	}
}

#ifndef thread_wakeup
void
thread_wakeup(event)
	int event;
{
	wakeup((caddr_t) event);
}
#endif

/*
 * DEBUG stuff
 */

int indent = 0;

#include <machine/stdarg.h>	/* see subr_prf.c */

/*ARGSUSED2*/
void
#if __STDC__
iprintf(const char *fmt, ...)
#else
iprintf(fmt /* , va_alist */)
	char *fmt;
	/* va_dcl */
#endif
{
	register int i;
	va_list ap;

	for (i = indent; i >= 8; i -= 8)
		printf("\t");
	while (--i >= 0)
		printf(" ");
	va_start(ap, fmt);
	printf("%r", fmt, ap);
	va_end(ap);
}
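
/*
 * Example use of iprintf() (illustrative only): VM debug routines bump
 * `indent' while descending a data structure so that nested output
 * lines up, e.g.
 *
 *	indent += 2;
 *	iprintf("vm_map %x: pmap=%x, nentries=%d\n", map, map->pmap,
 *	    map->nentries);
 *	indent -= 2;
 */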