vm_glue.c revision 15018
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_glue.c,v 1.43 1996/03/11 06:11:39 hsu Exp $
 */
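
/*
 * Glue between the machine-independent VM system and the rest of the
 * kernel: address-range access checks (kernacc, useracc), wiring of user
 * memory (vslock, vsunlock), construction of a new process's VM state
 * (vm_fork), default VM resource limits (vm_init_limits), and the swapin
 * and swapout machinery (faultin, scheduler, swapout_procs, swapout).
 */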

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>

#include <sys/kernel.h>
#include <sys/dkstat.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_inherit.h>
#include <vm/vm_prot.h>
#include <vm/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <sys/user.h>

#include <machine/stdarg.h>
#include <machine/cpu.h>

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */

static void vm_init_limits __P((void *));
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler __P((void *));
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)


static void swapout __P((struct proc *));

extern char kstack[];

/* vm_map_t upages_map; */

int
kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page(addr);
	eaddr = round_page(addr + len);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	return (rv == TRUE);
}

int
useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  It was once
	 * only used (as an end address) in trap.c.  Use it as an end address
	 * here too.  This bogusness has spread.  I just fixed where it was
	 * used as a max in vm_mmap.c.
	 */
	if ((vm_offset_t) addr + len > /* XXX */ VM_MAXUSER_ADDRESS
	    || (vm_offset_t) addr + len < (vm_offset_t) addr) {
		return (FALSE);
	}
	rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
	    trunc_page(addr), round_page(addr + len), prot);
	return (rv == TRUE);
}

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so debugger can plant a breakpoint).
 * All addresses are assumed to reside in the Sysmap.
 */
chgkprot(addr, len, rw)
	register caddr_t addr;
	int len, rw;
{
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	vm_map_protect(kernel_map, trunc_page(addr),
	    round_page(addr + len), prot, FALSE);
}
#endif
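
/*
 * Wire (vslock) and unwire (vsunlock) the user pages covering the range
 * [addr, addr + len) in the current process, so that they remain resident
 * while the kernel is accessing them (typically for the duration of a raw
 * I/O transfer).  The "dirtied" argument to vsunlock is currently unused.
 */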
void
vslock(addr, len)
	caddr_t addr;
	u_int len;
{
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
	    round_page(addr + len), FALSE);
}

void
vsunlock(addr, len, dirtied)
	caddr_t addr;
	u_int len;
	int dirtied;
{
#ifdef lint
	dirtied++;
#endif	/* lint */
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
	    round_page(addr + len), TRUE);
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.
 * NOTE: the kernel stack may be at a different location in the child
 * process, and thus addresses of automatic variables may be invalid
 * after cpu_fork returns in the child process.  We do nothing here
 * after cpu_fork returns.
 */
int
vm_fork(p1, p2)
	register struct proc *p1, *p2;
{
	register struct user *up;
	vm_offset_t addr, ptaddr, ptpa;
	int error, i;
	vm_map_t map;
	pmap_t pvp;
	vm_page_t stkm;

	while ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
		VM_WAIT;
	}

	/*
	 * avoid copying any of the parent's pagetables or other per-process
	 * objects that reside in the map by marking all of them
	 * non-inheritable
	 */
	(void) vm_map_inherit(&p1->p_vmspace->vm_map,
	    UPT_MIN_ADDRESS - UPAGES * PAGE_SIZE, VM_MAX_ADDRESS, VM_INHERIT_NONE);
	p2->p_vmspace = vmspace_fork(p1->p_vmspace);

	if (p1->p_vmspace->vm_shm)
		shmfork(p1, p2);

	/*
	 * Allocate a wired-down (for now) pcb and kernel stack for the
	 * process
	 */

	addr = (vm_offset_t) kstack;

	map = &p2->p_vmspace->vm_map;
	pvp = &p2->p_vmspace->vm_pmap;

	/*
	 * allocate object for the upages
	 */
	p2->p_vmspace->vm_upages_obj = vm_object_allocate(OBJT_DEFAULT,
	    UPAGES);

	/*
	 * put upages into the address space
	 */
	error = vm_map_find(map, p2->p_vmspace->vm_upages_obj, 0,
	    &addr, UPT_MIN_ADDRESS - addr, FALSE, VM_PROT_ALL,
	    VM_PROT_ALL, 0);
	if (error != KERN_SUCCESS)
		panic("vm_fork: vm_map_find (UPAGES) failed, addr=0x%x, error=%d", addr, error);

	addr += UPAGES * PAGE_SIZE;
	/* allocate space for page tables */
	error = vm_map_find(map, NULL, 0, &addr, UPT_MAX_ADDRESS - addr, FALSE,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	if (error != KERN_SUCCESS)
		panic("vm_fork: vm_map_find (PTES) failed, addr=0x%x, error=%d", addr, error);

	/* get a kernel virtual address for the UPAGES for this proc */
	up = (struct user *) kmem_alloc_pageable(u_map, UPAGES * PAGE_SIZE);
	if (up == NULL)
		panic("vm_fork: u_map allocation failed");

	/*
	 * create a pagetable page for the UPAGES in the process address space
	 */
	ptaddr = trunc_page((u_int) vtopte(kstack));
	(void) vm_fault(map, ptaddr, VM_PROT_READ|VM_PROT_WRITE, FALSE);
	ptpa = pmap_extract(pvp, ptaddr);
	if (ptpa == 0) {
		panic("vm_fork: no pte for UPAGES");
	}

	/*
	 * hold the page table page for the kernel stack, and fault them in
	 */
	stkm = PHYS_TO_VM_PAGE(ptpa);
	vm_page_hold(stkm);
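
	/*
	 * Allocate and wire each of the UPAGES pages that back the new
	 * process's user struct and kernel stack, and map each page twice:
	 * once at kstack in the child's pmap, and once at the pageable
	 * kernel address "up" so the kernel can initialize it.
	 */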
	for (i = 0; i < UPAGES; i++) {
		vm_page_t m;

		/*
		 * Get a kernel stack page
		 */
		while ((m = vm_page_alloc(p2->p_vmspace->vm_upages_obj,
		    i, VM_ALLOC_NORMAL)) == NULL) {
			VM_WAIT;
		}

		/*
		 * Wire the page
		 */
		vm_page_wire(m);
		m->flags &= ~PG_BUSY;

		/*
		 * Enter the page into both the kernel and the process
		 * address space.
		 */
		pmap_enter(pvp, (vm_offset_t) kstack + i * PAGE_SIZE,
		    VM_PAGE_TO_PHYS(m), VM_PROT_READ|VM_PROT_WRITE, 1);
		pmap_kenter(((vm_offset_t) up) + i * PAGE_SIZE,
		    VM_PAGE_TO_PHYS(m));
		m->flags &= ~PG_ZERO;
		m->valid = VM_PAGE_BITS_ALL;
	}
	/*
	 * The page table page for the kernel stack should be held in memory
	 * now.
	 */
	vm_page_unhold(stkm);

	p2->p_addr = up;

	/*
	 * p_stats and p_sigacts currently point at fields in the user struct
	 * but not at &u, instead at p_addr.  Copy p_sigacts and parts of
	 * p_stats; zero the rest of p_stats (statistics).
	 */
	p2->p_stats = &up->u_stats;
	p2->p_sigacts = &up->u_sigacts;
	up->u_sigacts = *p1->p_sigacts;
	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
		(caddr_t) &up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t) &up->u_stats.pstat_endcopy -
		(caddr_t) &up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork will copy and update the kernel stack and pcb, and make
	 * the child ready to run.  It marks the child so that it can return
	 * differently than the parent.  It returns twice, once in the parent
	 * process and once in the child.
	 */
	return (cpu_fork(p1, p2));
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	register struct proc *p = udata;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM.  Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
	/* limit the limit to no less than 2MB */
	rss_limit = max(cnt.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}
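
/*
 * Bring the U-area and kernel stack of a swapped-out process back into
 * memory: re-wire each of its UPAGES pages (allocating or paging them in
 * again as needed), re-enter the mappings in both the process pmap and the
 * kernel's view of the U-area, mark the process P_INMEM, and put it back
 * on the run queue if it is runnable.
 */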
void
faultin(p)
	struct proc *p;
{
	vm_offset_t i;
	vm_offset_t ptaddr;
	int s;

	if ((p->p_flag & P_INMEM) == 0) {
		vm_map_t map = &p->p_vmspace->vm_map;
		pmap_t pmap = &p->p_vmspace->vm_pmap;
		vm_page_t stkm, m;
		vm_offset_t ptpa;
		int error;

		++p->p_lock;

		ptaddr = trunc_page((u_int) vtopte(kstack));
		(void) vm_fault(map, ptaddr, VM_PROT_READ|VM_PROT_WRITE, FALSE);
		ptpa = pmap_extract(&p->p_vmspace->vm_pmap, ptaddr);
		if (ptpa == 0) {
			panic("faultin: no pte for UPAGES");
		}
		stkm = PHYS_TO_VM_PAGE(ptpa);
		vm_page_hold(stkm);

		for (i = 0; i < UPAGES; i++) {
			int s;
			s = splhigh();

retry:
			if ((m = vm_page_lookup(p->p_vmspace->vm_upages_obj, i)) == NULL) {
				if ((m = vm_page_alloc(p->p_vmspace->vm_upages_obj, i, VM_ALLOC_NORMAL)) == NULL) {
					VM_WAIT;
					goto retry;
				}
			} else {
				if ((m->flags & PG_BUSY) || m->busy) {
					m->flags |= PG_WANTED;
					tsleep(m, PVM, "swinuw", 0);
					goto retry;
				}
			}
			vm_page_wire(m);
			if (m->valid == VM_PAGE_BITS_ALL)
				m->flags &= ~PG_BUSY;
			splx(s);

			pmap_enter(pmap, (vm_offset_t) kstack + i * PAGE_SIZE,
			    VM_PAGE_TO_PHYS(m), VM_PROT_READ|VM_PROT_WRITE, TRUE);
			pmap_kenter(((vm_offset_t) p->p_addr) + i * PAGE_SIZE,
			    VM_PAGE_TO_PHYS(m));
			if (m->valid != VM_PAGE_BITS_ALL) {
				int rv;
				rv = vm_pager_get_pages(p->p_vmspace->vm_upages_obj,
				    &m, 1, 0);
				if (rv != VM_PAGER_OK)
					panic("faultin: cannot get upages for proc: %d\n", p->p_pid);
				m->valid = VM_PAGE_BITS_ALL;
				m->flags &= ~PG_BUSY;
			}
		}
		vm_page_unhold(stkm);

		s = splhigh();

		if (p->p_stat == SRUN)
			setrunqueue(p);

		p->p_flag |= P_INMEM;

		/* undo the p_lock hold taken above */
		--p->p_lock;
		splx(s);

	}
}

/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	register struct proc *p;
	register int pri;
	struct proc *pp;
	int ppri;

loop:
	while ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
		VM_WAIT;
	}

	pp = NULL;
	ppri = INT_MIN;
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (p->p_stat == SRUN &&
		    (p->p_flag & (P_INMEM | P_SWAPPING)) == 0) {
			int mempri;

			pri = p->p_swtime + p->p_slptime - p->p_nice * 8;
			mempri = pri > 0 ? pri : 0;
			/*
			 * if this process is higher priority and there is
			 * enough space, then select this process instead of
			 * the previous selection.
			 */
			if (pri > ppri) {
				pp = p;
				ppri = pri;
			}
		}
	}

	/*
	 * Nothing to do, back to sleep
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", 0);
		goto loop;
	}
	/*
	 * We would like to bring someone in (only if there is space).
	 */
	faultin(p);
	p->p_swtime = 0;
	goto loop;
}

#ifndef NO_SWAPPING

#define	swappable(p) \
	(((p)->p_lock == 0) && \
	    ((p)->p_flag & (P_TRACED|P_NOSWAP|P_SYSTEM|P_INMEM|P_WEXIT|P_PHYSIO|P_SWAPPING)) == P_INMEM)

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * Any process that has been sleeping or stopped for more than a few
 * seconds, is not realtime, and is not waiting on a critical event is
 * eligible to be swapped out.
 */
void
swapout_procs()
{
	register struct proc *p;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int didswap = 0;

	outp = outp2 = NULL;
	outpri = outpri2 = INT_MIN;
retry:
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (!swappable(p))
			continue;
		switch (p->p_stat) {
		default:
			continue;

		case SSLEEP:
		case SSTOP:
			/*
			 * do not swapout a realtime process
			 */
			if (p->p_rtprio.type == RTP_PRIO_REALTIME)
				continue;

			/*
			 * do not swapout a process waiting on a critical
			 * event of some kind
			 */
			if (((p->p_priority & 0x7f) < PSOCK) ||
			    (p->p_slptime <= 4))
				continue;

			vm_map_reference(&p->p_vmspace->vm_map);
			/*
			 * do not swapout a process that is waiting for VM
			 * data structures; there is a possible deadlock.
			 */
			if (!lock_try_write(&p->p_vmspace->vm_map.lock)) {
				vm_map_deallocate(&p->p_vmspace->vm_map);
				continue;
			}
			vm_map_unlock(&p->p_vmspace->vm_map);
			/*
			 * If the process has been asleep for awhile and had
			 * most of its pages taken away already, swap it out.
			 */
			swapout(p);
			vm_map_deallocate(&p->p_vmspace->vm_map);
			didswap++;
			goto retry;
		}
	}
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}
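
/*
 * Swap out a single process: note its resident page count, clear P_INMEM
 * and mark it P_SWAPPING, take it off the run queue if necessary, then
 * mark each U-area page dirty and unwire it so the pageout daemon can
 * push it to swap, and finally remove the kernel-stack mappings from the
 * process pmap.
 */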
static void
swapout(p)
	register struct proc *p;
{
	vm_map_t map = &p->p_vmspace->vm_map;
	pmap_t pmap = &p->p_vmspace->vm_pmap;
	vm_offset_t ptaddr;
	int i;

	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss =
	    p->p_vmspace->vm_pmap.pm_stats.resident_count;

	(void) splhigh();
	p->p_flag &= ~P_INMEM;
	p->p_flag |= P_SWAPPING;
	if (p->p_stat == SRUN)
		remrq(p);
	(void) spl0();

	/*
	 * let the upages be paged
	 */
	for (i = 0; i < UPAGES; i++) {
		vm_page_t m;
		if ((m = vm_page_lookup(p->p_vmspace->vm_upages_obj, i)) == NULL)
			panic("swapout: upage already missing???");
		m->dirty = VM_PAGE_BITS_ALL;
		vm_page_unwire(m);
		pmap_kremove((vm_offset_t) p->p_addr + PAGE_SIZE * i);
	}
	pmap_remove(pmap, (vm_offset_t) kstack,
	    (vm_offset_t) kstack + PAGE_SIZE * UPAGES);

	p->p_flag &= ~P_SWAPPING;
	p->p_swtime = 0;
}
#endif /* !NO_SWAPPING */

#ifdef DDB
/*
 * DEBUG stuff
 */

int indent;

#include <machine/stdarg.h>	/* see subr_prf.c */
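
/*
 * Indented printf for the debugger: emit "indent" columns of leading
 * whitespace (tabs, then spaces) and then format the arguments with
 * vprintf.
 */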
/*ARGSUSED2*/
void
#if __STDC__
iprintf(const char *fmt, ...)
#else
iprintf(fmt /* , va_alist */ )
	char *fmt;

 /* va_dcl */
#endif
{
	register int i;
	va_list ap;

	for (i = indent; i >= 8; i -= 8)
		printf("\t");
	while (--i >= 0)
		printf(" ");
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
#endif /* DDB */