vm_glue.c revision 12662
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_glue.c,v 1.31 1995/12/02 17:11:18 bde Exp $
 */
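/*
 * This file is the glue between the VM system and the rest of the
 * kernel: address-range access checks (kernacc, useracc), wiring of
 * user memory (vslock, vsunlock), the VM side of fork (vm_fork),
 * default VM resource limits (vm_init_limits), and the swap-in and
 * swap-out policy (scheduler, faultin, swapout_procs, swapout).
 */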
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>

#include <sys/kernel.h>
#include <sys/dkstat.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_inherit.h>
#include <vm/vm_prot.h>
#include <vm/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

#include <machine/stdarg.h>
#include <machine/cpu.h>

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */

static void vm_init_limits __P((void *));
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler __P((void *));
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)

extern char kstack[];

/* vm_map_t upages_map; */

int
kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page(addr);
	eaddr = round_page(addr + len);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	return (rv == TRUE);
}
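/*
 * useracc() is the user-map counterpart of kernacc() above: it checks
 * whether the current process may read (B_READ) or write the given
 * user-space range.  Like kernacc(), it returns TRUE or FALSE rather
 * than an errno.
 */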
int
useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  It was once
	 * only used (as an end address) in trap.c.  Use it as an end address
	 * here too.  This bogusness has spread.  I just fixed where it was
	 * used as a max in vm_mmap.c.
	 */
	if ((vm_offset_t) addr + len > /* XXX */ VM_MAXUSER_ADDRESS
	    || (vm_offset_t) addr + len < (vm_offset_t) addr) {
		return (FALSE);
	}
	rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
	    trunc_page(addr), round_page(addr + len), prot);
	return (rv == TRUE);
}

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so debugger can plant a breakpoint).
 * All addresses are assumed to reside in the Sysmap.
 */
chgkprot(addr, len, rw)
	register caddr_t addr;
	int len, rw;
{
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	vm_map_protect(kernel_map, trunc_page(addr),
	    round_page(addr + len), prot, FALSE);
}
#endif
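/*
 * vslock() wires down a range of the current process's address space so
 * that it cannot be paged out; vsunlock() reverses it.  A sketch of the
 * intended pairing around a raw transfer (base, n, and do_transfer are
 * illustrative names, not taken from this file):
 *
 *	vslock(base, n);		wire the user buffer
 *	error = do_transfer(base, n);	device touches the user pages
 *	vsunlock(base, n, 1);		unwire; 1 = pages were dirtied
 */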
void
vslock(addr, len)
	caddr_t addr;
	u_int len;
{
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
	    round_page(addr + len), FALSE);
}

void
vsunlock(addr, len, dirtied)
	caddr_t addr;
	u_int len;
	int dirtied;
{
#ifdef lint
	dirtied++;
#endif	/* lint */
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
	    round_page(addr + len), TRUE);
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.
 * NOTE: the kernel stack may be at a different location in the child
 * process, and thus addresses of automatic variables may be invalid
 * after cpu_fork returns in the child process.  We do nothing here
 * after cpu_fork returns.
 */
int
vm_fork(p1, p2, isvfork)
	register struct proc *p1, *p2;
	int isvfork;
{
	register struct user *up;
	vm_offset_t addr, ptaddr;
	int error, i;
	struct vm_map *vp;

	while ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
		VM_WAIT;
	}

	/*
	 * avoid copying any of the parent's pagetables or other per-process
	 * objects that reside in the map by marking all of them
	 * non-inheritable
	 */
	(void) vm_map_inherit(&p1->p_vmspace->vm_map,
	    UPT_MIN_ADDRESS - UPAGES * PAGE_SIZE, VM_MAX_ADDRESS, VM_INHERIT_NONE);
	p2->p_vmspace = vmspace_fork(p1->p_vmspace);

#ifdef SYSVSHM
	if (p1->p_vmspace->vm_shm)
		shmfork(p1, p2, isvfork);
#endif

	/*
	 * Allocate a wired-down (for now) pcb and kernel stack for the
	 * process
	 */

	addr = (vm_offset_t) kstack;

	vp = &p2->p_vmspace->vm_map;

	/* get new pagetables and kernel stack */
	(void) vm_map_find(vp, NULL, 0, &addr, UPT_MAX_ADDRESS - addr, FALSE);

	/* force in the page table encompassing the UPAGES */
	ptaddr = trunc_page((u_int) vtopte(addr));
	error = vm_map_pageable(vp, ptaddr, ptaddr + PAGE_SIZE, FALSE);
	if (error)
		panic("vm_fork: wire of PT failed. error=%d", error);

	/* and force in (demand-zero) the UPAGES */
	error = vm_map_pageable(vp, addr, addr + UPAGES * PAGE_SIZE, FALSE);
	if (error)
		panic("vm_fork: wire of UPAGES failed. error=%d", error);

	/* get a kernel virtual address for the UPAGES for this proc */
	up = (struct user *) kmem_alloc_pageable(u_map, UPAGES * PAGE_SIZE);
	if (up == NULL)
		panic("vm_fork: u_map allocation failed");

	/* and force-map the upages into the kernel pmap */
	for (i = 0; i < UPAGES; i++)
		pmap_kenter(((vm_offset_t) up) + PAGE_SIZE * i,
		    pmap_extract(vp->pmap, addr + PAGE_SIZE * i));

	p2->p_addr = up;

	/*
	 * p_stats and p_sigacts currently point at fields in the user struct
	 * but not at &u, instead at p_addr.  Copy p_sigacts and parts of
	 * p_stats; zero the rest of p_stats (statistics).
	 */
	p2->p_stats = &up->u_stats;
	p2->p_sigacts = &up->u_sigacts;
	up->u_sigacts = *p1->p_sigacts;
	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
		(caddr_t) &up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t) &up->u_stats.pstat_endcopy -
		(caddr_t) &up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork will copy and update the kernel stack and pcb, and make
	 * the child ready to run.  It marks the child so that it can return
	 * differently than the parent.  It returns twice, once in the parent
	 * process and once in the child.
	 */
	return (cpu_fork(p1, p2));
}
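/*
 * Note the layout vm_fork() leaves behind: the UPAGES (u-area plus
 * kernel stack) are mapped twice, once at the fixed kstack address in
 * the process's own map and once at p->p_addr in the kernel pmap via
 * pmap_kenter().  faultin() and swapout() below maintain exactly these
 * two mappings when bringing a process back in or pushing it out.
 */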
/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	register struct proc *p = udata;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM.  Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
	/* limit the limit to no less than 2MB */
	rss_limit = max(cnt.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

void
faultin(p)
	struct proc *p;
{
	vm_offset_t i;
	vm_offset_t ptaddr;
	int s;

	if ((p->p_flag & P_INMEM) == 0) {
		vm_map_t map;
		int error;

		++p->p_lock;

		map = &p->p_vmspace->vm_map;
		/* force the page table encompassing the kernel stack (upages) */
		ptaddr = trunc_page((u_int) vtopte(kstack));
		error = vm_map_pageable(map, ptaddr, ptaddr + PAGE_SIZE, FALSE);
		if (error)
			panic("faultin: wire of PT failed. error=%d", error);

		/* wire in the UPAGES */
		error = vm_map_pageable(map, (vm_offset_t) kstack,
		    (vm_offset_t) kstack + UPAGES * PAGE_SIZE, FALSE);
		if (error)
			panic("faultin: wire of UPAGES failed. error=%d", error);

		/* and map them nicely into the kernel pmap */
		for (i = 0; i < UPAGES; i++) {
			vm_offset_t off = i * PAGE_SIZE;
			vm_offset_t pa = (vm_offset_t)
			    pmap_extract(&p->p_vmspace->vm_pmap,
				(vm_offset_t) kstack + off);

			if (pa == 0)
				panic("faultin: missing page for UPAGES\n");

			pmap_kenter(((vm_offset_t) p->p_addr) + off, pa);
		}

		s = splhigh();

		if (p->p_stat == SRUN)
			setrunqueue(p);

		p->p_flag |= P_INMEM;

		/* undo the effect of setting SLOCK above */
		--p->p_lock;
		splx(s);
	}
}
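/*
 * scheduler() below selects its swap-in candidate by the value
 * p_swtime + p_slptime - p_nice * 8, so the process that has been
 * swapped out and asleep the longest, adjusted for niceness, is
 * brought in first.  (mempri is derived from the same value but is
 * otherwise unused in this revision.)
 */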
/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	register struct proc *p;
	register int pri;
	struct proc *pp;
	int ppri;

loop:
	while ((cnt.v_free_count + cnt.v_cache_count) < (cnt.v_free_reserved + UPAGES + 2)) {
		VM_WAIT;
	}

	pp = NULL;
	ppri = INT_MIN;
	for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
		if (p->p_stat == SRUN && (p->p_flag & (P_INMEM | P_SWAPPING)) == 0) {
			int mempri;

			pri = p->p_swtime + p->p_slptime - p->p_nice * 8;
			mempri = pri > 0 ? pri : 0;
			/*
			 * if this process is higher priority and there is
			 * enough space, then select this process instead of
			 * the previous selection.
			 */
			if (pri > ppri) {
				pp = p;
				ppri = pri;
			}
		}
	}

	/*
	 * Nothing to do, back to sleep
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", 0);
		goto loop;
	}
	/*
	 * We would like to bring someone in.  (only if there is space).
	 */
	faultin(p);
	p->p_swtime = 0;
	goto loop;
}

#define	swappable(p) \
	(((p)->p_lock == 0) && \
	    ((p)->p_flag & (P_TRACED|P_NOSWAP|P_SYSTEM|P_INMEM|P_WEXIT|P_PHYSIO|P_SWAPPING)) == P_INMEM)

extern int vm_pageout_free_min;

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs()
{
	register struct proc *p;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int didswap = 0;

	outp = outp2 = NULL;
	outpri = outpri2 = INT_MIN;
retry:
	for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
		if (!swappable(p))
			continue;
		switch (p->p_stat) {
		default:
			continue;

		case SSLEEP:
		case SSTOP:
			/*
			 * do not swapout a realtime process
			 */
			if (p->p_rtprio.type == RTP_PRIO_REALTIME)
				continue;

			/*
			 * do not swapout a process waiting on a critical
			 * event of some kind
			 */
			if (((p->p_priority & 0x7f) < PSOCK) ||
			    (p->p_slptime <= 4))
				continue;

			vm_map_reference(&p->p_vmspace->vm_map);
			/*
			 * do not swapout a process that is waiting for VM
			 * data structures; there is a possible deadlock.
			 */
			if (!lock_try_write(&p->p_vmspace->vm_map.lock)) {
				vm_map_deallocate(&p->p_vmspace->vm_map);
				continue;
			}
			vm_map_unlock(&p->p_vmspace->vm_map);
			/*
			 * If the process has been asleep for awhile and had
			 * most of its pages taken away already, swap it out.
			 */
			swapout(p);
			vm_map_deallocate(&p->p_vmspace->vm_map);
			didswap++;
			goto retry;
		}
	}
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

void
swapout(p)
	register struct proc *p;
{
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_offset_t ptaddr;
	int i;

	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss =
	    p->p_vmspace->vm_pmap.pm_stats.resident_count;

	(void) splhigh();
	p->p_flag &= ~P_INMEM;
	p->p_flag |= P_SWAPPING;
	if (p->p_stat == SRUN)
		remrq(p);
	(void) spl0();

	/*
	 * let the upages be paged
	 */
	for (i = 0; i < UPAGES; i++)
		pmap_kremove((vm_offset_t) p->p_addr + PAGE_SIZE * i);

	vm_map_pageable(map, (vm_offset_t) kstack,
	    (vm_offset_t) kstack + UPAGES * PAGE_SIZE, TRUE);

	ptaddr = trunc_page((u_int) vtopte(kstack));
	vm_map_pageable(map, ptaddr, ptaddr + PAGE_SIZE, TRUE);

	p->p_flag &= ~P_SWAPPING;
	p->p_swtime = 0;
}

#ifdef DDB
/*
 * DEBUG stuff
 */

int indent;

#include <machine/stdarg.h>	/* see subr_prf.c */

/*ARGSUSED2*/
void
#if __STDC__
iprintf(const char *fmt, ...)
#else
iprintf(fmt /* , va_alist */ )
	char *fmt;

 /* va_dcl */
#endif
{
	register int i;
	va_list ap;

	for (i = indent; i >= 8; i -= 8)
		printf("\t");
	while (--i >= 0)
		printf(" ");
	va_start(ap, fmt);
	printf("%r", fmt, ap);
	va_end(ap);
}
#endif	/* DDB */