vm_glue.c revision 13228
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_glue.c,v 1.33 1995/12/14 09:54:57 phk Exp $
 */

#include "opt_sysvipc.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>

#include <sys/kernel.h>
#include <sys/dkstat.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_inherit.h>
#include <vm/vm_prot.h>
#include <vm/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

#include <machine/stdarg.h>
#include <machine/cpu.h>

/*
 * System initialization
 *
 * Note: proc0 from proc.h
 */

static void vm_init_limits __P((void *));
SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0)

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler __P((void *));
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)


static void swapout __P((struct proc *));

extern char kstack[];

/* vm_map_t upages_map; */

int
kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page(addr);
	eaddr = round_page(addr + len);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	return (rv == TRUE);
}

int
useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  It was once
	 * only used (as an end address) in trap.c.  Use it as an end address
	 * here too.  This bogusness has spread.  I just fixed where it was
	 * used as a max in vm_mmap.c.
	 */
	if ((vm_offset_t) addr + len > /* XXX */ VM_MAXUSER_ADDRESS
	    || (vm_offset_t) addr + len < (vm_offset_t) addr) {
		return (FALSE);
	}
	rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
	    trunc_page(addr), round_page(addr + len), prot);
	return (rv == TRUE);
}

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so debugger can plant a breakpoint).
 * All addresses are assumed to reside in the Sysmap.
 */
chgkprot(addr, len, rw)
	register caddr_t addr;
	int len, rw;
{
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	vm_map_protect(kernel_map, trunc_page(addr),
	    round_page(addr + len), prot, FALSE);
}
#endif
void
vslock(addr, len)
	caddr_t addr;
	u_int len;
{
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
	    round_page(addr + len), FALSE);
}

void
vsunlock(addr, len, dirtied)
	caddr_t addr;
	u_int len;
	int dirtied;
{
#ifdef	lint
	dirtied++;
#endif	/* lint */
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
	    round_page(addr + len), TRUE);
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.
 * NOTE: the kernel stack may be at a different location in the child
 * process, and thus addresses of automatic variables may be invalid
 * after cpu_fork returns in the child process.  We do nothing here
 * after cpu_fork returns.
 */
int
vm_fork(p1, p2, isvfork)
	register struct proc *p1, *p2;
	int isvfork;
{
	register struct user *up;
	vm_offset_t addr, ptaddr;
	int error, i;
	struct vm_map *vp;

	while ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
		VM_WAIT;
	}

	/*
	 * avoid copying any of the parent's pagetables or other per-process
	 * objects that reside in the map by marking all of them
	 * non-inheritable
	 */
	(void) vm_map_inherit(&p1->p_vmspace->vm_map,
	    UPT_MIN_ADDRESS - UPAGES * PAGE_SIZE, VM_MAX_ADDRESS, VM_INHERIT_NONE);
	p2->p_vmspace = vmspace_fork(p1->p_vmspace);

#ifdef SYSVSHM
	if (p1->p_vmspace->vm_shm)
		shmfork(p1, p2, isvfork);
#endif

	/*
	 * Allocate a wired-down (for now) pcb and kernel stack for the
	 * process
	 */

	addr = (vm_offset_t) kstack;

	vp = &p2->p_vmspace->vm_map;

	/* get new pagetables and kernel stack */
	(void) vm_map_find(vp, NULL, 0, &addr, UPT_MAX_ADDRESS - addr, FALSE);

	/* force in the page table encompassing the UPAGES */
	ptaddr = trunc_page((u_int) vtopte(addr));
	error = vm_map_pageable(vp, ptaddr, ptaddr + PAGE_SIZE, FALSE);
	if (error)
		panic("vm_fork: wire of PT failed. error=%d", error);

	/* and force in (demand-zero) the UPAGES */
	error = vm_map_pageable(vp, addr, addr + UPAGES * PAGE_SIZE, FALSE);
	if (error)
		panic("vm_fork: wire of UPAGES failed. error=%d", error);

	/* get a kernel virtual address for the UPAGES for this proc */
	up = (struct user *) kmem_alloc_pageable(u_map, UPAGES * PAGE_SIZE);
	if (up == NULL)
		panic("vm_fork: u_map allocation failed");

	/* and force-map the upages into the kernel pmap */
	for (i = 0; i < UPAGES; i++)
		pmap_kenter(((vm_offset_t) up) + PAGE_SIZE * i,
		    pmap_extract(vp->pmap, addr + PAGE_SIZE * i));

	p2->p_addr = up;

	/*
	 * p_stats and p_sigacts currently point at fields in the user struct
	 * but not at &u, instead at p_addr. Copy p_sigacts and parts of
	 * p_stats; zero the rest of p_stats (statistics).
	 */
	p2->p_stats = &up->u_stats;
	p2->p_sigacts = &up->u_sigacts;
	up->u_sigacts = *p1->p_sigacts;
	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t) &up->u_stats.pstat_endzero -
		(caddr_t) &up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t) &up->u_stats.pstat_endcopy -
		(caddr_t) &up->u_stats.pstat_startcopy));


	/*
	 * cpu_fork will copy and update the kernel stack and pcb, and make
	 * the child ready to run.  It marks the child so that it can return
	 * differently than the parent.  It returns twice, once in the parent
	 * process and once in the child.
	 */
	return (cpu_fork(p1, p2));
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 *
 * XXX should probably act directly on proc0.
 */
static void
vm_init_limits(udata)
	void *udata;
{
	register struct proc *p = udata;
	int rss_limit;

	/*
	 * Set up the initial limits on process VM.  Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
	/* limit the limit to no less than 2MB */
	rss_limit = max(cnt.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

void
faultin(p)
	struct proc *p;
{
	vm_offset_t i;
	vm_offset_t ptaddr;
	int s;

	if ((p->p_flag & P_INMEM) == 0) {
		vm_map_t map;
		int error;

		++p->p_lock;

		map = &p->p_vmspace->vm_map;
		/* force the page table encompassing the kernel stack (upages) */
		ptaddr = trunc_page((u_int) vtopte(kstack));
		error = vm_map_pageable(map, ptaddr, ptaddr + PAGE_SIZE, FALSE);
		if (error)
			panic("faultin: wire of PT failed. error=%d", error);

		/* wire in the UPAGES */
		error = vm_map_pageable(map, (vm_offset_t) kstack,
		    (vm_offset_t) kstack + UPAGES * PAGE_SIZE, FALSE);
		if (error)
			panic("faultin: wire of UPAGES failed. error=%d", error);

		/* and map them nicely into the kernel pmap */
		for (i = 0; i < UPAGES; i++) {
			vm_offset_t off = i * PAGE_SIZE;
			vm_offset_t pa = (vm_offset_t)
			    pmap_extract(&p->p_vmspace->vm_pmap,
				(vm_offset_t) kstack + off);

			if (pa == 0)
				panic("faultin: missing page for UPAGES\n");

			pmap_kenter(((vm_offset_t) p->p_addr) + off, pa);
		}

		s = splhigh();

		if (p->p_stat == SRUN)
			setrunqueue(p);

		p->p_flag |= P_INMEM;

		/* release the hold (p_lock) acquired above */
		--p->p_lock;
		splx(s);

	}
}

/*
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */
/* ARGSUSED*/
static void
scheduler(dummy)
	void *dummy;
{
	register struct proc *p;
	register int pri;
	struct proc *pp;
	int ppri;

loop:
	while ((cnt.v_free_count + cnt.v_cache_count) < (cnt.v_free_reserved + UPAGES + 2)) {
		VM_WAIT;
	}

	pp = NULL;
	ppri = INT_MIN;
	for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
		if (p->p_stat == SRUN && (p->p_flag & (P_INMEM | P_SWAPPING)) == 0) {
			int mempri;

			pri = p->p_swtime + p->p_slptime - p->p_nice * 8;
			mempri = pri > 0 ? pri : 0;
			/*
			 * if this process is higher priority and there is
			 * enough space, then select this process instead of
			 * the previous selection.
			 */
			if (pri > ppri) {
				pp = p;
				ppri = pri;
			}
		}
	}

	/*
	 * Nothing to do, back to sleep
	 */
	if ((p = pp) == NULL) {
		tsleep(&proc0, PVM, "sched", 0);
		goto loop;
	}
	/*
	 * We would like to bring someone in. (only if there is space).
 */
	faultin(p);
	p->p_swtime = 0;
	goto loop;
}

#define	swappable(p) \
	(((p)->p_lock == 0) && \
	    ((p)->p_flag & (P_TRACED|P_NOSWAP|P_SYSTEM|P_INMEM|P_WEXIT|P_PHYSIO|P_SWAPPING)) == P_INMEM)

extern int vm_pageout_free_min;

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_procs()
{
	register struct proc *p;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int didswap = 0;

	outp = outp2 = NULL;
	outpri = outpri2 = INT_MIN;
retry:
	for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
		if (!swappable(p))
			continue;
		switch (p->p_stat) {
		default:
			continue;

		case SSLEEP:
		case SSTOP:
			/*
			 * do not swapout a realtime process
			 */
			if (p->p_rtprio.type == RTP_PRIO_REALTIME)
				continue;

			/*
			 * do not swapout a process waiting on a critical
			 * event of some kind
			 */
			if (((p->p_priority & 0x7f) < PSOCK) ||
			    (p->p_slptime <= 4))
				continue;

			vm_map_reference(&p->p_vmspace->vm_map);
			/*
			 * do not swapout a process that is waiting for VM
			 * data structures; there is a possible deadlock.
			 */
			if (!lock_try_write(&p->p_vmspace->vm_map.lock)) {
				vm_map_deallocate(&p->p_vmspace->vm_map);
				continue;
			}
			vm_map_unlock(&p->p_vmspace->vm_map);
			/*
			 * If the process has been asleep for awhile and had
			 * most of its pages taken away already, swap it out.
			 */
			swapout(p);
			vm_map_deallocate(&p->p_vmspace->vm_map);
			didswap++;
			goto retry;
		}
	}
	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

static void
swapout(p)
	register struct proc *p;
{
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_offset_t ptaddr;
	int i;

	++p->p_stats->p_ru.ru_nswap;
	/*
	 * remember the process resident count
	 */
	p->p_vmspace->vm_swrss =
	    p->p_vmspace->vm_pmap.pm_stats.resident_count;

	(void) splhigh();
	p->p_flag &= ~P_INMEM;
	p->p_flag |= P_SWAPPING;
	if (p->p_stat == SRUN)
		remrq(p);
	(void) spl0();

	/*
	 * let the upages be paged
	 */
	for (i = 0; i < UPAGES; i++)
		pmap_kremove((vm_offset_t) p->p_addr + PAGE_SIZE * i);

	vm_map_pageable(map, (vm_offset_t) kstack,
	    (vm_offset_t) kstack + UPAGES * PAGE_SIZE, TRUE);

	ptaddr = trunc_page((u_int) vtopte(kstack));
	vm_map_pageable(map, ptaddr, ptaddr + PAGE_SIZE, TRUE);

	p->p_flag &= ~P_SWAPPING;
	p->p_swtime = 0;
}

#ifdef DDB
/*
 * DEBUG stuff
 */

int indent;

#include <machine/stdarg.h>		/* see subr_prf.c */

/*ARGSUSED2*/
void
#if __STDC__
iprintf(const char *fmt,...)
#else
iprintf(fmt /* , va_alist */ )
	char *fmt;

 /* va_dcl */
#endif
{
	register int i;
	va_list ap;

	for (i = indent; i >= 8; i -= 8)
		printf("\t");
	while (--i >= 0)
		printf(" ");
	va_start(ap, fmt);
	printf("%r", fmt, ap);
	va_end(ap);
}
#endif				/* DDB */