vm_machdep.c revision 36865
/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 *	$Id: vm_machdep.c,v 1.1 1998/06/10 10:53:40 dfr Exp $
 */
/*
 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
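
/*
 * Machine-dependent VM support for the Alpha: fork and exit hooks
 * (cpu_fork, cpu_set_fork_handler, cpu_exit, cpu_wait), core dumps
 * (cpu_coredump), physio buffer mapping (vmapbuf/vunmapbuf), user
 * stack growth (grow), and the idle-loop page pre-zeroing hook
 * (vm_page_zero_idle).
 */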

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/prom.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

/*
 * quick version of vm_fault
 */
void
vm_fault_quick(v, prot)
	caddr_t v;
	int prot;
{
	if (prot & VM_PROT_WRITE)
		subyte(v, fubyte(v));
	else
		fubyte(v);
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	struct user *up = p2->p_addr;
	int i;

	p2->p_md.md_tf = p1->p_md.md_tf;
	p2->p_md.md_flags = p1->p_md.md_flags & MDP_FPUSED;

	/*
	 * Cache the physical address of the pcb, so we can
	 * swap to it easily.
	 */
	p2->p_md.md_pcbpaddr = (void*) vtophys((vm_offset_t) &up->u_pcb);

	/*
	 * Simulate a write to the process's U-area pages,
	 * so that the system doesn't lose badly.
	 * (If this isn't done, the kernel can't read or
	 * write the kernel stack.  "Ouch!")
	 */
	for (i = 0; i < UPAGES; i++)
		pmap_emulate_reference(p2, (vm_offset_t)up + i * PAGE_SIZE,
		    0, 1);

	/*
	 * Copy floating point state from the FP chip to the PCB
	 * if this process has state stored there.
	 */
	if (p1 == fpcurproc) {
		alpha_pal_wrfen(1);
		savefpstate(&fpcurproc->p_addr->u_pcb.pcb_fp);
		alpha_pal_wrfen(0);
	}

	/*
	 * Copy pcb and stack from proc p1 to p2.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree.
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	p2->p_addr->u_pcb.pcb_hw.apcb_usp = alpha_pal_rdusp();

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero from setjmp.
	 */
#ifdef DIAGNOSTIC
	if (p1 != curproc)
		panic("cpu_fork: curproc");
	if ((up->u_pcb.pcb_hw.apcb_flags & ALPHA_PCB_FLAGS_FEN) != 0)
		printf("DANGER WILL ROBINSON: FEN SET IN cpu_fork!\n");
#endif

	/*
	 * create the child's kernel stack, from scratch.
	 */
	{
		struct trapframe *p2tf;

		/*
		 * Pick a stack pointer, leaving room for a trapframe;
		 * copy trapframe from parent so return to user mode
		 * will be to right address, with correct registers.
		 */
		p2tf = p2->p_md.md_tf = (struct trapframe *)
		    ((char *)p2->p_addr + USPACE - sizeof(struct trapframe));
		bcopy(p1->p_md.md_tf, p2->p_md.md_tf,
		    sizeof(struct trapframe));

		/*
		 * Set up return-value registers as fork() libc stub expects.
		 */
		p2tf->tf_regs[FRAME_V0] = p1->p_pid;	/* parent's pid */
		p2tf->tf_regs[FRAME_A3] = 0;		/* no error */
		p2tf->tf_regs[FRAME_A4] = 1;		/* is child */

		/*
		 * Arrange for continuation at child_return(), which
		 * will return to exception_return().  Note that the child
		 * process doesn't stay in the kernel for long!
		 *
		 * This is an inlined version of cpu_set_kpc.
		 */
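		/*
		 * Sketch of the hand-off, inferred from the stores below:
		 * pcb_context[] holds the callee-saved registers that
		 * cpu_switch reloads.  s0 carries the continuation pc
		 * (child_return), s1 the return address it will use
		 * (exception_return), s2 the argument (p2), and the saved
		 * ra makes cpu_switch "return" into switch_trampoline,
		 * which is assumed (it is assembly, not shown here) to
		 * shuffle s0/s2 into calling position so the child in
		 * effect runs child_return(p2) and then falls into
		 * exception_return() back to user mode.
		 */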
199 */ 200 up->u_pcb.pcb_hw.apcb_ksp = (u_int64_t)p2tf; 201 up->u_pcb.pcb_context[0] = 202 (u_int64_t)child_return; /* s0: pc */ 203 up->u_pcb.pcb_context[1] = 204 (u_int64_t)exception_return; /* s1: ra */ 205 up->u_pcb.pcb_context[2] = (u_long) p2; /* s2: a0 */ 206 up->u_pcb.pcb_context[7] = 207 (u_int64_t)switch_trampoline; /* ra: assembly magic */ 208 } 209} 210 211/* 212 * Intercept the return address from a freshly forked process that has NOT 213 * been scheduled yet. 214 * 215 * This is needed to make kernel threads stay in kernel mode. 216 */ 217void 218cpu_set_fork_handler(p, func, arg) 219 struct proc *p; 220 void (*func) __P((void *)); 221 void *arg; 222{ 223 /* 224 * Note that the trap frame follows the args, so the function 225 * is really called like this: func(arg, frame); 226 */ 227 p->p_addr->u_pcb.pcb_context[0] = (u_long) func; 228 p->p_addr->u_pcb.pcb_context[2] = (u_long) arg; 229} 230 231/* 232 * cpu_exit is called as the last action during exit. 233 * We release the address space of the process, block interrupts, 234 * and call switch_exit. switch_exit switches to proc0's PCB and stack, 235 * then jumps into the middle of cpu_switch, as if it were switching 236 * from proc0. 237 */ 238void 239cpu_exit(p) 240 register struct proc *p; 241{ 242 if (p == fpcurproc) 243 fpcurproc = NULL; 244 245 (void) splhigh(); 246 cnt.v_swtch++; 247 cpu_switch(p); 248 panic("cpu_exit"); 249} 250 251void 252cpu_wait(p) 253 struct proc *p; 254{ 255 /* drop per-process resources */ 256 pmap_dispose_proc(p); 257 258 /* and clean-out the vmspace */ 259 vmspace_free(p->p_vmspace); 260} 261 262/* 263 * Dump the machine specific header information at the start of a core dump. 264 */ 265int 266cpu_coredump(p, vp, cred) 267 struct proc *p; 268 struct vnode *vp; 269 struct ucred *cred; 270{ 271 272 return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES), 273 (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL, 274 p)); 275} 276 277#ifdef notyet 278static void 279setredzone(pte, vaddr) 280 u_short *pte; 281 caddr_t vaddr; 282{ 283/* eventually do this by setting up an expand-down stack segment 284 for ss0: selector, allowing stack access down to top of u. 285 this means though that protection violations need to be handled 286 thru a double fault exception that must do an integral task 287 switch to a known good context, within which a dump can be 288 taken. a sensible scheme might be to save the initial context 289 used by sched (that has physical memory mapped 1:1 at bottom) 290 and take the dump while still in mapped mode */ 291} 292#endif 293 294/* 295 * Map an IO request into kernel virtual address space. 296 * 297 * All requests are (re)mapped into kernel VA space. 298 * Notice that we use b_bufsize for the size of the buffer 299 * to be mapped. b_bcount might be modified by the driver. 300 */ 301void 302vmapbuf(bp) 303 register struct buf *bp; 304{ 305 register caddr_t addr, v, kva; 306 vm_offset_t pa; 307 308 if ((bp->b_flags & B_PHYS) == 0) 309 panic("vmapbuf"); 310 311 for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page(bp->b_data); 312 addr < bp->b_data + bp->b_bufsize; 313 addr += PAGE_SIZE, v += PAGE_SIZE) { 314 /* 315 * Do the vm_fault if needed; do the copy-on-write thing 316 * when reading stuff off device into memory. 
		vm_fault_quick(addr,
		    (bp->b_flags & B_READ) ?
		    (VM_PROT_READ|VM_PROT_WRITE) : VM_PROT_READ);
		pa = trunc_page(pmap_kextract((vm_offset_t) addr));
		if (pa == 0)
			panic("vmapbuf: page not present");
		vm_page_hold(PHYS_TO_VM_PAGE(pa));
		pmap_kenter((vm_offset_t) v, pa);
	}

	kva = bp->b_saveaddr;
	bp->b_saveaddr = bp->b_data;
	bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
}

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_data.
 */
void
vunmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	for (addr = (caddr_t)trunc_page(bp->b_data);
	    addr < bp->b_data + bp->b_bufsize;
	    addr += PAGE_SIZE) {
		pa = trunc_page(pmap_kextract((vm_offset_t) addr));
		pmap_kremove((vm_offset_t) addr);
		vm_page_unhold(PHYS_TO_VM_PAGE(pa));
	}

	bp->b_data = bp->b_saveaddr;
}

/*
 * Force reset the processor by calling the PROM halt entry.
 */
void
cpu_reset()
{
	prom_halt(0);
}

/*
 * Grow the user stack to allow for 'sp'.  This version grows the stack in
 * chunks of SGROWSIZ.
 */
int
grow(p, sp)
	struct proc *p;
	size_t sp;
{
	unsigned int nss;
	caddr_t v;
	struct vmspace *vm = p->p_vmspace;

	if ((caddr_t)sp <= vm->vm_maxsaddr || sp >= (size_t) USRSTACK)
		return (1);

	nss = roundup(USRSTACK - (unsigned)sp, PAGE_SIZE);

	if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur)
		return (0);

	if (vm->vm_ssize && roundup(vm->vm_ssize << PAGE_SHIFT,
	    SGROWSIZ) < nss) {
		int grow_amount;
		/*
		 * If necessary, grow the VM that the stack occupies
		 * to allow for the rlimit.  This allows us to not have
		 * to allocate all of the VM up-front in execve (which
		 * is expensive).
		 * Grow the VM by the amount requested, rounded up to
		 * the nearest SGROWSIZ to provide for some hysteresis.
		 */
		grow_amount = roundup((nss - (vm->vm_ssize << PAGE_SHIFT)),
		    SGROWSIZ);
		v = (char *)USRSTACK - roundup(vm->vm_ssize << PAGE_SHIFT,
		    SGROWSIZ) - grow_amount;
		/*
		 * If there isn't enough room to extend by SGROWSIZ, then
		 * just extend to the maximum size.
		 */
		if (v < vm->vm_maxsaddr) {
			v = vm->vm_maxsaddr;
			grow_amount = MAXSSIZ - (vm->vm_ssize << PAGE_SHIFT);
		}
		if ((grow_amount == 0) ||
		    (vm_map_find(&vm->vm_map, NULL, 0, (vm_offset_t *)&v,
		    grow_amount, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0) !=
		    KERN_SUCCESS)) {
			return (0);
		}
		vm->vm_ssize += grow_amount >> PAGE_SHIFT;
	}

	return (1);
}

static int cnt_prezero;

SYSCTL_INT(_machdep, OID_AUTO, cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");

/*
 * Implement the pre-zeroed page mechanism.
 * This routine is called from the idle loop.
 */
int
vm_page_zero_idle()
{
	static int free_rover;
	vm_page_t m;
	int s;

	/*
	 * XXX
	 * We stop zeroing pages when there are sufficient prezeroed pages.
	 * This threshold isn't really needed, except we want to
	 * bypass unneeded calls to vm_page_list_find, and the
	 * associated cache flush and latency.  The pre-zero will
	 * still be called when there are significantly more
	 * non-prezeroed pages than zeroed pages.  The threshold
	 * of half the number of reserved pages is arbitrary, but
	 * approximately the right amount.  Eventually, we should
	 * perhaps interrupt the zero operation when a process
	 * is found to be ready to run.
	 */
	if (cnt.v_free_count - vm_page_zero_count <= cnt.v_free_reserved / 2)
		return (0);
#ifdef SMP
	if (try_mplock()) {
#endif
		s = splvm();
		m = vm_page_list_find(PQ_FREE, free_rover);
		if (m != NULL) {
			--(*vm_page_queues[m->queue].lcnt);
			TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
			m->queue = PQ_NONE;
			splx(s);
#if 0
			rel_mplock();
#endif
			pmap_zero_page(VM_PAGE_TO_PHYS(m));
#if 0
			get_mplock();
#endif
			(void)splvm();
			m->queue = PQ_ZERO + m->pc;
			++(*vm_page_queues[m->queue].lcnt);
			TAILQ_INSERT_HEAD(vm_page_queues[m->queue].pl, m,
			    pageq);
			free_rover = (free_rover + PQ_PRIME3) & PQ_L2_MASK;
			++vm_page_zero_count;
			++cnt_prezero;
		}
		splx(s);
#ifdef SMP
		rel_mplock();
#endif
		return (1);
#ifdef SMP
	}
#endif
	return (0);
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm()
{
#if 0
	if (busdma_swi_pending != 0)
		busdma_swi();
#endif
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */
int
is_physical_memory(addr)
	vm_offset_t addr;
{
	/*
	 * stuff other tests for known memory-mapped devices (PCI?)
	 * here
	 */

	return 1;
}
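/*
 * Usage sketch (an assumption, not part of this file): the intended
 * caller of vm_page_zero_idle() is the machine-dependent idle loop,
 * roughly
 *
 *	while (no process is runnable) {
 *		if (vm_page_zero_idle() == 0)
 *			break;		-- nothing worth zeroing now
 *	}
 *
 * so that spare cycles pre-zero free pages.  The exact hook lives in
 * the platform's idle/cpu_switch code and is not shown here.
 */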