vm_machdep.c revision 49444
/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_machdep.c  7.3 (Berkeley) 5/13/91
 *      Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 *      $Id: vm_machdep.c,v 1.18 1999/07/22 06:03:42 alc Exp $
 */
/*
 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/fpu.h>
#include <machine/md_var.h>
#include <machine/prom.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

/*
 * quick version of vm_fault
 */
void
vm_fault_quick(v, prot)
        caddr_t v;
        int prot;
{
        if (prot & VM_PROT_WRITE)
                subyte(v, fubyte(v));
        else
                fubyte(v);
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(p1, p2)
        register struct proc *p1, *p2;
{
        struct user *up = p2->p_addr;

        p2->p_md.md_tf = p1->p_md.md_tf;
        p2->p_md.md_flags = p1->p_md.md_flags & MDP_FPUSED;

        /*
         * Cache the physical address of the pcb, so we can
         * swap to it easily.
         */
        p2->p_md.md_pcbpaddr = (void*) vtophys((vm_offset_t) &up->u_pcb);

        /*
         * Copy floating point state from the FP chip to the PCB
         * if this process has state stored there.
         */
        if (p1 == fpcurproc) {
                alpha_pal_wrfen(1);
                savefpstate(&fpcurproc->p_addr->u_pcb.pcb_fp);
                alpha_pal_wrfen(0);
        }

        /*
         * Copy pcb and stack from proc p1 to p2.
         * We do this as cheaply as possible, copying only the active
         * part of the stack.  The stack and pcb need to agree;
         */
        p2->p_addr->u_pcb = p1->p_addr->u_pcb;
        p2->p_addr->u_pcb.pcb_hw.apcb_usp = alpha_pal_rdusp();

        /*
         * Set the floating point state.
         */
        if ((p2->p_addr->u_pcb.pcb_fp_control & IEEE_INHERIT) == 0) {
                p2->p_addr->u_pcb.pcb_fp_control = 0;
                p2->p_addr->u_pcb.pcb_fp.fpr_cr = (FPCR_DYN_NORMAL
                    | FPCR_INVD | FPCR_DZED
                    | FPCR_OVFD | FPCR_INED
                    | FPCR_UNFD);
        }

        /*
         * Arrange for a non-local goto when the new process
         * is started, to resume here, returning nonzero from setjmp.
         */
#ifdef DIAGNOSTIC
        if (p1 != curproc)
                panic("cpu_fork: curproc");
        if ((up->u_pcb.pcb_hw.apcb_flags & ALPHA_PCB_FLAGS_FEN) != 0)
                printf("DANGER WILL ROBINSON: FEN SET IN cpu_fork!\n");
#endif

        /*
         * create the child's kernel stack, from scratch.
         */
        {
                struct trapframe *p2tf;

                /*
                 * Pick a stack pointer, leaving room for a trapframe;
                 * copy trapframe from parent so return to user mode
                 * will be to right address, with correct registers.
                 */
                p2tf = p2->p_md.md_tf = (struct trapframe *)
                    ((char *)p2->p_addr + USPACE - sizeof(struct trapframe));
                bcopy(p1->p_md.md_tf, p2->p_md.md_tf,
                    sizeof(struct trapframe));

                /*
                 * Set up return-value registers as fork() libc stub expects.
                 */
                p2tf->tf_regs[FRAME_V0] = p1->p_pid;    /* parent's pid */
                p2tf->tf_regs[FRAME_A3] = 0;            /* no error */
                p2tf->tf_regs[FRAME_A4] = 1;            /* is child */

                /*
                 * Arrange for continuation at child_return(), which
                 * will return to exception_return().  Note that the child
                 * process doesn't stay in the kernel for long!
                 *
                 * This is an inlined version of cpu_set_kpc.
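                 *
                 * The pcb_context[] slots saved here are the ones that
                 * cpu_switch restores: switch_trampoline comes back as
                 * the saved ra and then jumps to the routine in s0 with
                 * s2 as its argument and s1 as its return address (see
                 * cpu_set_fork_handler below, which overrides s0/s2).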
                 */
                up->u_pcb.pcb_hw.apcb_ksp = (u_int64_t)p2tf;
                up->u_pcb.pcb_context[0] =
                    (u_int64_t)child_return;            /* s0: pc */
                up->u_pcb.pcb_context[1] =
                    (u_int64_t)exception_return;        /* s1: ra */
                up->u_pcb.pcb_context[2] = (u_long) p2; /* s2: a0 */
                up->u_pcb.pcb_context[7] =
                    (u_int64_t)switch_trampoline;       /* ra: assembly magic */
        }
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(p, func, arg)
        struct proc *p;
        void (*func) __P((void *));
        void *arg;
{
        /*
         * Note that the trap frame follows the args, so the function
         * is really called like this:  func(arg, frame);
         */
        p->p_addr->u_pcb.pcb_context[0] = (u_long) func;
        p->p_addr->u_pcb.pcb_context[2] = (u_long) arg;
}

/*
 * cpu_exit is called as the last action during exit.
 * We release the address space of the process, block interrupts,
 * and call switch_exit.  switch_exit switches to proc0's PCB and stack,
 * then jumps into the middle of cpu_switch, as if it were switching
 * from proc0.
 */
void
cpu_exit(p)
        register struct proc *p;
{
        if (p == fpcurproc)
                fpcurproc = NULL;

        (void) splhigh();
        cnt.v_swtch++;
        cpu_switch(p);
        panic("cpu_exit");
}

void
cpu_wait(p)
        struct proc *p;
{
        /* drop per-process resources */
        pmap_dispose_proc(p);

        /* and clean-out the vmspace */
        vmspace_free(p->p_vmspace);
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
int
cpu_coredump(p, vp, cred)
        struct proc *p;
        struct vnode *vp;
        struct ucred *cred;
{

        return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
            (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL,
            p));
}

#ifdef notyet
static void
setredzone(pte, vaddr)
        u_short *pte;
        caddr_t vaddr;
{
/* eventually do this by setting up an expand-down stack segment
   for ss0: selector, allowing stack access down to top of u.
   this means though that protection violations need to be handled
   thru a double fault exception that must do an integral task
   switch to a known good context, within which a dump can be
   taken. a sensible scheme might be to save the initial context
   used by sched (that has physical memory mapped 1:1 at bottom)
   and take the dump while still in mapped mode */
}
#endif

/*
 * Map an IO request into kernel virtual address space.
 *
 * All requests are (re)mapped into kernel VA space.
 * Notice that we use b_bufsize for the size of the buffer
 * to be mapped.  b_bcount might be modified by the driver.
 */
void
vmapbuf(bp)
        register struct buf *bp;
{
        register caddr_t addr, v, kva;
        vm_offset_t pa;

        if ((bp->b_flags & B_PHYS) == 0)
                panic("vmapbuf");

        for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page(bp->b_data);
            addr < bp->b_data + bp->b_bufsize;
            addr += PAGE_SIZE, v += PAGE_SIZE) {
                /*
                 * Do the vm_fault if needed; do the copy-on-write thing
                 * when reading stuff off device into memory.
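                 *
                 * A B_READ transfer stores device data into the user
                 * buffer, so those pages are faulted with write access
                 * (forcing any copy-on-write copy to happen now); a
                 * write to the device only needs the pages readable.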
                 */
                vm_fault_quick(addr,
                    (bp->b_flags&B_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
                pa = trunc_page(pmap_kextract((vm_offset_t) addr));
                if (pa == 0)
                        panic("vmapbuf: page not present");
                vm_page_hold(PHYS_TO_VM_PAGE(pa));
                pmap_kenter((vm_offset_t) v, pa);
        }

        kva = bp->b_saveaddr;
        bp->b_saveaddr = bp->b_data;
        bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
}

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_addr.
 */
void
vunmapbuf(bp)
        register struct buf *bp;
{
        register caddr_t addr;
        vm_offset_t pa;

        if ((bp->b_flags & B_PHYS) == 0)
                panic("vunmapbuf");

        for (addr = (caddr_t)trunc_page(bp->b_data);
            addr < bp->b_data + bp->b_bufsize;
            addr += PAGE_SIZE) {
                pa = trunc_page(pmap_kextract((vm_offset_t) addr));
                pmap_kremove((vm_offset_t) addr);
                vm_page_unhold(PHYS_TO_VM_PAGE(pa));
        }

        bp->b_data = bp->b_saveaddr;
}

/*
 * Force reset the processor by invalidating the entire address space!
 */
void
cpu_reset()
{
        prom_halt(0);
}

int
grow_stack(p, sp)
        struct proc *p;
        size_t sp;
{
        int rv;

        rv = vm_map_growstack (p, sp);
        if (rv != KERN_SUCCESS)
                return (0);

        return (1);
}


static int cnt_prezero;

SYSCTL_INT(_machdep, OID_AUTO, cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");

/*
 * Implement the pre-zeroed page mechanism.
 * This routine is called from the idle loop.
 */

#define ZIDLE_LO(v)     ((v) * 2 / 3)
#define ZIDLE_HI(v)     ((v) * 4 / 5)

int
vm_page_zero_idle()
{
        static int free_rover;
        static int zero_state;
        vm_page_t m;
        int s;

        /*
         * Attempt to maintain approximately 1/2 of our free pages in a
         * PG_ZERO'd state.  Add some hysteresis to (attempt to) avoid
         * generally zeroing a page when the system is near steady-state.
         * Otherwise we might get 'flutter' during disk I/O / IPC or
         * fast sleeps.  We also do not want to be continuously zeroing
         * pages because doing so may flush our L1 and L2 caches too much.
         */

        if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
                return(0);
        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
                return(0);

#ifdef SMP
        if (try_mplock()) {
#endif
                s = splvm();
                m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
                zero_state = 0;
                if (m != NULL && (m->flags & PG_ZERO) == 0) {
                        vm_page_queues[m->queue].lcnt--;
                        TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
                        m->queue = PQ_NONE;
                        splx(s);
#if 0
                        rel_mplock();
#endif
                        pmap_zero_page(VM_PAGE_TO_PHYS(m));
#if 0
                        get_mplock();
#endif
                        (void)splvm();
                        vm_page_flag_set(m, PG_ZERO);
                        m->queue = PQ_FREE + m->pc;
                        vm_page_queues[m->queue].lcnt++;
                        TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m,
                            pageq);
                        ++vm_page_zero_count;
                        ++cnt_prezero;
                        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
                                zero_state = 1;
                }
                free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
                splx(s);
#ifdef SMP
                rel_mplock();
#endif
                return (1);
#ifdef SMP
        }
#endif
        return (0);
}

/*
 * Software interrupt handler for queued VM system processing.
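 *
 * The busdma hook below is still disabled (#if 0) here, so this
 * handler is currently a no-op.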
 */
void
swi_vm()
{
#if 0
        if (busdma_swi_pending != 0)
                busdma_swi();
#endif
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */


int
is_physical_memory(addr)
        vm_offset_t addr;
{
        /*
         * stuff other tests for known memory-mapped devices (PCI?)
         * here
         */

        return 1;
}