vm_machdep.c revision 54207
/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 * $FreeBSD: head/sys/powerpc/aim/vm_machdep.c 54207 1999-12-06 18:12:29Z peter $
 */
/*
 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/fpu.h>
#include <machine/md_var.h>
#include <machine/prom.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

/*
 * Quick version of vm_fault: fault a page in by touching it.  A
 * fubyte() forces a read fault; writing the byte back with subyte()
 * forces a write (and hence copy-on-write) fault as well.
 */
int
vm_fault_quick(v, prot)
	caddr_t v;
	int prot;
{
	int r;

	if (prot & VM_PROT_WRITE)
		r = subyte(v, fubyte(v));
	else
		r = fubyte(v);
	return(r);
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child
 * is ready to run and return to user mode.
 */
void
cpu_fork(p1, p2, flags)
	register struct proc *p1, *p2;
	int flags;
{
	struct user *up = p2->p_addr;

	if ((flags & RFPROC) == 0)
		return;

	p2->p_md.md_tf = p1->p_md.md_tf;
	p2->p_md.md_flags = p1->p_md.md_flags & MDP_FPUSED;

	/*
	 * Cache the physical address of the pcb, so we can
	 * swap to it easily.
	 */
	p2->p_md.md_pcbpaddr = (void*) vtophys((vm_offset_t) &up->u_pcb);

	/*
	 * Copy floating point state from the FP chip to the PCB
	 * if this process has state stored there.
	 */
	alpha_fpstate_save(p1, 0);

	/*
	 * Copy pcb and stack from proc p1 to p2.  We do this as
	 * cheaply as possible, copying only the active part of the
	 * stack.  The stack and pcb need to agree.  Make sure that the
	 * new process has FEN disabled.
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	p2->p_addr->u_pcb.pcb_hw.apcb_usp = alpha_pal_rdusp();
	p2->p_addr->u_pcb.pcb_hw.apcb_flags &= ~ALPHA_PCB_FLAGS_FEN;

	/*
	 * Set the floating point state.
	 */
	if ((p2->p_addr->u_pcb.pcb_fp_control & IEEE_INHERIT) == 0) {
		p2->p_addr->u_pcb.pcb_fp_control = 0;
		p2->p_addr->u_pcb.pcb_fp.fpr_cr = (FPCR_DYN_NORMAL
						   | FPCR_INVD | FPCR_DZED
						   | FPCR_OVFD | FPCR_INED
						   | FPCR_UNFD);
	}

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero from setjmp.
	 */
#ifdef DIAGNOSTIC
	if (p1 != curproc)
		panic("cpu_fork: curproc");
	alpha_fpstate_check(p1);
#endif

	/*
	 * Create the child's kernel stack, from scratch.
	 */
	{
		struct trapframe *p2tf;

		/*
		 * Pick a stack pointer, leaving room for a trapframe;
		 * copy trapframe from parent so return to user mode
		 * will be to right address, with correct registers.
		 */
		p2tf = p2->p_md.md_tf = (struct trapframe *)
		    ((char *)p2->p_addr + USPACE - sizeof(struct trapframe));
		bcopy(p1->p_md.md_tf, p2->p_md.md_tf,
		    sizeof(struct trapframe));

		/*
		 * Set up return-value registers as fork() libc stub expects.
		 */
		p2tf->tf_regs[FRAME_V0] = 0;	/* child's pid (linux)	*/
		p2tf->tf_regs[FRAME_A3] = 0;	/* no error		*/
		p2tf->tf_regs[FRAME_A4] = 1;	/* is child (FreeBSD)	*/

		/*
		 * Arrange for continuation at child_return(), which
		 * will return to exception_return().  Note that the child
		 * process doesn't stay in the kernel for long!
		 *
		 * This is an inlined version of cpu_set_kpc.
		 */
		up->u_pcb.pcb_hw.apcb_ksp = (u_int64_t)p2tf;
		up->u_pcb.pcb_context[0] =
		    (u_int64_t)child_return;		/* s0: pc */
		up->u_pcb.pcb_context[1] =
		    (u_int64_t)exception_return;	/* s1: ra */
		up->u_pcb.pcb_context[2] = (u_long) p2;	/* s2: a0 */
		up->u_pcb.pcb_context[7] =
		    (u_int64_t)switch_trampoline;	/* ra: assembly magic */
	}
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(p, func, arg)
	struct proc *p;
	void (*func) __P((void *));
	void *arg;
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this:  func(arg, frame);
	 */
	p->p_addr->u_pcb.pcb_context[0] = (u_long) func;
	p->p_addr->u_pcb.pcb_context[2] = (u_long) arg;
}
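
/*
 * A minimal usage sketch of cpu_set_fork_handler(), illustrative only:
 * my_kthread_main() and start_my_kthread() are hypothetical names, not
 * part of this file.  A kernel-thread creator forks off a process that
 * must never return to user mode, then redirects its continuation so it
 * begins life in a kernel function; this works by overwriting the
 * pcb_context[] slots (s0: pc, s2: a0) that cpu_fork() set up above.
 */
#if 0
static void
my_kthread_main(void *arg)
{
	for (;;) {
		/* perform periodic kernel work using arg */
	}
}

static void
start_my_kthread(struct proc *newp, void *arg)
{
	/* newp has been created by fork but has not been scheduled yet */
	cpu_set_fork_handler(newp, my_kthread_main, arg);
}
#endif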

/*
 * cpu_exit is called as the last action during exit.
 * We release the address space of the process, block interrupts,
 * and call switch_exit.  switch_exit switches to proc0's PCB and stack,
 * then jumps into the middle of cpu_switch, as if it were switching
 * from proc0.
 */
void
cpu_exit(p)
	register struct proc *p;
{
	alpha_fpstate_drop(p);

	(void) splhigh();
	cnt.v_swtch++;
	cpu_switch(p);
	panic("cpu_exit");
}

void
cpu_wait(p)
	struct proc *p;
{
	/* drop per-process resources */
	pmap_dispose_proc(p);

	/* and clean-out the vmspace */
	vmspace_free(p->p_vmspace);
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
int
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{

	return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
	    (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL,
	    p));
}

#ifdef notyet
static void
setredzone(pte, vaddr)
	u_short *pte;
	caddr_t vaddr;
{
/* eventually do this by setting up an expand-down stack segment
   for ss0: selector, allowing stack access down to top of u.
   this means though that protection violations need to be handled
   thru a double fault exception that must do an integral task
   switch to a known good context, within which a dump can be
   taken. a sensible scheme might be to save the initial context
   used by sched (that has physical memory mapped 1:1 at bottom)
   and take the dump while still in mapped mode */
}
#endif

/*
 * Map an IO request into kernel virtual address space.
 *
 * All requests are (re)mapped into kernel VA space.
 * Notice that we use b_bufsize for the size of the buffer
 * to be mapped.  b_bcount might be modified by the driver.
 */
void
vmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr, v, kva;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page(bp->b_data);
	    addr < bp->b_data + bp->b_bufsize;
	    addr += PAGE_SIZE, v += PAGE_SIZE) {
		/*
		 * Do the vm_fault if needed; do the copy-on-write thing
		 * when reading stuff off device into memory.
		 */
		vm_fault_quick(addr,
		    (bp->b_flags&B_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
		pa = trunc_page(pmap_kextract((vm_offset_t) addr));
		if (pa == 0)
			panic("vmapbuf: page not present");
		vm_page_hold(PHYS_TO_VM_PAGE(pa));
		pmap_kenter((vm_offset_t) v, pa);
	}

	kva = bp->b_saveaddr;
	bp->b_saveaddr = bp->b_data;
	bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
}

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_data.
 */
void
vunmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	for (addr = (caddr_t)trunc_page(bp->b_data);
	    addr < bp->b_data + bp->b_bufsize;
	    addr += PAGE_SIZE) {
		pa = trunc_page(pmap_kextract((vm_offset_t) addr));
		pmap_kremove((vm_offset_t) addr);
		vm_page_unhold(PHYS_TO_VM_PAGE(pa));
	}

	bp->b_data = bp->b_saveaddr;
}
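
/*
 * A sketch of how a physical-I/O path pairs the two routines above,
 * bracketing the device transfer.  Illustrative only: raw_io_sketch()
 * and example_devstrategy() are hypothetical, and the buf is assumed
 * to have been prepared physio()-style, with b_data pointing at the
 * user buffer, b_bufsize set, and b_saveaddr holding reserved kernel
 * VA for the mapping.
 */
#if 0
static void
raw_io_sketch(struct buf *bp)
{
	bp->b_flags |= B_PHYS;		/* buffer lives in user space */
	vmapbuf(bp);			/* fault in, hold and map the pages */
	example_devstrategy(bp);	/* start the transfer */
	biowait(bp);			/* sleep until it completes */
	vunmapbuf(bp);			/* unmap and drop the page holds */
	bp->b_flags &= ~B_PHYS;
}
#endif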

/*
 * Force reset the processor by invalidating the entire address space!
 */
void
cpu_reset()
{
	prom_halt(0);
}

int
grow_stack(p, sp)
	struct proc *p;
	size_t sp;
{
	int rv;

	rv = vm_map_growstack(p, sp);
	if (rv != KERN_SUCCESS)
		return (0);

	return (1);
}

static int cnt_prezero;

SYSCTL_INT(_machdep, OID_AUTO, cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");

/*
 * Implement the pre-zeroed page mechanism.
 * This routine is called from the idle loop.
 */

#define ZIDLE_LO(v)	((v) * 2 / 3)
#define ZIDLE_HI(v)	((v) * 4 / 5)

int
vm_page_zero_idle()
{
	static int free_rover;
	static int zero_state;
	vm_page_t m;
	int s;

	/*
	 * Attempt to maintain between 2/3 and 4/5 of our free pages in a
	 * PG_ZERO'd state (see ZIDLE_LO and ZIDLE_HI above).  Add some
	 * hysteresis to (attempt to) avoid generally zeroing a page when
	 * the system is near steady-state.  Otherwise we might get
	 * 'flutter' during disk I/O / IPC or fast sleeps.  We also do not
	 * want to be continuously zeroing pages because doing so may flush
	 * our L1 and L2 caches too much.
	 */

	if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
		return(0);
	if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
		return(0);

#ifdef SMP
	if (try_mplock()) {
#endif
		s = splvm();
		m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
		zero_state = 0;
		if (m != NULL && (m->flags & PG_ZERO) == 0) {
			vm_page_queues[m->queue].lcnt--;
			TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
			m->queue = PQ_NONE;
			splx(s);
#if 0
			rel_mplock();
#endif
			pmap_zero_page(VM_PAGE_TO_PHYS(m));
#if 0
			get_mplock();
#endif
			(void)splvm();
			vm_page_flag_set(m, PG_ZERO);
			m->queue = PQ_FREE + m->pc;
			vm_page_queues[m->queue].lcnt++;
			TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m,
			    pageq);
			++vm_page_zero_count;
			++cnt_prezero;
			if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
				zero_state = 1;
		}
		free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
		splx(s);
#ifdef SMP
		rel_mplock();
#endif
		return (1);
#ifdef SMP
	}
#endif
	return (0);
}
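
/*
 * Worked example of the hysteresis above (hypothetical numbers): with
 * cnt.v_free_count == 1000, ZIDLE_LO yields 666 and ZIDLE_HI yields 800.
 * The idle loop keeps zeroing free pages until 800 of them are PG_ZERO'd,
 * then stops, and does not start zeroing again until the zeroed count
 * falls below 666.
 */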

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm()
{
#if 0
	if (busdma_swi_pending != 0)
		busdma_swi();
#endif
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */
int
is_physical_memory(addr)
	vm_offset_t addr;
{
	/*
	 * stuff other tests for known memory-mapped devices (PCI?)
	 * here
	 */

	return 1;
}