/* vm_machdep.c revision 89044 */
/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * Copyright (c) 2001 Jake Burkholder.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 *	from: FreeBSD: src/sys/i386/i386/vm_machdep.c,v 1.167 2001/07/12
 * $FreeBSD: head/sys/sparc64/sparc64/vm_machdep.c 89044 2002-01-08 05:10:58Z jake $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/unistd.h>
#include <sys/user.h>
#include <sys/vmmeter.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <machine/cache.h>
#include <machine/cpu.h>
#include <machine/fsr.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/ofw_machdep.h>
#include <machine/tstate.h>

/*
 * Release machine-dependent state at process exit: drop the exiting
 * process's reference on its (possibly shared) user trap table and
 * free the table when the last reference goes away.
 */
void
cpu_exit(struct thread *td)
{
	struct md_utrap *ut;

	if ((ut = td->td_proc->p_md.md_utrap) != NULL) {
		/* The table is refcounted; cpu_fork() shares it with children. */
		ut->ut_refcnt--;
		if (ut->ut_refcnt == 0)
			free(ut, M_SUBPROC);
		td->td_proc->p_md.md_utrap = NULL;
	}
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, int flags)
{
	struct thread *td2;
	struct md_utrap *ut;
	struct trapframe *tf;
	struct frame *fp;
	struct pcb *pcb;

	KASSERT(td1 == curthread || td1 == thread0,
	    ("cpu_fork: p1 not curproc and not proc0"));

	/* Nothing to do unless we are actually creating a new process. */
	if ((flags & RFPROC) == 0)
		return;

	/* Share the parent's user trap table with the child (refcounted). */
	if ((ut = td1->td_proc->p_md.md_utrap) != NULL)
		ut->ut_refcnt++;
	p2->p_md.md_utrap = ut;

	td2 = &p2->p_thread;
	/* The pcb must be aligned on a 64-byte boundary. */
	pcb = (struct pcb *)((td2->td_kstack + KSTACK_PAGES * PAGE_SIZE -
	    sizeof(struct pcb)) & ~0x3fUL);
	td2->td_pcb = pcb;

	/*
	 * Ensure that p1's pcb is up to date: if the parent has live FPU
	 * state (FEF set in the trapframe), save it into the pcb first.
	 */
	if ((td1->td_frame->tf_fprs & FPRS_FEF) != 0) {
		mtx_lock_spin(&sched_lock);
		savefpctx(&td1->td_pcb->pcb_fpstate);
		mtx_unlock_spin(&sched_lock);
	}
	/* Make sure the copied windows are spilled. */
	flushw();
	/* Copy the pcb (this will copy the windows saved in the pcb, too). */
	bcopy(td1->td_pcb, pcb, sizeof(*pcb));

	/*
	 * Create a new fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies most of the user mode register values.
	 * The trapframe sits directly below the pcb on the kernel stack.
	 */
	tf = (struct trapframe *)pcb - 1;
	bcopy(td1->td_frame, tf, sizeof(*tf));

	tf->tf_out[0] = 0;			/* Child returns zero */
	tf->tf_out[1] = 0;
	tf->tf_tstate &= ~TSTATE_XCC_C;		/* success */
	tf->tf_fprs = 0;			/* Child starts with FPU disabled */

	td2->td_frame = tf;
	/*
	 * Build the initial frame below the trapframe.  The locals carry
	 * the start function, its argument and the trapframe; kernel
	 * threads override locals 0/1 via cpu_set_fork_handler().
	 */
	fp = (struct frame *)tf - 1;
	fp->f_local[0] = (u_long)fork_return;
	fp->f_local[1] = (u_long)td2;
	fp->f_local[2] = (u_long)tf;
	pcb->pcb_fp = (u_long)fp - SPOFF;
	/*
	 * NOTE(review): the -8 bias makes the standard "jmp %pc+8" return
	 * sequence land exactly on fork_trampoline -- confirm against
	 * cpu_switch().
	 */
	pcb->pcb_pc = (u_long)fork_trampoline - 8;

	/*
	 * Now, cpu_switch() can schedule the new process.
	 */
}

/*
 * Reboot the machine: ask the Open Firmware "boot" client service to
 * boot again from the path recorded in /chosen:bootpath.
 */
void
cpu_reset(void)
{
	static char bspec[64] = "";
	phandle_t chosen;
	static struct {
		cell_t	name;
		cell_t	nargs;
		cell_t	nreturns;
		cell_t	bootspec;
	} args = {
		(cell_t)"boot",
		1,
		0,
		(cell_t)bspec
	};

	/* Fall back to an empty bootspec if the property cannot be read. */
	if ((chosen = OF_finddevice("/chosen")) != 0) {
		if (OF_getprop(chosen, "bootpath", bspec, sizeof(bspec)) == -1)
			bspec[0] = '\0';
		/* OF_getprop() need not NUL-terminate; make sure we do. */
		bspec[sizeof(bspec) - 1] = '\0';
	}

	openfirmware_exit(&args);
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
{
	struct frame *fp;
	struct pcb *pcb;

	/*
	 * Overwrite the start function and argument that cpu_fork() stored
	 * in the initial frame's locals.
	 */
	pcb = td->td_pcb;
	fp = (struct frame *)(pcb->pcb_fp + SPOFF);
	fp->f_local[0] = (u_long)func;
	fp->f_local[1] = (u_long)arg;
}

/* No machine-dependent work is needed when reaping a process. */
void
cpu_wait(struct proc *p)
{
}

int
is_physical_memory(vm_offset_t addr)
{

	/* There is no device memory in the midst of the normal RAM. */
	return (1);
}

void
swi_vm(void *v)
{

	/*
	 * Nothing to do here yet - busdma bounce buffers are not yet
	 * implemented.
	 */
}

/*
 * quick version of vm_fault: touch the byte at v to fault the page in,
 * rewriting it for write probes.
 * NOTE(review): fubyte()/subyte() presumably return -1 on fault, which is
 * propagated to the caller -- confirm against their definitions.
 */
int
vm_fault_quick(caddr_t v, int prot)
{
	int r;

	if (prot & VM_PROT_WRITE)
		r = subyte(v, fubyte(v));
	else
		r = fubyte(v);
	return(r);
}

/*
 * Map an IO request into kernel virtual address space.
 *
 * All requests are (re)mapped into kernel VA space.
 * Notice that we use b_bufsize for the size of the buffer
 * to be mapped.  b_bcount might be modified by the driver.
 */
void
vmapbuf(struct buf *bp)
{
	caddr_t addr, kva;
	vm_offset_t pa;
	int pidx;
	struct vm_page *m;
	pmap_t pmap;

	GIANT_REQUIRED;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	/* Wire and collect each user page backing the buffer. */
	pmap = &curproc->p_vmspace->vm_pmap;
	for (addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data), pidx = 0;
	    addr < bp->b_data + bp->b_bufsize; addr += PAGE_SIZE, pidx++) {
		/*
		 * Do the vm_fault if needed; do the copy-on-write thing
		 * when reading stuff off device into memory.
		 */
		vm_fault_quick((addr >= bp->b_data) ? addr : bp->b_data,
		    (bp->b_iocmd == BIO_READ) ? (VM_PROT_READ | VM_PROT_WRITE) :
		    VM_PROT_READ);
		pa = trunc_page(pmap_extract(pmap, (vm_offset_t)addr));
		if (pa == 0)
			panic("vmapbuf: page not present");
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_hold(m);
		bp->b_pages[pidx] = m;
	}
	/* XXX: this check runs only after b_pages has already been filled. */
	if (pidx > btoc(MAXPHYS))
		panic("vmapbuf: mapped more than MAXPHYS");
	pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx);

	/* Swap b_data to the kernel mapping; remember the user address. */
	kva = bp->b_saveaddr;
	bp->b_npages = pidx;
	bp->b_saveaddr = bp->b_data;
	bp->b_data = kva + (((vm_offset_t)bp->b_data) & PAGE_MASK);
	if (CACHE_BADALIAS(trunc_page(bp->b_data),
	    trunc_page(bp->b_saveaddr))) {
		/*
		 * bp->b_data (the virtual address the buffer got mapped to in
		 * the kernel) is an illegal alias to the user address.
		 * If the kernel had mapped this buffer previously (during a
		 * past IO operation) at this address, there might still be
		 * stale but valid tagged data in the cache, so flush it.
		 * XXX: the kernel address should be selected such that this
		 * cannot happen.
		 * XXX: pmap_kenter() maps physically uncacheable right now, so
		 * this cannot happen.
		 */
		dcache_inval(pmap, (vm_offset_t)bp->b_data,
		    (vm_offset_t)bp->b_data + bp->b_bufsize - 1);
	}
}

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_addr.
 */
void
vunmapbuf(struct buf *bp)
{
	int pidx;
	int npages;

	GIANT_REQUIRED;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	/* Tear down the kernel mapping and unwire the pages. */
	npages = bp->b_npages;
	pmap_qremove(trunc_page((vm_offset_t)bp->b_data),
	    npages);
	for (pidx = 0; pidx < npages; pidx++)
		vm_page_unhold(bp->b_pages[pidx]);

	if (CACHE_BADALIAS(trunc_page(bp->b_data),
	    trunc_page(bp->b_saveaddr))) {
		/*
		 * bp->b_data (the virtual address the buffer got mapped to in
		 * the kernel) is an illegal alias to the user address.  In
		 * this case, D$ of the user address needs to be flushed to
		 * avoid the user reading stale data.
		 * XXX: the kernel address should be selected such that this
		 * cannot happen.
		 */
		dcache_inval(&curproc->p_vmspace->vm_pmap,
		    (vm_offset_t)bp->b_saveaddr, (vm_offset_t)bp->b_saveaddr +
		    bp->b_bufsize - 1);
	}
	/* Restore the original user address. */
	bp->b_data = bp->b_saveaddr;
}