vm_machdep.c revision 608
/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 *	$Id$
 */

#include "npx.h"
#include "param.h"
#include "systm.h"
#include "proc.h"
#include "malloc.h"
#include "buf.h"
#include "user.h"

#include "../include/cpu.h"

#include "vm/vm.h"
#include "vm/vm_kern.h"

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	register struct user *up = p2->p_addr;
	int offset, addr, i;
	extern char kstack[];
	extern int mvesp();

	/*
	 * Copy pcb and stack from proc p1 to p2.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree;
	 * this is tricky, as the final pcb is constructed by savectx,
	 * but its frame isn't yet on the stack when the stack is copied.
	 * swtch compensates for this when the child eventually runs.
	 * This should be done differently, with a single call
	 * that copies and updates the pcb+stack,
	 * replacing the bcopy and savectx.
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	offset = mvesp() - (int)kstack;
	bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
	    (unsigned) ctob(UPAGES) - offset);
	p2->p_regs = p1->p_regs;

	/*
	 * Wire top of address space of child to its kstack.
	 * First, fault in a page of pte's to map it.
	 */
	addr = trunc_page((u_int)vtopte(kstack));
	vm_map_pageable(&p2->p_vmspace->vm_map, addr, addr+NBPG, FALSE);
	for (i = 0; i < UPAGES; i++)
		pmap_enter(&p2->p_vmspace->vm_pmap, kstack+i*NBPG,
		    pmap_extract(kernel_pmap, ((int)p2->p_addr)+i*NBPG),
		    /*
		     * The user area has to be mapped writable because
		     * it contains the kernel stack (when CR0_WP is set
		     * on a 486 there is no user-read/kernel-write
		     * mode).  It is protected from user mode access
		     * by the segment limits.
		     */
		    VM_PROT_READ|VM_PROT_WRITE, TRUE);
	pmap_activate(&p2->p_vmspace->vm_pmap, &up->u_pcb);

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero from setjmp.
	 */
	if (savectx(up, 1)) {
		/*
		 * Return 1 in child.
		 */
		return (1);
	}
	return (0);
}
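
/*
 * Illustrative sketch, not part of this file: the machine-independent
 * fork code is assumed to use cpu_fork's return value to split the two
 * return paths, roughly
 *
 *	if (cpu_fork(p1, p2))
 *		return (0);	child resumes here via savectx/swtch
 *	... parent finishes setting up the child ...
 */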

#ifdef notyet
/*
 * cpu_exit is called as the last action during exit.
 *
 * We change to an inactive address space and a "safe" stack,
 * passing thru an argument to the new stack.  Now, safely isolated
 * from the resources we're shedding, we release the address space
 * and any remaining machine-dependent resources, including the
 * memory for the user structure and kernel stack.
 *
 * Next, we assign a dummy context to be written over by swtch,
 * calling it to send this process off to oblivion.
 * [The nullpcb allows us to minimize cost in swtch() by not having
 * a special case].
 */
struct proc *swtch_to_inactive();
volatile void
cpu_exit(p)
	register struct proc *p;
{
	static struct pcb nullpcb;	/* pcb to overwrite on last swtch */

#if NNPX > 0
	npxexit(p);
#endif	/* NNPX */

	/* move to inactive space and stack, passing arg across */
	p = swtch_to_inactive(p);

	/* drop per-process resources */
	vmspace_free(p->p_vmspace);
	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));

	p->p_addr = (struct user *) &nullpcb;
	splclock();
	swtch();
	/* NOTREACHED */
}
#else
void
cpu_exit(p)
	register struct proc *p;
{

#if NNPX > 0
	npxexit(p);
#endif	/* NNPX */
	splclock();
	swtch();
	/*
	 * This is to shut up the compiler, and if swtch() failed I suppose
	 * this would be a good thing.  This keeps gcc happy because panic
	 * is a volatile void function as well.
	 */
	panic("cpu_exit");
}

/*
 * Release the remaining machine-dependent resources (address space,
 * user area and kernel stack) of an exited process.
 */
cpu_wait(p)
	struct proc *p;
{

	/* drop per-process resources */
	vmspace_free(p->p_vmspace);
	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
}
#endif

/*
 * Set a red zone in the kernel stack after the u. area.
 */
setredzone(pte, vaddr)
	u_short *pte;
	caddr_t vaddr;
{
/* eventually do this by setting up an expand-down stack segment
   for ss0: selector, allowing stack access down to top of u.
   this means though that protection violations need to be handled
   thru a double fault exception that must do an integral task
   switch to a known good context, within which a dump can be
   taken.  a sensible scheme might be to save the initial context
   used by sched (that has physical memory mapped 1:1 at bottom)
   and take the dump while still in mapped mode */
}

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLBYTES.
 */
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register struct pte *fpte, *tpte;

	if (size % CLBYTES)
		panic("pagemove");
	fpte = kvtopte(from);
	tpte = kvtopte(to);
	while (size > 0) {
		/* move the pte to the new address and invalidate the old */
		*tpte++ = *fpte;
		*(int *)fpte++ = 0;
		from += NBPG;
		to += NBPG;
		size -= NBPG;
	}
	tlbflush();
}
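
/*
 * Illustrative sketch, not from this file: pagemove is assumed to be
 * used by kernel callers (e.g. the buffer cache) to hand pages from one
 * kernel buffer to another without copying the data, roughly
 *
 *	pagemove(oldva, newva, nbytes);		nbytes a multiple of CLBYTES
 *
 * Only the PTEs move, so both virtual ranges must lie in the Sysmap.
 */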

/*
 * Convert kernel VA to physical address
 */
kvtop(addr)
	register caddr_t addr;
{
	vm_offset_t va;

	va = pmap_extract(kernel_pmap, (vm_offset_t)addr);
	if (va == 0)
		panic("kvtop: zero page frame");
	return((int)va);
}

#ifdef notdef
/*
 * The probe[rw] routines should probably be redone in assembler
 * for efficiency.
 */
prober(addr)
	register u_int addr;
{
	register int page;
	register struct proc *p;

	if (addr >= USRSTACK)
		return(0);
	p = u.u_procp;
	page = btop(addr);
	if (page < dptov(p, p->p_dsize) || page > sptov(p, p->p_ssize))
		return(1);
	return(0);
}

probew(addr)
	register u_int addr;
{
	register int page;
	register struct proc *p;

	if (addr >= USRSTACK)
		return(0);
	p = u.u_procp;
	page = btop(addr);
	if (page < dptov(p, p->p_dsize) || page > sptov(p, p->p_ssize))
		return((*(int *)vtopte(p, page) & PG_PROT) == PG_UW);
	return(0);
}

/*
 * NB: assumes a physically contiguous kernel page table
 * (makes life a LOT simpler).
 */
kernacc(addr, count, rw)
	register u_int addr;
	int count, rw;
{
	register struct pde *pde;
	register struct pte *pte;
	register int ix, cnt;
	extern long Syssize;

	if (count <= 0)
		return(0);
	pde = (struct pde *)((u_int)u.u_procp->p_p0br + u.u_procp->p_szpt * NBPG);
	ix = (addr & PD_MASK) >> PD_SHIFT;
	cnt = ((addr + count + (1 << PD_SHIFT) - 1) & PD_MASK) >> PD_SHIFT;
	cnt -= ix;
	for (pde += ix; cnt; cnt--, pde++)
		if (pde->pd_v == 0)
			return(0);
	ix = btop(addr-KERNBASE);
	cnt = btop(addr-KERNBASE+count+NBPG-1);
	if (cnt > (int)&Syssize)
		return(0);
	cnt -= ix;
	for (pte = &Sysmap[ix]; cnt; cnt--, pte++)
		if (pte->pg_v == 0 /*|| (rw == B_WRITE && pte->pg_prot == 1)*/)
			return(0);
	return(1);
}

useracc(addr, count, rw)
	register u_int addr;
	int count, rw;
{
	register int (*func)();
	register u_int addr2;
	extern int prober(), probew();

	if (count <= 0)
		return(0);
	addr2 = addr;
	addr += count;
	func = (rw == B_READ) ? prober : probew;
	do {
		if ((*func)(addr2) == 0)
			return(0);
		addr2 = (addr2 + NBPG) & ~PGOFSET;
	} while (addr2 < addr);
	return(1);
}
#endif

extern vm_map_t phys_map;

/*
 * Map an IO request into kernel virtual address space.  Requests fall into
 * one of five categories:
 *
 *	B_PHYS|B_UAREA:	User u-area swap.
 *			Address is relative to start of u-area (p_addr).
 *	B_PHYS|B_PAGET:	User page table swap.
 *			Address is a kernel VA in usrpt (Usrptmap).
 *	B_PHYS|B_DIRTY:	Dirty page push.
 *			Address is a VA in proc2's address space.
 *	B_PHYS|B_PGIN:	Kernel pagein of user pages.
 *			Address is VA in user's address space.
 *	B_PHYS:		User "raw" IO request.
 *			Address is VA in user's address space.
 *
 * All requests are (re)mapped into kernel VA space via the phys_map
 * (a name with only slightly more meaning than "kernelmap").
 */
vmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr;
	register long flags = bp->b_flags;
	struct proc *p;
	int off;
	vm_offset_t kva;
	register vm_offset_t pa;

	if ((flags & B_PHYS) == 0)
		panic("vmapbuf");
	addr = bp->b_saveaddr = bp->b_un.b_addr;
	off = (int)addr & PGOFSET;
	p = bp->b_proc;
	npf = btoc(round_page(bp->b_bcount + off));
	kva = kmem_alloc_wait(phys_map, ctob(npf));
	bp->b_un.b_addr = (caddr_t) (kva + off);
	while (npf--) {
		pa = pmap_extract(&p->p_vmspace->vm_pmap, (vm_offset_t)addr);
		if (pa == 0)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa),
		    VM_PROT_READ|VM_PROT_WRITE, TRUE);
		addr += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
}

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_addr.
 */
vunmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr = bp->b_un.b_addr;
	vm_offset_t kva;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	npf = btoc(round_page(bp->b_bcount + ((int)addr & PGOFSET)));
	kva = (vm_offset_t)((int)addr & ~PGOFSET);
	kmem_free_wakeup(phys_map, kva, ctob(npf));
	bp->b_un.b_addr = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}
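
/*
 * Illustrative sketch, not from this file: the raw-device I/O path
 * (physio) is assumed to bracket each transfer with these routines,
 * roughly
 *
 *	vmapbuf(bp);		double-map the user pages into phys_map
 *	(*strat)(bp);		start the transfer on the kernel mapping
 *	biowait(bp);		wait for the transfer to complete
 *	vunmapbuf(bp);		release the phys_map mapping
 */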

/*
 * Force reset the processor by invalidating the entire address space!
 * With no valid mappings left, the next instruction fetch faults, the
 * fault cannot be handled, and the resulting triple fault resets the CPU.
 */
cpu_reset()
{

	/* force a shutdown by unmapping entire address space ! */
	bzero((caddr_t) PTD, NBPG);

	/* "good night, sweet prince .... <THUNK!>" */
	tlbflush();
	/* NOTREACHED */
}