vm_machdep.c revision 433
/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vm_machdep.c        7.3 (Berkeley) 5/13/91
 *
 * PATCHES MAGIC                LEVEL   PATCH THAT GOT US HERE
 * --------------------         -----   ----------------------
 * CURRENT PATCH LEVEL:         1       00154
 * --------------------         -----   ----------------------
 *
 * 20 Apr 93    Bruce Evans     New npx-0.5 code
 *
 */

/*
 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */
static char rcsid[] = "$Header: /a/cvs/386BSD/src/sys/i386/i386/vm_machdep.c,v 1.3 1993/07/27 10:52:21 davidg Exp $";

#include "param.h"
#include "systm.h"
#include "proc.h"
#include "malloc.h"
#include "buf.h"
#include "user.h"

#include "../include/cpu.h"

#include "vm/vm.h"
#include "vm/vm_kern.h"

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
cpu_fork(p1, p2)
        register struct proc *p1, *p2;
{
        register struct user *up = p2->p_addr;
        int offset, addr, i;
        extern char kstack[];
        extern int mvesp();

        /*
         * Copy pcb and stack from proc p1 to p2.
         * We do this as cheaply as possible, copying only the active
         * part of the stack.  The stack and pcb need to agree;
         * this is tricky, as the final pcb is constructed by savectx,
         * but its frame isn't yet on the stack when the stack is copied.
         * swtch compensates for this when the child eventually runs.
         * This should be done differently, with a single call
         * that copies and updates the pcb+stack,
         * replacing the bcopy and savectx.
         */
        p2->p_addr->u_pcb = p1->p_addr->u_pcb;
        offset = mvesp() - (int)kstack;
        bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
            (unsigned) ctob(UPAGES) - offset);
        p2->p_regs = p1->p_regs;

        /*
         * Wire top of address space of child to its kstack.
         * First, fault in a page of pte's to map it.
         */
        addr = trunc_page((u_int)vtopte(kstack));
        vm_map_pageable(&p2->p_vmspace->vm_map, addr, addr + NBPG, FALSE);
        for (i = 0; i < UPAGES; i++)
                pmap_enter(&p2->p_vmspace->vm_pmap, kstack + i * NBPG,
                    pmap_extract(kernel_pmap, ((int)p2->p_addr) + i * NBPG),
                    /*
                     * The user area has to be mapped writable because
                     * it contains the kernel stack (when CR0_WP is set
                     * on a 486 there is no user-read/kernel-write
                     * mode).  It is protected from user mode access
                     * by the segment limits.
                     */
                    VM_PROT_READ|VM_PROT_WRITE, TRUE);
        pmap_activate(&p2->p_vmspace->vm_pmap, &up->u_pcb);

        /*
         * Arrange for a non-local goto when the new process
         * is started, to resume here, returning nonzero from
         * savectx (which behaves like setjmp).
         */
        if (savectx(up, 1)) {
                /*
                 * Return 1 in child.
                 */
                return (1);
        }
        return (0);
}
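/*
 * Caller's view of the protocol above (an illustrative sketch, not a
 * quote of the machine-independent fork code): cpu_fork() acts like a
 * kernel-level setjmp, so the call effectively returns twice:
 *
 *      if (cpu_fork(p1, p2)) {
 *              ... child: first runs when swtch() selects p2,
 *              ... on the copied (double-mapped) kernel stack
 *      } else {
 *              ... parent: p2 is set up and may be made runnable
 *      }
 */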
#ifdef notyet
/*
 * cpu_exit is called as the last action during exit.
 *
 * We change to an inactive address space and a "safe" stack,
 * passing thru an argument to the new stack.  Now, safely isolated
 * from the resources we're shedding, we release the address space
 * and any remaining machine-dependent resources, including the
 * memory for the user structure and kernel stack.
 *
 * Next, we assign a dummy context to be written over by swtch,
 * calling it to send this process off to oblivion.
 * [The nullpcb allows us to minimize cost in swtch() by not having
 * a special case].
 */
struct proc *swtch_to_inactive();
volatile void
cpu_exit(p)
        register struct proc *p;
{
        static struct pcb nullpcb;      /* pcb to overwrite on last swtch */

#ifdef NPX
        npxexit(p);
#endif

        /* move to inactive space and stack, passing arg across */
        p = swtch_to_inactive(p);

        /* drop per-process resources */
        vmspace_free(p->p_vmspace);
        kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));

        p->p_addr = (struct user *) &nullpcb;
        splclock();
        swtch();
        /* NOTREACHED */
}
#else
volatile void
cpu_exit(p)
        register struct proc *p;
{

#ifdef NPX
        npxexit(p);
#endif
        splclock();
        swtch();
        /* NOTREACHED */
        for (;;)
                ;
}

cpu_wait(p)
        struct proc *p;
{

        /* drop per-process resources */
        vmspace_free(p->p_vmspace);
        kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
}
#endif

/*
 * Set a red zone in the kernel stack after the u. area.
 */
setredzone(pte, vaddr)
        u_short *pte;
        caddr_t vaddr;
{
        /*
         * Eventually do this by setting up an expand-down stack segment
         * for the ss0: selector, allowing stack access down to the top
         * of the u. area.  This means, though, that protection violations
         * need to be handled thru a double fault exception that must do
         * an integral task switch to a known good context, within which
         * a dump can be taken.  A sensible scheme might be to save the
         * initial context used by sched (which has physical memory
         * mapped 1:1 at the bottom) and take the dump while still in
         * mapped mode.
         */
}
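/*
 * A minimal interim scheme (hedged sketch only; it assumes 'pte'
 * addresses the mapping of the page just below the kernel stack and
 * that sacrificing that page is acceptable):
 *
 *      *pte &= ~PG_V;          invalidate the red-zone page, so a
 *      tlbflush();             stack overflow faults instead of
 *                              silently corrupting the u. area
 *
 * The expand-down ss0 segment described above is preferable because
 * it does not sacrifice a page of kernel stack.
 */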
/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLBYTES.
 */
pagemove(from, to, size)
        register caddr_t from, to;
        int size;
{
        register struct pte *fpte, *tpte;

        if (size % CLBYTES)
                panic("pagemove");
        fpte = kvtopte(from);
        tpte = kvtopte(to);
        while (size > 0) {
                *tpte++ = *fpte;        /* retarget the mapping */
                *(int *)fpte++ = 0;     /* and invalidate the old one */
                from += NBPG;
                to += NBPG;
                size -= NBPG;
        }
        tlbflush();
}

/*
 * Convert kernel VA to physical address
 */
kvtop(addr)
        register caddr_t addr;
{
        vm_offset_t pa;

        pa = pmap_extract(kernel_pmap, (vm_offset_t)addr);
        if (pa == 0)
                panic("kvtop: zero page frame");
        return((int)pa);
}

#ifdef notdef
/*
 * The probe[rw] routines should probably be redone in assembler
 * for efficiency.
 */
prober(addr)
        register u_int addr;
{
        register int page;
        register struct proc *p;

        if (addr >= USRSTACK)
                return(0);
        p = u.u_procp;
        page = btop(addr);
        if (page < dptov(p, p->p_dsize) || page > sptov(p, p->p_ssize))
                return(1);
        return(0);
}

probew(addr)
        register u_int addr;
{
        register int page;
        register struct proc *p;

        if (addr >= USRSTACK)
                return(0);
        p = u.u_procp;
        page = btop(addr);
        if (page < dptov(p, p->p_dsize) || page > sptov(p, p->p_ssize))
                return((*(int *)vtopte(p, page) & PG_PROT) == PG_UW);
        return(0);
}

/*
 * NB: assumes a physically contiguous kernel page table
 * (makes life a LOT simpler).
 */
kernacc(addr, count, rw)
        register u_int addr;
        int count, rw;
{
        register struct pde *pde;
        register struct pte *pte;
        register int ix, cnt;
        extern long Syssize;

        if (count <= 0)
                return(0);
        pde = (struct pde *)((u_int)u.u_procp->p_p0br + u.u_procp->p_szpt * NBPG);
        ix = (addr & PD_MASK) >> PD_SHIFT;
        cnt = ((addr + count + (1 << PD_SHIFT) - 1) & PD_MASK) >> PD_SHIFT;
        cnt -= ix;
        for (pde += ix; cnt; cnt--, pde++)
                if (pde->pd_v == 0)
                        return(0);
        ix = btop(addr - 0xfe000000);
        cnt = btop(addr - 0xfe000000 + count + NBPG - 1);
        if (cnt > (int)&Syssize)
                return(0);
        cnt -= ix;
        for (pte = &Sysmap[ix]; cnt; cnt--, pte++)
                if (pte->pg_v == 0 /*|| (rw == B_WRITE && pte->pg_prot == 1)*/)
                        return(0);
        return(1);
}

useracc(addr, count, rw)
        register u_int addr;
        int count, rw;
{
        register int (*func)();
        register u_int addr2;
        extern int prober(), probew();

        if (count <= 0)
                return(0);
        addr2 = addr;
        addr += count;
        func = (rw == B_READ) ? prober : probew;
        do {
                if ((*func)(addr2) == 0)
                        return(0);
                addr2 = (addr2 + NBPG) & ~PGOFSET;
        } while (addr2 < addr);
        return(1);
}
#endif

extern vm_map_t phys_map;
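/*
 * The vmapbuf()/vunmapbuf() pair below brackets raw (physical) I/O.
 * A sketch of a typical caller, hypothetical and simplified, not a
 * quote of the actual physio() code:
 *
 *      vmapbuf(bp);            double-map the user pages into phys_map
 *      ... start the device transfer described by bp and sleep ...
 *      vunmapbuf(bp);          free the kernel mapping, restore b_addr
 */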
/*
 * Map an IO request into kernel virtual address space.  Requests fall into
 * one of five categories:
 *
 *      B_PHYS|B_UAREA: User u-area swap.
 *                      Address is relative to start of u-area (p_addr).
 *      B_PHYS|B_PAGET: User page table swap.
 *                      Address is a kernel VA in usrpt (Usrptmap).
 *      B_PHYS|B_DIRTY: Dirty page push.
 *                      Address is a VA in proc2's address space.
 *      B_PHYS|B_PGIN:  Kernel pagein of user pages.
 *                      Address is VA in user's address space.
 *      B_PHYS:         User "raw" IO request.
 *                      Address is VA in user's address space.
 *
 * All requests are (re)mapped into kernel VA space via the useriomap
 * (a name with only slightly more meaning than "kernelmap")
 */
vmapbuf(bp)
        register struct buf *bp;
{
        register int npf;
        register caddr_t addr;
        register long flags = bp->b_flags;
        struct proc *p;
        int off;
        vm_offset_t kva;
        register vm_offset_t pa;

        if ((flags & B_PHYS) == 0)
                panic("vmapbuf");
        addr = bp->b_saveaddr = bp->b_un.b_addr;
        off = (int)addr & PGOFSET;
        p = bp->b_proc;
        npf = btoc(round_page(bp->b_bcount + off));
        kva = kmem_alloc_wait(phys_map, ctob(npf));
        bp->b_un.b_addr = (caddr_t) (kva + off);
        while (npf--) {
                pa = pmap_extract(&p->p_vmspace->vm_pmap, (vm_offset_t)addr);
                if (pa == 0)
                        panic("vmapbuf: null page frame");
                pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa),
                    VM_PROT_READ|VM_PROT_WRITE, TRUE);
                addr += PAGE_SIZE;
                kva += PAGE_SIZE;
        }
}

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_addr.
 */
vunmapbuf(bp)
        register struct buf *bp;
{
        register int npf;
        register caddr_t addr = bp->b_un.b_addr;
        vm_offset_t kva;

        if ((bp->b_flags & B_PHYS) == 0)
                panic("vunmapbuf");
        npf = btoc(round_page(bp->b_bcount + ((int)addr & PGOFSET)));
        kva = (vm_offset_t)((int)addr & ~PGOFSET);
        kmem_free_wakeup(phys_map, kva, ctob(npf));
        bp->b_un.b_addr = bp->b_saveaddr;
        bp->b_saveaddr = NULL;
}

/*
 * Force reset the processor by invalidating the entire address space!
 */
cpu_reset()
{
        /* force a shutdown by unmapping entire address space! */
        bzero((caddr_t) PTD, NBPG);

        /* "good night, sweet prince .... <THUNK!>" */
        tlbflush();
        /* NOTREACHED */
}
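/*
 * Why the above forces a reset (a sketch of the mechanism; the exact
 * escalation is CPU-model dependent): with the page directory zeroed
 * and the TLB flushed, the next instruction fetch faults.  The page
 * fault handler can no longer be fetched either, so the fault
 * escalates to a double fault, and the unservable double fault
 * escalates to a triple fault, which the i386/i486 answers with a
 * processor shutdown cycle that PC motherboard logic turns into a
 * hardware reset.
 */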