/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 *	$Id: vm_machdep.c,v 1.10 1994/01/21 17:11:38 davidg Exp $
 */

#include "npx.h"
#include "param.h"
#include "systm.h"
#include "proc.h"
#include "malloc.h"
#include "buf.h"
#include "user.h"

#include "../include/cpu.h"

#include "vm/vm.h"
#include "vm/vm_kern.h"

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
int
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	register struct user *up = p2->p_addr;
	int offset, addr, i;
	extern char kstack[];
	extern int mvesp();

	/*
	 * Copy pcb and stack from proc p1 to p2.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree;
	 * this is tricky, as the final pcb is constructed by savectx,
	 * but its frame isn't yet on the stack when the stack is copied.
	 * swtch compensates for this when the child eventually runs.
	 * This should be done differently, with a single call
	 * that copies and updates the pcb+stack,
	 * replacing the bcopy and savectx.
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	offset = mvesp() - (int)kstack;
	bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
	    (unsigned) ctob(UPAGES) - offset);
	p2->p_regs = p1->p_regs;

	/*
	 * Wire top of address space of child to its kstack.
	 * First, fault in a page of pte's to map it.
	 */
#if 0
	addr = trunc_page((u_int)vtopte(kstack));
	vm_map_pageable(&p2->p_vmspace->vm_map, addr, addr+NBPG, FALSE);
	for (i = 0; i < UPAGES; i++)
		pmap_enter(&p2->p_vmspace->vm_pmap, kstack+i*NBPG,
		    pmap_extract(kernel_pmap, ((int)p2->p_addr)+i*NBPG),
		    /*
		     * The user area has to be mapped writable because
		     * it contains the kernel stack (when CR0_WP is set
		     * on a 486 there is no user-read/kernel-write
		     * mode).  It is protected from user mode access
		     * by the segment limits.
		     */
		    VM_PROT_READ|VM_PROT_WRITE, TRUE);
#endif
	pmap_activate(&p2->p_vmspace->vm_pmap, &up->u_pcb);

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero from setjmp.
	 */
	if (savectx(up, 1)) {
		/*
		 * Return 1 in child.
		 */
		return (1);
	}
	return (0);
}
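/*
 * Illustration (not part of this file): the machine-independent fork
 * code is expected to use cpu_fork()'s 1-in-child/0-in-parent return
 * convention roughly as in the hedged sketch below; this is not the
 * actual text of kern_fork.c.
 *
 *	if (cpu_fork(p1, p2))
 *		return (0);		(child: fork(2) reports pid 0)
 *	return (p2->p_pid);		(parent: report the child's pid)
 *
 * The child takes the first branch only when swtch() later resumes it
 * on the stack copied above, via the context saved by savectx().
 */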
#ifdef notyet
/*
 * cpu_exit is called as the last action during exit.
 *
 * We change to an inactive address space and a "safe" stack,
 * passing through an argument to the new stack.  Now, safely isolated
 * from the resources we're shedding, we release the address space
 * and any remaining machine-dependent resources, including the
 * memory for the user structure and kernel stack.
 *
 * Next, we assign a dummy context to be written over by swtch,
 * calling it to send this process off to oblivion.
 * [The nullpcb allows us to minimize cost in swtch() by not having
 * a special case].
 */
struct proc *swtch_to_inactive();
volatile void
cpu_exit(p)
	register struct proc *p;
{
	static struct pcb nullpcb;	/* pcb to overwrite on last swtch */

#if NNPX > 0
	npxexit(p);
#endif	/* NNPX */

	/* move to inactive space and stack, passing arg across */
	p = swtch_to_inactive(p);

	/* drop per-process resources */
	vmspace_free(p->p_vmspace);
	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));

	p->p_addr = (struct user *) &nullpcb;
	splclock();
	swtch();
	/* NOTREACHED */
}
#else
void
cpu_exit(p)
	register struct proc *p;
{

#if NNPX > 0
	npxexit(p);
#endif	/* NNPX */
	splclock();
	curproc = 0;
	swtch();
	/*
	 * This is to shut up the compiler, and if swtch() failed I suppose
	 * this would be a good thing.  This keeps gcc happy because panic
	 * is a volatile void function as well.
	 */
	panic("cpu_exit");
}

void
cpu_wait(p)
	struct proc *p;
{
/*	extern vm_map_t upages_map; */
	extern char kstack[];

	/* drop per-process resources */
	pmap_remove(vm_map_pmap(kernel_map), (vm_offset_t) p->p_addr,
	    ((vm_offset_t) p->p_addr) + ctob(UPAGES));
	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
	vmspace_free(p->p_vmspace);
}
#endif

/*
 * Set a red zone in the kernel stack after the u. area.
 */
void
setredzone(pte, vaddr)
	u_short *pte;
	caddr_t vaddr;
{
	/*
	 * Eventually do this by setting up an expand-down stack segment
	 * for the ss0: selector, allowing stack access down to the top
	 * of the u. area.  This means, though, that protection violations
	 * need to be handled through a double fault exception that must
	 * do an integral task switch to a known good context, within
	 * which a dump can be taken.  A sensible scheme might be to save
	 * the initial context used by sched (which has physical memory
	 * mapped 1:1 at the bottom) and take the dump while still in
	 * mapped mode.
	 */
}

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLSIZE.
 */
void
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register struct pte *fpte, *tpte;

	if (size % CLBYTES)
		panic("pagemove");
	fpte = kvtopte(from);
	tpte = kvtopte(to);
	while (size > 0) {
		*tpte++ = *fpte;
		*(int *)fpte++ = 0;
		from += NBPG;
		to += NBPG;
		size -= NBPG;
	}
	tlbflush();
}
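/*
 * Usage note (illustrative, not from this file): pagemove() transfers
 * mappings, not data.  Classically, the 4.3BSD buffer cache's allocbuf()
 * used it to shuffle pages between buffers when resizing them, along
 * the lines of the hypothetical fragment below; `reserve_va' and
 * `shrinkage' are made-up names, and `shrinkage' must be a multiple of
 * CLBYTES or pagemove() panics:
 *
 *	pagemove(bp->b_un.b_addr + desired_size, reserve_va, shrinkage);
 *
 * Afterwards the pages formerly mapped at the source address appear at
 * the destination, and the source PTEs are left invalid.
 */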
/*
 * Convert kernel VA to physical address
 */
u_long
kvtop(void *addr)
{
	vm_offset_t va;

	va = pmap_extract(kernel_pmap, (vm_offset_t)addr);
	if (va == 0)
		panic("kvtop: zero page frame");
	return ((int)va);
}

extern vm_map_t phys_map;

/*
 * Map an IO request into kernel virtual address space.  Requests fall into
 * one of five categories:
 *
 *	B_PHYS|B_UAREA:	User u-area swap.
 *			Address is relative to start of u-area (p_addr).
 *	B_PHYS|B_PAGET:	User page table swap.
 *			Address is a kernel VA in usrpt (Usrptmap).
 *	B_PHYS|B_DIRTY:	Dirty page push.
 *			Address is a VA in proc2's address space.
 *	B_PHYS|B_PGIN:	Kernel pagein of user pages.
 *			Address is VA in user's address space.
 *	B_PHYS:		User "raw" IO request.
 *			Address is VA in user's address space.
 *
 * All requests are (re)mapped into kernel VA space via the useriomap
 * (a name with only slightly more meaning than "kernelmap")
 */
void
vmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr;
	register long flags = bp->b_flags;
	struct proc *p;
	int off;
	vm_offset_t kva;
	register vm_offset_t pa;

	if ((flags & B_PHYS) == 0)
		panic("vmapbuf");
	addr = bp->b_saveaddr = bp->b_un.b_addr;
	off = (int)addr & PGOFSET;
	p = bp->b_proc;
	npf = btoc(round_page(bp->b_bcount + off));
	kva = kmem_alloc_wait(phys_map, ctob(npf));
	bp->b_un.b_addr = (caddr_t) (kva + off);
	while (npf--) {
		pa = pmap_extract(&p->p_vmspace->vm_pmap, (vm_offset_t)addr);
		if (pa == 0)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa),
		    VM_PROT_READ|VM_PROT_WRITE, TRUE);
		addr += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
}

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_addr.
 */
void
vunmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr = bp->b_un.b_addr;
	vm_offset_t kva;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	npf = btoc(round_page(bp->b_bcount + ((int)addr & PGOFSET)));
	kva = (vm_offset_t)((int)addr & ~PGOFSET);
	kmem_free_wakeup(phys_map, kva, ctob(npf));
	bp->b_un.b_addr = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}
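/*
 * Usage note (illustrative): vmapbuf() and vunmapbuf() bracket a raw
 * transfer and are called from physio() in roughly the order sketched
 * below; this is a hedged sketch of that pattern, not actual kernel
 * text, and `strategy' stands for the target driver's strategy routine:
 *
 *	vmapbuf(bp);		(double-map the user pages into phys_map)
 *	(*strategy)(bp);	(start the transfer on kernel VAs)
 *	biowait(bp);		(sleep until the transfer completes)
 *	vunmapbuf(bp);		(unmap and restore bp->b_un.b_addr)
 */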
/*
 * Force reset the processor by invalidating the entire address space!
 */
void
cpu_reset()
{

	/* force a shutdown by unmapping entire address space ! */
	bzero((caddr_t) PTD, NBPG);

	/* "good night, sweet prince .... <THUNK!>" */
	tlbflush();
	/* NOTREACHED */
	while (1);
}

/*
 * Grow the user stack to allow for 'sp'.  This version grows the stack in
 * chunks of DFLSSIZ.  It is expected (required) that there is an
 * integer number of DFLSSIZ chunks in MAXSSIZ.
 */
int
grow(p, sp)
	struct proc *p;
	int sp;
{
	unsigned int nss;
	caddr_t v;
	struct vmspace *vm = p->p_vmspace;

	if ((caddr_t)sp <= vm->vm_maxsaddr || (unsigned)sp >= (unsigned)USRSTACK)
		return (1);

	nss = roundup(USRSTACK - (unsigned)sp, PAGE_SIZE);

	if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur)
		return (0);

	if (vm->vm_ssize && roundup(vm->vm_ssize << PAGE_SHIFT,
	    DFLSSIZ) < nss) {
		int grow_amount;
		/*
		 * If necessary, grow the VM that the stack occupies
		 * to allow for the rlimit.  This allows us to not have
		 * to allocate all of the VM up-front in execve (which
		 * is expensive).
		 * Grow the VM by the amount requested, rounded up to
		 * the nearest DFLSSIZ to provide for some hysteresis.
		 */
		grow_amount = roundup((nss - (vm->vm_ssize << PAGE_SHIFT)), DFLSSIZ);
		v = (char *)USRSTACK - roundup(vm->vm_ssize << PAGE_SHIFT,
		    DFLSSIZ) - grow_amount;
		/*
		 * If there isn't enough room to extend by DFLSSIZ, then
		 * just extend to the maximum size.
		 */
		if (v < vm->vm_maxsaddr) {
			v = vm->vm_maxsaddr;
			grow_amount = MAXSSIZ - (vm->vm_ssize << PAGE_SHIFT);
		}
		if (vm_allocate(&vm->vm_map, (vm_offset_t *)&v,
		    grow_amount, FALSE) != KERN_SUCCESS) {
			return (0);
		}
		vm->vm_ssize += grow_amount >> PAGE_SHIFT;
	}

	return (1);
}
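/*
 * Worked example of the hysteresis in grow() (assuming the illustrative
 * values DFLSSIZ = 512K and PAGE_SIZE = 4K): with 512K of stack already
 * allocated (vm_ssize << PAGE_SHIFT == 512K) and a fault leaving
 * nss = 516K, roundup(512K, DFLSSIZ) == 512K < nss, so
 * grow_amount = roundup(516K - 512K, DFLSSIZ) == 512K and the stack
 * segment is extended to 1M.  The next 127 pages of growth then stay
 * under the rounded-up size and take the cheap return (1) path without
 * calling vm_allocate() again.
 */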