vm_machdep.c revision 3436
1/*- 2 * Copyright (c) 1982, 1986 The Regents of the University of California. 3 * Copyright (c) 1989, 1990 William Jolitz 4 * Copyright (c) 1994 John Dyson 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * the Systems Programming Group of the University of Utah Computer 9 * Science Department, and William Jolitz. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. All advertising materials mentioning features or use of this software 20 * must display the following acknowledgement: 21 * This product includes software developed by the University of 22 * California, Berkeley and its contributors. 23 * 4. Neither the name of the University nor the names of its contributors 24 * may be used to endorse or promote products derived from this software 25 * without specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 30 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 37 * SUCH DAMAGE. 38 * 39 * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91 40 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$ 41 * $Id: vm_machdep.c,v 1.28 1994/09/02 04:12:07 davidg Exp $ 42 */ 43 44#include "npx.h" 45#include <sys/param.h> 46#include <sys/systm.h> 47#include <sys/proc.h> 48#include <sys/malloc.h> 49#include <sys/buf.h> 50#include <sys/vnode.h> 51#include <sys/user.h> 52 53#include <machine/cpu.h> 54 55#include <vm/vm.h> 56#include <vm/vm_kern.h> 57 58#ifdef BOUNCE_BUFFERS 59vm_map_t io_map; 60volatile int kvasfreecnt; 61 62 63caddr_t bouncememory; 64int bouncepages, bpwait; 65vm_offset_t *bouncepa; 66int bmwait, bmfreeing; 67 68#define BITS_IN_UNSIGNED (8*sizeof(unsigned)) 69int bounceallocarraysize; 70unsigned *bounceallocarray; 71int bouncefree; 72 73#define SIXTEENMEG (4096*4096) 74#define MAXBKVA 1024 75int maxbkva = MAXBKVA*NBPG; 76 77/* special list that can be used at interrupt time for eventual kva free */ 78struct kvasfree { 79 vm_offset_t addr; 80 vm_offset_t size; 81} kvaf[MAXBKVA]; 82 83 84vm_offset_t vm_bounce_kva(); 85/* 86 * get bounce buffer pages (count physically contiguous) 87 * (only 1 inplemented now) 88 */ 89vm_offset_t 90vm_bounce_page_find(count) 91 int count; 92{ 93 int bit; 94 int s,i; 95 96 if (count != 1) 97 panic("vm_bounce_page_find -- no support for > 1 page yet!!!"); 98 99 s = splbio(); 100retry: 101 for (i = 0; i < bounceallocarraysize; i++) { 102 if (bounceallocarray[i] != 0xffffffff) { 103 bit = ffs(~bounceallocarray[i]); 104 if (bit) { 105 bounceallocarray[i] |= 1 << (bit - 1) ; 106 bouncefree -= count; 107 splx(s); 108 return bouncepa[(i * BITS_IN_UNSIGNED + (bit - 1))]; 109 } 110 } 111 } 112 bpwait = 1; 113 tsleep((caddr_t) &bounceallocarray, PRIBIO, "bncwai", 0); 114 goto retry; 115} 116 117void 118vm_bounce_kva_free(addr, size, now) 119 vm_offset_t addr; 120 vm_offset_t size; 121 int now; 122{ 123 int s = splbio(); 124 kvaf[kvasfreecnt].addr = addr; 125 kvaf[kvasfreecnt].size = size; 126 ++kvasfreecnt; 127 if( now) { 128 /* 129 * this will do wakeups 130 */ 131 vm_bounce_kva(0,0); 132 } else { 133 if (bmwait) { 134 /* 135 * if anyone is waiting on the bounce-map, then wakeup 136 */ 137 wakeup((caddr_t) io_map); 138 bmwait = 0; 139 } 140 } 141 splx(s); 142} 143 144/* 145 * free count bounce buffer pages 146 */ 147void 148vm_bounce_page_free(pa, count) 149 vm_offset_t pa; 150 int count; 151{ 152 int allocindex; 153 int index; 154 int bit; 155 156 if (count != 1) 157 panic("vm_bounce_page_free -- no support for > 1 page yet!!!\n"); 158 159 for(index=0;index<bouncepages;index++) { 160 if( pa == bouncepa[index]) 161 break; 162 } 163 164 if( index == bouncepages) 165 panic("vm_bounce_page_free: invalid bounce buffer"); 166 167 allocindex = index / BITS_IN_UNSIGNED; 168 bit = index % BITS_IN_UNSIGNED; 169 170 bounceallocarray[allocindex] &= ~(1 << bit); 171 172 bouncefree += count; 173 if (bpwait) { 174 bpwait = 0; 175 wakeup((caddr_t) &bounceallocarray); 176 } 177} 178 179/* 180 * allocate count bounce buffer kva pages 181 */ 182vm_offset_t 183vm_bounce_kva(size, waitok) 184 int size; 185 int waitok; 186{ 187 int i; 188 vm_offset_t kva = 0; 189 vm_offset_t off; 190 int s = splbio(); 191more: 192 if (!bmfreeing && kvasfreecnt) { 193 bmfreeing = 1; 194 for (i = 0; i < kvasfreecnt; i++) { 195 for(off=0;off<kvaf[i].size;off+=NBPG) { 196 pmap_kremove( kvaf[i].addr + off); 197 } 198 kmem_free_wakeup(io_map, kvaf[i].addr, 199 kvaf[i].size); 200 } 201 kvasfreecnt = 0; 202 bmfreeing = 0; 203 if( bmwait) { 204 bmwait = 0; 205 wakeup( (caddr_t) io_map); 206 } 207 } 208 209 if( size == 0) { 210 splx(s); 211 return NULL; 212 } 213 214 if ((kva = kmem_alloc_pageable(io_map, size)) == 0) { 215 if( !waitok) { 216 splx(s); 217 return NULL; 218 } 219 bmwait = 1; 220 tsleep((caddr_t) io_map, PRIBIO, "bmwait", 0); 221 goto more; 222 } 223 splx(s); 224 return kva; 225} 226 227/* 228 * same as vm_bounce_kva -- but really allocate (but takes pages as arg) 229 */ 230vm_offset_t 231vm_bounce_kva_alloc(count) 232int count; 233{ 234 int i; 235 vm_offset_t kva; 236 vm_offset_t pa; 237 if( bouncepages == 0) { 238 kva = (vm_offset_t) malloc(count*NBPG, M_TEMP, M_WAITOK); 239 return kva; 240 } 241 kva = vm_bounce_kva(count*NBPG, 1); 242 for(i=0;i<count;i++) { 243 pa = vm_bounce_page_find(1); 244 pmap_kenter(kva + i * NBPG, pa); 245 } 246 return kva; 247} 248 249/* 250 * same as vm_bounce_kva_free -- but really free 251 */ 252void 253vm_bounce_kva_alloc_free(kva, count) 254 vm_offset_t kva; 255 int count; 256{ 257 int i; 258 vm_offset_t pa; 259 if( bouncepages == 0) { 260 free((caddr_t) kva, M_TEMP); 261 return; 262 } 263 for(i = 0; i < count; i++) { 264 pa = pmap_kextract(kva + i * NBPG); 265 vm_bounce_page_free(pa, 1); 266 } 267 vm_bounce_kva_free(kva, count*NBPG, 0); 268} 269 270/* 271 * do the things necessary to the struct buf to implement 272 * bounce buffers... inserted before the disk sort 273 */ 274void 275vm_bounce_alloc(bp) 276 struct buf *bp; 277{ 278 int countvmpg; 279 vm_offset_t vastart, vaend; 280 vm_offset_t vapstart, vapend; 281 vm_offset_t va, kva; 282 vm_offset_t pa; 283 int dobounceflag = 0; 284 int i; 285 286 if (bouncepages == 0) 287 return; 288 289 if (bp->b_flags & B_BOUNCE) { 290 printf("vm_bounce_alloc: called recursively???\n"); 291 return; 292 } 293 294 if (bp->b_bufsize < bp->b_bcount) { 295 printf( 296 "vm_bounce_alloc: b_bufsize(0x%lx) < b_bcount(0x%lx) !!\n", 297 bp->b_bufsize, bp->b_bcount); 298 panic("vm_bounce_alloc"); 299 } 300 301/* 302 * This is not really necessary 303 * if( bp->b_bufsize != bp->b_bcount) { 304 * printf("size: %d, count: %d\n", bp->b_bufsize, bp->b_bcount); 305 * } 306 */ 307 308 309 vastart = (vm_offset_t) bp->b_data; 310 vaend = (vm_offset_t) bp->b_data + bp->b_bufsize; 311 312 vapstart = i386_trunc_page(vastart); 313 vapend = i386_round_page(vaend); 314 countvmpg = (vapend - vapstart) / NBPG; 315 316/* 317 * if any page is above 16MB, then go into bounce-buffer mode 318 */ 319 va = vapstart; 320 for (i = 0; i < countvmpg; i++) { 321 pa = pmap_kextract(va); 322 if (pa >= SIXTEENMEG) 323 ++dobounceflag; 324 va += NBPG; 325 } 326 if (dobounceflag == 0) 327 return; 328 329 if (bouncepages < dobounceflag) 330 panic("Not enough bounce buffers!!!"); 331 332/* 333 * allocate a replacement kva for b_addr 334 */ 335 kva = vm_bounce_kva(countvmpg*NBPG, 1); 336#if 0 337 printf("%s: vapstart: %x, vapend: %x, countvmpg: %d, kva: %x ", 338 (bp->b_flags & B_READ) ? "read":"write", 339 vapstart, vapend, countvmpg, kva); 340#endif 341 va = vapstart; 342 for (i = 0; i < countvmpg; i++) { 343 pa = pmap_kextract(va); 344 if (pa >= SIXTEENMEG) { 345 /* 346 * allocate a replacement page 347 */ 348 vm_offset_t bpa = vm_bounce_page_find(1); 349 pmap_kenter(kva + (NBPG * i), bpa); 350#if 0 351 printf("r(%d): (%x,%x,%x) ", i, va, pa, bpa); 352#endif 353 /* 354 * if we are writing, the copy the data into the page 355 */ 356 if ((bp->b_flags & B_READ) == 0) { 357 bcopy((caddr_t) va, (caddr_t) kva + (NBPG * i), NBPG); 358 } 359 } else { 360 /* 361 * use original page 362 */ 363 pmap_kenter(kva + (NBPG * i), pa); 364 } 365 va += NBPG; 366 } 367 368/* 369 * flag the buffer as being bounced 370 */ 371 bp->b_flags |= B_BOUNCE; 372/* 373 * save the original buffer kva 374 */ 375 bp->b_savekva = bp->b_data; 376/* 377 * put our new kva into the buffer (offset by original offset) 378 */ 379 bp->b_data = (caddr_t) (((vm_offset_t) kva) | 380 ((vm_offset_t) bp->b_savekva & (NBPG - 1))); 381#if 0 382 printf("b_savekva: %x, newva: %x\n", bp->b_savekva, bp->b_data); 383#endif 384 return; 385} 386 387/* 388 * hook into biodone to free bounce buffer 389 */ 390void 391vm_bounce_free(bp) 392 struct buf *bp; 393{ 394 int i; 395 vm_offset_t origkva, bouncekva, bouncekvaend; 396 397/* 398 * if this isn't a bounced buffer, then just return 399 */ 400 if ((bp->b_flags & B_BOUNCE) == 0) 401 return; 402 403/* 404 * This check is not necessary 405 * if (bp->b_bufsize != bp->b_bcount) { 406 * printf("vm_bounce_free: b_bufsize=%d, b_bcount=%d\n", 407 * bp->b_bufsize, bp->b_bcount); 408 * } 409 */ 410 411 origkva = (vm_offset_t) bp->b_savekva; 412 bouncekva = (vm_offset_t) bp->b_data; 413/* 414 printf("free: %d ", bp->b_bufsize); 415*/ 416 417/* 418 * check every page in the kva space for b_addr 419 */ 420 for (i = 0; i < bp->b_bufsize; ) { 421 vm_offset_t mybouncepa; 422 vm_offset_t copycount; 423 424 copycount = i386_round_page(bouncekva + 1) - bouncekva; 425 mybouncepa = pmap_kextract(i386_trunc_page(bouncekva)); 426 427/* 428 * if this is a bounced pa, then process as one 429 */ 430 if ( mybouncepa != pmap_kextract( i386_trunc_page( origkva))) { 431 vm_offset_t tocopy = copycount; 432 if (i + tocopy > bp->b_bufsize) 433 tocopy = bp->b_bufsize - i; 434/* 435 * if this is a read, then copy from bounce buffer into original buffer 436 */ 437 if (bp->b_flags & B_READ) 438 bcopy((caddr_t) bouncekva, (caddr_t) origkva, tocopy); 439/* 440 * free the bounce allocation 441 */ 442 443/* 444 printf("(kva: %x, pa: %x)", bouncekva, mybouncepa); 445*/ 446 vm_bounce_page_free(mybouncepa, 1); 447 } 448 449 origkva += copycount; 450 bouncekva += copycount; 451 i += copycount; 452 } 453 454/* 455 printf("\n"); 456*/ 457/* 458 * add the old kva into the "to free" list 459 */ 460 461 bouncekva= i386_trunc_page((vm_offset_t) bp->b_data); 462 bouncekvaend= i386_round_page((vm_offset_t)bp->b_data + bp->b_bufsize); 463 464/* 465 printf("freeva: %d\n", (bouncekvaend - bouncekva) / NBPG); 466*/ 467 vm_bounce_kva_free( bouncekva, (bouncekvaend - bouncekva), 0); 468 bp->b_data = bp->b_savekva; 469 bp->b_savekva = 0; 470 bp->b_flags &= ~B_BOUNCE; 471 472 return; 473} 474 475 476/* 477 * init the bounce buffer system 478 */ 479void 480vm_bounce_init() 481{ 482 int i; 483 484 kvasfreecnt = 0; 485 486 if (bouncepages == 0) 487 return; 488 489 bounceallocarraysize = (bouncepages + BITS_IN_UNSIGNED - 1) / BITS_IN_UNSIGNED; 490 bounceallocarray = malloc(bounceallocarraysize * sizeof(unsigned), M_TEMP, M_NOWAIT); 491 492 if (!bounceallocarray) 493 panic("Cannot allocate bounce resource array\n"); 494 495 bzero(bounceallocarray, bounceallocarraysize * sizeof(unsigned)); 496 bouncepa = malloc(bouncepages * sizeof(vm_offset_t), M_TEMP, M_NOWAIT); 497 if (!bouncepa) 498 panic("Cannot allocate physical memory array\n"); 499 500 for(i=0;i<bouncepages;i++) { 501 vm_offset_t pa; 502 if( (pa = pmap_kextract((vm_offset_t) bouncememory + i * NBPG)) >= SIXTEENMEG) 503 panic("bounce memory out of range"); 504 if( pa == 0) 505 panic("bounce memory not resident"); 506 bouncepa[i] = pa; 507 } 508 bouncefree = bouncepages; 509 510} 511#endif /* BOUNCE_BUFFERS */ 512/* 513 * quick version of vm_fault 514 */ 515 516void 517vm_fault_quick( v, prot) 518 vm_offset_t v; 519 int prot; 520{ 521 if( (cpu_class == CPUCLASS_386) && 522 (prot & VM_PROT_WRITE)) 523 vm_fault(&curproc->p_vmspace->vm_map, v, 524 VM_PROT_READ|VM_PROT_WRITE, FALSE); 525 else if( prot & VM_PROT_WRITE) 526 *(volatile char *)v += 0; 527 else 528 *(volatile char *)v; 529} 530 531 532/* 533 * Finish a fork operation, with process p2 nearly set up. 534 * Copy and update the kernel stack and pcb, making the child 535 * ready to run, and marking it so that it can return differently 536 * than the parent. Returns 1 in the child process, 0 in the parent. 537 * We currently double-map the user area so that the stack is at the same 538 * address in each process; in the future we will probably relocate 539 * the frame pointers on the stack after copying. 540 */ 541int 542cpu_fork(p1, p2) 543 register struct proc *p1, *p2; 544{ 545 register struct user *up = p2->p_addr; 546 int offset; 547 extern char kstack[]; 548 extern int mvesp(); 549 550 /* 551 * Copy pcb and stack from proc p1 to p2. 552 * We do this as cheaply as possible, copying only the active 553 * part of the stack. The stack and pcb need to agree; 554 * this is tricky, as the final pcb is constructed by savectx, 555 * but its frame isn't yet on the stack when the stack is copied. 556 * swtch compensates for this when the child eventually runs. 557 * This should be done differently, with a single call 558 * that copies and updates the pcb+stack, 559 * replacing the bcopy and savectx. 560 */ 561 p2->p_addr->u_pcb = p1->p_addr->u_pcb; 562 offset = mvesp() - (int)kstack; 563 bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset, 564 (unsigned) ctob(UPAGES) - offset); 565 p2->p_md.md_regs = p1->p_md.md_regs; 566 567 pmap_activate(&p2->p_vmspace->vm_pmap, &up->u_pcb); 568 569 /* 570 * 571 * Arrange for a non-local goto when the new process 572 * is started, to resume here, returning nonzero from setjmp. 573 */ 574 if (savectx(&up->u_pcb, 1)) { 575 /* 576 * Return 1 in child. 577 */ 578 return (1); 579 } 580 return (0); 581} 582 583void 584cpu_exit(p) 585 register struct proc *p; 586{ 587 588#if NNPX > 0 589 npxexit(p); 590#endif /* NNPX */ 591 curproc = p; 592 mi_switch(); 593 /* 594 * This is to shutup the compiler, and if swtch() failed I suppose 595 * this would be a good thing. This keeps gcc happy because panic 596 * is a volatile void function as well. 597 */ 598 panic("cpu_exit"); 599} 600 601void 602cpu_wait(p) struct proc *p; { 603/* extern vm_map_t upages_map; */ 604 605 /* drop per-process resources */ 606 pmap_remove(vm_map_pmap(kernel_map), (vm_offset_t) p->p_addr, 607 ((vm_offset_t) p->p_addr) + ctob(UPAGES)); 608 kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES)); 609 vmspace_free(p->p_vmspace); 610} 611 612/* 613 * Dump the machine specific header information at the start of a core dump. 614 */ 615int 616cpu_coredump(p, vp, cred) 617 struct proc *p; 618 struct vnode *vp; 619 struct ucred *cred; 620{ 621 622 return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES), 623 (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL, 624 p)); 625} 626 627/* 628 * Set a red zone in the kernel stack after the u. area. 629 */ 630void 631setredzone(pte, vaddr) 632 u_short *pte; 633 caddr_t vaddr; 634{ 635/* eventually do this by setting up an expand-down stack segment 636 for ss0: selector, allowing stack access down to top of u. 637 this means though that protection violations need to be handled 638 thru a double fault exception that must do an integral task 639 switch to a known good context, within which a dump can be 640 taken. a sensible scheme might be to save the initial context 641 used by sched (that has physical memory mapped 1:1 at bottom) 642 and take the dump while still in mapped mode */ 643} 644 645/* 646 * Move pages from one kernel virtual address to another. 647 * Both addresses are assumed to reside in the Sysmap, 648 * and size must be a multiple of CLSIZE. 649 */ 650 651void 652pagemove(from, to, size) 653 register caddr_t from, to; 654 int size; 655{ 656 register vm_offset_t pa; 657 658 if (size & CLOFSET) 659 panic("pagemove"); 660 while (size > 0) { 661 pa = pmap_kextract((vm_offset_t)from); 662 if (pa == 0) 663 panic("pagemove 2"); 664 if (pmap_kextract((vm_offset_t)to) != 0) 665 panic("pagemove 3"); 666 pmap_kremove((vm_offset_t)from); 667 pmap_kenter((vm_offset_t)to, pa); 668 from += PAGE_SIZE; 669 to += PAGE_SIZE; 670 size -= PAGE_SIZE; 671 } 672} 673 674/* 675 * Convert kernel VA to physical address 676 */ 677u_long 678kvtop(void *addr) 679{ 680 vm_offset_t va; 681 682 va = pmap_kextract((vm_offset_t)addr); 683 if (va == 0) 684 panic("kvtop: zero page frame"); 685 return((int)va); 686} 687 688/* 689 * Map an IO request into kernel virtual address space. 690 * 691 * All requests are (re)mapped into kernel VA space. 692 * Notice that we use b_bufsize for the size of the buffer 693 * to be mapped. b_bcount might be modified by the driver. 694 */ 695void 696vmapbuf(bp) 697 register struct buf *bp; 698{ 699 register int npf; 700 register caddr_t addr; 701 int off; 702 vm_offset_t kva; 703 vm_offset_t pa, lastv, v; 704 705 if ((bp->b_flags & B_PHYS) == 0) 706 panic("vmapbuf"); 707 708 /* 709 * this is the kva that is to be used for 710 * the temporary kernel mapping 711 */ 712 kva = (vm_offset_t) bp->b_saveaddr; 713 714 lastv = 0; 715 for (addr = (caddr_t)trunc_page(bp->b_data); 716 addr < bp->b_data + bp->b_bufsize; 717 addr += PAGE_SIZE) { 718 719/* 720 * make sure that the pde is valid and held 721 */ 722 v = trunc_page(((vm_offset_t)vtopte(addr))); 723 if (v != lastv) { 724 vm_fault_quick(v, VM_PROT_READ); 725 pa = pmap_kextract( v); 726 vm_page_hold(PHYS_TO_VM_PAGE(pa)); 727 lastv = v; 728 } 729 730/* 731 * do the vm_fault if needed, do the copy-on-write thing when 732 * reading stuff off device into memory. 733 */ 734 vm_fault_quick(addr, 735 (bp->b_flags&B_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ); 736 pa = pmap_kextract((vm_offset_t) addr); 737/* 738 * hold the data page 739 */ 740 vm_page_hold(PHYS_TO_VM_PAGE(pa)); 741 } 742 743 addr = bp->b_saveaddr = bp->b_data; 744 off = (int)addr & PGOFSET; 745 npf = btoc(round_page(bp->b_bufsize + off)); 746 bp->b_data = (caddr_t) (kva + off); 747 while (npf--) { 748 pa = pmap_kextract((vm_offset_t)addr); 749 if (pa == 0) 750 panic("vmapbuf: null page frame"); 751 pmap_kenter(kva, trunc_page(pa)); 752 addr += PAGE_SIZE; 753 kva += PAGE_SIZE; 754 } 755} 756 757/* 758 * Free the io map PTEs associated with this IO operation. 759 * We also invalidate the TLB entries and restore the original b_addr. 760 */ 761void 762vunmapbuf(bp) 763 register struct buf *bp; 764{ 765 register caddr_t addr; 766 vm_offset_t v,lastv,pa; 767 768 if ((bp->b_flags & B_PHYS) == 0) 769 panic("vunmapbuf"); 770 771 for (addr = (caddr_t)trunc_page((vm_offset_t) bp->b_data); 772 addr < bp->b_data + bp->b_bufsize; 773 addr += NBPG) 774 pmap_kremove((vm_offset_t) addr); 775 776 bp->b_data = bp->b_saveaddr; 777 bp->b_saveaddr = NULL; 778 779/* 780 * unhold the pde, and data pages 781 */ 782 lastv = 0; 783 for (addr = (caddr_t)trunc_page((vm_offset_t) bp->b_data); 784 addr < bp->b_data + bp->b_bufsize; 785 addr += NBPG) { 786 787 /* 788 * release the data page 789 */ 790 pa = pmap_kextract((vm_offset_t) addr); 791 vm_page_unhold(PHYS_TO_VM_PAGE(pa)); 792 793 /* 794 * and unhold the page table 795 */ 796 v = trunc_page(((vm_offset_t)vtopte(addr))); 797 if (v != lastv) { 798 pa = pmap_kextract(v); 799 vm_page_unhold(PHYS_TO_VM_PAGE(pa)); 800 lastv = v; 801 } 802 } 803} 804 805/* 806 * Force reset the processor by invalidating the entire address space! 807 */ 808void 809cpu_reset() { 810 811 /* force a shutdown by unmapping entire address space ! */ 812 bzero((caddr_t) PTD, NBPG); 813 814 /* "good night, sweet prince .... <THUNK!>" */ 815 pmap_update(); 816 /* NOTREACHED */ 817 while(1); 818} 819 820/* 821 * Grow the user stack to allow for 'sp'. This version grows the stack in 822 * chunks of SGROWSIZ. 823 */ 824int 825grow(p, sp) 826 struct proc *p; 827 u_int sp; 828{ 829 unsigned int nss; 830 caddr_t v; 831 struct vmspace *vm = p->p_vmspace; 832 833 if ((caddr_t)sp <= vm->vm_maxsaddr || (unsigned)sp >= (unsigned)USRSTACK) 834 return (1); 835 836 nss = roundup(USRSTACK - (unsigned)sp, PAGE_SIZE); 837 838 if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur) 839 return (0); 840 841 if (vm->vm_ssize && roundup(vm->vm_ssize << PAGE_SHIFT, 842 SGROWSIZ) < nss) { 843 int grow_amount; 844 /* 845 * If necessary, grow the VM that the stack occupies 846 * to allow for the rlimit. This allows us to not have 847 * to allocate all of the VM up-front in execve (which 848 * is expensive). 849 * Grow the VM by the amount requested rounded up to 850 * the nearest SGROWSIZ to provide for some hysteresis. 851 */ 852 grow_amount = roundup((nss - (vm->vm_ssize << PAGE_SHIFT)), SGROWSIZ); 853 v = (char *)USRSTACK - roundup(vm->vm_ssize << PAGE_SHIFT, 854 SGROWSIZ) - grow_amount; 855 /* 856 * If there isn't enough room to extend by SGROWSIZ, then 857 * just extend to the maximum size 858 */ 859 if (v < vm->vm_maxsaddr) { 860 v = vm->vm_maxsaddr; 861 grow_amount = MAXSSIZ - (vm->vm_ssize << PAGE_SHIFT); 862 } 863 if (vm_allocate(&vm->vm_map, (vm_offset_t *)&v, 864 grow_amount, FALSE) != KERN_SUCCESS) { 865 return (0); 866 } 867 vm->vm_ssize += grow_amount >> PAGE_SHIFT; 868 } 869 870 return (1); 871} 872