vm_machdep.c revision 1894
1/*- 2 * Copyright (c) 1982, 1986 The Regents of the University of California. 3 * Copyright (c) 1989, 1990 William Jolitz 4 * Copyright (c) 1994 John Dyson 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * the Systems Programming Group of the University of Utah Computer 9 * Science Department, and William Jolitz. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. All advertising materials mentioning features or use of this software 20 * must display the following acknowledgement: 21 * This product includes software developed by the University of 22 * California, Berkeley and its contributors. 23 * 4. Neither the name of the University nor the names of its contributors 24 * may be used to endorse or promote products derived from this software 25 * without specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 30 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 37 * SUCH DAMAGE. 38 * 39 * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91 40 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$ 41 * $Id: vm_machdep.c,v 1.24 1994/08/06 10:25:37 davidg Exp $ 42 */ 43 44#include "npx.h" 45#include <sys/param.h> 46#include <sys/systm.h> 47#include <sys/proc.h> 48#include <sys/malloc.h> 49#include <sys/buf.h> 50#include <sys/vnode.h> 51#include <sys/user.h> 52 53#include <machine/cpu.h> 54 55#include <vm/vm.h> 56#include <vm/vm_kern.h> 57 58#ifndef NOBOUNCE 59vm_map_t io_map; 60volatile int kvasfreecnt; 61 62 63caddr_t bouncememory; 64int bouncepages, bpwait; 65vm_offset_t *bouncepa; 66int bmwait, bmfreeing; 67 68#define BITS_IN_UNSIGNED (8*sizeof(unsigned)) 69int bounceallocarraysize; 70unsigned *bounceallocarray; 71int bouncefree; 72 73#define SIXTEENMEG (4096*4096) 74#define MAXBKVA 1024 75int maxbkva = MAXBKVA*NBPG; 76 77/* special list that can be used at interrupt time for eventual kva free */ 78struct kvasfree { 79 vm_offset_t addr; 80 vm_offset_t size; 81} kvaf[MAXBKVA]; 82 83 84vm_offset_t vm_bounce_kva(); 85/* 86 * get bounce buffer pages (count physically contiguous) 87 * (only 1 inplemented now) 88 */ 89vm_offset_t 90vm_bounce_page_find(count) 91 int count; 92{ 93 int bit; 94 int s,i; 95 96 if (count != 1) 97 panic("vm_bounce_page_find -- no support for > 1 page yet!!!"); 98 99 s = splbio(); 100retry: 101 for (i = 0; i < bounceallocarraysize; i++) { 102 if (bounceallocarray[i] != 0xffffffff) { 103 if (bit = ffs(~bounceallocarray[i])) { 104 bounceallocarray[i] |= 1 << (bit - 1) ; 105 bouncefree -= count; 106 splx(s); 107 return bouncepa[(i * BITS_IN_UNSIGNED + (bit - 1))]; 108 } 109 } 110 } 111 bpwait = 1; 112 tsleep((caddr_t) &bounceallocarray, PRIBIO, "bncwai", 0); 113 goto retry; 114} 115 116void 117vm_bounce_kva_free(addr, size, now) 118 vm_offset_t addr; 119 vm_offset_t size; 120 int now; 121{ 122 int s = splbio(); 123 kvaf[kvasfreecnt].addr = addr; 124 kvaf[kvasfreecnt].size = size; 125 ++kvasfreecnt; 126 if( now) { 127 /* 128 * this will do wakeups 129 */ 130 vm_bounce_kva(0,0); 131 } else { 132 if (bmwait) { 133 /* 134 * if anyone is waiting on the bounce-map, then wakeup 135 */ 136 wakeup((caddr_t) io_map); 137 bmwait = 0; 138 } 139 } 140 splx(s); 141} 142 143/* 144 * free count bounce buffer pages 145 */ 146void 147vm_bounce_page_free(pa, count) 148 vm_offset_t pa; 149 int count; 150{ 151 int allocindex; 152 int index; 153 int bit; 154 155 if (count != 1) 156 panic("vm_bounce_page_free -- no support for > 1 page yet!!!\n"); 157 158 for(index=0;index<bouncepages;index++) { 159 if( pa == bouncepa[index]) 160 break; 161 } 162 163 if( index == bouncepages) 164 panic("vm_bounce_page_free: invalid bounce buffer"); 165 166 allocindex = index / BITS_IN_UNSIGNED; 167 bit = index % BITS_IN_UNSIGNED; 168 169 bounceallocarray[allocindex] &= ~(1 << bit); 170 171 bouncefree += count; 172 if (bpwait) { 173 bpwait = 0; 174 wakeup((caddr_t) &bounceallocarray); 175 } 176} 177 178/* 179 * allocate count bounce buffer kva pages 180 */ 181vm_offset_t 182vm_bounce_kva(size, waitok) 183 int size; 184 int waitok; 185{ 186 int i; 187 int startfree; 188 vm_offset_t kva = 0; 189 vm_offset_t off; 190 int s = splbio(); 191more: 192 if (!bmfreeing && kvasfreecnt) { 193 bmfreeing = 1; 194 for (i = 0; i < kvasfreecnt; i++) { 195 for(off=0;off<kvaf[i].size;off+=NBPG) { 196 pmap_kremove( kvaf[i].addr + off); 197 } 198 kmem_free_wakeup(io_map, kvaf[i].addr, 199 kvaf[i].size); 200 } 201 kvasfreecnt = 0; 202 bmfreeing = 0; 203 if( bmwait) { 204 bmwait = 0; 205 wakeup( (caddr_t) io_map); 206 } 207 } 208 209 if( size == 0) { 210 splx(s); 211 return NULL; 212 } 213 214 if ((kva = kmem_alloc_pageable(io_map, size)) == 0) { 215 if( !waitok) { 216 splx(s); 217 return NULL; 218 } 219 bmwait = 1; 220 tsleep((caddr_t) io_map, PRIBIO, "bmwait", 0); 221 goto more; 222 } 223 splx(s); 224 return kva; 225} 226 227/* 228 * same as vm_bounce_kva -- but really allocate (but takes pages as arg) 229 */ 230vm_offset_t 231vm_bounce_kva_alloc(count) 232int count; 233{ 234 int i; 235 vm_offset_t kva; 236 vm_offset_t pa; 237 if( bouncepages == 0) { 238 kva = (vm_offset_t) malloc(count*NBPG, M_TEMP, M_WAITOK); 239 return kva; 240 } 241 kva = vm_bounce_kva(count*NBPG, 1); 242 for(i=0;i<count;i++) { 243 pa = vm_bounce_page_find(1); 244 pmap_kenter(kva + i * NBPG, pa); 245 } 246 pmap_update(); 247 return kva; 248} 249 250/* 251 * same as vm_bounce_kva_free -- but really free 252 */ 253void 254vm_bounce_kva_alloc_free(kva, count) 255 vm_offset_t kva; 256 int count; 257{ 258 int i; 259 vm_offset_t pa; 260 if( bouncepages == 0) { 261 free((caddr_t) kva, M_TEMP); 262 return; 263 } 264 for(i = 0; i < count; i++) { 265 pa = pmap_kextract(kva + i * NBPG); 266 vm_bounce_page_free(pa, 1); 267 } 268 vm_bounce_kva_free(kva, count*NBPG, 0); 269} 270 271/* 272 * do the things necessary to the struct buf to implement 273 * bounce buffers... inserted before the disk sort 274 */ 275void 276vm_bounce_alloc(bp) 277 struct buf *bp; 278{ 279 int countvmpg; 280 vm_offset_t vastart, vaend; 281 vm_offset_t vapstart, vapend; 282 vm_offset_t va, kva; 283 vm_offset_t pa; 284 int dobounceflag = 0; 285 int bounceindex; 286 int i; 287 int s; 288 289 if (bouncepages == 0) 290 return; 291 292 if (bp->b_flags & B_BOUNCE) { 293 printf("vm_bounce_alloc: called recursively???\n"); 294 return; 295 } 296 297 if (bp->b_bufsize < bp->b_bcount) { 298 printf("vm_bounce_alloc: b_bufsize(0x%x) < b_bcount(0x%x) !!!!\n", 299 bp->b_bufsize, bp->b_bcount); 300 panic("vm_bounce_alloc"); 301 } 302 303/* 304 * This is not really necessary 305 * if( bp->b_bufsize != bp->b_bcount) { 306 * printf("size: %d, count: %d\n", bp->b_bufsize, bp->b_bcount); 307 * } 308 */ 309 310 311 vastart = (vm_offset_t) bp->b_data; 312 vaend = (vm_offset_t) bp->b_data + bp->b_bufsize; 313 314 vapstart = i386_trunc_page(vastart); 315 vapend = i386_round_page(vaend); 316 countvmpg = (vapend - vapstart) / NBPG; 317 318/* 319 * if any page is above 16MB, then go into bounce-buffer mode 320 */ 321 va = vapstart; 322 for (i = 0; i < countvmpg; i++) { 323 pa = pmap_kextract(va); 324 if (pa >= SIXTEENMEG) 325 ++dobounceflag; 326 va += NBPG; 327 } 328 if (dobounceflag == 0) 329 return; 330 331 if (bouncepages < dobounceflag) 332 panic("Not enough bounce buffers!!!"); 333 334/* 335 * allocate a replacement kva for b_addr 336 */ 337 kva = vm_bounce_kva(countvmpg*NBPG, 1); 338#if 0 339 printf("%s: vapstart: %x, vapend: %x, countvmpg: %d, kva: %x ", 340 (bp->b_flags & B_READ) ? "read":"write", 341 vapstart, vapend, countvmpg, kva); 342#endif 343 va = vapstart; 344 for (i = 0; i < countvmpg; i++) { 345 pa = pmap_kextract(va); 346 if (pa >= SIXTEENMEG) { 347 /* 348 * allocate a replacement page 349 */ 350 vm_offset_t bpa = vm_bounce_page_find(1); 351 pmap_kenter(kva + (NBPG * i), bpa); 352#if 0 353 printf("r(%d): (%x,%x,%x) ", i, va, pa, bpa); 354#endif 355 /* 356 * if we are writing, the copy the data into the page 357 */ 358 if ((bp->b_flags & B_READ) == 0) { 359 pmap_update(); 360 bcopy((caddr_t) va, (caddr_t) kva + (NBPG * i), NBPG); 361 } 362 } else { 363 /* 364 * use original page 365 */ 366 pmap_kenter(kva + (NBPG * i), pa); 367 } 368 va += NBPG; 369 } 370 pmap_update(); 371 372/* 373 * flag the buffer as being bounced 374 */ 375 bp->b_flags |= B_BOUNCE; 376/* 377 * save the original buffer kva 378 */ 379 bp->b_savekva = bp->b_data; 380/* 381 * put our new kva into the buffer (offset by original offset) 382 */ 383 bp->b_data = (caddr_t) (((vm_offset_t) kva) | 384 ((vm_offset_t) bp->b_savekva & (NBPG - 1))); 385#if 0 386 printf("b_savekva: %x, newva: %x\n", bp->b_savekva, bp->b_data); 387#endif 388 return; 389} 390 391/* 392 * hook into biodone to free bounce buffer 393 */ 394void 395vm_bounce_free(bp) 396 struct buf *bp; 397{ 398 int i; 399 vm_offset_t origkva, bouncekva, bouncekvaend; 400 int countvmpg; 401 int s; 402 403/* 404 * if this isn't a bounced buffer, then just return 405 */ 406 if ((bp->b_flags & B_BOUNCE) == 0) 407 return; 408 409/* 410 * This check is not necessary 411 * if (bp->b_bufsize != bp->b_bcount) { 412 * printf("vm_bounce_free: b_bufsize=%d, b_bcount=%d\n", 413 * bp->b_bufsize, bp->b_bcount); 414 * } 415 */ 416 417 origkva = (vm_offset_t) bp->b_savekva; 418 bouncekva = (vm_offset_t) bp->b_data; 419/* 420 printf("free: %d ", bp->b_bufsize); 421*/ 422 423/* 424 * check every page in the kva space for b_addr 425 */ 426 for (i = 0; i < bp->b_bufsize; ) { 427 vm_offset_t mybouncepa; 428 vm_offset_t copycount; 429 430 copycount = i386_round_page(bouncekva + 1) - bouncekva; 431 mybouncepa = pmap_kextract(i386_trunc_page(bouncekva)); 432 433/* 434 * if this is a bounced pa, then process as one 435 */ 436 if ( mybouncepa != pmap_kextract( i386_trunc_page( origkva))) { 437 vm_offset_t tocopy = copycount; 438 if (i + tocopy > bp->b_bufsize) 439 tocopy = bp->b_bufsize - i; 440/* 441 * if this is a read, then copy from bounce buffer into original buffer 442 */ 443 if (bp->b_flags & B_READ) 444 bcopy((caddr_t) bouncekva, (caddr_t) origkva, tocopy); 445/* 446 * free the bounce allocation 447 */ 448 449/* 450 printf("(kva: %x, pa: %x)", bouncekva, mybouncepa); 451*/ 452 vm_bounce_page_free(mybouncepa, 1); 453 } 454 455 origkva += copycount; 456 bouncekva += copycount; 457 i += copycount; 458 } 459 460/* 461 printf("\n"); 462*/ 463/* 464 * add the old kva into the "to free" list 465 */ 466 467 bouncekva= i386_trunc_page((vm_offset_t) bp->b_data); 468 bouncekvaend= i386_round_page((vm_offset_t)bp->b_data + bp->b_bufsize); 469 470/* 471 printf("freeva: %d\n", (bouncekvaend - bouncekva) / NBPG); 472*/ 473 vm_bounce_kva_free( bouncekva, (bouncekvaend - bouncekva), 0); 474 bp->b_data = bp->b_savekva; 475 bp->b_savekva = 0; 476 bp->b_flags &= ~B_BOUNCE; 477 478 return; 479} 480 481 482/* 483 * init the bounce buffer system 484 */ 485void 486vm_bounce_init() 487{ 488 vm_offset_t minaddr, maxaddr; 489 int i; 490 491 kvasfreecnt = 0; 492 493 if (bouncepages == 0) 494 return; 495 496 bounceallocarraysize = (bouncepages + BITS_IN_UNSIGNED - 1) / BITS_IN_UNSIGNED; 497 bounceallocarray = malloc(bounceallocarraysize * sizeof(unsigned), M_TEMP, M_NOWAIT); 498 499 if (!bounceallocarray) 500 panic("Cannot allocate bounce resource array\n"); 501 502 bzero(bounceallocarray, bounceallocarraysize * sizeof(unsigned)); 503 bouncepa = malloc(bouncepages * sizeof(vm_offset_t), M_TEMP, M_NOWAIT); 504 if (!bouncepa) 505 panic("Cannot allocate physical memory array\n"); 506 507 for(i=0;i<bouncepages;i++) { 508 vm_offset_t pa; 509 if( (pa = pmap_kextract((vm_offset_t) bouncememory + i * NBPG)) >= SIXTEENMEG) 510 panic("bounce memory out of range"); 511 if( pa == 0) 512 panic("bounce memory not resident"); 513 bouncepa[i] = pa; 514 } 515 bouncefree = bouncepages; 516 517} 518#endif /* NOBOUNCE */ 519/* 520 * quick version of vm_fault 521 */ 522 523void 524vm_fault_quick( v, prot) 525 vm_offset_t v; 526 int prot; 527{ 528 if( (cpu_class == CPUCLASS_386) && 529 (prot & VM_PROT_WRITE)) 530 vm_fault(&curproc->p_vmspace->vm_map, v, 531 VM_PROT_READ|VM_PROT_WRITE, FALSE); 532 else if( prot & VM_PROT_WRITE) 533 *(volatile char *)v += 0; 534 else 535 *(volatile char *)v; 536} 537 538 539/* 540 * Finish a fork operation, with process p2 nearly set up. 541 * Copy and update the kernel stack and pcb, making the child 542 * ready to run, and marking it so that it can return differently 543 * than the parent. Returns 1 in the child process, 0 in the parent. 544 * We currently double-map the user area so that the stack is at the same 545 * address in each process; in the future we will probably relocate 546 * the frame pointers on the stack after copying. 547 */ 548int 549cpu_fork(p1, p2) 550 register struct proc *p1, *p2; 551{ 552 register struct user *up = p2->p_addr; 553 int foo, offset, addr, i; 554 extern char kstack[]; 555 extern int mvesp(); 556 557 /* 558 * Copy pcb and stack from proc p1 to p2. 559 * We do this as cheaply as possible, copying only the active 560 * part of the stack. The stack and pcb need to agree; 561 * this is tricky, as the final pcb is constructed by savectx, 562 * but its frame isn't yet on the stack when the stack is copied. 563 * swtch compensates for this when the child eventually runs. 564 * This should be done differently, with a single call 565 * that copies and updates the pcb+stack, 566 * replacing the bcopy and savectx. 567 */ 568 p2->p_addr->u_pcb = p1->p_addr->u_pcb; 569 offset = mvesp() - (int)kstack; 570 bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset, 571 (unsigned) ctob(UPAGES) - offset); 572 p2->p_md.md_regs = p1->p_md.md_regs; 573 574 pmap_activate(&p2->p_vmspace->vm_pmap, &up->u_pcb); 575 576 /* 577 * 578 * Arrange for a non-local goto when the new process 579 * is started, to resume here, returning nonzero from setjmp. 580 */ 581 if (savectx(up, 1)) { 582 /* 583 * Return 1 in child. 584 */ 585 return (1); 586 } 587 return (0); 588} 589 590void 591cpu_exit(p) 592 register struct proc *p; 593{ 594 595#if NNPX > 0 596 npxexit(p); 597#endif /* NNPX */ 598 curproc = p; 599 mi_switch(); 600 /* 601 * This is to shutup the compiler, and if swtch() failed I suppose 602 * this would be a good thing. This keeps gcc happy because panic 603 * is a volatile void function as well. 604 */ 605 panic("cpu_exit"); 606} 607 608void 609cpu_wait(p) struct proc *p; { 610/* extern vm_map_t upages_map; */ 611 extern char kstack[]; 612 613 /* drop per-process resources */ 614 pmap_remove(vm_map_pmap(kernel_map), (vm_offset_t) p->p_addr, 615 ((vm_offset_t) p->p_addr) + ctob(UPAGES)); 616 kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES)); 617 vmspace_free(p->p_vmspace); 618} 619 620/* 621 * Dump the machine specific header information at the start of a core dump. 622 */ 623int 624cpu_coredump(p, vp, cred) 625 struct proc *p; 626 struct vnode *vp; 627 struct ucred *cred; 628{ 629 630 return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES), 631 (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL, 632 p)); 633} 634 635/* 636 * Set a red zone in the kernel stack after the u. area. 637 */ 638void 639setredzone(pte, vaddr) 640 u_short *pte; 641 caddr_t vaddr; 642{ 643/* eventually do this by setting up an expand-down stack segment 644 for ss0: selector, allowing stack access down to top of u. 645 this means though that protection violations need to be handled 646 thru a double fault exception that must do an integral task 647 switch to a known good context, within which a dump can be 648 taken. a sensible scheme might be to save the initial context 649 used by sched (that has physical memory mapped 1:1 at bottom) 650 and take the dump while still in mapped mode */ 651} 652 653/* 654 * Move pages from one kernel virtual address to another. 655 * Both addresses are assumed to reside in the Sysmap, 656 * and size must be a multiple of CLSIZE. 657 */ 658 659void 660pagemove(from, to, size) 661 register caddr_t from, to; 662 int size; 663{ 664 register vm_offset_t pa; 665 666 if (size & CLOFSET) 667 panic("pagemove"); 668 while (size > 0) { 669 pa = pmap_kextract((vm_offset_t)from); 670 if (pa == 0) 671 panic("pagemove 2"); 672 if (pmap_kextract((vm_offset_t)to) != 0) 673 panic("pagemove 3"); 674 pmap_kremove((vm_offset_t)from); 675 pmap_kenter((vm_offset_t)to, pa); 676 from += PAGE_SIZE; 677 to += PAGE_SIZE; 678 size -= PAGE_SIZE; 679 } 680 pmap_update(); 681} 682 683/* 684 * Convert kernel VA to physical address 685 */ 686u_long 687kvtop(void *addr) 688{ 689 vm_offset_t va; 690 691 va = pmap_kextract((vm_offset_t)addr); 692 if (va == 0) 693 panic("kvtop: zero page frame"); 694 return((int)va); 695} 696 697/* 698 * Map an IO request into kernel virtual address space. 699 * 700 * All requests are (re)mapped into kernel VA space. 701 * Notice that we use b_bufsize for the size of the buffer 702 * to be mapped. b_bcount might be modified by the driver. 703 */ 704void 705vmapbuf(bp) 706 register struct buf *bp; 707{ 708 register int npf; 709 register caddr_t addr; 710 int off; 711 vm_offset_t kva; 712 vm_offset_t pa, lastv, v; 713 714 if ((bp->b_flags & B_PHYS) == 0) 715 panic("vmapbuf"); 716 717 /* 718 * this is the kva that is to be used for 719 * the temporary kernel mapping 720 */ 721 kva = (vm_offset_t) bp->b_saveaddr; 722 723 lastv = 0; 724 for (addr = (caddr_t)trunc_page(bp->b_data); 725 addr < bp->b_data + bp->b_bufsize; 726 addr += PAGE_SIZE) { 727 728/* 729 * make sure that the pde is valid and held 730 */ 731 v = trunc_page(((vm_offset_t)vtopte(addr))); 732 if (v != lastv) { 733 vm_fault_quick(v, VM_PROT_READ); 734 pa = pmap_kextract( v); 735 vm_page_hold(PHYS_TO_VM_PAGE(pa)); 736 lastv = v; 737 } 738 739/* 740 * do the vm_fault if needed, do the copy-on-write thing when 741 * reading stuff off device into memory. 742 */ 743 vm_fault_quick(addr, 744 (bp->b_flags&B_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ); 745 pa = pmap_kextract((vm_offset_t) addr); 746/* 747 * hold the data page 748 */ 749 vm_page_hold(PHYS_TO_VM_PAGE(pa)); 750 } 751 752 addr = bp->b_saveaddr = bp->b_data; 753 off = (int)addr & PGOFSET; 754 npf = btoc(round_page(bp->b_bufsize + off)); 755 bp->b_data = (caddr_t) (kva + off); 756 while (npf--) { 757 pa = pmap_kextract((vm_offset_t)addr); 758 if (pa == 0) 759 panic("vmapbuf: null page frame"); 760 pmap_kenter(kva, trunc_page(pa)); 761 addr += PAGE_SIZE; 762 kva += PAGE_SIZE; 763 } 764 pmap_update(); 765} 766 767/* 768 * Free the io map PTEs associated with this IO operation. 769 * We also invalidate the TLB entries and restore the original b_addr. 770 */ 771void 772vunmapbuf(bp) 773 register struct buf *bp; 774{ 775 register caddr_t addr; 776 vm_offset_t kva,va,v,lastv,pa; 777 778 if ((bp->b_flags & B_PHYS) == 0) 779 panic("vunmapbuf"); 780 781 for (addr = (caddr_t)trunc_page((vm_offset_t) bp->b_data); 782 addr < bp->b_data + bp->b_bufsize; 783 addr += NBPG) 784 pmap_kremove((vm_offset_t) addr); 785 786 bp->b_data = bp->b_saveaddr; 787 bp->b_saveaddr = NULL; 788 789/* 790 * unhold the pde, and data pages 791 */ 792 lastv = 0; 793 for (addr = (caddr_t)trunc_page((vm_offset_t) bp->b_data); 794 addr < bp->b_data + bp->b_bufsize; 795 addr += NBPG) { 796 797 /* 798 * release the data page 799 */ 800 pa = pmap_kextract((vm_offset_t) addr); 801 vm_page_unhold(PHYS_TO_VM_PAGE(pa)); 802 803 /* 804 * and unhold the page table 805 */ 806 v = trunc_page(((vm_offset_t)vtopte(addr))); 807 if (v != lastv) { 808 pa = pmap_kextract(v); 809 vm_page_unhold(PHYS_TO_VM_PAGE(pa)); 810 lastv = v; 811 } 812 } 813} 814 815/* 816 * Force reset the processor by invalidating the entire address space! 817 */ 818void 819cpu_reset() { 820 821 /* force a shutdown by unmapping entire address space ! */ 822 bzero((caddr_t) PTD, NBPG); 823 824 /* "good night, sweet prince .... <THUNK!>" */ 825 tlbflush(); 826 /* NOTREACHED */ 827 while(1); 828} 829 830/* 831 * Grow the user stack to allow for 'sp'. This version grows the stack in 832 * chunks of SGROWSIZ. 833 */ 834int 835grow(p, sp) 836 struct proc *p; 837 u_int sp; 838{ 839 unsigned int nss; 840 caddr_t v; 841 struct vmspace *vm = p->p_vmspace; 842 843 if ((caddr_t)sp <= vm->vm_maxsaddr || (unsigned)sp >= (unsigned)USRSTACK) 844 return (1); 845 846 nss = roundup(USRSTACK - (unsigned)sp, PAGE_SIZE); 847 848 if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur) 849 return (0); 850 851 if (vm->vm_ssize && roundup(vm->vm_ssize << PAGE_SHIFT, 852 SGROWSIZ) < nss) { 853 int grow_amount; 854 /* 855 * If necessary, grow the VM that the stack occupies 856 * to allow for the rlimit. This allows us to not have 857 * to allocate all of the VM up-front in execve (which 858 * is expensive). 859 * Grow the VM by the amount requested rounded up to 860 * the nearest SGROWSIZ to provide for some hysteresis. 861 */ 862 grow_amount = roundup((nss - (vm->vm_ssize << PAGE_SHIFT)), SGROWSIZ); 863 v = (char *)USRSTACK - roundup(vm->vm_ssize << PAGE_SHIFT, 864 SGROWSIZ) - grow_amount; 865 /* 866 * If there isn't enough room to extend by SGROWSIZ, then 867 * just extend to the maximum size 868 */ 869 if (v < vm->vm_maxsaddr) { 870 v = vm->vm_maxsaddr; 871 grow_amount = MAXSSIZ - (vm->vm_ssize << PAGE_SHIFT); 872 } 873 if (vm_allocate(&vm->vm_map, (vm_offset_t *)&v, 874 grow_amount, FALSE) != KERN_SUCCESS) { 875 return (0); 876 } 877 vm->vm_ssize += grow_amount >> PAGE_SHIFT; 878 } 879 880 return (1); 881} 882