vm_machdep.c revision 1896
1/*- 2 * Copyright (c) 1982, 1986 The Regents of the University of California. 3 * Copyright (c) 1989, 1990 William Jolitz 4 * Copyright (c) 1994 John Dyson 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * the Systems Programming Group of the University of Utah Computer 9 * Science Department, and William Jolitz. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. All advertising materials mentioning features or use of this software 20 * must display the following acknowledgement: 21 * This product includes software developed by the University of 22 * California, Berkeley and its contributors. 23 * 4. Neither the name of the University nor the names of its contributors 24 * may be used to endorse or promote products derived from this software 25 * without specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 30 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 37 * SUCH DAMAGE. 38 * 39 * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91 40 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$ 41 * $Id: vm_machdep.c,v 1.25 1994/08/07 03:31:52 davidg Exp $ 42 */ 43 44#include "npx.h" 45#include <sys/param.h> 46#include <sys/systm.h> 47#include <sys/proc.h> 48#include <sys/malloc.h> 49#include <sys/buf.h> 50#include <sys/vnode.h> 51#include <sys/user.h> 52 53#include <machine/cpu.h> 54 55#include <vm/vm.h> 56#include <vm/vm_kern.h> 57 58#ifndef NOBOUNCE 59vm_map_t io_map; 60volatile int kvasfreecnt; 61 62 63caddr_t bouncememory; 64int bouncepages, bpwait; 65vm_offset_t *bouncepa; 66int bmwait, bmfreeing; 67 68#define BITS_IN_UNSIGNED (8*sizeof(unsigned)) 69int bounceallocarraysize; 70unsigned *bounceallocarray; 71int bouncefree; 72 73#define SIXTEENMEG (4096*4096) 74#define MAXBKVA 1024 75int maxbkva = MAXBKVA*NBPG; 76 77/* special list that can be used at interrupt time for eventual kva free */ 78struct kvasfree { 79 vm_offset_t addr; 80 vm_offset_t size; 81} kvaf[MAXBKVA]; 82 83 84vm_offset_t vm_bounce_kva(); 85/* 86 * get bounce buffer pages (count physically contiguous) 87 * (only 1 inplemented now) 88 */ 89vm_offset_t 90vm_bounce_page_find(count) 91 int count; 92{ 93 int bit; 94 int s,i; 95 96 if (count != 1) 97 panic("vm_bounce_page_find -- no support for > 1 page yet!!!"); 98 99 s = splbio(); 100retry: 101 for (i = 0; i < bounceallocarraysize; i++) { 102 if (bounceallocarray[i] != 0xffffffff) { 103 if (bit = ffs(~bounceallocarray[i])) { 104 bounceallocarray[i] |= 1 << (bit - 1) ; 105 bouncefree -= count; 106 splx(s); 107 return bouncepa[(i * BITS_IN_UNSIGNED + (bit - 1))]; 108 } 109 } 110 } 111 bpwait = 1; 112 tsleep((caddr_t) &bounceallocarray, PRIBIO, "bncwai", 0); 113 goto retry; 114} 115 116void 117vm_bounce_kva_free(addr, size, now) 118 vm_offset_t addr; 119 vm_offset_t size; 120 int now; 121{ 122 int s = splbio(); 123 kvaf[kvasfreecnt].addr = addr; 124 kvaf[kvasfreecnt].size = size; 125 ++kvasfreecnt; 126 if( now) { 127 /* 128 * this will do wakeups 129 */ 130 vm_bounce_kva(0,0); 131 } else { 132 if (bmwait) { 133 /* 134 * if anyone is waiting on the bounce-map, then wakeup 135 */ 136 wakeup((caddr_t) io_map); 137 bmwait = 0; 138 } 139 } 140 splx(s); 141} 142 143/* 144 * free count bounce buffer pages 145 */ 146void 147vm_bounce_page_free(pa, count) 148 vm_offset_t pa; 149 int count; 150{ 151 int allocindex; 152 int index; 153 int bit; 154 155 if (count != 1) 156 panic("vm_bounce_page_free -- no support for > 1 page yet!!!\n"); 157 158 for(index=0;index<bouncepages;index++) { 159 if( pa == bouncepa[index]) 160 break; 161 } 162 163 if( index == bouncepages) 164 panic("vm_bounce_page_free: invalid bounce buffer"); 165 166 allocindex = index / BITS_IN_UNSIGNED; 167 bit = index % BITS_IN_UNSIGNED; 168 169 bounceallocarray[allocindex] &= ~(1 << bit); 170 171 bouncefree += count; 172 if (bpwait) { 173 bpwait = 0; 174 wakeup((caddr_t) &bounceallocarray); 175 } 176} 177 178/* 179 * allocate count bounce buffer kva pages 180 */ 181vm_offset_t 182vm_bounce_kva(size, waitok) 183 int size; 184 int waitok; 185{ 186 int i; 187 int startfree; 188 vm_offset_t kva = 0; 189 vm_offset_t off; 190 int s = splbio(); 191more: 192 if (!bmfreeing && kvasfreecnt) { 193 bmfreeing = 1; 194 for (i = 0; i < kvasfreecnt; i++) { 195 for(off=0;off<kvaf[i].size;off+=NBPG) { 196 pmap_kremove( kvaf[i].addr + off); 197 } 198 kmem_free_wakeup(io_map, kvaf[i].addr, 199 kvaf[i].size); 200 } 201 kvasfreecnt = 0; 202 bmfreeing = 0; 203 if( bmwait) { 204 bmwait = 0; 205 wakeup( (caddr_t) io_map); 206 } 207 } 208 209 if( size == 0) { 210 splx(s); 211 return NULL; 212 } 213 214 if ((kva = kmem_alloc_pageable(io_map, size)) == 0) { 215 if( !waitok) { 216 splx(s); 217 return NULL; 218 } 219 bmwait = 1; 220 tsleep((caddr_t) io_map, PRIBIO, "bmwait", 0); 221 goto more; 222 } 223 splx(s); 224 return kva; 225} 226 227/* 228 * same as vm_bounce_kva -- but really allocate (but takes pages as arg) 229 */ 230vm_offset_t 231vm_bounce_kva_alloc(count) 232int count; 233{ 234 int i; 235 vm_offset_t kva; 236 vm_offset_t pa; 237 if( bouncepages == 0) { 238 kva = (vm_offset_t) malloc(count*NBPG, M_TEMP, M_WAITOK); 239 return kva; 240 } 241 kva = vm_bounce_kva(count*NBPG, 1); 242 for(i=0;i<count;i++) { 243 pa = vm_bounce_page_find(1); 244 pmap_kenter(kva + i * NBPG, pa); 245 } 246 return kva; 247} 248 249/* 250 * same as vm_bounce_kva_free -- but really free 251 */ 252void 253vm_bounce_kva_alloc_free(kva, count) 254 vm_offset_t kva; 255 int count; 256{ 257 int i; 258 vm_offset_t pa; 259 if( bouncepages == 0) { 260 free((caddr_t) kva, M_TEMP); 261 return; 262 } 263 for(i = 0; i < count; i++) { 264 pa = pmap_kextract(kva + i * NBPG); 265 vm_bounce_page_free(pa, 1); 266 } 267 vm_bounce_kva_free(kva, count*NBPG, 0); 268} 269 270/* 271 * do the things necessary to the struct buf to implement 272 * bounce buffers... inserted before the disk sort 273 */ 274void 275vm_bounce_alloc(bp) 276 struct buf *bp; 277{ 278 int countvmpg; 279 vm_offset_t vastart, vaend; 280 vm_offset_t vapstart, vapend; 281 vm_offset_t va, kva; 282 vm_offset_t pa; 283 int dobounceflag = 0; 284 int bounceindex; 285 int i; 286 int s; 287 288 if (bouncepages == 0) 289 return; 290 291 if (bp->b_flags & B_BOUNCE) { 292 printf("vm_bounce_alloc: called recursively???\n"); 293 return; 294 } 295 296 if (bp->b_bufsize < bp->b_bcount) { 297 printf("vm_bounce_alloc: b_bufsize(0x%x) < b_bcount(0x%x) !!!!\n", 298 bp->b_bufsize, bp->b_bcount); 299 panic("vm_bounce_alloc"); 300 } 301 302/* 303 * This is not really necessary 304 * if( bp->b_bufsize != bp->b_bcount) { 305 * printf("size: %d, count: %d\n", bp->b_bufsize, bp->b_bcount); 306 * } 307 */ 308 309 310 vastart = (vm_offset_t) bp->b_data; 311 vaend = (vm_offset_t) bp->b_data + bp->b_bufsize; 312 313 vapstart = i386_trunc_page(vastart); 314 vapend = i386_round_page(vaend); 315 countvmpg = (vapend - vapstart) / NBPG; 316 317/* 318 * if any page is above 16MB, then go into bounce-buffer mode 319 */ 320 va = vapstart; 321 for (i = 0; i < countvmpg; i++) { 322 pa = pmap_kextract(va); 323 if (pa >= SIXTEENMEG) 324 ++dobounceflag; 325 va += NBPG; 326 } 327 if (dobounceflag == 0) 328 return; 329 330 if (bouncepages < dobounceflag) 331 panic("Not enough bounce buffers!!!"); 332 333/* 334 * allocate a replacement kva for b_addr 335 */ 336 kva = vm_bounce_kva(countvmpg*NBPG, 1); 337#if 0 338 printf("%s: vapstart: %x, vapend: %x, countvmpg: %d, kva: %x ", 339 (bp->b_flags & B_READ) ? "read":"write", 340 vapstart, vapend, countvmpg, kva); 341#endif 342 va = vapstart; 343 for (i = 0; i < countvmpg; i++) { 344 pa = pmap_kextract(va); 345 if (pa >= SIXTEENMEG) { 346 /* 347 * allocate a replacement page 348 */ 349 vm_offset_t bpa = vm_bounce_page_find(1); 350 pmap_kenter(kva + (NBPG * i), bpa); 351#if 0 352 printf("r(%d): (%x,%x,%x) ", i, va, pa, bpa); 353#endif 354 /* 355 * if we are writing, the copy the data into the page 356 */ 357 if ((bp->b_flags & B_READ) == 0) { 358 bcopy((caddr_t) va, (caddr_t) kva + (NBPG * i), NBPG); 359 } 360 } else { 361 /* 362 * use original page 363 */ 364 pmap_kenter(kva + (NBPG * i), pa); 365 } 366 va += NBPG; 367 } 368 369/* 370 * flag the buffer as being bounced 371 */ 372 bp->b_flags |= B_BOUNCE; 373/* 374 * save the original buffer kva 375 */ 376 bp->b_savekva = bp->b_data; 377/* 378 * put our new kva into the buffer (offset by original offset) 379 */ 380 bp->b_data = (caddr_t) (((vm_offset_t) kva) | 381 ((vm_offset_t) bp->b_savekva & (NBPG - 1))); 382#if 0 383 printf("b_savekva: %x, newva: %x\n", bp->b_savekva, bp->b_data); 384#endif 385 return; 386} 387 388/* 389 * hook into biodone to free bounce buffer 390 */ 391void 392vm_bounce_free(bp) 393 struct buf *bp; 394{ 395 int i; 396 vm_offset_t origkva, bouncekva, bouncekvaend; 397 int countvmpg; 398 int s; 399 400/* 401 * if this isn't a bounced buffer, then just return 402 */ 403 if ((bp->b_flags & B_BOUNCE) == 0) 404 return; 405 406/* 407 * This check is not necessary 408 * if (bp->b_bufsize != bp->b_bcount) { 409 * printf("vm_bounce_free: b_bufsize=%d, b_bcount=%d\n", 410 * bp->b_bufsize, bp->b_bcount); 411 * } 412 */ 413 414 origkva = (vm_offset_t) bp->b_savekva; 415 bouncekva = (vm_offset_t) bp->b_data; 416/* 417 printf("free: %d ", bp->b_bufsize); 418*/ 419 420/* 421 * check every page in the kva space for b_addr 422 */ 423 for (i = 0; i < bp->b_bufsize; ) { 424 vm_offset_t mybouncepa; 425 vm_offset_t copycount; 426 427 copycount = i386_round_page(bouncekva + 1) - bouncekva; 428 mybouncepa = pmap_kextract(i386_trunc_page(bouncekva)); 429 430/* 431 * if this is a bounced pa, then process as one 432 */ 433 if ( mybouncepa != pmap_kextract( i386_trunc_page( origkva))) { 434 vm_offset_t tocopy = copycount; 435 if (i + tocopy > bp->b_bufsize) 436 tocopy = bp->b_bufsize - i; 437/* 438 * if this is a read, then copy from bounce buffer into original buffer 439 */ 440 if (bp->b_flags & B_READ) 441 bcopy((caddr_t) bouncekva, (caddr_t) origkva, tocopy); 442/* 443 * free the bounce allocation 444 */ 445 446/* 447 printf("(kva: %x, pa: %x)", bouncekva, mybouncepa); 448*/ 449 vm_bounce_page_free(mybouncepa, 1); 450 } 451 452 origkva += copycount; 453 bouncekva += copycount; 454 i += copycount; 455 } 456 457/* 458 printf("\n"); 459*/ 460/* 461 * add the old kva into the "to free" list 462 */ 463 464 bouncekva= i386_trunc_page((vm_offset_t) bp->b_data); 465 bouncekvaend= i386_round_page((vm_offset_t)bp->b_data + bp->b_bufsize); 466 467/* 468 printf("freeva: %d\n", (bouncekvaend - bouncekva) / NBPG); 469*/ 470 vm_bounce_kva_free( bouncekva, (bouncekvaend - bouncekva), 0); 471 bp->b_data = bp->b_savekva; 472 bp->b_savekva = 0; 473 bp->b_flags &= ~B_BOUNCE; 474 475 return; 476} 477 478 479/* 480 * init the bounce buffer system 481 */ 482void 483vm_bounce_init() 484{ 485 vm_offset_t minaddr, maxaddr; 486 int i; 487 488 kvasfreecnt = 0; 489 490 if (bouncepages == 0) 491 return; 492 493 bounceallocarraysize = (bouncepages + BITS_IN_UNSIGNED - 1) / BITS_IN_UNSIGNED; 494 bounceallocarray = malloc(bounceallocarraysize * sizeof(unsigned), M_TEMP, M_NOWAIT); 495 496 if (!bounceallocarray) 497 panic("Cannot allocate bounce resource array\n"); 498 499 bzero(bounceallocarray, bounceallocarraysize * sizeof(unsigned)); 500 bouncepa = malloc(bouncepages * sizeof(vm_offset_t), M_TEMP, M_NOWAIT); 501 if (!bouncepa) 502 panic("Cannot allocate physical memory array\n"); 503 504 for(i=0;i<bouncepages;i++) { 505 vm_offset_t pa; 506 if( (pa = pmap_kextract((vm_offset_t) bouncememory + i * NBPG)) >= SIXTEENMEG) 507 panic("bounce memory out of range"); 508 if( pa == 0) 509 panic("bounce memory not resident"); 510 bouncepa[i] = pa; 511 } 512 bouncefree = bouncepages; 513 514} 515#endif /* NOBOUNCE */ 516/* 517 * quick version of vm_fault 518 */ 519 520void 521vm_fault_quick( v, prot) 522 vm_offset_t v; 523 int prot; 524{ 525 if( (cpu_class == CPUCLASS_386) && 526 (prot & VM_PROT_WRITE)) 527 vm_fault(&curproc->p_vmspace->vm_map, v, 528 VM_PROT_READ|VM_PROT_WRITE, FALSE); 529 else if( prot & VM_PROT_WRITE) 530 *(volatile char *)v += 0; 531 else 532 *(volatile char *)v; 533} 534 535 536/* 537 * Finish a fork operation, with process p2 nearly set up. 538 * Copy and update the kernel stack and pcb, making the child 539 * ready to run, and marking it so that it can return differently 540 * than the parent. Returns 1 in the child process, 0 in the parent. 541 * We currently double-map the user area so that the stack is at the same 542 * address in each process; in the future we will probably relocate 543 * the frame pointers on the stack after copying. 544 */ 545int 546cpu_fork(p1, p2) 547 register struct proc *p1, *p2; 548{ 549 register struct user *up = p2->p_addr; 550 int foo, offset, addr, i; 551 extern char kstack[]; 552 extern int mvesp(); 553 554 /* 555 * Copy pcb and stack from proc p1 to p2. 556 * We do this as cheaply as possible, copying only the active 557 * part of the stack. The stack and pcb need to agree; 558 * this is tricky, as the final pcb is constructed by savectx, 559 * but its frame isn't yet on the stack when the stack is copied. 560 * swtch compensates for this when the child eventually runs. 561 * This should be done differently, with a single call 562 * that copies and updates the pcb+stack, 563 * replacing the bcopy and savectx. 564 */ 565 p2->p_addr->u_pcb = p1->p_addr->u_pcb; 566 offset = mvesp() - (int)kstack; 567 bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset, 568 (unsigned) ctob(UPAGES) - offset); 569 p2->p_md.md_regs = p1->p_md.md_regs; 570 571 pmap_activate(&p2->p_vmspace->vm_pmap, &up->u_pcb); 572 573 /* 574 * 575 * Arrange for a non-local goto when the new process 576 * is started, to resume here, returning nonzero from setjmp. 577 */ 578 if (savectx(up, 1)) { 579 /* 580 * Return 1 in child. 581 */ 582 return (1); 583 } 584 return (0); 585} 586 587void 588cpu_exit(p) 589 register struct proc *p; 590{ 591 592#if NNPX > 0 593 npxexit(p); 594#endif /* NNPX */ 595 curproc = p; 596 mi_switch(); 597 /* 598 * This is to shutup the compiler, and if swtch() failed I suppose 599 * this would be a good thing. This keeps gcc happy because panic 600 * is a volatile void function as well. 601 */ 602 panic("cpu_exit"); 603} 604 605void 606cpu_wait(p) struct proc *p; { 607/* extern vm_map_t upages_map; */ 608 extern char kstack[]; 609 610 /* drop per-process resources */ 611 pmap_remove(vm_map_pmap(kernel_map), (vm_offset_t) p->p_addr, 612 ((vm_offset_t) p->p_addr) + ctob(UPAGES)); 613 kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES)); 614 vmspace_free(p->p_vmspace); 615} 616 617/* 618 * Dump the machine specific header information at the start of a core dump. 619 */ 620int 621cpu_coredump(p, vp, cred) 622 struct proc *p; 623 struct vnode *vp; 624 struct ucred *cred; 625{ 626 627 return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES), 628 (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL, 629 p)); 630} 631 632/* 633 * Set a red zone in the kernel stack after the u. area. 634 */ 635void 636setredzone(pte, vaddr) 637 u_short *pte; 638 caddr_t vaddr; 639{ 640/* eventually do this by setting up an expand-down stack segment 641 for ss0: selector, allowing stack access down to top of u. 642 this means though that protection violations need to be handled 643 thru a double fault exception that must do an integral task 644 switch to a known good context, within which a dump can be 645 taken. a sensible scheme might be to save the initial context 646 used by sched (that has physical memory mapped 1:1 at bottom) 647 and take the dump while still in mapped mode */ 648} 649 650/* 651 * Move pages from one kernel virtual address to another. 652 * Both addresses are assumed to reside in the Sysmap, 653 * and size must be a multiple of CLSIZE. 654 */ 655 656void 657pagemove(from, to, size) 658 register caddr_t from, to; 659 int size; 660{ 661 register vm_offset_t pa; 662 663 if (size & CLOFSET) 664 panic("pagemove"); 665 while (size > 0) { 666 pa = pmap_kextract((vm_offset_t)from); 667 if (pa == 0) 668 panic("pagemove 2"); 669 if (pmap_kextract((vm_offset_t)to) != 0) 670 panic("pagemove 3"); 671 pmap_kremove((vm_offset_t)from); 672 pmap_kenter((vm_offset_t)to, pa); 673 from += PAGE_SIZE; 674 to += PAGE_SIZE; 675 size -= PAGE_SIZE; 676 } 677} 678 679/* 680 * Convert kernel VA to physical address 681 */ 682u_long 683kvtop(void *addr) 684{ 685 vm_offset_t va; 686 687 va = pmap_kextract((vm_offset_t)addr); 688 if (va == 0) 689 panic("kvtop: zero page frame"); 690 return((int)va); 691} 692 693/* 694 * Map an IO request into kernel virtual address space. 695 * 696 * All requests are (re)mapped into kernel VA space. 697 * Notice that we use b_bufsize for the size of the buffer 698 * to be mapped. b_bcount might be modified by the driver. 699 */ 700void 701vmapbuf(bp) 702 register struct buf *bp; 703{ 704 register int npf; 705 register caddr_t addr; 706 int off; 707 vm_offset_t kva; 708 vm_offset_t pa, lastv, v; 709 710 if ((bp->b_flags & B_PHYS) == 0) 711 panic("vmapbuf"); 712 713 /* 714 * this is the kva that is to be used for 715 * the temporary kernel mapping 716 */ 717 kva = (vm_offset_t) bp->b_saveaddr; 718 719 lastv = 0; 720 for (addr = (caddr_t)trunc_page(bp->b_data); 721 addr < bp->b_data + bp->b_bufsize; 722 addr += PAGE_SIZE) { 723 724/* 725 * make sure that the pde is valid and held 726 */ 727 v = trunc_page(((vm_offset_t)vtopte(addr))); 728 if (v != lastv) { 729 vm_fault_quick(v, VM_PROT_READ); 730 pa = pmap_kextract( v); 731 vm_page_hold(PHYS_TO_VM_PAGE(pa)); 732 lastv = v; 733 } 734 735/* 736 * do the vm_fault if needed, do the copy-on-write thing when 737 * reading stuff off device into memory. 738 */ 739 vm_fault_quick(addr, 740 (bp->b_flags&B_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ); 741 pa = pmap_kextract((vm_offset_t) addr); 742/* 743 * hold the data page 744 */ 745 vm_page_hold(PHYS_TO_VM_PAGE(pa)); 746 } 747 748 addr = bp->b_saveaddr = bp->b_data; 749 off = (int)addr & PGOFSET; 750 npf = btoc(round_page(bp->b_bufsize + off)); 751 bp->b_data = (caddr_t) (kva + off); 752 while (npf--) { 753 pa = pmap_kextract((vm_offset_t)addr); 754 if (pa == 0) 755 panic("vmapbuf: null page frame"); 756 pmap_kenter(kva, trunc_page(pa)); 757 addr += PAGE_SIZE; 758 kva += PAGE_SIZE; 759 } 760} 761 762/* 763 * Free the io map PTEs associated with this IO operation. 764 * We also invalidate the TLB entries and restore the original b_addr. 765 */ 766void 767vunmapbuf(bp) 768 register struct buf *bp; 769{ 770 register caddr_t addr; 771 vm_offset_t kva,va,v,lastv,pa; 772 773 if ((bp->b_flags & B_PHYS) == 0) 774 panic("vunmapbuf"); 775 776 for (addr = (caddr_t)trunc_page((vm_offset_t) bp->b_data); 777 addr < bp->b_data + bp->b_bufsize; 778 addr += NBPG) 779 pmap_kremove((vm_offset_t) addr); 780 781 bp->b_data = bp->b_saveaddr; 782 bp->b_saveaddr = NULL; 783 784/* 785 * unhold the pde, and data pages 786 */ 787 lastv = 0; 788 for (addr = (caddr_t)trunc_page((vm_offset_t) bp->b_data); 789 addr < bp->b_data + bp->b_bufsize; 790 addr += NBPG) { 791 792 /* 793 * release the data page 794 */ 795 pa = pmap_kextract((vm_offset_t) addr); 796 vm_page_unhold(PHYS_TO_VM_PAGE(pa)); 797 798 /* 799 * and unhold the page table 800 */ 801 v = trunc_page(((vm_offset_t)vtopte(addr))); 802 if (v != lastv) { 803 pa = pmap_kextract(v); 804 vm_page_unhold(PHYS_TO_VM_PAGE(pa)); 805 lastv = v; 806 } 807 } 808} 809 810/* 811 * Force reset the processor by invalidating the entire address space! 812 */ 813void 814cpu_reset() { 815 816 /* force a shutdown by unmapping entire address space ! */ 817 bzero((caddr_t) PTD, NBPG); 818 819 /* "good night, sweet prince .... <THUNK!>" */ 820 tlbflush(); 821 /* NOTREACHED */ 822 while(1); 823} 824 825/* 826 * Grow the user stack to allow for 'sp'. This version grows the stack in 827 * chunks of SGROWSIZ. 828 */ 829int 830grow(p, sp) 831 struct proc *p; 832 u_int sp; 833{ 834 unsigned int nss; 835 caddr_t v; 836 struct vmspace *vm = p->p_vmspace; 837 838 if ((caddr_t)sp <= vm->vm_maxsaddr || (unsigned)sp >= (unsigned)USRSTACK) 839 return (1); 840 841 nss = roundup(USRSTACK - (unsigned)sp, PAGE_SIZE); 842 843 if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur) 844 return (0); 845 846 if (vm->vm_ssize && roundup(vm->vm_ssize << PAGE_SHIFT, 847 SGROWSIZ) < nss) { 848 int grow_amount; 849 /* 850 * If necessary, grow the VM that the stack occupies 851 * to allow for the rlimit. This allows us to not have 852 * to allocate all of the VM up-front in execve (which 853 * is expensive). 854 * Grow the VM by the amount requested rounded up to 855 * the nearest SGROWSIZ to provide for some hysteresis. 856 */ 857 grow_amount = roundup((nss - (vm->vm_ssize << PAGE_SHIFT)), SGROWSIZ); 858 v = (char *)USRSTACK - roundup(vm->vm_ssize << PAGE_SHIFT, 859 SGROWSIZ) - grow_amount; 860 /* 861 * If there isn't enough room to extend by SGROWSIZ, then 862 * just extend to the maximum size 863 */ 864 if (v < vm->vm_maxsaddr) { 865 v = vm->vm_maxsaddr; 866 grow_amount = MAXSSIZ - (vm->vm_ssize << PAGE_SHIFT); 867 } 868 if (vm_allocate(&vm->vm_map, (vm_offset_t *)&v, 869 grow_amount, FALSE) != KERN_SUCCESS) { 870 return (0); 871 } 872 vm->vm_ssize += grow_amount >> PAGE_SHIFT; 873 } 874 875 return (1); 876} 877