vm_machdep.c revision 15543
/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 *	$Id: vm_machdep.c,v 1.61 1996/05/02 10:43:06 phk Exp $
 */

#include "npx.h"
#include "opt_bounce.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>

#include <machine/clock.h>
#include <machine/md_var.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

#include <i386/isa/isa.h>

#ifdef BOUNCE_BUFFERS
static vm_offset_t
		vm_bounce_kva __P((int size, int waitok));
static void	vm_bounce_kva_free __P((vm_offset_t addr, vm_offset_t size,
					int now));
static vm_offset_t
		vm_bounce_page_find __P((int count));
static void	vm_bounce_page_free __P((vm_offset_t pa, int count));

static volatile int kvasfreecnt;

caddr_t		bouncememory;
int		bouncepages;
static int	bpwait;
static vm_offset_t *bouncepa;
static int	bmwait, bmfreeing;

#define BITS_IN_UNSIGNED (8*sizeof(unsigned))
static int	bounceallocarraysize;
static unsigned	*bounceallocarray;
static int	bouncefree;

#define SIXTEENMEG (4096*4096)
#define MAXBKVA 1024
int		maxbkva = MAXBKVA*PAGE_SIZE;

/* special list that can be used at interrupt time for eventual kva free */
static struct kvasfree {
	vm_offset_t addr;
	vm_offset_t size;
} kvaf[MAXBKVA];

/*
 * get bounce buffer pages (count physically contiguous)
 * (only 1 implemented now)
 */
static vm_offset_t
vm_bounce_page_find(count)
	int count;
{
	int bit;
	int s,i;

	if (count != 1)
		panic("vm_bounce_page_find -- no support for > 1 page yet!!!");

	s = splbio();
retry:
	for (i = 0; i < bounceallocarraysize; i++) {
		if (bounceallocarray[i] != 0xffffffff) {
			bit = ffs(~bounceallocarray[i]);
			if (bit) {
				bounceallocarray[i] |= 1 << (bit - 1);
				bouncefree -= count;
				splx(s);
				return bouncepa[(i * BITS_IN_UNSIGNED + (bit - 1))];
			}
		}
	}
	bpwait = 1;
	tsleep((caddr_t) &bounceallocarray, PRIBIO, "bncwai", 0);
	goto retry;
}

static void
vm_bounce_kva_free(addr, size, now)
	vm_offset_t addr;
	vm_offset_t size;
	int now;
{
	int s = splbio();
	kvaf[kvasfreecnt].addr = addr;
	kvaf[kvasfreecnt].size = size;
	++kvasfreecnt;
	if( now) {
		/*
		 * this will do wakeups
		 */
		vm_bounce_kva(0,0);
	} else {
		if (bmwait) {
			/*
			 * if anyone is waiting on the bounce-map, then wakeup
			 */
			wakeup((caddr_t) io_map);
			bmwait = 0;
		}
	}
	splx(s);
}

/*
 * free count bounce buffer pages
 */
static void
vm_bounce_page_free(pa, count)
	vm_offset_t pa;
	int count;
{
	int allocindex;
	int index;
	int bit;

	if (count != 1)
		panic("vm_bounce_page_free -- no support for > 1 page yet!!!");

	for(index=0;index<bouncepages;index++) {
		if( pa == bouncepa[index])
			break;
	}

	if( index == bouncepages)
		panic("vm_bounce_page_free: invalid bounce buffer");

	allocindex = index / BITS_IN_UNSIGNED;
	bit = index % BITS_IN_UNSIGNED;

	bounceallocarray[allocindex] &= ~(1 << bit);

	bouncefree += count;
	if (bpwait) {
		bpwait = 0;
		wakeup((caddr_t) &bounceallocarray);
	}
}
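/*
 * Worked example of the bitmap arithmetic above: with 32-bit words
 * (BITS_IN_UNSIGNED == 32), bounce page index 37 is tracked by
 * bounceallocarray[37 / 32] == bounceallocarray[1], bit 37 % 32 == 5.
 * A set bit means "allocated"; vm_bounce_init() below presets the pad
 * bits past bouncepages to 1, so the ffs() scan in
 * vm_bounce_page_find() never hands them out.
 */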

/*
 * allocate count bounce buffer kva pages
 */
static vm_offset_t
vm_bounce_kva(size, waitok)
	int size;
	int waitok;
{
	int i;
	vm_offset_t kva = 0;
	vm_offset_t off;
	int s = splbio();
more:
	if (!bmfreeing && kvasfreecnt) {
		bmfreeing = 1;
		for (i = 0; i < kvasfreecnt; i++) {
			for(off=0;off<kvaf[i].size;off+=PAGE_SIZE) {
				pmap_kremove( kvaf[i].addr + off);
			}
			kmem_free_wakeup(io_map, kvaf[i].addr,
				kvaf[i].size);
		}
		kvasfreecnt = 0;
		bmfreeing = 0;
		if( bmwait) {
			bmwait = 0;
			wakeup( (caddr_t) io_map);
		}
	}

	if( size == 0) {
		splx(s);
		return NULL;
	}

	if ((kva = kmem_alloc_pageable(io_map, size)) == 0) {
		if( !waitok) {
			splx(s);
			return NULL;
		}
		bmwait = 1;
		tsleep((caddr_t) io_map, PRIBIO, "bmwait", 0);
		goto more;
	}
	splx(s);
	return kva;
}

/*
 * same as vm_bounce_kva -- but really allocates the bounce pages too
 * (size is given in pages, not bytes)
 */
vm_offset_t
vm_bounce_kva_alloc(count)
int count;
{
	int i;
	vm_offset_t kva;
	vm_offset_t pa;
	if( bouncepages == 0) {
		kva = (vm_offset_t) malloc(count*PAGE_SIZE, M_TEMP, M_WAITOK);
		return kva;
	}
	kva = vm_bounce_kva(count*PAGE_SIZE, 1);
	for(i=0;i<count;i++) {
		pa = vm_bounce_page_find(1);
		pmap_kenter(kva + i * PAGE_SIZE, pa);
	}
	return kva;
}

/*
 * same as vm_bounce_kva_free -- but really frees the bounce pages too
 */
void
vm_bounce_kva_alloc_free(kva, count)
	vm_offset_t kva;
	int count;
{
	int i;
	vm_offset_t pa;
	if( bouncepages == 0) {
		free((caddr_t) kva, M_TEMP);
		return;
	}
	for(i = 0; i < count; i++) {
		pa = pmap_kextract(kva + i * PAGE_SIZE);
		vm_bounce_page_free(pa, 1);
	}
	vm_bounce_kva_free(kva, count*PAGE_SIZE, 0);
}
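/*
 * Illustrative sketch ("foo_alloc_scratch" is hypothetical, not from
 * this file): a driver wanting a two-page scratch buffer that stays
 * below 16MB when bounce pages are configured.  Note the malloc()
 * fallback above gives no such guarantee when bouncepages == 0.
 */
#if 0
static vm_offset_t
foo_alloc_scratch(void)
{
	vm_offset_t scratch;

	scratch = vm_bounce_kva_alloc(2);	/* two mapped pages */
	/* ... when done: vm_bounce_kva_alloc_free(scratch, 2); */
	return scratch;
}
#endif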

/*
 * do the things necessary to the struct buf to implement
 * bounce buffers...  inserted before the disk sort
 */
void
vm_bounce_alloc(bp)
	struct buf *bp;
{
	int countvmpg;
	vm_offset_t vastart, vaend;
	vm_offset_t vapstart, vapend;
	vm_offset_t va, kva;
	vm_offset_t pa;
	int dobounceflag = 0;
	int i;

	if (bouncepages == 0)
		return;

	if (bp->b_flags & B_BOUNCE) {
		printf("vm_bounce_alloc: called recursively???\n");
		return;
	}

	if (bp->b_bufsize < bp->b_bcount) {
		printf(
		    "vm_bounce_alloc: b_bufsize(0x%lx) < b_bcount(0x%lx) !!\n",
			bp->b_bufsize, bp->b_bcount);
		panic("vm_bounce_alloc");
	}

/*
 *  This is not really necessary
 *	if( bp->b_bufsize != bp->b_bcount) {
 *		printf("size: %d, count: %d\n", bp->b_bufsize, bp->b_bcount);
 *	}
 */

	vastart = (vm_offset_t) bp->b_data;
	vaend = (vm_offset_t) bp->b_data + bp->b_bufsize;

	vapstart = trunc_page(vastart);
	vapend = round_page(vaend);
	countvmpg = (vapend - vapstart) / PAGE_SIZE;

/*
 * if any page is above 16MB, then go into bounce-buffer mode
 */
	va = vapstart;
	for (i = 0; i < countvmpg; i++) {
		pa = pmap_kextract(va);
		if (pa >= SIXTEENMEG)
			++dobounceflag;
		if( pa == 0)
			panic("vm_bounce_alloc: Unmapped page");
		va += PAGE_SIZE;
	}
	if (dobounceflag == 0)
		return;

	if (bouncepages < dobounceflag)
		panic("Not enough bounce buffers!!!");

/*
 * allocate a replacement kva for b_addr
 */
	kva = vm_bounce_kva(countvmpg*PAGE_SIZE, 1);
#if 0
	printf("%s: vapstart: %x, vapend: %x, countvmpg: %d, kva: %x ",
		(bp->b_flags & B_READ) ? "read":"write",
			vapstart, vapend, countvmpg, kva);
#endif
	va = vapstart;
	for (i = 0; i < countvmpg; i++) {
		pa = pmap_kextract(va);
		if (pa >= SIXTEENMEG) {
			/*
			 * allocate a replacement page
			 */
			vm_offset_t bpa = vm_bounce_page_find(1);
			pmap_kenter(kva + (PAGE_SIZE * i), bpa);
#if 0
			printf("r(%d): (%x,%x,%x) ", i, va, pa, bpa);
#endif
			/*
			 * if we are writing, then copy the data into the page
			 */
			if ((bp->b_flags & B_READ) == 0) {
				bcopy((caddr_t) va, (caddr_t) kva + (PAGE_SIZE * i), PAGE_SIZE);
			}
		} else {
			/*
			 * use original page
			 */
			pmap_kenter(kva + (PAGE_SIZE * i), pa);
		}
		va += PAGE_SIZE;
	}

/*
 * flag the buffer as being bounced
 */
	bp->b_flags |= B_BOUNCE;
/*
 * save the original buffer kva
 */
	bp->b_savekva = bp->b_data;
/*
 * put our new kva into the buffer (offset by original offset)
 */
	bp->b_data = (caddr_t) (((vm_offset_t) kva) |
		((vm_offset_t) bp->b_savekva & PAGE_MASK));
#if 0
	printf("b_savekva: %x, newva: %x\n", bp->b_savekva, bp->b_data);
#endif
	return;
}
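/*
 * Calling protocol, sketched from the comments here and at
 * vm_bounce_free() ("foo_strategy" is hypothetical): apply the bounce
 * remapping before the request is sorted and queued; biodone() undoes
 * it when the transfer completes.
 */
#if 0
static void
foo_strategy(struct buf *bp)
{
	vm_bounce_alloc(bp);	/* substitute pages above 16MB, if any */
	/* ... disk sort, queue and start the transfer ... */
	/* on completion, biodone() ends up in vm_bounce_free(bp) */
}
#endif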

/*
 * hook into biodone to free bounce buffer
 */
void
vm_bounce_free(bp)
	struct buf *bp;
{
	int i;
	vm_offset_t origkva, bouncekva, bouncekvaend;

/*
 * if this isn't a bounced buffer, then just return
 */
	if ((bp->b_flags & B_BOUNCE) == 0)
		return;

/*
 *  This check is not necessary
 *	if (bp->b_bufsize != bp->b_bcount) {
 *		printf("vm_bounce_free: b_bufsize=%d, b_bcount=%d\n",
 *			bp->b_bufsize, bp->b_bcount);
 *	}
 */

	origkva = (vm_offset_t) bp->b_savekva;
	bouncekva = (vm_offset_t) bp->b_data;
/*
	printf("free: %d ", bp->b_bufsize);
*/

/*
 * check every page in the kva space for b_addr
 */
	for (i = 0; i < bp->b_bufsize; ) {
		vm_offset_t mybouncepa;
		vm_offset_t copycount;

		copycount = round_page(bouncekva + 1) - bouncekva;
		mybouncepa = pmap_kextract(trunc_page(bouncekva));

/*
 * if this is a bounced pa, then process as one
 */
		if ( mybouncepa != pmap_kextract( trunc_page( origkva))) {
			vm_offset_t tocopy = copycount;
			if (i + tocopy > bp->b_bufsize)
				tocopy = bp->b_bufsize - i;
/*
 * if this is a read, then copy from bounce buffer into original buffer
 */
			if (bp->b_flags & B_READ)
				bcopy((caddr_t) bouncekva, (caddr_t) origkva, tocopy);
/*
 * free the bounce allocation
 */

/*
			printf("(kva: %x, pa: %x)", bouncekva, mybouncepa);
*/
			vm_bounce_page_free(mybouncepa, 1);
		}

		origkva += copycount;
		bouncekva += copycount;
		i += copycount;
	}

/*
	printf("\n");
*/
/*
 * add the old kva into the "to free" list
 */

	bouncekva= trunc_page((vm_offset_t) bp->b_data);
	bouncekvaend= round_page((vm_offset_t)bp->b_data + bp->b_bufsize);

/*
	printf("freeva: %d\n", (bouncekvaend - bouncekva) / PAGE_SIZE);
*/
	vm_bounce_kva_free( bouncekva, (bouncekvaend - bouncekva), 0);
	bp->b_data = bp->b_savekva;
	bp->b_savekva = 0;
	bp->b_flags &= ~B_BOUNCE;

	return;
}


/*
 * init the bounce buffer system
 */
void
vm_bounce_init()
{
	int i;

	kvasfreecnt = 0;

	if (bouncepages == 0)
		return;

	bounceallocarraysize = (bouncepages + BITS_IN_UNSIGNED - 1) / BITS_IN_UNSIGNED;
	bounceallocarray = malloc(bounceallocarraysize * sizeof(unsigned), M_TEMP, M_NOWAIT);

	if (!bounceallocarray)
		panic("Cannot allocate bounce resource array");

	bouncepa = malloc(bouncepages * sizeof(vm_offset_t), M_TEMP, M_NOWAIT);
	if (!bouncepa)
		panic("Cannot allocate physical memory array");

	for(i=0;i<bounceallocarraysize;i++) {
		bounceallocarray[i] = 0xffffffff;
	}

	for(i=0;i<bouncepages;i++) {
		vm_offset_t pa;
		if( (pa = pmap_kextract((vm_offset_t) bouncememory + i * PAGE_SIZE)) >= SIXTEENMEG)
			panic("bounce memory out of range");
		if( pa == 0)
			panic("bounce memory not resident");
		bouncepa[i] = pa;
		bounceallocarray[i/(8*sizeof(int))] &= ~(1<<(i%(8*sizeof(int))));
	}
	bouncefree = bouncepages;

}
#endif /* BOUNCE_BUFFERS */

/*
 * quick version of vm_fault
 */
void
vm_fault_quick(v, prot)
	caddr_t v;
	int prot;
{
	if (prot & VM_PROT_WRITE)
		subyte(v, fubyte(v));
	else
		fubyte(v);
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
int
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	struct pcb *pcb2 = &p2->p_addr->u_pcb;
	int sp, offset;
	volatile int retval;

	/*
	 * Copy pcb and stack from proc p1 to p2.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree;
	 * this is tricky, as the final pcb is constructed by savectx,
	 * but its frame isn't yet on the stack when the stack is copied.
	 * This should be done differently, with a single call
	 * that copies and updates the pcb+stack,
	 * replacing the bcopy and savectx.
	 */

	__asm __volatile("movl %%esp,%0" : "=r" (sp));
	offset = sp - (int)kstack;

	retval = 1;		/* return 1 in child */
	bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
	    (unsigned) ctob(UPAGES) - offset);
	p2->p_md.md_regs = p1->p_md.md_regs;

	*pcb2 = p1->p_addr->u_pcb;
	pcb2->pcb_cr3 = vtophys(p2->p_vmspace->vm_pmap.pm_pdir);

	retval = 0;		/* return 0 in parent */
	savectx(pcb2);
	return (retval);
}
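/*
 * How the two return values above work: "retval" is volatile, so it is
 * re-read from the stack.  The stack image is copied to the child while
 * retval == 1; the parent then stores 0 and calls savectx(), so when
 * the child is first switched in and resumes at the savectx() return,
 * it reads the 1 preserved in its copy of the stack, while the parent
 * falls through and returns 0.
 */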

void
cpu_exit(p)
	register struct proc *p;
{
#ifdef USER_LDT
	struct pcb *pcb;
#endif

#if NNPX > 0
	npxexit(p);
#endif	/* NNPX */
#ifdef USER_LDT
	pcb = &p->p_addr->u_pcb;
	if (pcb->pcb_ldt != 0) {
		if (pcb == curpcb)
			lldt(GSEL(GUSERLDT_SEL, SEL_KPL));
		kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ldt,
			pcb->pcb_ldt_len * sizeof(union descriptor));
		pcb->pcb_ldt_len = (int)pcb->pcb_ldt = 0;
	}
#endif
	cnt.v_swtch++;
	cpu_switch(p);
	panic("cpu_exit");
}

void
cpu_wait(p)
	struct proc *p;
{
	/* drop per-process resources */
	pmap_qremove((vm_offset_t) p->p_addr, UPAGES);
	kmem_free(u_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
	vmspace_free(p->p_vmspace);
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
int
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{

	return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
	    (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL,
	    p));
}

#ifdef notyet
static void
setredzone(pte, vaddr)
	u_short *pte;
	caddr_t vaddr;
{
/* eventually do this by setting up an expand-down stack segment
   for ss0: selector, allowing stack access down to top of u.
   this means though that protection violations need to be handled
   thru a double fault exception that must do an integral task
   switch to a known good context, within which a dump can be
   taken. a sensible scheme might be to save the initial context
   used by sched (that has physical memory mapped 1:1 at bottom)
   and take the dump while still in mapped mode */
}
#endif

/*
 * Convert kernel VA to physical address
 */
u_long
kvtop(void *addr)
{
	vm_offset_t va;

	va = pmap_kextract((vm_offset_t)addr);
	if (va == 0)
		panic("kvtop: zero page frame");
	return((int)va);
}
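/*
 * Typical use (hypothetical fragment; "buffer" is not from this file):
 * handing a kernel-virtual buffer to hardware that needs a physical
 * address.
 */
#if 0
	u_long pa;

	pa = kvtop(buffer);	/* panics if the page is unmapped */
	/* ... program the controller with pa ... */
#endif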

/*
 * Map an IO request into kernel virtual address space.
 *
 * All requests are (re)mapped into kernel VA space.
 * Notice that we use b_bufsize for the size of the buffer
 * to be mapped.  b_bcount might be modified by the driver.
 */
void
vmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr;
	int off;
	vm_offset_t kva;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	/*
	 * this is the kva that is to be used for
	 * the temporary kernel mapping
	 */
	kva = (vm_offset_t) bp->b_saveaddr;

	for (addr = (caddr_t)trunc_page(bp->b_data);
		addr < bp->b_data + bp->b_bufsize;
		addr += PAGE_SIZE) {

/*
 * do the vm_fault if needed, do the copy-on-write thing when
 * reading stuff off device into memory.
 */
		vm_fault_quick(addr,
			(bp->b_flags&B_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
		pa = pmap_kextract((vm_offset_t) addr);
		if (pa == 0)
			panic("vmapbuf: page not present");
/*
 * hold the data page
 */
#ifdef DIAGNOSTIC
		if( VM_PAGE_TO_PHYS(PHYS_TO_VM_PAGE(pa)) != pa)
			panic("vmapbuf: confused PHYS_TO_VM_PAGE mapping");
#endif
		vm_page_hold(PHYS_TO_VM_PAGE(pa));
	}

	addr = bp->b_saveaddr = bp->b_data;
	off = (int)addr & PAGE_MASK;
	npf = btoc(round_page(bp->b_bufsize + off));
	bp->b_data = (caddr_t) (kva + off);
	while (npf--) {
		pa = pmap_kextract((vm_offset_t)addr);
		if (pa == 0)
			panic("vmapbuf: null page frame");
		pmap_kenter(kva, trunc_page(pa));
		addr += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
}

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_addr.
 */
void
vunmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	for (addr = (caddr_t)trunc_page((vm_offset_t) bp->b_data);
		addr < bp->b_data + bp->b_bufsize;
		addr += PAGE_SIZE)
		pmap_kremove((vm_offset_t) addr);

	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;

/*
 * unhold the pde, and data pages
 */
	for (addr = (caddr_t)trunc_page((vm_offset_t) bp->b_data);
		addr < bp->b_data + bp->b_bufsize;
		addr += PAGE_SIZE) {
		/*
		 * release the data page
		 */
		pa = pmap_kextract((vm_offset_t) addr);
		vm_page_unhold(PHYS_TO_VM_PAGE(pa));
	}
}
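/*
 * Expected pairing (sketch; "kva" stands in for space the caller has
 * already reserved): a physio-style caller loads bp->b_saveaddr with
 * the reserved kva -- vmapbuf() reads it from there before overwriting
 * the field with the original b_data -- then brackets the transfer
 * with the two routines above.
 */
#if 0
	bp->b_saveaddr = (caddr_t) kva;	/* kva reserved by the caller */
	vmapbuf(bp);		/* fault in, hold and remap the pages */
	/* ... start the transfer and wait for biodone() ... */
	vunmapbuf(bp);		/* unmap and unhold the pages */
#endif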

/*
 * Force reset the processor by invalidating the entire address space!
 */
void
cpu_reset() {

	/*
	 * Attempt to do a CPU reset via the keyboard controller,
	 * do not turn off the GateA20, as any machine that fails
	 * to do the reset here would then end up in no man's land.
	 */

#ifndef BROKEN_KEYBOARD_RESET
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
	printf("Keyboard reset did not work, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for printf to complete */
#endif

	/* force a shutdown by unmapping entire address space ! */
	bzero((caddr_t) PTD, PAGE_SIZE);

	/* "good night, sweet prince .... <THUNK!>" */
	pmap_update();
	/* NOTREACHED */
	while(1);
}

/*
 * Grow the user stack to allow for 'sp'. This version grows the stack in
 *	chunks of SGROWSIZ.
 */
int
grow(p, sp)
	struct proc *p;
	u_int sp;
{
	unsigned int nss;
	caddr_t v;
	struct vmspace *vm = p->p_vmspace;

	if ((caddr_t)sp <= vm->vm_maxsaddr || (unsigned)sp >= (unsigned)USRSTACK)
		return (1);

	nss = roundup(USRSTACK - (unsigned)sp, PAGE_SIZE);

	if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur)
		return (0);

	if (vm->vm_ssize && roundup(vm->vm_ssize << PAGE_SHIFT,
		SGROWSIZ) < nss) {
		int grow_amount;
		/*
		 * If necessary, grow the VM that the stack occupies
		 * to allow for the rlimit. This allows us to not have
		 * to allocate all of the VM up-front in execve (which
		 * is expensive).
		 * Grow the VM by the amount requested rounded up to
		 * the nearest SGROWSIZ to provide for some hysteresis.
		 */
		grow_amount = roundup((nss - (vm->vm_ssize << PAGE_SHIFT)), SGROWSIZ);
		v = (char *)USRSTACK - roundup(vm->vm_ssize << PAGE_SHIFT,
			SGROWSIZ) - grow_amount;
		/*
		 * If there isn't enough room to extend by SGROWSIZ, then
		 * just extend to the maximum size
		 */
		if (v < vm->vm_maxsaddr) {
			v = vm->vm_maxsaddr;
			grow_amount = MAXSSIZ - (vm->vm_ssize << PAGE_SHIFT);
		}
		if ((grow_amount == 0) || (vm_map_find(&vm->vm_map, NULL, 0, (vm_offset_t *)&v,
		    grow_amount, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0) != KERN_SUCCESS)) {
			return (0);
		}
		vm->vm_ssize += grow_amount >> PAGE_SHIFT;
	}

	return (1);
}

/*
 * prototype routine to implement the pre-zeroed page mechanism
 * this routine is called from the idle loop.
 */
int
vm_page_zero_idle() {
	vm_page_t m;
	if ((cnt.v_free_count > cnt.v_interrupt_free_min) &&
	    (m = vm_page_queue_free.tqh_first)) {
		TAILQ_REMOVE(&vm_page_queue_free, m, pageq);
		enable_intr();
		pmap_zero_page(VM_PAGE_TO_PHYS(m));
		disable_intr();
		TAILQ_INSERT_HEAD(&vm_page_queue_zero, m, pageq);
		m->queue = PQ_ZERO;
		++vm_page_zero_count;
		return 1;
	}
	return 0;
}
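/*
 * The enable_intr()/disable_intr() bracket above implies this routine
 * is entered from the idle loop with interrupts disabled; the return
 * value lets the idle loop keep calling it until nothing is worth
 * zeroing (it returns 0 when the free-page count is at or below the
 * interrupt reserve, or the free queue is empty).
 */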