vm_machdep.c revision 12767
/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 * $Id: vm_machdep.c,v 1.47 1995/12/10 13:36:34 phk Exp $
 */

#include "npx.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/md_var.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

#include <i386/isa/isa.h>

extern void	pagemove __P((caddr_t from, caddr_t to, int size));
static void	setredzone __P((u_short *pte, caddr_t vaddr));
static void	vm_fault_quick __P((caddr_t v, int prot));

#ifdef BOUNCE_BUFFERS
static vm_offset_t
		vm_bounce_kva __P((int size, int waitok));
static void	vm_bounce_kva_free __P((vm_offset_t addr, vm_offset_t size,
					int now));
static vm_offset_t
		vm_bounce_page_find __P((int count));
static void	vm_bounce_page_free __P((vm_offset_t pa, int count));

static volatile int	kvasfreecnt;

caddr_t		bouncememory;
int		bouncepages;
static int	bpwait;
static vm_offset_t	*bouncepa;
static int	bmwait, bmfreeing;

#define BITS_IN_UNSIGNED (8*sizeof(unsigned))
static int	bounceallocarraysize;
static unsigned	*bounceallocarray;
static int	bouncefree;

#define SIXTEENMEG (4096*4096)
#define MAXBKVA	1024
int		maxbkva = MAXBKVA*NBPG;

/* special list that can be used at interrupt time for eventual kva free */
static struct kvasfree {
	vm_offset_t addr;
	vm_offset_t size;
} kvaf[MAXBKVA];

/*
 * get bounce buffer pages (count physically contiguous)
 * (only 1 implemented now)
 */
static vm_offset_t
vm_bounce_page_find(count)
	int count;
{
	int bit;
	int s,i;

	if (count != 1)
		panic("vm_bounce_page_find -- no support for > 1 page yet!!!");

	s = splbio();
retry:
	for (i = 0; i < bounceallocarraysize; i++) {
		if (bounceallocarray[i] != 0xffffffff) {
			bit = ffs(~bounceallocarray[i]);
			if (bit) {
				bounceallocarray[i] |= 1 << (bit - 1);
				bouncefree -= count;
				splx(s);
				return bouncepa[(i * BITS_IN_UNSIGNED + (bit - 1))];
			}
		}
	}
	bpwait = 1;
	tsleep((caddr_t) &bounceallocarray, PRIBIO, "bncwai", 0);
	goto retry;
}

static void
vm_bounce_kva_free(addr, size, now)
	vm_offset_t addr;
	vm_offset_t size;
	int now;
{
	int s = splbio();
	kvaf[kvasfreecnt].addr = addr;
	kvaf[kvasfreecnt].size = size;
	++kvasfreecnt;
	if( now) {
		/*
		 * this will do wakeups
		 */
		vm_bounce_kva(0,0);
	} else {
		if (bmwait) {
			/*
			 * if anyone is waiting on the bounce-map, then wakeup
			 */
			wakeup((caddr_t) io_map);
			bmwait = 0;
		}
	}
	splx(s);
}
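
/*
 * Worked example of the bitmap bookkeeping above: with 32-bit words
 * (BITS_IN_UNSIGNED == 32), bounce page 37 is tracked by word
 * 37 / 32 == 1 and bit 37 % 32 == 5, so the page is busy exactly when
 * (bounceallocarray[1] & (1 << 5)) is set.  vm_bounce_page_find()
 * scans for a word that is not 0xffffffff and claims the first clear
 * bit found by ffs(); vm_bounce_page_free() below clears it again.
 */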

/*
 * free count bounce buffer pages
 */
static void
vm_bounce_page_free(pa, count)
	vm_offset_t pa;
	int count;
{
	int allocindex;
	int index;
	int bit;

	if (count != 1)
		panic("vm_bounce_page_free -- no support for > 1 page yet!!!");

	for(index=0;index<bouncepages;index++) {
		if( pa == bouncepa[index])
			break;
	}

	if( index == bouncepages)
		panic("vm_bounce_page_free: invalid bounce buffer");

	allocindex = index / BITS_IN_UNSIGNED;
	bit = index % BITS_IN_UNSIGNED;

	bounceallocarray[allocindex] &= ~(1 << bit);

	bouncefree += count;
	if (bpwait) {
		bpwait = 0;
		wakeup((caddr_t) &bounceallocarray);
	}
}

/*
 * allocate count bounce buffer kva pages
 */
static vm_offset_t
vm_bounce_kva(size, waitok)
	int size;
	int waitok;
{
	int i;
	vm_offset_t kva = 0;
	vm_offset_t off;
	int s = splbio();
more:
	if (!bmfreeing && kvasfreecnt) {
		bmfreeing = 1;
		for (i = 0; i < kvasfreecnt; i++) {
			for(off=0;off<kvaf[i].size;off+=NBPG) {
				pmap_kremove( kvaf[i].addr + off);
			}
			kmem_free_wakeup(io_map, kvaf[i].addr,
				kvaf[i].size);
		}
		kvasfreecnt = 0;
		bmfreeing = 0;
		if( bmwait) {
			bmwait = 0;
			wakeup( (caddr_t) io_map);
		}
	}

	if( size == 0) {
		splx(s);
		return NULL;
	}

	if ((kva = kmem_alloc_pageable(io_map, size)) == 0) {
		if( !waitok) {
			splx(s);
			return NULL;
		}
		bmwait = 1;
		tsleep((caddr_t) io_map, PRIBIO, "bmwait", 0);
		goto more;
	}
	splx(s);
	return kva;
}

/*
 * same as vm_bounce_kva -- but really allocates (and takes a page count
 * as its argument)
 */
vm_offset_t
vm_bounce_kva_alloc(count)
int count;
{
	int i;
	vm_offset_t kva;
	vm_offset_t pa;
	if( bouncepages == 0) {
		kva = (vm_offset_t) malloc(count*NBPG, M_TEMP, M_WAITOK);
		return kva;
	}
	kva = vm_bounce_kva(count*NBPG, 1);
	for(i=0;i<count;i++) {
		pa = vm_bounce_page_find(1);
		pmap_kenter(kva + i * NBPG, pa);
	}
	return kva;
}

/*
 * same as vm_bounce_kva_free -- but really frees
 */
void
vm_bounce_kva_alloc_free(kva, count)
	vm_offset_t kva;
	int count;
{
	int i;
	vm_offset_t pa;
	if( bouncepages == 0) {
		free((caddr_t) kva, M_TEMP);
		return;
	}
	for(i = 0; i < count; i++) {
		pa = pmap_kextract(kva + i * NBPG);
		vm_bounce_page_free(pa, 1);
	}
	vm_bounce_kva_free(kva, count*NBPG, 0);
}
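
/*
 * Typical use of the pair above, as a sketch: a driver needing `count'
 * pages of DMA-safe (below 16MB) buffer memory would do
 *
 *	kva = vm_bounce_kva_alloc(count);
 *	... perform DMA through (caddr_t) kva ...
 *	vm_bounce_kva_alloc_free(kva, count);
 *
 * When no bounce pages are configured (bouncepages == 0), the pair
 * simply degenerates to malloc()/free() of ordinary kernel memory.
 */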

/*
 * do the things necessary to the struct buf to implement
 * bounce buffers...  inserted before the disk sort
 */
void
vm_bounce_alloc(bp)
	struct buf *bp;
{
	int countvmpg;
	vm_offset_t vastart, vaend;
	vm_offset_t vapstart, vapend;
	vm_offset_t va, kva;
	vm_offset_t pa;
	int dobounceflag = 0;
	int i;

	if (bouncepages == 0)
		return;

	if (bp->b_flags & B_BOUNCE) {
		printf("vm_bounce_alloc: called recursively???\n");
		return;
	}

	if (bp->b_bufsize < bp->b_bcount) {
		printf(
		    "vm_bounce_alloc: b_bufsize(0x%lx) < b_bcount(0x%lx) !!\n",
			bp->b_bufsize, bp->b_bcount);
		panic("vm_bounce_alloc");
	}

/*
 *  This is not really necessary
 *	if( bp->b_bufsize != bp->b_bcount) {
 *		printf("size: %d, count: %d\n", bp->b_bufsize, bp->b_bcount);
 *	}
 */

	vastart = (vm_offset_t) bp->b_data;
	vaend = (vm_offset_t) bp->b_data + bp->b_bufsize;

	vapstart = i386_trunc_page(vastart);
	vapend = i386_round_page(vaend);
	countvmpg = (vapend - vapstart) / NBPG;

/*
 * if any page is above 16MB, then go into bounce-buffer mode
 */
	va = vapstart;
	for (i = 0; i < countvmpg; i++) {
		pa = pmap_kextract(va);
		if (pa >= SIXTEENMEG)
			++dobounceflag;
		if( pa == 0)
			panic("vm_bounce_alloc: Unmapped page");
		va += NBPG;
	}
	if (dobounceflag == 0)
		return;

	if (bouncepages < dobounceflag)
		panic("Not enough bounce buffers!!!");

/*
 * allocate a replacement kva for b_addr
 */
	kva = vm_bounce_kva(countvmpg*NBPG, 1);
#if 0
	printf("%s: vapstart: %x, vapend: %x, countvmpg: %d, kva: %x ",
		(bp->b_flags & B_READ) ? "read":"write",
			vapstart, vapend, countvmpg, kva);
#endif
	va = vapstart;
	for (i = 0; i < countvmpg; i++) {
		pa = pmap_kextract(va);
		if (pa >= SIXTEENMEG) {
			/*
			 * allocate a replacement page
			 */
			vm_offset_t bpa = vm_bounce_page_find(1);
			pmap_kenter(kva + (NBPG * i), bpa);
#if 0
			printf("r(%d): (%x,%x,%x) ", i, va, pa, bpa);
#endif
			/*
			 * if we are writing, then copy the data into the page
			 */
			if ((bp->b_flags & B_READ) == 0) {
				bcopy((caddr_t) va, (caddr_t) kva + (NBPG * i), NBPG);
			}
		} else {
			/*
			 * use original page
			 */
			pmap_kenter(kva + (NBPG * i), pa);
		}
		va += NBPG;
	}

/*
 * flag the buffer as being bounced
 */
	bp->b_flags |= B_BOUNCE;
/*
 * save the original buffer kva
 */
	bp->b_savekva = bp->b_data;
/*
 * put our new kva into the buffer (offset by original offset)
 */
	bp->b_data = (caddr_t) (((vm_offset_t) kva) |
				((vm_offset_t) bp->b_savekva & (NBPG - 1)));
#if 0
	printf("b_savekva: %x, newva: %x\n", bp->b_savekva, bp->b_data);
#endif
	return;
}
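
/*
 * Example of the 16MB test above: SIXTEENMEG is 4096*4096 == 0x1000000.
 * A buffer page at physical address 0x1200000 (18MB) is beyond the ISA
 * DMA limit, so it is remapped through a bounce page below 16MB.  For a
 * write the data is copied into the bounce page here; for a read it is
 * copied back by vm_bounce_free() when biodone() completes the I/O.
 */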

/*
 * hook into biodone to free bounce buffer
 */
void
vm_bounce_free(bp)
	struct buf *bp;
{
	int i;
	vm_offset_t origkva, bouncekva, bouncekvaend;

/*
 * if this isn't a bounced buffer, then just return
 */
	if ((bp->b_flags & B_BOUNCE) == 0)
		return;

/*
 *  This check is not necessary
 *	if (bp->b_bufsize != bp->b_bcount) {
 *		printf("vm_bounce_free: b_bufsize=%d, b_bcount=%d\n",
 *			bp->b_bufsize, bp->b_bcount);
 *	}
 */

	origkva = (vm_offset_t) bp->b_savekva;
	bouncekva = (vm_offset_t) bp->b_data;
/*
	printf("free: %d ", bp->b_bufsize);
*/

/*
 * check every page in the kva space for b_addr
 */
	for (i = 0; i < bp->b_bufsize; ) {
		vm_offset_t mybouncepa;
		vm_offset_t copycount;

		copycount = i386_round_page(bouncekva + 1) - bouncekva;
		mybouncepa = pmap_kextract(i386_trunc_page(bouncekva));

/*
 * if this is a bounced pa, then process as one
 */
		if ( mybouncepa != pmap_kextract( i386_trunc_page( origkva))) {
			vm_offset_t tocopy = copycount;
			if (i + tocopy > bp->b_bufsize)
				tocopy = bp->b_bufsize - i;
/*
 * if this is a read, then copy from bounce buffer into original buffer
 */
			if (bp->b_flags & B_READ)
				bcopy((caddr_t) bouncekva, (caddr_t) origkva, tocopy);
/*
 * free the bounce allocation
 */

/*
			printf("(kva: %x, pa: %x)", bouncekva, mybouncepa);
*/
			vm_bounce_page_free(mybouncepa, 1);
		}

		origkva += copycount;
		bouncekva += copycount;
		i += copycount;
	}

/*
	printf("\n");
*/
/*
 * add the old kva into the "to free" list
 */

	bouncekva = i386_trunc_page((vm_offset_t) bp->b_data);
	bouncekvaend = i386_round_page((vm_offset_t)bp->b_data + bp->b_bufsize);

/*
	printf("freeva: %d\n", (bouncekvaend - bouncekva) / NBPG);
*/
	vm_bounce_kva_free( bouncekva, (bouncekvaend - bouncekva), 0);
	bp->b_data = bp->b_savekva;
	bp->b_savekva = 0;
	bp->b_flags &= ~B_BOUNCE;

	return;
}


/*
 * init the bounce buffer system
 */
void
vm_bounce_init()
{
	int i;

	kvasfreecnt = 0;

	if (bouncepages == 0)
		return;

	bounceallocarraysize = (bouncepages + BITS_IN_UNSIGNED - 1) / BITS_IN_UNSIGNED;
	bounceallocarray = malloc(bounceallocarraysize * sizeof(unsigned), M_TEMP, M_NOWAIT);

	if (!bounceallocarray)
		panic("Cannot allocate bounce resource array");

	bouncepa = malloc(bouncepages * sizeof(vm_offset_t), M_TEMP, M_NOWAIT);
	if (!bouncepa)
		panic("Cannot allocate physical memory array");

	for(i=0;i<bounceallocarraysize;i++) {
		bounceallocarray[i] = 0xffffffff;
	}

	for(i=0;i<bouncepages;i++) {
		vm_offset_t pa;
		if( (pa = pmap_kextract((vm_offset_t) bouncememory + i * NBPG)) >= SIXTEENMEG)
			panic("bounce memory out of range");
		if( pa == 0)
			panic("bounce memory not resident");
		bouncepa[i] = pa;
		bounceallocarray[i/(8*sizeof(int))] &= ~(1<<(i%(8*sizeof(int))));
	}
	bouncefree = bouncepages;

}
#endif /* BOUNCE_BUFFERS */

/*
 * quick version of vm_fault
 */
static void
vm_fault_quick(v, prot)
	caddr_t v;
	int prot;
{
	if (prot & VM_PROT_WRITE)
		subyte(v, fubyte(v));
	else
		fubyte(v);
}
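
/*
 * vm_fault_quick() works by touching a single byte of the page:
 * fubyte() faults the page in for read access, and writing the same
 * byte back with subyte() forces any copy-on-write fault to be
 * resolved up front, all without the overhead of a full vm_fault()
 * call.  vmapbuf() below relies on this before holding user pages.
 */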

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
int
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	register struct user *up = p2->p_addr;
	int offset;

	/*
	 * Copy pcb and stack from proc p1 to p2.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree;
	 * this is tricky, as the final pcb is constructed by savectx,
	 * but its frame isn't yet on the stack when the stack is copied.
	 * swtch compensates for this when the child eventually runs.
	 * This should be done differently, with a single call
	 * that copies and updates the pcb+stack,
	 * replacing the bcopy and savectx.
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	offset = mvesp() - (int)kstack;
	bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
	    (unsigned) ctob(UPAGES) - offset);
	p2->p_md.md_regs = p1->p_md.md_regs;

	pmap_activate(&p2->p_vmspace->vm_pmap, &up->u_pcb);

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero from setjmp.
	 */
	if (savectx(&up->u_pcb, 1)) {
		/*
		 * Return 1 in child.
		 */
		return (1);
	}
	return (0);
}

void
cpu_exit(p)
	register struct proc *p;
{

#if NNPX > 0
	npxexit(p);
#endif	/* NNPX */
	cnt.v_swtch++;
	cpu_switch(p);
	panic("cpu_exit");
}

void
cpu_wait(p)
	struct proc *p;
{
/*	extern vm_map_t upages_map; */

	/* drop per-process resources */
	pmap_remove(vm_map_pmap(u_map), (vm_offset_t) p->p_addr,
		((vm_offset_t) p->p_addr) + ctob(UPAGES));
	kmem_free(u_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
	vmspace_free(p->p_vmspace);
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
int
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{

	return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
	    (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL,
	    p));
}

/*
 * Set a red zone in the kernel stack after the u. area.
 */
static void
setredzone(pte, vaddr)
	u_short *pte;
	caddr_t vaddr;
{
/* eventually do this by setting up an expand-down stack segment
   for ss0: selector, allowing stack access down to top of u.
   this means though that protection violations need to be handled
   thru a double fault exception that must do an integral task
   switch to a known good context, within which a dump can be
   taken. a sensible scheme might be to save the initial context
   used by sched (that has physical memory mapped 1:1 at bottom)
   and take the dump while still in mapped mode */
}

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLSIZE.
 */
void
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register vm_offset_t pa;

	if (size & CLOFSET)
		panic("pagemove");
	while (size > 0) {
		pa = pmap_kextract((vm_offset_t)from);
		if (pa == 0)
			panic("pagemove 2");
		if (pmap_kextract((vm_offset_t)to) != 0)
			panic("pagemove 3");
		pmap_kremove((vm_offset_t)from);
		pmap_kenter((vm_offset_t)to, pa);
		from += PAGE_SIZE;
		to += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
}

/*
 * Convert kernel VA to physical address
 */
u_long
kvtop(void *addr)
{
	vm_offset_t va;

	va = pmap_kextract((vm_offset_t)addr);
	if (va == 0)
		panic("kvtop: zero page frame");
	return((int)va);
}
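
/*
 * kvtop() example: a driver that must hand a physical address to a DMA
 * controller can call kvtop() on a kernel-virtual buffer pointer; an
 * unmapped address panics rather than silently returning 0.
 * vmapbuf()/vunmapbuf() below bracket raw I/O: physio() marks the
 * buffer B_PHYS and calls vmapbuf() before starting the transfer and
 * vunmapbuf() after it completes.
 */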

/*
 * Map an IO request into kernel virtual address space.
 *
 * All requests are (re)mapped into kernel VA space.
 * Notice that we use b_bufsize for the size of the buffer
 * to be mapped.  b_bcount might be modified by the driver.
 */
void
vmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr;
	int off;
	vm_offset_t kva;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	/*
	 * this is the kva that is to be used for
	 * the temporary kernel mapping
	 */
	kva = (vm_offset_t) bp->b_saveaddr;

	for (addr = (caddr_t)trunc_page(bp->b_data);
		addr < bp->b_data + bp->b_bufsize;
		addr += PAGE_SIZE) {

/*
 * do the vm_fault if needed, do the copy-on-write thing when
 * reading stuff off device into memory.
 */
		vm_fault_quick(addr,
			(bp->b_flags&B_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
		pa = pmap_kextract((vm_offset_t) addr);
		if (pa == 0)
			panic("vmapbuf: page not present");
/*
 * hold the data page
 */
#ifdef DIAGNOSTIC
		if( VM_PAGE_TO_PHYS(PHYS_TO_VM_PAGE(pa)) != pa)
			panic("vmapbuf: confused PHYS_TO_VM_PAGE mapping");
#endif
		vm_page_hold(PHYS_TO_VM_PAGE(pa));
	}

	addr = bp->b_saveaddr = bp->b_data;
	off = (int)addr & PGOFSET;
	npf = btoc(round_page(bp->b_bufsize + off));
	bp->b_data = (caddr_t) (kva + off);
	while (npf--) {
		pa = pmap_kextract((vm_offset_t)addr);
		if (pa == 0)
			panic("vmapbuf: null page frame");
		pmap_kenter(kva, trunc_page(pa));
		addr += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
}

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_addr.
 */
void
vunmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	for (addr = (caddr_t)trunc_page((vm_offset_t) bp->b_data);
		addr < bp->b_data + bp->b_bufsize;
		addr += NBPG)
		pmap_kremove((vm_offset_t) addr);

	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;

/*
 * unhold the pde, and data pages
 */
	for (addr = (caddr_t)trunc_page((vm_offset_t) bp->b_data);
		addr < bp->b_data + bp->b_bufsize;
		addr += NBPG) {
		/*
		 * release the data page
		 */
		pa = pmap_kextract((vm_offset_t) addr);
		vm_page_unhold(PHYS_TO_VM_PAGE(pa));
	}
}

/*
 * Force reset the processor by invalidating the entire address space!
 */
void
cpu_reset()
{

	/*
	 * Attempt to do a CPU reset via the keyboard controller,
	 * do not turn off GateA20, as any machine that fails
	 * to do the reset here would then end up in no man's land.
	 */

#ifndef BROKEN_KEYBOARD_RESET
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
	printf("Keyboard reset did not work, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for printf to complete */
#endif

	/* force a shutdown by unmapping entire address space ! */
	bzero((caddr_t) PTD, NBPG);

	/* "good night, sweet prince .... <THUNK!>" */
	pmap_update();
	/* NOTREACHED */
	while(1);
}
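
/*
 * The keyboard-controller reset above writes the pulse-output-port
 * command 0xFE to the 8042 command register at port 0x64 (IO_KBD + 4),
 * which pulses the CPU reset line on PC/AT hardware.  If that fails,
 * zeroing the page directory (PTD) and flushing the TLB guarantees a
 * fault on the very next memory reference; with no handler mappable,
 * the fault escalates to a triple fault, which also resets the CPU.
 */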

/*
 * Grow the user stack to allow for 'sp'. This version grows the stack in
 *	chunks of SGROWSIZ.
 */
int
grow(p, sp)
	struct proc *p;
	u_int sp;
{
	unsigned int nss;
	caddr_t v;
	struct vmspace *vm = p->p_vmspace;

	if ((caddr_t)sp <= vm->vm_maxsaddr || (unsigned)sp >= (unsigned)USRSTACK)
		return (1);

	nss = roundup(USRSTACK - (unsigned)sp, PAGE_SIZE);

	if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur)
		return (0);

	if (vm->vm_ssize && roundup(vm->vm_ssize << PAGE_SHIFT,
		SGROWSIZ) < nss) {
		int grow_amount;
		/*
		 * If necessary, grow the VM that the stack occupies
		 * to allow for the rlimit. This allows us to not have
		 * to allocate all of the VM up-front in execve (which
		 * is expensive).
		 * Grow the VM by the amount requested rounded up to
		 * the nearest SGROWSIZ to provide for some hysteresis.
		 */
		grow_amount = roundup((nss - (vm->vm_ssize << PAGE_SHIFT)), SGROWSIZ);
		v = (char *)USRSTACK - roundup(vm->vm_ssize << PAGE_SHIFT,
			SGROWSIZ) - grow_amount;
		/*
		 * If there isn't enough room to extend by SGROWSIZ, then
		 * just extend to the maximum size
		 */
		if (v < vm->vm_maxsaddr) {
			v = vm->vm_maxsaddr;
			grow_amount = MAXSSIZ - (vm->vm_ssize << PAGE_SHIFT);
		}
		if ((grow_amount == 0) || (vm_map_find(&vm->vm_map, NULL, 0, (vm_offset_t *)&v,
		    grow_amount, FALSE) != KERN_SUCCESS)) {
			return (0);
		}
		vm->vm_ssize += grow_amount >> PAGE_SHIFT;
	}

	return (1);
}

/*
 * prototype routine to implement the pre-zeroed page mechanism
 * this routine is called from the idle loop.
 */
int
vm_page_zero_idle()
{
	vm_page_t m;
	if ((cnt.v_free_count > cnt.v_interrupt_free_min) &&
		(m = vm_page_queue_free.tqh_first)) {
		TAILQ_REMOVE(&vm_page_queue_free, m, pageq);
		enable_intr();
		pmap_zero_page(VM_PAGE_TO_PHYS(m));
		disable_intr();
		TAILQ_INSERT_HEAD(&vm_page_queue_zero, m, pageq);
		++vm_page_zero_count;
		return 1;
	}
	return 0;
}
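
/*
 * The idle-loop zeroing above is opportunistic: it is entered with
 * interrupts disabled, takes one page off the free queue, then
 * re-enables interrupts for the duration of pmap_zero_page() so that
 * zeroing never adds interrupt latency.  The page is parked on
 * vm_page_queue_zero, from which the allocator can hand out
 * already-zeroed pages (e.g. for page tables) without zeroing them
 * on demand.
 */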