Deleted Added
vm_machdep.c (1415) vm_machdep.c (1549)
1/*-
2 * Copyright (c) 1982, 1986 The Regents of the University of California.
3 * Copyright (c) 1989, 1990 William Jolitz
4 * Copyright (c) 1994 John Dyson
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * the Systems Programming Group of the University of Utah Computer

--- 28 unchanged lines hidden (view full) ---

37 * SUCH DAMAGE.
38 *
39 * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
40 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
41 * $Id: vm_machdep.c,v 1.20 1994/04/20 07:06:20 davidg Exp $
42 */
43
44#include "npx.h"
1/*-
2 * Copyright (c) 1982, 1986 The Regents of the University of California.
3 * Copyright (c) 1989, 1990 William Jolitz
4 * Copyright (c) 1994 John Dyson
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * the Systems Programming Group of the University of Utah Computer

--- 28 unchanged lines hidden (view full) ---

37 * SUCH DAMAGE.
38 *
39 * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
40 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
41 * $Id: vm_machdep.c,v 1.20 1994/04/20 07:06:20 davidg Exp $
42 */
43
44#include "npx.h"
45#include "param.h"
46#include "systm.h"
47#include "proc.h"
48#include "malloc.h"
49#include "buf.h"
50#include "user.h"
45#include <sys/param.h>
46#include <sys/systm.h>
47#include <sys/proc.h>
48#include <sys/malloc.h>
49#include <sys/buf.h>
50#include <sys/vnode.h>
51#include <sys/user.h>
51
52
52#include "../include/cpu.h"
53#include <machine/cpu.h>
53
54
54#include "vm/vm.h"
55#include "vm/vm_kern.h"
55#include <vm/vm.h>
56#include <vm/vm_kern.h>
56
57#define b_cylin b_resid
58
57
58#define b_cylin b_resid
59
59#define MAXCLSTATS 256
60int clstats[MAXCLSTATS];
61int rqstats[MAXCLSTATS];
62
63
64#ifndef NOBOUNCE
65
66caddr_t bouncememory;
67vm_offset_t bouncepa, bouncepaend;
68int bouncepages, bpwait;
69vm_map_t io_map;
70int bmwait, bmfreeing;
71
72#define BITS_IN_UNSIGNED (8*sizeof(unsigned))
73int bounceallocarraysize;
74unsigned *bounceallocarray;
75int bouncefree;
76
77#define SIXTEENMEG (4096*4096)
60caddr_t bouncememory;
61vm_offset_t bouncepa, bouncepaend;
62int bouncepages, bpwait;
63vm_map_t io_map;
64int bmwait, bmfreeing;
65
66#define BITS_IN_UNSIGNED (8*sizeof(unsigned))
67int bounceallocarraysize;
68unsigned *bounceallocarray;
69int bouncefree;
70
71#define SIXTEENMEG (4096*4096)
78#define MAXBKVA 1024
72#define MAXBKVA 512
73int maxbkva=MAXBKVA*NBPG;
79
80/* special list that can be used at interrupt time for eventual kva free */
81struct kvasfree {
82 vm_offset_t addr;
83 vm_offset_t size;
84} kvaf[MAXBKVA];
85
86int kvasfreecnt;

--- 166 unchanged lines hidden (view full) ---

253 kva = (vm_offset_t) malloc(count*NBPG, M_TEMP, M_WAITOK);
254 return kva;
255 }
256 kva = vm_bounce_kva(count, 1);
257 for(i=0;i<count;i++) {
258 pa = vm_bounce_page_find(1);
259 pmap_kenter(kva + i * NBPG, pa);
260 }
74
75/* special list that can be used at interrupt time for eventual kva free */
76struct kvasfree {
77 vm_offset_t addr;
78 vm_offset_t size;
79} kvaf[MAXBKVA];
80
81int kvasfreecnt;
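
An aside on the structure just declared: kvaf[] is a fixed-size queue of deferred frees. Interrupt-time code cannot safely enter the allocator, so it only records an address/size pair and bumps kvasfreecnt; a later caller in a normal context drains the queue. A minimal userland sketch of that pattern, assuming a hypothetical kva_free_now() stub in place of the real deallocator:

#include <stdio.h>

#define MAXBKVA 512

/* entry recorded at interrupt time for a later, safe free */
struct kvasfree {
	unsigned long addr;
	unsigned long size;
} kvaf[MAXBKVA];

int kvasfreecnt;

/* hypothetical stub standing in for the real deallocator */
static void kva_free_now(unsigned long addr, unsigned long size)
{
	printf("freeing %lu bytes at %#lx\n", size, addr);
}

/* safe from interrupt context: only record the region */
void kva_free_deferred(unsigned long addr, unsigned long size)
{
	if (kvasfreecnt < MAXBKVA) {
		kvaf[kvasfreecnt].addr = addr;
		kvaf[kvasfreecnt].size = size;
		kvasfreecnt++;
	}
}

/* called later, from a normal context: drain the queue */
void kva_drain(void)
{
	int i;

	for (i = 0; i < kvasfreecnt; i++)
		kva_free_now(kvaf[i].addr, kvaf[i].size);
	kvasfreecnt = 0;
}

int main(void)
{
	kva_free_deferred(0xf0100000UL, 4096);
	kva_free_deferred(0xf0200000UL, 8192);
	kva_drain();
	return 0;
}

A real interrupt-safe version must also raise the processor priority level around the list update; the sketch omits that.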

--- 166 unchanged lines hidden (view full) ---

248 kva = (vm_offset_t) malloc(count*NBPG, M_TEMP, M_WAITOK);
249 return kva;
250 }
251 kva = vm_bounce_kva(count, 1);
252 for(i=0;i<count;i++) {
253 pa = vm_bounce_page_find(1);
254 pmap_kenter(kva + i * NBPG, pa);
255 }
256 pmap_update();
261 return kva;
262}
263
264/*
265 * same as vm_bounce_kva_free -- but really free
266 */
267void
268vm_bounce_kva_alloc_free(kva, count)

--- 35 unchanged lines hidden (view full) ---

304 return;
305
306 if (bp->b_bufsize < bp->b_bcount) {
307 printf("vm_bounce_alloc: b_bufsize(%d) < b_bcount(%d) !!!!\n",
308 bp->b_bufsize, bp->b_bcount);
309 bp->b_bufsize = bp->b_bcount;
310 }
311
257 return kva;
258}
259
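
Note the pmap_update() the new revision adds right after the pmap_kenter() loop above: mappings are entered in a batch and the TLB is invalidated once at the end, rather than per page. A small sketch of that batch-then-flush pattern, with stub routines standing in for the real pmap layer:

#include <stdio.h>

#define NBPG 4096

/* stubs standing in for the pmap layer (illustrative only) */
static void pmap_kenter_stub(unsigned long va, unsigned long pa)
{
	printf("map va %#lx -> pa %#lx\n", va, pa);
}

static void pmap_update_stub(void)
{
	printf("one TLB flush for the whole batch\n");
}

/* enter a run of mappings, then invalidate the TLB once */
void map_batch(unsigned long kva, unsigned long pa0, int count)
{
	int i;

	for (i = 0; i < count; i++)
		pmap_kenter_stub(kva + (unsigned long)i * NBPG,
		    pa0 + (unsigned long)i * NBPG);
	pmap_update_stub();
}

int main(void)
{
	map_batch(0xf0400000UL, 0x00100000UL, 4);
	return 0;
}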
260/*
261 * same as vm_bounce_kva_free -- but really free
262 */
263void
264vm_bounce_kva_alloc_free(kva, count)

--- 35 unchanged lines hidden (view full) ---

300 return;
301
302 if (bp->b_bufsize < bp->b_bcount) {
303 printf("vm_bounce_alloc: b_bufsize(%d) < b_bcount(%d) !!!!\n",
304 bp->b_bufsize, bp->b_bcount);
305 bp->b_bufsize = bp->b_bcount;
306 }
307
312 vastart = (vm_offset_t) bp->b_un.b_addr;
313 vaend = (vm_offset_t) bp->b_un.b_addr + bp->b_bufsize;
308 vastart = (vm_offset_t) bp->b_data;
309 vaend = (vm_offset_t) bp->b_data + bp->b_bufsize;
314
315 vapstart = i386_trunc_page(vastart);
316 vapend = i386_round_page(vaend);
317 countvmpg = (vapend - vapstart) / NBPG;
318
319/*
320 * if any page is above 16MB, then go into bounce-buffer mode
321 */

--- 42 unchanged lines hidden (view full) ---

364
365/*
366 * flag the buffer as being bounced
367 */
368 bp->b_flags |= B_BOUNCE;
369/*
370 * save the original buffer kva
371 */
310
311 vapstart = i386_trunc_page(vastart);
312 vapend = i386_round_page(vaend);
313 countvmpg = (vapend - vapstart) / NBPG;
314
315/*
316 * if any page is above 16MB, then go into bounce-buffer mode
317 */

--- 42 unchanged lines hidden (view full) ---

360
361/*
362 * flag the buffer as being bounced
363 */
364 bp->b_flags |= B_BOUNCE;
365/*
366 * save the original buffer kva
367 */
372 bp->b_savekva = bp->b_un.b_addr;
368 bp->b_savekva = bp->b_data;
373/*
374 * put our new kva into the buffer (offset by original offset)
375 */
369/*
370 * put our new kva into the buffer (offset by original offset)
371 */
376 bp->b_un.b_addr = (caddr_t) (((vm_offset_t) kva) |
372 bp->b_data = (caddr_t) (((vm_offset_t) kva) |
377 ((vm_offset_t) bp->b_savekva & (NBPG - 1)));
378 return;
379}
380
381/*
382 * hook into biodone to free bounce buffer
383 */
384void

--- 13 unchanged lines hidden (view full) ---

398
399/*
400 * if this isn't a bounced buffer, then just return
401 */
402 if ((bp->b_flags & B_BOUNCE) == 0)
403 return;
404
405 origkva = (vm_offset_t) bp->b_savekva;
373 ((vm_offset_t) bp->b_savekva & (NBPG - 1)));
374 return;
375}
376
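
The bitwise OR in the assignment at the end of vm_bounce_alloc() just above works because kva is page aligned: OR-ing in b_savekva & (NBPG - 1) is then the same as adding the original buffer's byte offset within its first page, so the bounced address stays offset exactly like the original. A self-contained check (the addresses are made up):

#include <assert.h>
#include <stdio.h>

#define NBPG 4096	/* i386 page size */

/* combine a page-aligned bounce kva with the original buffer's
   offset within its first page, as the assignment above does */
unsigned long bounce_addr(unsigned long kva, unsigned long savekva)
{
	return kva | (savekva & (NBPG - 1));
}

int main(void)
{
	unsigned long orig = 0xf0123abcUL;	/* original buffer kva (made up) */
	unsigned long kva = 0xf0800000UL;	/* page-aligned bounce kva (made up) */
	unsigned long b = bounce_addr(kva, orig);

	assert((b & (NBPG - 1)) == (orig & (NBPG - 1)));
	printf("bounced address: %#lx\n", b);
	return 0;
}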
377/*
378 * hook into biodone to free bounce buffer
379 */
380void

--- 13 unchanged lines hidden (view full) ---

394
395/*
396 * if this isn't a bounced buffer, then just return
397 */
398 if ((bp->b_flags & B_BOUNCE) == 0)
399 return;
400
401 origkva = (vm_offset_t) bp->b_savekva;
406 bouncekva = (vm_offset_t) bp->b_un.b_addr;
402 bouncekva = (vm_offset_t) bp->b_data;
407
408 vastart = bouncekva;
409 vaend = bouncekva + bp->b_bufsize;
410 bcount = bp->b_bufsize;
411
412 vapstart = i386_trunc_page(vastart);
413 vapend = i386_round_page(vaend);
414

--- 29 unchanged lines hidden (view full) ---

444 origkva += copycount;
445 bouncekva += copycount;
446 bcount -= copycount;
447 }
448
449/*
450 * add the old kva into the "to free" list
451 */
403
404 vastart = bouncekva;
405 vaend = bouncekva + bp->b_bufsize;
406 bcount = bp->b_bufsize;
407
408 vapstart = i386_trunc_page(vastart);
409 vapend = i386_round_page(vaend);
410

--- 29 unchanged lines hidden (view full) ---

440 origkva += copycount;
441 bouncekva += copycount;
442 bcount -= copycount;
443 }
444
445/*
446 * add the old kva into the "to free" list
447 */
452 bouncekva = i386_trunc_page((vm_offset_t) bp->b_un.b_addr);
448 bouncekva = i386_trunc_page((vm_offset_t) bp->b_data);
453 vm_bounce_kva_free( bouncekva, countvmpg*NBPG, 0);
449 vm_bounce_kva_free( bouncekva, countvmpg*NBPG, 0);
454 bp->b_un.b_addr = bp->b_savekva;
450 bp->b_data = bp->b_savekva;
455 bp->b_savekva = 0;
456 bp->b_flags &= ~B_BOUNCE;
457
458 return;
459}
460
451 bp->b_savekva = 0;
452 bp->b_flags &= ~B_BOUNCE;
453
454 return;
455}
456
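
The visible tail of the copy-back loop in vm_bounce_free() (origkva += copycount; bouncekva += copycount; bcount -= copycount;) shows the data moving in chunks that stop at page boundaries. The sketch below isolates that chunking arithmetic; it is an illustration, not the hidden kernel loop, which also checks the mappings and presumably copies only when data moved device-to-memory:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define NBPG 4096

/* copy bcount bytes in chunks that never cross a source page
   boundary, advancing both cursors the way the loop tail above does */
void copyback(char *dst, const char *src, unsigned long bcount)
{
	while (bcount > 0) {
		/* bytes left in the current source page */
		unsigned long copycount = NBPG - ((uintptr_t)src & (NBPG - 1));

		if (copycount > bcount)
			copycount = bcount;
		memcpy(dst, src, copycount);
		src += copycount;
		dst += copycount;
		bcount -= copycount;
	}
}

int main(void)
{
	static char a[3 * NBPG], b[3 * NBPG];

	memset(a, 0x5a, sizeof(a));
	copyback(b, a + 100, 2 * NBPG);		/* deliberately unaligned start */
	printf("%s\n", memcmp(b, a + 100, 2 * NBPG) == 0 ? "ok" : "mismatch");
	return 0;
}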
461#endif /* NOBOUNCE */
462
463/*
464 * init the bounce buffer system
465 */
466void
467vm_bounce_init()
468{
469 vm_offset_t minaddr, maxaddr;
470
457/*
458 * init the bounce buffer system
459 */
460void
461vm_bounce_init()
462{
463 vm_offset_t minaddr, maxaddr;
464
471 io_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, MAXBKVA * NBPG, FALSE);
472 kvasfreecnt = 0;
473
465 kvasfreecnt = 0;
466
474#ifndef NOBOUNCE
475 if (bouncepages == 0)
476 return;
477
478 bounceallocarraysize = (bouncepages + BITS_IN_UNSIGNED - 1) / BITS_IN_UNSIGNED;
479 bounceallocarray = malloc(bounceallocarraysize * sizeof(unsigned), M_TEMP, M_NOWAIT);
480
481 if (!bounceallocarray)
482 panic("Cannot allocate bounce resource array\n");
483
484 bzero(bounceallocarray, bounceallocarraysize * sizeof(unsigned));
485
486
487 bouncepa = pmap_kextract((vm_offset_t) bouncememory);
488 bouncepaend = bouncepa + bouncepages * NBPG;
489 bouncefree = bouncepages;
467 if (bouncepages == 0)
468 return;
469
470 bounceallocarraysize = (bouncepages + BITS_IN_UNSIGNED - 1) / BITS_IN_UNSIGNED;
471 bounceallocarray = malloc(bounceallocarraysize * sizeof(unsigned), M_TEMP, M_NOWAIT);
472
473 if (!bounceallocarray)
474 panic("Cannot allocate bounce resource array\n");
475
476 bzero(bounceallocarray, bounceallocarraysize * sizeof(unsigned));
477
478
479 bouncepa = pmap_kextract((vm_offset_t) bouncememory);
480 bouncepaend = bouncepa + bouncepages * NBPG;
481 bouncefree = bouncepages;
490#endif
491
492}
493
494
482}
483
484
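
vm_bounce_init() sizes bounceallocarray at one bit per bounce page, rounded up to whole unsigned words. A minimal sketch of the kind of bitmap allocator such an array supports; the function names and the 100-page figure are illustrative, not the kernel's:

#include <stdio.h>

#define BITS_IN_UNSIGNED (8 * sizeof(unsigned))
#define BOUNCEPAGES 100		/* illustrative page count */

/* one bit per bounce page, rounded up to whole words as above */
static unsigned allocarray[(BOUNCEPAGES + BITS_IN_UNSIGNED - 1) / BITS_IN_UNSIGNED];

/* find a clear bit, set it, return its index; -1 if none free */
int page_alloc(void)
{
	unsigned i;

	for (i = 0; i < BOUNCEPAGES; i++)
		if ((allocarray[i / BITS_IN_UNSIGNED] &
		    (1U << (i % BITS_IN_UNSIGNED))) == 0) {
			allocarray[i / BITS_IN_UNSIGNED] |=
			    1U << (i % BITS_IN_UNSIGNED);
			return (int)i;
		}
	return -1;
}

void page_free(int i)
{
	allocarray[i / BITS_IN_UNSIGNED] &= ~(1U << (i % BITS_IN_UNSIGNED));
}

int main(void)
{
	int a = page_alloc(), b = page_alloc();

	printf("got pages %d and %d\n", a, b);
	page_free(a);
	printf("after free, next is %d\n", page_alloc());
	return 0;
}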
485#ifdef BROKEN_IN_44
495static void
496cldiskvamerge( kvanew, orig1, orig1cnt, orig2, orig2cnt)
497 vm_offset_t kvanew;
498 vm_offset_t orig1, orig1cnt;
499 vm_offset_t orig2, orig2cnt;
500{
501 int i;
502 vm_offset_t pa;

--- 319 unchanged lines hidden (view full) ---

822 if( bp->av_forw)
823 bp->av_forw->av_back = bp;
824 else
825 dp->b_actl = bp;
826
827 ap->av_forw = bp;
828 bp->av_back = ap;
829}
486static void
487cldiskvamerge( kvanew, orig1, orig1cnt, orig2, orig2cnt)
488 vm_offset_t kvanew;
489 vm_offset_t orig1, orig1cnt;
490 vm_offset_t orig2, orig2cnt;
491{
492 int i;
493 vm_offset_t pa;

--- 319 unchanged lines hidden (view full) ---

813 if( bp->av_forw)
814 bp->av_forw->av_back = bp;
815 else
816 dp->b_actl = bp;
817
818 ap->av_forw = bp;
819 bp->av_back = ap;
820}
821#endif
830
831/*
832 * quick version of vm_fault
833 */
834
835void
836vm_fault_quick( v, prot)
837 vm_offset_t v;

--- 38 unchanged lines hidden (view full) ---

876 * This should be done differently, with a single call
877 * that copies and updates the pcb+stack,
878 * replacing the bcopy and savectx.
879 */
880 p2->p_addr->u_pcb = p1->p_addr->u_pcb;
881 offset = mvesp() - (int)kstack;
882 bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
883 (unsigned) ctob(UPAGES) - offset);
822
823/*
824 * quick version of vm_fault
825 */
826
827void
828vm_fault_quick( v, prot)
829 vm_offset_t v;

--- 38 unchanged lines hidden (view full) ---

868 * This should be done differently, with a single call
869 * that copies and updates the pcb+stack,
870 * replacing the bcopy and savectx.
871 */
872 p2->p_addr->u_pcb = p1->p_addr->u_pcb;
873 offset = mvesp() - (int)kstack;
874 bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
875 (unsigned) ctob(UPAGES) - offset);
884 p2->p_regs = p1->p_regs;
876 p2->p_md.md_regs = p1->p_md.md_regs;
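
The bcopy() above avoids copying the whole u-area: mvesp() - (int)kstack locates the current stack pointer within it, and since the stack grows downward, only the bytes from there up to the top (the live frames) need to reach the child. A toy illustration of that arithmetic (the size and the fake stack-pointer offset are made up):

#include <stdio.h>
#include <string.h>

#define KSTACK_SIZE 8192	/* stands in for ctob(UPAGES); illustrative */

/* copy only the live portion of a downward-growing kernel stack:
   the bytes from the current stack pointer up to the top */
void copy_live_stack(char *child, const char *parent, unsigned long sp_offset)
{
	memcpy(child + sp_offset, parent + sp_offset, KSTACK_SIZE - sp_offset);
}

int main(void)
{
	static char parent[KSTACK_SIZE], child[KSTACK_SIZE];
	unsigned long sp_offset = 5000;	/* pretend the stack pointer sits here */

	memset(parent + sp_offset, 0x7e, KSTACK_SIZE - sp_offset);
	copy_live_stack(child, parent, sp_offset);
	printf("%s\n", memcmp(child + sp_offset, parent + sp_offset,
	    KSTACK_SIZE - sp_offset) == 0 ? "ok" : "mismatch");
	return 0;
}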
885
886 /*
887 * Wire top of address space of child to its kstack.
888 * First, fault in a page of pte's to map it.
889 */
890#if 0
891 addr = trunc_page((u_int)vtopte(kstack));
892 vm_map_pageable(&p2->p_vmspace->vm_map, addr, addr+NBPG, FALSE);

--- 32 unchanged lines hidden (view full) ---

925 * We change to an inactive address space and a "safe" stack,
926 * passing thru an argument to the new stack. Now, safely isolated
927 * from the resources we're shedding, we release the address space
928 * and any remaining machine-dependent resources, including the
929 * memory for the user structure and kernel stack.
930 *
931 * Next, we assign a dummy context to be written over by swtch,
932 * calling it to send this process off to oblivion.
877
878 /*
879 * Wire top of address space of child to its kstack.
880 * First, fault in a page of pte's to map it.
881 */
882#if 0
883 addr = trunc_page((u_int)vtopte(kstack));
884 vm_map_pageable(&p2->p_vmspace->vm_map, addr, addr+NBPG, FALSE);

--- 32 unchanged lines hidden (view full) ---

917 * We change to an inactive address space and a "safe" stack,
918 * passing thru an argument to the new stack. Now, safely isolated
919 * from the resources we're shedding, we release the address space
920 * and any remaining machine-dependent resources, including the
921 * memory for the user structure and kernel stack.
922 *
923 * Next, we assign a dummy context to be written over by swtch,
924 * calling it to send this process off to oblivion.
933 * [The nullpcb allows us to minimize cost in swtch() by not having
925 * [The nullpcb allows us to minimize cost in mi_switch() by not having
934 * a special case].
935 */
936struct proc *swtch_to_inactive();
937volatile void
938cpu_exit(p)
939 register struct proc *p;
940{
941 static struct pcb nullpcb; /* pcb to overwrite on last swtch */

--- 5 unchanged lines hidden (view full) ---

947 /* move to inactive space and stack, passing arg across */
948 p = swtch_to_inactive(p);
949
950 /* drop per-process resources */
951 vmspace_free(p->p_vmspace);
952 kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
953
954 p->p_addr = (struct user *) &nullpcb;
926 * a special case].
927 */
928struct proc *swtch_to_inactive();
929volatile void
930cpu_exit(p)
931 register struct proc *p;
932{
933 static struct pcb nullpcb; /* pcb to overwrite on last swtch */

--- 5 unchanged lines hidden (view full) ---

939 /* move to inactive space and stack, passing arg across */
940 p = swtch_to_inactive(p);
941
942 /* drop per-process resources */
943 vmspace_free(p->p_vmspace);
944 kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
945
946 p->p_addr = (struct user *) &nullpcb;
955 splclock();
956 swtch();
947 mi_switch();
957 /* NOTREACHED */
958}
959#else
960void
961cpu_exit(p)
962 register struct proc *p;
963{
964
965#if NNPX > 0
966 npxexit(p);
967#endif /* NNPX */
948 /* NOTREACHED */
949}
950#else
951void
952cpu_exit(p)
953 register struct proc *p;
954{
955
956#if NNPX > 0
957 npxexit(p);
958#endif /* NNPX */
968 splclock();
969 curproc = 0;
970 swtch();
959 curproc = p;
960 mi_switch();
971 /*
972 * This is to shut up the compiler, and if swtch() failed I suppose
973 * this would be a good thing. This keeps gcc happy because panic
974 * is a volatile void function as well.
975 */
976 panic("cpu_exit");
977}
978

--- 6 unchanged lines hidden (view full) ---

985 pmap_remove(vm_map_pmap(kernel_map), (vm_offset_t) p->p_addr,
986 ((vm_offset_t) p->p_addr) + ctob(UPAGES));
987 kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
988 vmspace_free(p->p_vmspace);
989}
990#endif
991
992/*
961 /*
962 * This is to shut up the compiler, and if swtch() failed I suppose
963 * this would be a good thing. This keeps gcc happy because panic
964 * is a volatile void function as well.
965 */
966 panic("cpu_exit");
967}
968

--- 6 unchanged lines hidden (view full) ---

975 pmap_remove(vm_map_pmap(kernel_map), (vm_offset_t) p->p_addr,
976 ((vm_offset_t) p->p_addr) + ctob(UPAGES));
977 kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
978 vmspace_free(p->p_vmspace);
979}
980#endif
981
982/*
983 * Dump the machine specific header information at the start of a core dump.
984 */
985int
986cpu_coredump(p, vp, cred)
987 struct proc *p;
988 struct vnode *vp;
989 struct ucred *cred;
990{
991
992 return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
993 (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL,
994 p));
995}
996
997/*
993 * Set a red zone in the kernel stack after the u. area.
994 */
995void
996setredzone(pte, vaddr)
997 u_short *pte;
998 caddr_t vaddr;
999{
1000/* eventually do this by setting up an expand-down stack segment
1001 for ss0: selector, allowing stack access down to top of u.
1002 this means though that protection violations need to be handled
1003 thru a double fault exception that must do an integral task
1004 switch to a known good context, within which a dump can be
1005 taken. a sensible scheme might be to save the initial context
1006 used by sched (that has physical memory mapped 1:1 at bottom)
1007 and take the dump while still in mapped mode */
1008}
1009
1010/*
998 * Set a red zone in the kernel stack after the u. area.
999 */
1000void
1001setredzone(pte, vaddr)
1002 u_short *pte;
1003 caddr_t vaddr;
1004{
1005/* eventually do this by setting up an expand-down stack segment
1006 for ss0: selector, allowing stack access down to top of u.
1007 this means though that protection violations need to be handled
1008 thru a double fault exception that must do an integral task
1009 switch to a known good context, within which a dump can be
1010 taken. a sensible scheme might be to save the initial context
1011 used by sched (that has physical memory mapped 1:1 at bottom)
1012 and take the dump while still in mapped mode */
1013}
1014
1015/*
1016 * Move pages from one kernel virtual address to another.
1017 * Both addresses are assumed to reside in the Sysmap,
1018 * and size must be a multiple of CLSIZE.
1019 */
1020
1021/*
1022 * Move pages from one kernel virtual address to another.
1023 * Both addresses are assumed to reside in the Sysmap,
1024 * and size must be a multiple of CLSIZE.
1025 */
1026
1027void
1028pagemove(from, to, size)
1029 register caddr_t from, to;
1030 int size;
1031{
1032 register vm_offset_t pa;
1033
1034 if (size & CLOFSET)
1035 panic("pagemove");
1036 while (size > 0) {
1037 pa = pmap_kextract((vm_offset_t)from);
1038 if (pa == 0)
1039 panic("pagemove 2");
1040 if (pmap_kextract((vm_offset_t)to) != 0)
1041 panic("pagemove 3");
1042 pmap_remove(kernel_pmap,
1043 (vm_offset_t)from, (vm_offset_t)from + PAGE_SIZE);
1044 pmap_kenter( (vm_offset_t)to, pa);
1045 from += PAGE_SIZE;
1046 to += PAGE_SIZE;
1047 size -= PAGE_SIZE;
1048 }
1049 pmap_update();
1050}
1051
1052/*
1011 * Convert kernel VA to physical address
1012 */
1013u_long
1014kvtop(void *addr)
1015{
1016 vm_offset_t va;
1017
1018 va = pmap_kextract((vm_offset_t)addr);

--- 12 unchanged lines hidden (view full) ---

1031 * to be mapped. b_bcount might be modified by the driver.
1032 */
1033void
1034vmapbuf(bp)
1035 register struct buf *bp;
1036{
1037 register int npf;
1038 register caddr_t addr;
1053 * Convert kernel VA to physical address
1054 */
1055u_long
1056kvtop(void *addr)
1057{
1058 vm_offset_t va;
1059
1060 va = pmap_kextract((vm_offset_t)addr);

--- 12 unchanged lines hidden (view full) ---

1073 * to be mapped. b_bcount might be modified by the driver.
1074 */
1075void
1076vmapbuf(bp)
1077 register struct buf *bp;
1078{
1079 register int npf;
1080 register caddr_t addr;
1039 register long flags = bp->b_flags;
1040 struct proc *p;
1041 int off;
1042 vm_offset_t kva;
1081 int off;
1082 vm_offset_t kva;
1043 register vm_offset_t pa;
1083 vm_offset_t pa, lastv, v;
1044
1084
1045 if ((flags & B_PHYS) == 0)
1085 if ((bp->b_flags & B_PHYS) == 0)
1046 panic("vmapbuf");
1086 panic("vmapbuf");
1087
1088 lastv = 0;
1089 for (addr = (caddr_t)trunc_page(bp->b_data);
1090 addr < bp->b_data + bp->b_bufsize;
1091 addr += PAGE_SIZE) {
1092
1093/*
1094 * make sure that the pde is valid and held
1095 */
1096 v = trunc_page(((vm_offset_t)vtopte(addr)));
1097 if (v != lastv) {
1098 vm_fault_quick(v, VM_PROT_READ);
1099 pa = pmap_extract(&curproc->p_vmspace->vm_pmap, v);
1100 vm_page_hold(PHYS_TO_VM_PAGE(pa));
1101 lastv = v;
1102 }
1103
1104/*
1105 * do the vm_fault if needed, do the copy-on-write thing when
1106 * reading stuff off device into memory.
1107 */
1108 vm_fault_quick(addr,
1109 (bp->b_flags&B_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
1110 pa = pmap_extract(&curproc->p_vmspace->vm_pmap, (vm_offset_t) addr);
1111/*
1112 * hold the data page
1113 */
1114 vm_page_hold(PHYS_TO_VM_PAGE(pa));
1115 }
1116
1047 addr = bp->b_saveaddr = bp->b_un.b_addr;
1048 off = (int)addr & PGOFSET;
1117 addr = bp->b_saveaddr = bp->b_un.b_addr;
1118 off = (int)addr & PGOFSET;
1049 p = bp->b_proc;
1050 npf = btoc(round_page(bp->b_bufsize + off));
1051 kva = kmem_alloc_wait(phys_map, ctob(npf));
1052 bp->b_un.b_addr = (caddr_t) (kva + off);
1053 while (npf--) {
1119 npf = btoc(round_page(bp->b_bufsize + off));
1120 kva = kmem_alloc_wait(phys_map, ctob(npf));
1121 bp->b_un.b_addr = (caddr_t) (kva + off);
1122 while (npf--) {
1054 pa = pmap_extract(&p->p_vmspace->vm_pmap, (vm_offset_t)addr);
1123 pa = pmap_extract(&curproc->p_vmspace->vm_pmap, (vm_offset_t)addr);
1055 if (pa == 0)
1056 panic("vmapbuf: null page frame");
1057 pmap_kenter(kva, trunc_page(pa));
1058 addr += PAGE_SIZE;
1059 kva += PAGE_SIZE;
1060 }
1061 pmap_update();
1062}
1063
1064/*
1065 * Free the io map PTEs associated with this IO operation.
1066 * We also invalidate the TLB entries and restore the original b_addr.
1067 */
1068void
1069vunmapbuf(bp)
1070 register struct buf *bp;
1071{
1072 register int npf;
1073 register caddr_t addr = bp->b_un.b_addr;
1124 if (pa == 0)
1125 panic("vmapbuf: null page frame");
1126 pmap_kenter(kva, trunc_page(pa));
1127 addr += PAGE_SIZE;
1128 kva += PAGE_SIZE;
1129 }
1130 pmap_update();
1131}
1132
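
vmapbuf() double-maps a user buffer into kernel virtual space: for each page it resolves the physical address (the kernel uses pmap_extract()) and enters it into a freshly allocated kernel window, keeping the byte offset within the first page. A compact sketch of that remapping, with stub routines standing in for the pmap layer (the fake physical addresses are illustrative):

#include <stdio.h>

#define PAGE_SIZE 4096
#define PGOFSET (PAGE_SIZE - 1)

/* fake physical lookup; the kernel uses pmap_extract() here */
static unsigned long user_pa(unsigned long uva)
{
	return 0x100000UL + (uva & ~(unsigned long)PGOFSET);
}

/* stub for entering a mapping into the kernel pmap */
static void kenter_stub(unsigned long kva, unsigned long pa)
{
	printf("kva %#lx -> pa %#lx\n", kva, pa);
}

/* map a user buffer into a kernel window, preserving the byte
   offset within the first page, as vmapbuf() does */
unsigned long map_user_buf(unsigned long uva, unsigned long len, unsigned long window)
{
	unsigned long off = uva & PGOFSET;
	unsigned long npages = (len + off + PGOFSET) / PAGE_SIZE;
	unsigned long i;

	for (i = 0; i < npages; i++)
		kenter_stub(window + i * PAGE_SIZE, user_pa(uva + i * PAGE_SIZE));
	return window + off;	/* driver-visible address of byte 0 */
}

int main(void)
{
	unsigned long kaddr = map_user_buf(0x0804a123UL, 10000, 0xf0a00000UL);

	printf("driver sees the buffer at %#lx\n", kaddr);
	return 0;
}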
1133/*
1134 * Free the io map PTEs associated with this IO operation.
1135 * We also invalidate the TLB entries and restore the original b_addr.
1136 */
1137void
1138vunmapbuf(bp)
1139 register struct buf *bp;
1140{
1141 register int npf;
1142 register caddr_t addr = bp->b_un.b_addr;
1074 vm_offset_t kva;
1143 vm_offset_t kva,va,v,lastv,pa;
1075
1076 if ((bp->b_flags & B_PHYS) == 0)
1077 panic("vunmapbuf");
1078 npf = btoc(round_page(bp->b_bufsize + ((int)addr & PGOFSET)));
1079 kva = (vm_offset_t)((int)addr & ~PGOFSET);
1080 kmem_free_wakeup(phys_map, kva, ctob(npf));
1081 bp->b_un.b_addr = bp->b_saveaddr;
1082 bp->b_saveaddr = NULL;
1144
1145 if ((bp->b_flags & B_PHYS) == 0)
1146 panic("vunmapbuf");
1147 npf = btoc(round_page(bp->b_bufsize + ((int)addr & PGOFSET)));
1148 kva = (vm_offset_t)((int)addr & ~PGOFSET);
1149 kmem_free_wakeup(phys_map, kva, ctob(npf));
1150 bp->b_un.b_addr = bp->b_saveaddr;
1151 bp->b_saveaddr = NULL;
1152
1153
1154/*
1155 * unhold the pde, and data pages
1156 */
1157 lastv = 0;
1158 for (addr = (caddr_t)trunc_page(bp->b_data);
1159 addr < bp->b_data + bp->b_bufsize;
1160 addr += NBPG) {
1161
1162 /*
1163 * release the data page
1164 */
1165 pa = pmap_extract(&curproc->p_vmspace->vm_pmap, (vm_offset_t) addr);
1166 vm_page_unhold(PHYS_TO_VM_PAGE(pa));
1167
1168 /*
1169 * and unhold the page table
1170 */
1171 v = trunc_page(((vm_offset_t)vtopte(addr)));
1172 if (v != lastv) {
1173 pa = pmap_extract(&curproc->p_vmspace->vm_pmap, v);
1174 vm_page_unhold(PHYS_TO_VM_PAGE(pa));
1175 lastv = v;
1176 }
1177 }
1083}
1084
1085/*
1086 * Force reset the processor by invalidating the entire address space!
1087 */
1088void
1089cpu_reset() {
1090

--- 8 unchanged lines hidden (view full) ---

1099
1100/*
1101 * Grow the user stack to allow for 'sp'. This version grows the stack in
1102 * chunks of SGROWSIZ.
1103 */
1104int
1105grow(p, sp)
1106 struct proc *p;
1178}
1179
1180/*
1181 * Force reset the processor by invalidating the entire address space!
1182 */
1183void
1184cpu_reset() {
1185

--- 8 unchanged lines hidden (view full) ---

1194
1195/*
1196 * Grow the user stack to allow for 'sp'. This version grows the stack in
1197 * chunks of SGROWSIZ.
1198 */
1199int
1200grow(p, sp)
1201 struct proc *p;
1107 int sp;
1202 u_int sp;
1108{
1109 unsigned int nss;
1110 caddr_t v;
1111 struct vmspace *vm = p->p_vmspace;
1112
1113 if ((caddr_t)sp <= vm->vm_maxsaddr || (unsigned)sp >= (unsigned)USRSTACK)
1114 return (1);
1115

--- 36 unchanged lines hidden ---
1203{
1204 unsigned int nss;
1205 caddr_t v;
1206 struct vmspace *vm = p->p_vmspace;
1207
1208 if ((caddr_t)sp <= vm->vm_maxsaddr || (unsigned)sp >= (unsigned)USRSTACK)
1209 return (1);
1210

--- 36 unchanged lines hidden ---
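
The hidden body of grow() is not shown, but the comment pins down the contract: extend the stack far enough to cover 'sp', growing in SGROWSIZ chunks rather than page by page. A sketch in that spirit, with illustrative values for USRSTACK and SGROWSIZ (the real grow() also updates vm_ssize and the vm map, which this omits):

#include <stdio.h>

#define USRSTACK 0xFDBFE000UL	/* top of the user stack; illustrative value */
#define SGROWSIZ (128UL * 1024)	/* growth granularity; illustrative value */

/* return a stack size large enough to cover sp, grown in whole
   SGROWSIZ chunks, in the spirit of grow() above */
unsigned long grown_size(unsigned long cursize, unsigned long sp)
{
	unsigned long need = USRSTACK - sp;	/* bytes the fault requires */

	if (need <= cursize)
		return cursize;			/* already big enough */
	/* round the new size up to a multiple of SGROWSIZ */
	return ((need + SGROWSIZ - 1) / SGROWSIZ) * SGROWSIZ;
}

int main(void)
{
	unsigned long cur = 512UL * 1024;
	unsigned long sp = USRSTACK - 600UL * 1024;	/* fault 600K down */

	printf("stack grows from %luK to %luK\n",
	    cur / 1024, grown_size(cur, sp) / 1024);
	return 0;
}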