vm_machdep.c revision 1889
/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 *	$Id: vm_machdep.c,v 1.22 1994/05/25 08:55:23 rgrimes Exp $
 */

#include "npx.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/user.h>

#include <machine/cpu.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

#ifndef NOBOUNCE
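/*
 * Bounce buffer support: ISA DMA can address only the first 16MB of
 * physical memory, so I/O buffers containing pages at or above
 * SIXTEENMEG are temporarily remapped onto pages from bouncememory,
 * all of which must lie below 16MB (vm_bounce_init verifies this).
 * bounceallocarray is a bitmap with one bit per bounce page, and
 * kvaf[] records kva ranges whose freeing has been deferred so that
 * the free is safe to request from interrupt context.
 */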
vm_map_t	io_map;
volatile int	kvasfreecnt;


caddr_t		bouncememory;
int		bouncepages, bpwait;
vm_offset_t	*bouncepa;
int		bmwait, bmfreeing;

#define BITS_IN_UNSIGNED (8*sizeof(unsigned))
int		bounceallocarraysize;
unsigned	*bounceallocarray;
int		bouncefree;

#define SIXTEENMEG (4096*4096)
#define MAXBKVA 1024
int		maxbkva = MAXBKVA*NBPG;

/* special list that can be used at interrupt time for eventual kva free */
struct kvasfree {
	vm_offset_t addr;
	vm_offset_t size;
} kvaf[MAXBKVA];


vm_offset_t vm_bounce_kva();
/*
 * get bounce buffer pages (count physically contiguous)
 * (only 1 implemented now)
 */
vm_offset_t
vm_bounce_page_find(count)
	int count;
{
	int bit;
	int s,i;

	if (count != 1)
		panic("vm_bounce_page_find -- no support for > 1 page yet!!!");

	s = splbio();
retry:
	for (i = 0; i < bounceallocarraysize; i++) {
		if (bounceallocarray[i] != 0xffffffff) {
			if ((bit = ffs(~bounceallocarray[i])) != 0) {
				bounceallocarray[i] |= 1 << (bit - 1);
				bouncefree -= count;
				splx(s);
				return bouncepa[(i * BITS_IN_UNSIGNED + (bit - 1))];
			}
		}
	}
	bpwait = 1;
	tsleep((caddr_t) &bounceallocarray, PRIBIO, "bncwai", 0);
	goto retry;
}

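/*
 * queue a kva range for freeing; the actual pmap_remove/kmem_free is
 * performed by vm_bounce_kva(), either right away (now != 0) or the
 * next time bounce kva is allocated, so a deferred free is safe to
 * request from interrupt context
 */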
void
vm_bounce_kva_free(addr, size, now)
	vm_offset_t addr;
	vm_offset_t size;
	int now;
{
	int s = splbio();
	kvaf[kvasfreecnt].addr = addr;
	kvaf[kvasfreecnt].size = size;
	++kvasfreecnt;
	if( now) {
		/*
		 * this will do wakeups
		 */
		vm_bounce_kva(0,0);
	} else {
		if (bmwait) {
		/*
		 * if anyone is waiting on the bounce-map, then wakeup
		 */
			wakeup((caddr_t) io_map);
			bmwait = 0;
		}
	}
	splx(s);
}

/*
 * free count bounce buffer pages
 */
void
vm_bounce_page_free(pa, count)
	vm_offset_t pa;
	int count;
{
	int allocindex;
	int index;
	int bit;

	if (count != 1)
		panic("vm_bounce_page_free -- no support for > 1 page yet!!!\n");

	for(index=0;index<bouncepages;index++) {
		if( pa == bouncepa[index])
			break;
	}

	if( index == bouncepages)
		panic("vm_bounce_page_free: invalid bounce buffer");

	allocindex = index / BITS_IN_UNSIGNED;
	bit = index % BITS_IN_UNSIGNED;

	bounceallocarray[allocindex] &= ~(1 << bit);

	bouncefree += count;
	if (bpwait) {
		bpwait = 0;
		wakeup((caddr_t) &bounceallocarray);
	}
}

/*
 * allocate "size" bytes of bounce buffer kva
 * (a size of 0 just flushes the deferred kva free list)
 */
vm_offset_t
vm_bounce_kva(size, waitok)
	int size;
	int waitok;
{
	int i;
	int startfree;
	vm_offset_t kva = 0;
	int s = splbio();
more:
	if (!bmfreeing && kvasfreecnt) {
		bmfreeing = 1;
		for (i = 0; i < kvasfreecnt; i++) {
			pmap_remove(kernel_pmap,
				kvaf[i].addr, kvaf[i].addr + kvaf[i].size);
			kmem_free_wakeup(io_map, kvaf[i].addr,
				kvaf[i].size);
		}
		kvasfreecnt = 0;
		bmfreeing = 0;
		if( bmwait) {
			bmwait = 0;
			wakeup( (caddr_t) io_map);
		}
	}

	if( size == 0) {
		splx(s);
		return NULL;
	}

	if ((kva = kmem_alloc_pageable(io_map, size)) == 0) {
		if( !waitok) {
			splx(s);
			return NULL;
		}
		bmwait = 1;
		tsleep((caddr_t) io_map, PRIBIO, "bmwait", 0);
		goto more;
	}
	splx(s);
	return kva;
}

/*
 * like vm_bounce_kva, but also allocates and maps bounce pages
 * (takes a page count rather than a byte count)
 */
vm_offset_t
vm_bounce_kva_alloc(count)
	int count;
{
	int i;
	vm_offset_t kva;
	vm_offset_t pa;
	if( bouncepages == 0) {
		kva = (vm_offset_t) malloc(count*NBPG, M_TEMP, M_WAITOK);
		return kva;
	}
	kva = vm_bounce_kva(count*NBPG, 1);
	for(i=0;i<count;i++) {
		pa = vm_bounce_page_find(1);
		pmap_kenter(kva + i * NBPG, pa);
	}
	pmap_update();
	return kva;
}

/*
 * release the pages and kva obtained from vm_bounce_kva_alloc
 */
void
vm_bounce_kva_alloc_free(kva, count)
	vm_offset_t kva;
	int count;
{
	int i;
	vm_offset_t pa;
	if( bouncepages == 0) {
		free((caddr_t) kva, M_TEMP);
		return;
	}
	for(i = 0; i < count; i++) {
		pa = pmap_kextract(kva + i * NBPG);
		vm_bounce_page_free(pa, 1);
	}
	vm_bounce_kva_free(kva, count*NBPG, 0);
}
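
/*
 * Sketch of intended use (not called from within this file): a driver
 * that needs a DMA-reachable scratch area can pair the two routines
 * above, e.g.
 *
 *	vm_offset_t kva = vm_bounce_kva_alloc(2);	(two pages)
 *	... program the controller to DMA to/from kva ...
 *	vm_bounce_kva_alloc_free(kva, 2);
 *
 * When no bounce pages are configured, both routines fall back to
 * plain malloc/free.
 */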

/*
 * do the things necessary to the struct buf to implement
 * bounce buffers...  inserted before the disk sort
 */
void
vm_bounce_alloc(bp)
	struct buf *bp;
{
	int countvmpg;
	vm_offset_t vastart, vaend;
	vm_offset_t vapstart, vapend;
	vm_offset_t va, kva;
	vm_offset_t pa;
	int dobounceflag = 0;
	int bounceindex;
	int i;
	int s;

	if (bouncepages == 0)
		return;

	if (bp->b_flags & B_BOUNCE) {
		printf("vm_bounce_alloc: called recursively???\n");
		return;
	}

	if (bp->b_bufsize < bp->b_bcount) {
		printf("vm_bounce_alloc: b_bufsize(0x%x) < b_bcount(0x%x) !!!!\n",
			bp->b_bufsize, bp->b_bcount);
		panic("vm_bounce_alloc");
	}

/*
 *  This is not really necessary
 *	if( bp->b_bufsize != bp->b_bcount) {
 *		printf("size: %d, count: %d\n", bp->b_bufsize, bp->b_bcount);
 *	}
 */


	vastart = (vm_offset_t) bp->b_data;
	vaend = (vm_offset_t) bp->b_data + bp->b_bufsize;

	vapstart = i386_trunc_page(vastart);
	vapend = i386_round_page(vaend);
	countvmpg = (vapend - vapstart) / NBPG;

/*
 * if any page is above 16MB, then go into bounce-buffer mode
 */
	va = vapstart;
	for (i = 0; i < countvmpg; i++) {
		pa = pmap_kextract(va);
		if (pa >= SIXTEENMEG)
			++dobounceflag;
		va += NBPG;
	}
	if (dobounceflag == 0)
		return;

	if (bouncepages < dobounceflag)
		panic("Not enough bounce buffers!!!");

/*
 * allocate a replacement kva for b_addr
 */
	kva = vm_bounce_kva(countvmpg*NBPG, 1);
#if 0
	printf("%s: vapstart: %x, vapend: %x, countvmpg: %d, kva: %x ",
		(bp->b_flags & B_READ) ? "read":"write",
			vapstart, vapend, countvmpg, kva);
#endif
	va = vapstart;
	for (i = 0; i < countvmpg; i++) {
		pa = pmap_kextract(va);
		if (pa >= SIXTEENMEG) {
			/*
			 * allocate a replacement page
			 */
			vm_offset_t bpa = vm_bounce_page_find(1);
			pmap_kenter(kva + (NBPG * i), bpa);
#if 0
			printf("r(%d): (%x,%x,%x) ", i, va, pa, bpa);
#endif
			/*
			 * if we are writing, then copy the data into the page
			 */
			if ((bp->b_flags & B_READ) == 0) {
				pmap_update();
				bcopy((caddr_t) va, (caddr_t) kva + (NBPG * i), NBPG);
			}
		} else {
			/*
			 * use original page
			 */
			pmap_kenter(kva + (NBPG * i), pa);
		}
		va += NBPG;
	}
	pmap_update();

/*
 * flag the buffer as being bounced
 */
	bp->b_flags |= B_BOUNCE;
/*
 * save the original buffer kva
 */
	bp->b_savekva = bp->b_data;
/*
 * put our new kva into the buffer (offset by original offset)
 */
	bp->b_data = (caddr_t) (((vm_offset_t) kva) |
				((vm_offset_t) bp->b_savekva & (NBPG - 1)));
#if 0
	printf("b_savekva: %x, newva: %x\n", bp->b_savekva, bp->b_data);
#endif
	return;
}

/*
 * hook into biodone to free bounce buffer
 */
void
vm_bounce_free(bp)
	struct buf *bp;
{
	int i;
	vm_offset_t origkva, bouncekva, bouncekvaend;
	int countvmpg;
	int s;

/*
 * if this isn't a bounced buffer, then just return
 */
	if ((bp->b_flags & B_BOUNCE) == 0)
		return;

/*
 *  This check is not necessary
 *	if (bp->b_bufsize != bp->b_bcount) {
 *		printf("vm_bounce_free: b_bufsize=%d, b_bcount=%d\n",
 *			bp->b_bufsize, bp->b_bcount);
 *	}
 */

	origkva = (vm_offset_t) bp->b_savekva;
	bouncekva = (vm_offset_t) bp->b_data;
/*
	printf("free: %d ", bp->b_bufsize);
*/

/*
 * check every page in the kva space for b_addr
 */
	for (i = 0; i < bp->b_bufsize; ) {
		vm_offset_t mybouncepa;
		vm_offset_t copycount;

		copycount = i386_round_page(bouncekva + 1) - bouncekva;
		mybouncepa = pmap_kextract(i386_trunc_page(bouncekva));

/*
 * if this is a bounced pa, then process as one
 */
		if ( mybouncepa != pmap_kextract( i386_trunc_page( origkva))) {
			vm_offset_t tocopy = copycount;
			if (i + tocopy > bp->b_bufsize)
				tocopy = bp->b_bufsize - i;
/*
 * if this is a read, then copy from bounce buffer into original buffer
 */
			if (bp->b_flags & B_READ)
				bcopy((caddr_t) bouncekva, (caddr_t) origkva, tocopy);
/*
 * free the bounce allocation
 */

/*
			printf("(kva: %x, pa: %x)", bouncekva, mybouncepa);
*/
			vm_bounce_page_free(mybouncepa, 1);
		}

		origkva += copycount;
		bouncekva += copycount;
		i += copycount;
	}

/*
	printf("\n");
*/
/*
 * add the old kva into the "to free" list
 */

	bouncekva = i386_trunc_page((vm_offset_t) bp->b_data);
	bouncekvaend = i386_round_page((vm_offset_t)bp->b_data + bp->b_bufsize);

/*
	printf("freeva: %d\n", (bouncekvaend - bouncekva) / NBPG);
*/
	vm_bounce_kva_free( bouncekva, (bouncekvaend - bouncekva), 0);
	bp->b_data = bp->b_savekva;
	bp->b_savekva = 0;
	bp->b_flags &= ~B_BOUNCE;

	return;
}


/*
 * init the bounce buffer system
 */
void
vm_bounce_init()
{
	vm_offset_t minaddr, maxaddr;
	int i;

	kvasfreecnt = 0;

	if (bouncepages == 0)
		return;

	bounceallocarraysize = (bouncepages + BITS_IN_UNSIGNED - 1) / BITS_IN_UNSIGNED;
	bounceallocarray = malloc(bounceallocarraysize * sizeof(unsigned), M_TEMP, M_NOWAIT);

	if (!bounceallocarray)
		panic("Cannot allocate bounce resource array\n");

	bzero(bounceallocarray, bounceallocarraysize * sizeof(unsigned));
	bouncepa = malloc(bouncepages * sizeof(vm_offset_t), M_TEMP, M_NOWAIT);
	if (!bouncepa)
		panic("Cannot allocate physical memory array\n");

	for(i=0;i<bouncepages;i++) {
		vm_offset_t pa;
		if( (pa = pmap_kextract((vm_offset_t) bouncememory + i * NBPG)) >= SIXTEENMEG)
			panic("bounce memory out of range");
		if( pa == 0)
			panic("bounce memory not resident");
		bouncepa[i] = pa;
	}
	bouncefree = bouncepages;

}
#endif /* NOBOUNCE */
/*
 * quick version of vm_fault
 */

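/*
 * touch the page at v to force it in (and, for writes, to force
 * copy-on-write); the 386 ignores page-level write protection while
 * in supervisor mode, so on that cpu class a write fault has to be
 * forced with an explicit vm_fault() call instead of a write probe
 */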
void
vm_fault_quick( v, prot)
	vm_offset_t v;
	int prot;
{
	if( (cpu_class == CPUCLASS_386) &&
		(prot & VM_PROT_WRITE))
		vm_fault(&curproc->p_vmspace->vm_map, v,
			VM_PROT_READ|VM_PROT_WRITE, FALSE);
	else if( prot & VM_PROT_WRITE)
		*(volatile char *)v += 0;
	else
		*(volatile char *)v;
}


/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
int
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	register struct user *up = p2->p_addr;
	int foo, offset, addr, i;
	extern char kstack[];
	extern int mvesp();

	/*
	 * Copy pcb and stack from proc p1 to p2.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree;
	 * this is tricky, as the final pcb is constructed by savectx,
	 * but its frame isn't yet on the stack when the stack is copied.
	 * swtch compensates for this when the child eventually runs.
	 * This should be done differently, with a single call
	 * that copies and updates the pcb+stack,
	 * replacing the bcopy and savectx.
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	offset = mvesp() - (int)kstack;
	bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
	    (unsigned) ctob(UPAGES) - offset);
	p2->p_md.md_regs = p1->p_md.md_regs;

	pmap_activate(&p2->p_vmspace->vm_pmap, &up->u_pcb);

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero from
	 * savectx (which behaves like setjmp).
	 */
	if (savectx(up, 1)) {
		/*
		 * Return 1 in child.
		 */
		return (1);
	}
	return (0);
}

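/*
 * Machine-dependent part of process exit: release the npx (FPU)
 * state and switch away from the dying process for the last time.
 * mi_switch() should never return here, hence the panic.
 */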
void
cpu_exit(p)
	register struct proc *p;
{

#if NNPX > 0
	npxexit(p);
#endif	/* NNPX */
	curproc = p;
	mi_switch();
	/*
	 * This is to shut up the compiler, and if swtch() failed I suppose
	 * this would be a good thing.  This keeps gcc happy because panic
	 * is a volatile void function as well.
	 */
	panic("cpu_exit");
}

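/*
 * cpu_wait: reclaim what is left of an exited process once the parent
 * has waited for it -- unmap and free the kernel-mapped U-area (pcb
 * and kernel stack) and release the address space.
 */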
void
cpu_wait(p)
	struct proc *p;
{
/*	extern vm_map_t upages_map; */
	extern char kstack[];

	/* drop per-process resources */
	pmap_remove(vm_map_pmap(kernel_map), (vm_offset_t) p->p_addr,
		((vm_offset_t) p->p_addr) + ctob(UPAGES));
	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
	vmspace_free(p->p_vmspace);
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
int
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{

	return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
	    (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL,
	    p));
}

/*
 * Set a red zone in the kernel stack after the u. area.
 */
void
setredzone(pte, vaddr)
	u_short *pte;
	caddr_t vaddr;
{
/* eventually do this by setting up an expand-down stack segment
   for ss0: selector, allowing stack access down to top of u.
   this means though that protection violations need to be handled
   thru a double fault exception that must do an integral task
   switch to a known good context, within which a dump can be
   taken. a sensible scheme might be to save the initial context
   used by sched (that has physical memory mapped 1:1 at bottom)
   and take the dump while still in mapped mode */
}

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLSIZE.
 */

void
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register vm_offset_t pa;

	if (size & CLOFSET)
		panic("pagemove");
	while (size > 0) {
		pa = pmap_kextract((vm_offset_t)from);
		if (pa == 0)
			panic("pagemove 2");
		if (pmap_kextract((vm_offset_t)to) != 0)
			panic("pagemove 3");
		pmap_remove(kernel_pmap,
			    (vm_offset_t)from, (vm_offset_t)from + PAGE_SIZE);
		pmap_kenter( (vm_offset_t)to, pa);
		from += PAGE_SIZE;
		to += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update();
}

/*
 * Convert kernel VA to physical address
 */
u_long
kvtop(void *addr)
{
	vm_offset_t va;

	va = pmap_kextract((vm_offset_t)addr);
	if (va == 0)
		panic("kvtop: zero page frame");
	return((int)va);
}

/*
 * Map an IO request into kernel virtual address space.
 *
 * All requests are (re)mapped into kernel VA space.
 * Notice that we use b_bufsize for the size of the buffer
 * to be mapped.  b_bcount might be modified by the driver.
 */
void
vmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr;
	int off;
	vm_offset_t kva;
	vm_offset_t pa, lastv, v;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	/*
	 * this is the kva that is to be used for
	 * the temporary kernel mapping
	 */
	kva = (vm_offset_t) bp->b_saveaddr;

	lastv = 0;
	for (addr = (caddr_t)trunc_page(bp->b_data);
		addr < bp->b_data + bp->b_bufsize;
		addr += PAGE_SIZE) {

/*
 * make sure that the pde is valid and held
 */
		v = trunc_page(((vm_offset_t)vtopte(addr)));
		if (v != lastv) {
			vm_fault_quick(v, VM_PROT_READ);
			pa = pmap_extract(&curproc->p_vmspace->vm_pmap, v);
			vm_page_hold(PHYS_TO_VM_PAGE(pa));
			lastv = v;
		}

/*
 * do the vm_fault if needed, do the copy-on-write thing when
 * reading stuff off device into memory.
 */
		vm_fault_quick(addr,
			(bp->b_flags&B_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
		pa = pmap_extract(&curproc->p_vmspace->vm_pmap, (vm_offset_t) addr);
/*
 * hold the data page
 */
		vm_page_hold(PHYS_TO_VM_PAGE(pa));
	}

	addr = bp->b_saveaddr = bp->b_data;
	off = (int)addr & PGOFSET;
	npf = btoc(round_page(bp->b_bufsize + off));
	bp->b_data = (caddr_t) (kva + off);
	while (npf--) {
		pa = pmap_extract(&curproc->p_vmspace->vm_pmap, (vm_offset_t)addr);
		if (pa == 0)
			panic("vmapbuf: null page frame");
		pmap_kenter(kva, trunc_page(pa));
		addr += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
	pmap_update();
}

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_addr.
 */
void
vunmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr = bp->b_data;
	vm_offset_t kva,va,v,lastv,pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;

/*
 * unhold the pde, and data pages
 */
	lastv = 0;
	for (addr = (caddr_t)trunc_page(bp->b_data);
		addr < bp->b_data + bp->b_bufsize;
		addr += NBPG) {

	/*
	 * release the data page
	 */
		pa = pmap_extract(&curproc->p_vmspace->vm_pmap, (vm_offset_t) addr);
		vm_page_unhold(PHYS_TO_VM_PAGE(pa));

	/*
	 * and unhold the page table
	 */
		v = trunc_page(((vm_offset_t)vtopte(addr)));
		if (v != lastv) {
			pa = pmap_extract(&curproc->p_vmspace->vm_pmap, v);
			vm_page_unhold(PHYS_TO_VM_PAGE(pa));
			lastv = v;
		}
		pmap_kremove( addr);
	}
}

/*
 * Force reset the processor by invalidating the entire address space!
 */
void
cpu_reset() {

	/* force a shutdown by unmapping entire address space ! */
	bzero((caddr_t) PTD, NBPG);

	/* "good night, sweet prince .... <THUNK!>" */
	tlbflush();
	/* NOTREACHED */
	while(1);
}

/*
 * Grow the user stack to allow for 'sp'. This version grows the stack in
 *	chunks of SGROWSIZ.
 */
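/*
 * For example, a fault one page below the currently committed stack
 * grows the mapping by a full SGROWSIZ-rounded chunk rather than a
 * single page, so nearby faults shortly afterwards need no further
 * allocation.
 */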
int
grow(p, sp)
	struct proc *p;
	u_int sp;
{
	unsigned int nss;
	caddr_t v;
	struct vmspace *vm = p->p_vmspace;

	if ((caddr_t)sp <= vm->vm_maxsaddr || (unsigned)sp >= (unsigned)USRSTACK)
	    return (1);

	nss = roundup(USRSTACK - (unsigned)sp, PAGE_SIZE);

	if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur)
		return (0);

	if (vm->vm_ssize && roundup(vm->vm_ssize << PAGE_SHIFT,
	    SGROWSIZ) < nss) {
		int grow_amount;
		/*
		 * If necessary, grow the VM that the stack occupies
		 * to allow for the rlimit. This allows us to not have
		 * to allocate all of the VM up-front in execve (which
		 * is expensive).
		 * Grow the VM by the amount requested rounded up to
		 * the nearest SGROWSIZ to provide for some hysteresis.
		 */
		grow_amount = roundup((nss - (vm->vm_ssize << PAGE_SHIFT)), SGROWSIZ);
		v = (char *)USRSTACK - roundup(vm->vm_ssize << PAGE_SHIFT,
		    SGROWSIZ) - grow_amount;
		/*
		 * If there isn't enough room to extend by SGROWSIZ, then
		 * just extend to the maximum size
		 */
		if (v < vm->vm_maxsaddr) {
			v = vm->vm_maxsaddr;
			grow_amount = MAXSSIZ - (vm->vm_ssize << PAGE_SHIFT);
		}
		if (vm_allocate(&vm->vm_map, (vm_offset_t *)&v,
		    grow_amount, FALSE) != KERN_SUCCESS) {
			return (0);
		}
		vm->vm_ssize += grow_amount >> PAGE_SHIFT;
	}

	return (1);
}