vm_machdep.c revision 8211
/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 *	$Id: vm_machdep.c,v 1.36 1995/04/26 07:38:35 rgrimes Exp $
 */

#include "npx.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/user.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/md_var.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

#include <i386/isa/isa.h>

#ifdef BOUNCE_BUFFERS
vm_map_t	io_map;
volatile int	kvasfreecnt;


caddr_t		bouncememory;
int		bouncepages, bpwait;
vm_offset_t	*bouncepa;
int		bmwait, bmfreeing;

#define BITS_IN_UNSIGNED (8*sizeof(unsigned))
int		bounceallocarraysize;
unsigned	*bounceallocarray;
int		bouncefree;

#define SIXTEENMEG (4096*4096)
#define MAXBKVA 1024
int		maxbkva = MAXBKVA*NBPG;
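
/*
 * Bookkeeping sketch (as used by vm_bounce_page_find/free below): each
 * bounce page has one bit in bounceallocarray[], set while the page is in
 * use.  For page index n the word is n / BITS_IN_UNSIGNED and the bit is
 * n % BITS_IN_UNSIGNED -- e.g. with 32-bit words, page 37 is word 1, bit 5.
 * bouncepa[n] holds that page's physical address; bouncefree counts the
 * pages still available.
 */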

/* special list that can be used at interrupt time for eventual kva free */
struct kvasfree {
	vm_offset_t addr;
	vm_offset_t size;
} kvaf[MAXBKVA];


vm_offset_t vm_bounce_kva();
/*
 * get bounce buffer pages (count physically contiguous)
 * (only 1 implemented now)
 */
vm_offset_t
vm_bounce_page_find(count)
	int count;
{
	int bit;
	int s,i;

	if (count != 1)
		panic("vm_bounce_page_find -- no support for > 1 page yet!!!");

	s = splbio();
retry:
	for (i = 0; i < bounceallocarraysize; i++) {
		if (bounceallocarray[i] != 0xffffffff) {
			bit = ffs(~bounceallocarray[i]);
			if (bit) {
				bounceallocarray[i] |= 1 << (bit - 1);
				bouncefree -= count;
				splx(s);
				return bouncepa[(i * BITS_IN_UNSIGNED + (bit - 1))];
			}
		}
	}
	bpwait = 1;
	tsleep((caddr_t) &bounceallocarray, PRIBIO, "bncwai", 0);
	goto retry;
}

void
vm_bounce_kva_free(addr, size, now)
	vm_offset_t addr;
	vm_offset_t size;
	int now;
{
	int s = splbio();
	kvaf[kvasfreecnt].addr = addr;
	kvaf[kvasfreecnt].size = size;
	++kvasfreecnt;
	if( now) {
		/*
		 * this will do wakeups
		 */
		vm_bounce_kva(0,0);
	} else {
		if (bmwait) {
		/*
		 * if anyone is waiting on the bounce-map, then wakeup
		 */
			wakeup((caddr_t) io_map);
			bmwait = 0;
		}
	}
	splx(s);
}

/*
 * free count bounce buffer pages
 */
void
vm_bounce_page_free(pa, count)
	vm_offset_t pa;
	int count;
{
	int allocindex;
	int index;
	int bit;

	if (count != 1)
		panic("vm_bounce_page_free -- no support for > 1 page yet!!!");

	for(index=0;index<bouncepages;index++) {
		if( pa == bouncepa[index])
			break;
	}

	if( index == bouncepages)
		panic("vm_bounce_page_free: invalid bounce buffer");

	allocindex = index / BITS_IN_UNSIGNED;
	bit = index % BITS_IN_UNSIGNED;

	bounceallocarray[allocindex] &= ~(1 << bit);

	bouncefree += count;
	if (bpwait) {
		bpwait = 0;
		wakeup((caddr_t) &bounceallocarray);
	}
}

/*
 * allocate size bytes of bounce buffer kva (also drains the deferred
 * kva-free list)
 */
vm_offset_t
vm_bounce_kva(size, waitok)
	int size;
	int waitok;
{
	int i;
	vm_offset_t kva = 0;
	vm_offset_t off;
	int s = splbio();
more:
	if (!bmfreeing && kvasfreecnt) {
		bmfreeing = 1;
		for (i = 0; i < kvasfreecnt; i++) {
			for(off=0;off<kvaf[i].size;off+=NBPG) {
				pmap_kremove( kvaf[i].addr + off);
			}
			kmem_free_wakeup(io_map, kvaf[i].addr,
				kvaf[i].size);
		}
		kvasfreecnt = 0;
		bmfreeing = 0;
		if( bmwait) {
			bmwait = 0;
			wakeup( (caddr_t) io_map);
		}
	}

	if( size == 0) {
		splx(s);
		return NULL;
	}

	if ((kva = kmem_alloc_pageable(io_map, size)) == 0) {
		if( !waitok) {
			splx(s);
			return NULL;
		}
		bmwait = 1;
		tsleep((caddr_t) io_map, PRIBIO, "bmwait", 0);
		goto more;
	}
	splx(s);
	return kva;
}

/*
 * same as vm_bounce_kva -- but really allocates backing pages
 * (takes a page count as arg)
 */
vm_offset_t
vm_bounce_kva_alloc(count)
	int count;
{
	int i;
	vm_offset_t kva;
	vm_offset_t pa;
	if( bouncepages == 0) {
		kva = (vm_offset_t) malloc(count*NBPG, M_TEMP, M_WAITOK);
		return kva;
	}
	kva = vm_bounce_kva(count*NBPG, 1);
	for(i=0;i<count;i++) {
		pa = vm_bounce_page_find(1);
		pmap_kenter(kva + i * NBPG, pa);
	}
	return kva;
}

/*
 * same as vm_bounce_kva_free -- but really free
 */
void
vm_bounce_kva_alloc_free(kva, count)
	vm_offset_t kva;
	int count;
{
	int i;
	vm_offset_t pa;
	if( bouncepages == 0) {
		free((caddr_t) kva, M_TEMP);
		return;
	}
	for(i = 0; i < count; i++) {
		pa = pmap_kextract(kva + i * NBPG);
		vm_bounce_page_free(pa, 1);
	}
	vm_bounce_kva_free(kva, count*NBPG, 0);
}

/*
 * do the things necessary to the struct buf to implement
 * bounce buffers...  inserted before the disk sort
 */
void
vm_bounce_alloc(bp)
	struct buf *bp;
{
	int countvmpg;
	vm_offset_t vastart, vaend;
	vm_offset_t vapstart, vapend;
	vm_offset_t va, kva;
	vm_offset_t pa;
	int dobounceflag = 0;
	int i;

	if (bouncepages == 0)
		return;

	if (bp->b_flags & B_BOUNCE) {
		printf("vm_bounce_alloc: called recursively???\n");
		return;
	}

	if (bp->b_bufsize < bp->b_bcount) {
		printf(
		    "vm_bounce_alloc: b_bufsize(0x%lx) < b_bcount(0x%lx) !!\n",
			bp->b_bufsize, bp->b_bcount);
		panic("vm_bounce_alloc");
	}

/*
 *  This is not really necessary
 *	if( bp->b_bufsize != bp->b_bcount) {
 *		printf("size: %d, count: %d\n", bp->b_bufsize, bp->b_bcount);
 *	}
 */


	vastart = (vm_offset_t) bp->b_data;
	vaend = (vm_offset_t) bp->b_data + bp->b_bufsize;

	vapstart = i386_trunc_page(vastart);
	vapend = i386_round_page(vaend);
	countvmpg = (vapend - vapstart) / NBPG;

/*
 * if any page is above 16MB, then go into bounce-buffer mode
 */
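/*
 * (SIXTEENMEG marks the 16MB boundary: ISA DMA uses 24-bit addresses, so
 * any page at or above 16MB is unreachable by the controller and must be
 * bounced through low memory.)
 */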
	va = vapstart;
	for (i = 0; i < countvmpg; i++) {
		pa = pmap_kextract(va);
		if (pa >= SIXTEENMEG)
			++dobounceflag;
		if( pa == 0)
			panic("vm_bounce_alloc: Unmapped page");
		va += NBPG;
	}
	if (dobounceflag == 0)
		return;

	if (bouncepages < dobounceflag)
		panic("Not enough bounce buffers!!!");

/*
 * allocate a replacement kva for b_addr
 */
	kva = vm_bounce_kva(countvmpg*NBPG, 1);
#if 0
	printf("%s: vapstart: %x, vapend: %x, countvmpg: %d, kva: %x ",
		(bp->b_flags & B_READ) ? "read":"write",
			vapstart, vapend, countvmpg, kva);
#endif
	va = vapstart;
	for (i = 0; i < countvmpg; i++) {
		pa = pmap_kextract(va);
		if (pa >= SIXTEENMEG) {
			/*
			 * allocate a replacement page
			 */
			vm_offset_t bpa = vm_bounce_page_find(1);
			pmap_kenter(kva + (NBPG * i), bpa);
#if 0
			printf("r(%d): (%x,%x,%x) ", i, va, pa, bpa);
#endif
			/*
			 * if we are writing, then copy the data into the page
			 */
			if ((bp->b_flags & B_READ) == 0) {
				bcopy((caddr_t) va, (caddr_t) kva + (NBPG * i), NBPG);
			}
		} else {
			/*
			 * use original page
			 */
			pmap_kenter(kva + (NBPG * i), pa);
		}
		va += NBPG;
	}

/*
 * flag the buffer as being bounced
 */
	bp->b_flags |= B_BOUNCE;
/*
 * save the original buffer kva
 */
	bp->b_savekva = bp->b_data;
/*
 * put our new kva into the buffer (offset by original offset)
 */
	bp->b_data = (caddr_t) (((vm_offset_t) kva) |
				((vm_offset_t) bp->b_savekva & (NBPG - 1)));
#if 0
	printf("b_savekva: %x, newva: %x\n", bp->b_savekva, bp->b_data);
#endif
	return;
}

/*
 * hook into biodone to free bounce buffer
 */
void
vm_bounce_free(bp)
	struct buf *bp;
{
	int i;
	vm_offset_t origkva, bouncekva, bouncekvaend;

/*
 * if this isn't a bounced buffer, then just return
 */
	if ((bp->b_flags & B_BOUNCE) == 0)
		return;

/*
 *  This check is not necessary
 *	if (bp->b_bufsize != bp->b_bcount) {
 *		printf("vm_bounce_free: b_bufsize=%d, b_bcount=%d\n",
 *			bp->b_bufsize, bp->b_bcount);
 *	}
 */

	origkva = (vm_offset_t) bp->b_savekva;
	bouncekva = (vm_offset_t) bp->b_data;
/*
	printf("free: %d ", bp->b_bufsize);
*/

/*
 * check every page in the kva space for b_addr
 */
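/*
 * Each pass handles at most one page: copycount is the number of bytes
 * from bouncekva up to the next page boundary, so a buffer that starts
 * or ends in the middle of a page is copied back correctly.
 */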
	for (i = 0; i < bp->b_bufsize; ) {
		vm_offset_t mybouncepa;
		vm_offset_t copycount;

		copycount = i386_round_page(bouncekva + 1) - bouncekva;
		mybouncepa = pmap_kextract(i386_trunc_page(bouncekva));

/*
 * if this is a bounced pa, then process as one
 */
		if ( mybouncepa != pmap_kextract( i386_trunc_page( origkva))) {
			vm_offset_t tocopy = copycount;
			if (i + tocopy > bp->b_bufsize)
				tocopy = bp->b_bufsize - i;
/*
 * if this is a read, then copy from bounce buffer into original buffer
 */
			if (bp->b_flags & B_READ)
				bcopy((caddr_t) bouncekva, (caddr_t) origkva, tocopy);
/*
 * free the bounce allocation
 */

/*
			printf("(kva: %x, pa: %x)", bouncekva, mybouncepa);
*/
			vm_bounce_page_free(mybouncepa, 1);
		}

		origkva += copycount;
		bouncekva += copycount;
		i += copycount;
	}

/*
	printf("\n");
*/
/*
 * add the old kva into the "to free" list
 */

	bouncekva= i386_trunc_page((vm_offset_t) bp->b_data);
	bouncekvaend= i386_round_page((vm_offset_t)bp->b_data + bp->b_bufsize);

/*
	printf("freeva: %d\n", (bouncekvaend - bouncekva) / NBPG);
*/
	vm_bounce_kva_free( bouncekva, (bouncekvaend - bouncekva), 0);
	bp->b_data = bp->b_savekva;
	bp->b_savekva = 0;
	bp->b_flags &= ~B_BOUNCE;

	return;
}


/*
 * init the bounce buffer system
 */
void
vm_bounce_init()
{
	int i;

	kvasfreecnt = 0;

	if (bouncepages == 0)
		return;

	bounceallocarraysize = (bouncepages + BITS_IN_UNSIGNED - 1) / BITS_IN_UNSIGNED;
	bounceallocarray = malloc(bounceallocarraysize * sizeof(unsigned), M_TEMP, M_NOWAIT);

	if (!bounceallocarray)
		panic("Cannot allocate bounce resource array");

	bouncepa = malloc(bouncepages * sizeof(vm_offset_t), M_TEMP, M_NOWAIT);
	if (!bouncepa)
		panic("Cannot allocate physical memory array");

	for(i=0;i<bounceallocarraysize;i++) {
		bounceallocarray[i] = 0xffffffff;
	}

	for(i=0;i<bouncepages;i++) {
		vm_offset_t pa;
		if( (pa = pmap_kextract((vm_offset_t) bouncememory + i * NBPG)) >= SIXTEENMEG)
			panic("bounce memory out of range");
		if( pa == 0)
			panic("bounce memory not resident");
		bouncepa[i] = pa;
		bounceallocarray[i/(8*sizeof(int))] &= ~(1<<(i%(8*sizeof(int))));
	}
	bouncefree = bouncepages;

}
#endif /* BOUNCE_BUFFERS */
/*
 * quick version of vm_fault
 */
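/*
 * Touching the byte at 'v' (fubyte reads it, subyte writes the same value
 * back) forces any needed page fault, so the page is resident -- and
 * copied/writable when VM_PROT_WRITE is requested -- before it is wired
 * for I/O.  A sketch of the intended use (vmapbuf below does exactly this):
 *
 *	vm_fault_quick(addr, (bp->b_flags & B_READ) ?
 *	    (VM_PROT_READ|VM_PROT_WRITE) : VM_PROT_READ);
 */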

void
vm_fault_quick( v, prot)
	vm_offset_t v;
	int prot;
{
	if (prot & VM_PROT_WRITE)
		subyte((char *)v, fubyte((char *)v));
	else
		(void) fubyte((char *)v);
}


/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
int
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	register struct user *up = p2->p_addr;
	int offset;

	/*
	 * Copy pcb and stack from proc p1 to p2.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree;
	 * this is tricky, as the final pcb is constructed by savectx,
	 * but its frame isn't yet on the stack when the stack is copied.
	 * swtch compensates for this when the child eventually runs.
	 * This should be done differently, with a single call
	 * that copies and updates the pcb+stack,
	 * replacing the bcopy and savectx.
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	offset = mvesp() - (int)kstack;
	bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
	    (unsigned) ctob(UPAGES) - offset);
	p2->p_md.md_regs = p1->p_md.md_regs;

	pmap_activate(&p2->p_vmspace->vm_pmap, &up->u_pcb);

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero from savectx.
	 */
	if (savectx(&up->u_pcb, 1)) {
		/*
		 * Return 1 in child.
		 */
		return (1);
	}
	return (0);
}

void
cpu_exit(p)
	register struct proc *p;
{

#if NNPX > 0
	npxexit(p);
#endif	/* NNPX */
	cnt.v_swtch++;
	cpu_switch(p);
	panic("cpu_exit");
}

void
cpu_wait(p)
	struct proc *p;
{
/*	extern vm_map_t upages_map; */

	/* drop per-process resources */
	pmap_remove(vm_map_pmap(u_map), (vm_offset_t) p->p_addr,
		((vm_offset_t) p->p_addr) + ctob(UPAGES));
	kmem_free(u_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
	vmspace_free(p->p_vmspace);
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
int
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{

	return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
	    (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL,
	    p));
}

/*
 * Set a red zone in the kernel stack after the u. area.
 */
void
setredzone(pte, vaddr)
	u_short *pte;
	caddr_t vaddr;
{
/* eventually do this by setting up an expand-down stack segment
   for ss0: selector, allowing stack access down to top of u.
   this means though that protection violations need to be handled
   thru a double fault exception that must do an integral task
   switch to a known good context, within which a dump can be
   taken. a sensible scheme might be to save the initial context
   used by sched (that has physical memory mapped 1:1 at bottom)
   and take the dump while still in mapped mode */
}

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLSIZE.
 */

void
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register vm_offset_t pa;

	if (size & CLOFSET)
		panic("pagemove");
	while (size > 0) {
		pa = pmap_kextract((vm_offset_t)from);
		if (pa == 0)
			panic("pagemove 2");
		if (pmap_kextract((vm_offset_t)to) != 0)
			panic("pagemove 3");
		pmap_kremove((vm_offset_t)from);
		pmap_kenter((vm_offset_t)to, pa);
		from += PAGE_SIZE;
		to += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
}

/*
 * Convert kernel VA to physical address
 */
u_long
kvtop(void *addr)
{
	vm_offset_t va;

	va = pmap_kextract((vm_offset_t)addr);
	if (va == 0)
		panic("kvtop: zero page frame");
	return((int)va);
}

/*
 * Map an IO request into kernel virtual address space.
 *
 * All requests are (re)mapped into kernel VA space.
 * Notice that we use b_bufsize for the size of the buffer
 * to be mapped.  b_bcount might be modified by the driver.
 */
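/*
 * (These two routines support raw I/O on user buffers -- e.g. from
 * physio(): vmapbuf holds the user pages (vm_page_hold) and double-maps
 * them into kernel VA before the transfer; vunmapbuf tears the mapping
 * down and unholds the pages afterwards.)
 */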
void
vmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr;
	int off;
	vm_offset_t kva;
	vm_offset_t pa, v;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	/*
	 * this is the kva that is to be used for
	 * the temporary kernel mapping
	 */
	kva = (vm_offset_t) bp->b_saveaddr;

	for (addr = (caddr_t)trunc_page(bp->b_data);
		addr < bp->b_data + bp->b_bufsize;
		addr += PAGE_SIZE) {

/*
 * do the vm_fault if needed, do the copy-on-write thing when
 * reading stuff off device into memory.
 */
		vm_fault_quick(addr,
			(bp->b_flags&B_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
		pa = pmap_kextract((vm_offset_t) addr);
		if (pa == 0)
			panic("vmapbuf: page not present");
/*
 * hold the data page
 */
#ifdef DIAGNOSTIC
		if( VM_PAGE_TO_PHYS(PHYS_TO_VM_PAGE(pa)) != pa)
			panic("vmapbuf: confused PHYS_TO_VM_PAGE mapping");
#endif
		vm_page_hold(PHYS_TO_VM_PAGE(pa));
	}

	addr = bp->b_saveaddr = bp->b_data;
	off = (int)addr & PGOFSET;
	npf = btoc(round_page(bp->b_bufsize + off));
	bp->b_data = (caddr_t) (kva + off);
	while (npf--) {
		pa = pmap_kextract((vm_offset_t)addr);
		if (pa == 0)
			panic("vmapbuf: null page frame");
		pmap_kenter(kva, trunc_page(pa));
		addr += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
}

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_addr.
 */
void
vunmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr;
	vm_offset_t v,pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	for (addr = (caddr_t)trunc_page((vm_offset_t) bp->b_data);
		addr < bp->b_data + bp->b_bufsize;
		addr += NBPG)
		pmap_kremove((vm_offset_t) addr);

	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;

/*
 * unhold the pde, and data pages
 */
	for (addr = (caddr_t)trunc_page((vm_offset_t) bp->b_data);
		addr < bp->b_data + bp->b_bufsize;
		addr += NBPG) {
	/*
	 * release the data page
	 */
		pa = pmap_kextract((vm_offset_t) addr);
		vm_page_unhold(PHYS_TO_VM_PAGE(pa));
	}
}

/*
 * Force reset the processor by invalidating the entire address space!
 */
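/*
 * Writing 0xFE to the keyboard controller command port (IO_KBD + 4, i.e.
 * port 0x64 on AT-compatible hardware) pulses the 8042's reset output,
 * which is wired to the CPU's reset line.
 */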
void
cpu_reset()
{

	/*
	 * Attempt to do a CPU reset via the keyboard controller;
	 * do not turn off GateA20, as any machine that fails
	 * to do the reset here would then end up in no man's land.
	 */
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
	printf("Keyboard reset did not work, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for printf to complete */

	/* force a shutdown by unmapping entire address space ! */
	bzero((caddr_t) PTD, NBPG);

	/* "good night, sweet prince .... <THUNK!>" */
	pmap_update();
	/* NOTREACHED */
	while(1);
}

/*
 * Grow the user stack to allow for 'sp'. This version grows the stack in
 *	chunks of SGROWSIZ.
 */
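/*
 * Illustrative arithmetic (assumed example values, not taken from this
 * file): with 4K pages, SGROWSIZ = 128K and a current vm_ssize of 16 pages
 * (64K), a fault at sp = USRSTACK - 200K gives nss = 200K.  Since
 * roundup(64K, 128K) = 128K < nss, grow_amount = roundup(200K - 64K, 128K)
 * = 256K, and the new mapping is placed at USRSTACK - 128K - 256K, just
 * below the space already reserved for the existing stack, extending it
 * well past the faulting address for hysteresis.
 */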
int
grow(p, sp)
	struct proc *p;
	u_int sp;
{
	unsigned int nss;
	caddr_t v;
	struct vmspace *vm = p->p_vmspace;

	if ((caddr_t)sp <= vm->vm_maxsaddr || (unsigned)sp >= (unsigned)USRSTACK)
	    return (1);

	nss = roundup(USRSTACK - (unsigned)sp, PAGE_SIZE);

	if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur)
		return (0);

	if (vm->vm_ssize && roundup(vm->vm_ssize << PAGE_SHIFT,
	    SGROWSIZ) < nss) {
		int grow_amount;
		/*
		 * If necessary, grow the VM that the stack occupies
		 * to allow for the rlimit. This allows us to not have
		 * to allocate all of the VM up-front in execve (which
		 * is expensive).
		 * Grow the VM by the amount requested rounded up to
		 * the nearest SGROWSIZ to provide for some hysteresis.
		 */
		grow_amount = roundup((nss - (vm->vm_ssize << PAGE_SHIFT)), SGROWSIZ);
		v = (char *)USRSTACK - roundup(vm->vm_ssize << PAGE_SHIFT,
		    SGROWSIZ) - grow_amount;
		/*
		 * If there isn't enough room to extend by SGROWSIZ, then
		 * just extend to the maximum size
		 */
		if (v < vm->vm_maxsaddr) {
			v = vm->vm_maxsaddr;
			grow_amount = MAXSSIZ - (vm->vm_ssize << PAGE_SHIFT);
		}
		if ((grow_amount == 0) || (vm_map_find(&vm->vm_map, NULL, 0, (vm_offset_t *)&v,
		    grow_amount, FALSE) != KERN_SUCCESS)) {
			return (0);
		}
		vm->vm_ssize += grow_amount >> PAGE_SHIFT;
	}

	return (1);
}