vm_machdep.c revision 5771
/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 *	$Id: vm_machdep.c,v 1.30 1995/01/09 16:04:40 davidg Exp $
 */

#include "npx.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/user.h>

#include <machine/cpu.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

#ifdef BOUNCE_BUFFERS
vm_map_t	io_map;
volatile int	kvasfreecnt;


caddr_t		bouncememory;
int		bouncepages, bpwait;
vm_offset_t	*bouncepa;
int		bmwait, bmfreeing;

#define BITS_IN_UNSIGNED (8*sizeof(unsigned))
int		bounceallocarraysize;
unsigned	*bounceallocarray;
int		bouncefree;

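/*
 * Bounce buffers exist because ISA DMA cannot address physical memory
 * above 16MB; I/O touching pages beyond SIXTEENMEG is staged through
 * pages below that boundary.  MAXBKVA (in pages) sizes the kernel
 * virtual space used for the temporary mappings, as well as the
 * deferred-free list declared below.
 */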
#define SIXTEENMEG (4096*4096)
#define MAXBKVA 1024
int		maxbkva = MAXBKVA*NBPG;

/* special list that can be used at interrupt time for eventual kva free */
struct kvasfree {
	vm_offset_t addr;
	vm_offset_t size;
} kvaf[MAXBKVA];


vm_offset_t vm_bounce_kva();
/*
 * get bounce buffer pages (count physically contiguous)
 * (only 1 implemented now)
 */
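/*
 * Allocation state is a bitmap: bit (index % 32) of
 * bounceallocarray[index / 32] is set while bouncepa[index] is in use.
 * For example, with 32-bit words, bounce page 37 is tracked by bit 5 of
 * bounceallocarray[1]; ffs() on the complemented word finds such a free
 * bit, and the address handed back is bouncepa[i * 32 + (bit - 1)].
 */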
vm_offset_t
vm_bounce_page_find(count)
	int count;
{
	int bit;
	int s,i;

	if (count != 1)
		panic("vm_bounce_page_find -- no support for > 1 page yet!!!");

	s = splbio();
retry:
	for (i = 0; i < bounceallocarraysize; i++) {
		if (bounceallocarray[i] != 0xffffffff) {
			bit = ffs(~bounceallocarray[i]);
			if (bit) {
				bounceallocarray[i] |= 1 << (bit - 1);
				bouncefree -= count;
				splx(s);
				return bouncepa[(i * BITS_IN_UNSIGNED + (bit - 1))];
			}
		}
	}
	bpwait = 1;
	tsleep((caddr_t) &bounceallocarray, PRIBIO, "bncwai", 0);
	goto retry;
}

void
vm_bounce_kva_free(addr, size, now)
	vm_offset_t addr;
	vm_offset_t size;
	int now;
{
	int s = splbio();
	kvaf[kvasfreecnt].addr = addr;
	kvaf[kvasfreecnt].size = size;
	++kvasfreecnt;
	if( now) {
		/*
		 * this will do wakeups
		 */
		vm_bounce_kva(0,0);
	} else {
		if (bmwait) {
		/*
		 * if anyone is waiting on the bounce-map, then wakeup
		 */
			wakeup((caddr_t) io_map);
			bmwait = 0;
		}
	}
	splx(s);
}

/*
 * free count bounce buffer pages
 */
void
vm_bounce_page_free(pa, count)
	vm_offset_t pa;
	int count;
{
	int allocindex;
	int index;
	int bit;

	if (count != 1)
		panic("vm_bounce_page_free -- no support for > 1 page yet!!!\n");

	for(index=0;index<bouncepages;index++) {
		if( pa == bouncepa[index])
			break;
	}

	if( index == bouncepages)
		panic("vm_bounce_page_free: invalid bounce buffer");

	allocindex = index / BITS_IN_UNSIGNED;
	bit = index % BITS_IN_UNSIGNED;

	bounceallocarray[allocindex] &= ~(1 << bit);

	bouncefree += count;
	if (bpwait) {
		bpwait = 0;
		wakeup((caddr_t) &bounceallocarray);
	}
}

/*
 * allocate count bounce buffer kva pages
 */
vm_offset_t
vm_bounce_kva(size, waitok)
	int size;
	int waitok;
{
	int i;
	vm_offset_t kva = 0;
	vm_offset_t off;
	int s = splbio();
more:
	if (!bmfreeing && kvasfreecnt) {
		bmfreeing = 1;
		for (i = 0; i < kvasfreecnt; i++) {
			for(off=0;off<kvaf[i].size;off+=NBPG) {
				pmap_kremove( kvaf[i].addr + off);
			}
			kmem_free_wakeup(io_map, kvaf[i].addr,
				kvaf[i].size);
		}
		kvasfreecnt = 0;
		bmfreeing = 0;
		if( bmwait) {
			bmwait = 0;
			wakeup( (caddr_t) io_map);
		}
	}

	if( size == 0) {
		splx(s);
		return NULL;
	}

	if ((kva = kmem_alloc_pageable(io_map, size)) == 0) {
		if( !waitok) {
			splx(s);
			return NULL;
		}
		bmwait = 1;
		tsleep((caddr_t) io_map, PRIBIO, "bmwait", 0);
		goto more;
	}
	splx(s);
	return kva;
}

/*
 * same as vm_bounce_kva -- but actually allocates (and maps) the
 * backing bounce pages; takes a page count rather than a byte size
 */
vm_offset_t
vm_bounce_kva_alloc(count)
int count;
{
	int i;
	vm_offset_t kva;
	vm_offset_t pa;
	if( bouncepages == 0) {
		kva = (vm_offset_t) malloc(count*NBPG, M_TEMP, M_WAITOK);
		return kva;
	}
	kva = vm_bounce_kva(count*NBPG, 1);
	for(i=0;i<count;i++) {
		pa = vm_bounce_page_find(1);
		pmap_kenter(kva + i * NBPG, pa);
	}
	return kva;
}

/*
 * same as vm_bounce_kva_free -- but also releases the underlying bounce pages
 */
void
vm_bounce_kva_alloc_free(kva, count)
	vm_offset_t kva;
	int count;
{
	int i;
	vm_offset_t pa;
	if( bouncepages == 0) {
		free((caddr_t) kva, M_TEMP);
		return;
	}
	for(i = 0; i < count; i++) {
		pa = pmap_kextract(kva + i * NBPG);
		vm_bounce_page_free(pa, 1);
	}
	vm_bounce_kva_free(kva, count*NBPG, 0);
}
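
#if 0
/*
 * Illustrative call pattern (sketch only; "xxdriver_attach" and
 * "xxsc_dmabuf" are hypothetical names, not part of this file): a driver
 * that needs a DMA-reachable scratch area can pair the two routines above.
 */
static vm_offset_t xxsc_dmabuf;

static void
xxdriver_attach()
{
	xxsc_dmabuf = vm_bounce_kva_alloc(4);	/* 4 pages, all below 16MB */
	/* ... program the controller using pmap_kextract(xxsc_dmabuf) ... */
	vm_bounce_kva_alloc_free(xxsc_dmabuf, 4); /* pages and kva returned */
}
#endif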

/*
 * do the things necessary to the struct buf to implement
 * bounce buffers...  inserted before the disk sort
 */
void
vm_bounce_alloc(bp)
	struct buf *bp;
{
	int countvmpg;
	vm_offset_t vastart, vaend;
	vm_offset_t vapstart, vapend;
	vm_offset_t va, kva;
	vm_offset_t pa;
	int dobounceflag = 0;
	int i;

	if (bouncepages == 0)
		return;

	if (bp->b_flags & B_BOUNCE) {
		printf("vm_bounce_alloc: called recursively???\n");
		return;
	}

	if (bp->b_bufsize < bp->b_bcount) {
		printf(
		    "vm_bounce_alloc: b_bufsize(0x%lx) < b_bcount(0x%lx) !!\n",
			bp->b_bufsize, bp->b_bcount);
		panic("vm_bounce_alloc");
	}

/*
 *  This is not really necessary
 *	if( bp->b_bufsize != bp->b_bcount) {
 *		printf("size: %d, count: %d\n", bp->b_bufsize, bp->b_bcount);
 *	}
 */


	vastart = (vm_offset_t) bp->b_data;
	vaend = (vm_offset_t) bp->b_data + bp->b_bufsize;

	vapstart = i386_trunc_page(vastart);
	vapend = i386_round_page(vaend);
	countvmpg = (vapend - vapstart) / NBPG;

/*
 * if any page is above 16MB, then go into bounce-buffer mode
 */
	va = vapstart;
	for (i = 0; i < countvmpg; i++) {
		pa = pmap_kextract(va);
		if (pa >= SIXTEENMEG)
			++dobounceflag;
		if( pa == 0)
			panic("vm_bounce_alloc: Unmapped page");
		va += NBPG;
	}
	if (dobounceflag == 0)
		return;

	if (bouncepages < dobounceflag)
		panic("Not enough bounce buffers!!!");

/*
 * allocate a replacement kva for b_addr
 */
	kva = vm_bounce_kva(countvmpg*NBPG, 1);
#if 0
	printf("%s: vapstart: %x, vapend: %x, countvmpg: %d, kva: %x ",
		(bp->b_flags & B_READ) ? "read":"write",
			vapstart, vapend, countvmpg, kva);
#endif
	va = vapstart;
	for (i = 0; i < countvmpg; i++) {
		pa = pmap_kextract(va);
		if (pa >= SIXTEENMEG) {
			/*
			 * allocate a replacement page
			 */
			vm_offset_t bpa = vm_bounce_page_find(1);
			pmap_kenter(kva + (NBPG * i), bpa);
#if 0
			printf("r(%d): (%x,%x,%x) ", i, va, pa, bpa);
#endif
			/*
			 * if we are writing, then copy the data into the page
			 */
			if ((bp->b_flags & B_READ) == 0) {
				bcopy((caddr_t) va, (caddr_t) kva + (NBPG * i), NBPG);
			}
		} else {
			/*
			 * use original page
			 */
			pmap_kenter(kva + (NBPG * i), pa);
		}
		va += NBPG;
	}

/*
 * flag the buffer as being bounced
 */
	bp->b_flags |= B_BOUNCE;
/*
 * save the original buffer kva
 */
	bp->b_savekva = bp->b_data;
/*
 * put our new kva into the buffer (offset by original offset)
 */
	bp->b_data = (caddr_t) (((vm_offset_t) kva) |
				((vm_offset_t) bp->b_savekva & (NBPG - 1)));
#if 0
	printf("b_savekva: %x, newva: %x\n", bp->b_savekva, bp->b_data);
#endif
	return;
}

/*
 * hook into biodone to free bounce buffer
 */
void
vm_bounce_free(bp)
	struct buf *bp;
{
	int i;
	vm_offset_t origkva, bouncekva, bouncekvaend;

/*
 * if this isn't a bounced buffer, then just return
 */
	if ((bp->b_flags & B_BOUNCE) == 0)
		return;

/*
 *  This check is not necessary
 *	if (bp->b_bufsize != bp->b_bcount) {
 *		printf("vm_bounce_free: b_bufsize=%d, b_bcount=%d\n",
 *			bp->b_bufsize, bp->b_bcount);
 *	}
 */

	origkva = (vm_offset_t) bp->b_savekva;
	bouncekva = (vm_offset_t) bp->b_data;
/*
	printf("free: %d ", bp->b_bufsize);
*/

/*
 * check every page in the kva space for b_addr
 */
	for (i = 0; i < bp->b_bufsize; ) {
		vm_offset_t mybouncepa;
		vm_offset_t copycount;

		copycount = i386_round_page(bouncekva + 1) - bouncekva;
		mybouncepa = pmap_kextract(i386_trunc_page(bouncekva));

/*
 * if this is a bounced pa, then process as one
 */
		if ( mybouncepa != pmap_kextract( i386_trunc_page( origkva))) {
			vm_offset_t tocopy = copycount;
			if (i + tocopy > bp->b_bufsize)
				tocopy = bp->b_bufsize - i;
/*
 * if this is a read, then copy from bounce buffer into original buffer
 */
			if (bp->b_flags & B_READ)
				bcopy((caddr_t) bouncekva, (caddr_t) origkva, tocopy);
/*
 * free the bounce allocation
 */

/*
			printf("(kva: %x, pa: %x)", bouncekva, mybouncepa);
*/
			vm_bounce_page_free(mybouncepa, 1);
		}

		origkva += copycount;
		bouncekva += copycount;
		i += copycount;
	}

/*
	printf("\n");
*/
/*
 * add the old kva into the "to free" list
 */

	bouncekva= i386_trunc_page((vm_offset_t) bp->b_data);
	bouncekvaend= i386_round_page((vm_offset_t)bp->b_data + bp->b_bufsize);

/*
	printf("freeva: %d\n", (bouncekvaend - bouncekva) / NBPG);
*/
	vm_bounce_kva_free( bouncekva, (bouncekvaend - bouncekva), 0);
	bp->b_data = bp->b_savekva;
	bp->b_savekva = 0;
	bp->b_flags &= ~B_BOUNCE;

	return;
}
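
#if 0
/*
 * Illustrative call pattern (sketch only; "xxstrategy" is a hypothetical
 * driver strategy routine, not part of this file).  vm_bounce_alloc() is
 * applied before the buffer is queued for the device; when the transfer
 * completes, biodone() undoes the remapping via vm_bounce_free().
 */
void
xxstrategy(bp)
	struct buf *bp;
{
	vm_bounce_alloc(bp);		/* remap any pages above 16MB */
	/* ... sort onto the device queue and start the transfer ... */
}
#endif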


/*
 * init the bounce buffer system
 */
void
vm_bounce_init()
{
	int i;

	kvasfreecnt = 0;

	if (bouncepages == 0)
		return;

	bounceallocarraysize = (bouncepages + BITS_IN_UNSIGNED - 1) / BITS_IN_UNSIGNED;
	bounceallocarray = malloc(bounceallocarraysize * sizeof(unsigned), M_TEMP, M_NOWAIT);

	if (!bounceallocarray)
		panic("Cannot allocate bounce resource array\n");

	bouncepa = malloc(bouncepages * sizeof(vm_offset_t), M_TEMP, M_NOWAIT);
	if (!bouncepa)
		panic("Cannot allocate physical memory array\n");

	for(i=0;i<bounceallocarraysize;i++) {
		bounceallocarray[i] = 0xffffffff;
	}

	for(i=0;i<bouncepages;i++) {
		vm_offset_t pa;
		if( (pa = pmap_kextract((vm_offset_t) bouncememory + i * NBPG)) >= SIXTEENMEG)
			panic("bounce memory out of range");
		if( pa == 0)
			panic("bounce memory not resident");
		bouncepa[i] = pa;
		bounceallocarray[i/(8*sizeof(int))] &= ~(1<<(i%(8*sizeof(int))));
	}
	bouncefree = bouncepages;

}
#endif /* BOUNCE_BUFFERS */
/*
 * quick version of vm_fault
 */
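/*
 * Touch the given address so that it is resident (and, for a write,
 * copy-on-write resolved) before the kernel dereferences it.  The 386
 * ignores page write protection while in supervisor mode, so a kernel
 * write cannot be relied on to fault there; on that CPU class we call
 * vm_fault() directly instead of depending on the access below.
 */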

void
vm_fault_quick( v, prot)
	vm_offset_t v;
	int prot;
{
	if( (cpu_class == CPUCLASS_386) &&
		(prot & VM_PROT_WRITE))
		vm_fault(&curproc->p_vmspace->vm_map, v,
			VM_PROT_READ|VM_PROT_WRITE, FALSE);
	else if( prot & VM_PROT_WRITE)
		*(volatile char *)v += 0;
	else
		*(volatile char *)v;
}


/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
int
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	register struct user *up = p2->p_addr;
	int offset;
	extern char kstack[];
	extern int mvesp();

	/*
	 * Copy pcb and stack from proc p1 to p2.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree;
	 * this is tricky, as the final pcb is constructed by savectx,
	 * but its frame isn't yet on the stack when the stack is copied.
	 * swtch compensates for this when the child eventually runs.
	 * This should be done differently, with a single call
	 * that copies and updates the pcb+stack,
	 * replacing the bcopy and savectx.
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	offset = mvesp() - (int)kstack;
	bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
	    (unsigned) ctob(UPAGES) - offset);
	p2->p_md.md_regs = p1->p_md.md_regs;

	pmap_activate(&p2->p_vmspace->vm_pmap, &up->u_pcb);

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero from setjmp.
	 */
	if (savectx(&up->u_pcb, 1)) {
		/*
		 * Return 1 in child.
		 */
		return (1);
	}
	return (0);
}

void
cpu_exit(p)
	register struct proc *p;
{

#if NNPX > 0
	npxexit(p);
#endif	/* NNPX */
	cnt.v_swtch++;
	cpu_switch(p);
	panic("cpu_exit");
}

void
cpu_wait(p)
	struct proc *p;
{
/*	extern vm_map_t upages_map; */

	/* drop per-process resources */
	pmap_remove(vm_map_pmap(u_map), (vm_offset_t) p->p_addr,
		((vm_offset_t) p->p_addr) + ctob(UPAGES));
	kmem_free(u_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
	vmspace_free(p->p_vmspace);
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
int
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{

	return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
	    (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL,
	    p));
}

/*
 * Set a red zone in the kernel stack after the u. area.
 */
void
setredzone(pte, vaddr)
	u_short *pte;
	caddr_t vaddr;
{
/* eventually do this by setting up an expand-down stack segment
   for ss0: selector, allowing stack access down to top of u.
   this means though that protection violations need to be handled
   thru a double fault exception that must do an integral task
   switch to a known good context, within which a dump can be
   taken. a sensible scheme might be to save the initial context
   used by sched (that has physical memory mapped 1:1 at bottom)
   and take the dump while still in mapped mode */
}

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLSIZE.
 */
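/*
 * No data is copied: the PTEs for the source range are torn down and the
 * same physical pages are entered at the destination, so the contents
 * simply appear at 'to' and cease to be mapped at 'from'.
 */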

void
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register vm_offset_t pa;

	if (size & CLOFSET)
		panic("pagemove");
	while (size > 0) {
		pa = pmap_kextract((vm_offset_t)from);
		if (pa == 0)
			panic("pagemove 2");
		if (pmap_kextract((vm_offset_t)to) != 0)
			panic("pagemove 3");
		pmap_kremove((vm_offset_t)from);
		pmap_kenter((vm_offset_t)to, pa);
		from += PAGE_SIZE;
		to += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
}

/*
 * Convert kernel VA to physical address
 */
u_long
kvtop(void *addr)
{
	vm_offset_t va;

	va = pmap_kextract((vm_offset_t)addr);
	if (va == 0)
		panic("kvtop: zero page frame");
	return((int)va);
}

/*
 * Map an IO request into kernel virtual address space.
 *
 * All requests are (re)mapped into kernel VA space.
 * Notice that we use b_bufsize for the size of the buffer
 * to be mapped.  b_bcount might be modified by the driver.
 */
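/*
 * The caller's virtual address is saved in b_saveaddr and b_data is
 * pointed at a temporary kernel mapping of the same pages.  Each data
 * page, and the page table page that maps it, is held via vm_page_hold();
 * vunmapbuf() undoes the mapping and drops those holds when the I/O
 * completes.
 */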
void
vmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr;
	int off;
	vm_offset_t kva;
	vm_offset_t pa, lastv, v;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	/*
	 * this is the kva that is to be used for
	 * the temporary kernel mapping
	 */
	kva = (vm_offset_t) bp->b_saveaddr;

	lastv = 0;
	for (addr = (caddr_t)trunc_page(bp->b_data);
		addr < bp->b_data + bp->b_bufsize;
		addr += PAGE_SIZE) {

/*
 * make sure that the pde is valid and held
 */
		v = trunc_page(((vm_offset_t)vtopte(addr)));
		if (v != lastv) {
			vm_fault_quick(v, VM_PROT_READ);
			pa = pmap_kextract( v);
			vm_page_hold(PHYS_TO_VM_PAGE(pa));
			lastv = v;
		}

/*
 * do the vm_fault if needed, do the copy-on-write thing when
 * reading stuff off device into memory.
 */
		vm_fault_quick(addr,
			(bp->b_flags&B_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
		pa = pmap_kextract((vm_offset_t) addr);
/*
 * hold the data page
 */
		vm_page_hold(PHYS_TO_VM_PAGE(pa));
	}

	addr = bp->b_saveaddr = bp->b_data;
	off = (int)addr & PGOFSET;
	npf = btoc(round_page(bp->b_bufsize + off));
	bp->b_data = (caddr_t) (kva + off);
	while (npf--) {
		pa = pmap_kextract((vm_offset_t)addr);
		if (pa == 0)
			panic("vmapbuf: null page frame");
		pmap_kenter(kva, trunc_page(pa));
		addr += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
}

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_addr.
 */
void
vunmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr;
	vm_offset_t v,lastv,pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	for (addr = (caddr_t)trunc_page((vm_offset_t) bp->b_data);
		addr < bp->b_data + bp->b_bufsize;
		addr += NBPG)
		pmap_kremove((vm_offset_t) addr);

	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;

/*
 * unhold the pde, and data pages
 */
	lastv = 0;
	for (addr = (caddr_t)trunc_page((vm_offset_t) bp->b_data);
		addr < bp->b_data + bp->b_bufsize;
		addr += NBPG) {

	/*
	 * release the data page
	 */
		pa = pmap_kextract((vm_offset_t) addr);
		vm_page_unhold(PHYS_TO_VM_PAGE(pa));

	/*
	 * and unhold the page table
	 */
		v = trunc_page(((vm_offset_t)vtopte(addr)));
		if (v != lastv) {
			pa = pmap_kextract(v);
			vm_page_unhold(PHYS_TO_VM_PAGE(pa));
			lastv = v;
		}
	}
}

/*
 * Force reset the processor by invalidating the entire address space!
 */
void
cpu_reset() {

	/* force a shutdown by unmapping entire address space ! */
	bzero((caddr_t) PTD, NBPG);

	/* "good night, sweet prince .... <THUNK!>" */
	pmap_update();
	/* NOTREACHED */
	while(1);
}

/*
 * Grow the user stack to allow for 'sp'. This version grows the stack in
 *	chunks of SGROWSIZ.
 */
int
grow(p, sp)
	struct proc *p;
	u_int sp;
{
	unsigned int nss;
	caddr_t v;
	struct vmspace *vm = p->p_vmspace;

	if ((caddr_t)sp <= vm->vm_maxsaddr || (unsigned)sp >= (unsigned)USRSTACK)
	    return (1);

	nss = roundup(USRSTACK - (unsigned)sp, PAGE_SIZE);

	if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur)
		return (0);

	if (vm->vm_ssize && roundup(vm->vm_ssize << PAGE_SHIFT,
	    SGROWSIZ) < nss) {
		int grow_amount;
		/*
		 * If necessary, grow the VM that the stack occupies
		 * to allow for the rlimit. This allows us to not have
		 * to allocate all of the VM up-front in execve (which
		 * is expensive).
		 * Grow the VM by the amount requested rounded up to
		 * the nearest SGROWSIZ to provide for some hysteresis.
		 */
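		/*
		 * Worked example (assuming 4K pages and the usual 128K
		 * SGROWSIZ): if the stack currently spans 256K and the
		 * faulting sp needs nss = 300K, grow_amount becomes
		 * roundup(300K - 256K, 128K) = 128K, the new region starts
		 * at USRSTACK - 384K, and vm_ssize ends up covering 384K.
		 */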
		grow_amount = roundup((nss - (vm->vm_ssize << PAGE_SHIFT)), SGROWSIZ);
		v = (char *)USRSTACK - roundup(vm->vm_ssize << PAGE_SHIFT,
		    SGROWSIZ) - grow_amount;
		/*
		 * If there isn't enough room to extend by SGROWSIZ, then
		 * just extend to the maximum size
		 */
		if (v < vm->vm_maxsaddr) {
			v = vm->vm_maxsaddr;
			grow_amount = MAXSSIZ - (vm->vm_ssize << PAGE_SHIFT);
		}
		if (vm_allocate(&vm->vm_map, (vm_offset_t *)&v,
		    grow_amount, FALSE) != KERN_SUCCESS) {
			return (0);
		}
		vm->vm_ssize += grow_amount >> PAGE_SHIFT;
	}

	return (1);
}
