vm_machdep.c revision 13915
/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 *	$Id: vm_machdep.c,v 1.55 1996/02/04 22:09:12 dyson Exp $
 */

#include "npx.h"
#include "opt_bounce.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/md_var.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

#include <i386/isa/isa.h>

#ifdef BOUNCE_BUFFERS
static vm_offset_t
		vm_bounce_kva __P((int size, int waitok));
static void	vm_bounce_kva_free __P((vm_offset_t addr, vm_offset_t size,
					int now));
static vm_offset_t
		vm_bounce_page_find __P((int count));
static void	vm_bounce_page_free __P((vm_offset_t pa, int count));

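/*
 * Bounce buffer bookkeeping:  bouncememory/bouncepa describe the pool of
 * below-16MB bounce pages, bounceallocarray is a bitmap of the pages that
 * are currently allocated, bouncefree counts the free pages, kvasfreecnt is
 * the number of entries on the deferred kva free list below, and the
 * bpwait/bmwait flags note sleepers waiting for bounce pages or bounce kva.
 */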
static volatile int	kvasfreecnt;

caddr_t		bouncememory;
int		bouncepages;
static int	bpwait;
static vm_offset_t	*bouncepa;
static int		bmwait, bmfreeing;

#define BITS_IN_UNSIGNED (8*sizeof(unsigned))
static int		bounceallocarraysize;
static unsigned	*bounceallocarray;
static int		bouncefree;

#define SIXTEENMEG (4096*4096)
#define MAXBKVA 1024
int		maxbkva = MAXBKVA*NBPG;

/* special list that can be used at interrupt time for eventual kva free */
static struct kvasfree {
	vm_offset_t addr;
	vm_offset_t size;
} kvaf[MAXBKVA];

/*
 * get bounce buffer pages (count physically contiguous)
 * (only 1 implemented now)
 */
static vm_offset_t
vm_bounce_page_find(count)
	int count;
{
	int bit;
	int s,i;

	if (count != 1)
		panic("vm_bounce_page_find -- no support for > 1 page yet!!!");

	s = splbio();
retry:
	for (i = 0; i < bounceallocarraysize; i++) {
		if (bounceallocarray[i] != 0xffffffff) {
			bit = ffs(~bounceallocarray[i]);
			if (bit) {
				bounceallocarray[i] |= 1 << (bit - 1) ;
				bouncefree -= count;
				splx(s);
				return bouncepa[(i * BITS_IN_UNSIGNED + (bit - 1))];
			}
		}
	}
	bpwait = 1;
	tsleep((caddr_t) &bounceallocarray, PRIBIO, "bncwai", 0);
	goto retry;
}

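/*
 * queue a range of bounce buffer kva for deferred freeing.  The range is
 * added to the kvaf[] list; if "now" is set the list is drained right away
 * via vm_bounce_kva(0, 0), otherwise any sleeper on the bounce map is just
 * woken up so the space can be reclaimed later.
 */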
static void
vm_bounce_kva_free(addr, size, now)
	vm_offset_t addr;
	vm_offset_t size;
	int now;
{
	int s = splbio();
	kvaf[kvasfreecnt].addr = addr;
	kvaf[kvasfreecnt].size = size;
	++kvasfreecnt;
	if( now) {
		/*
		 * this will do wakeups
		 */
		vm_bounce_kva(0,0);
	} else {
		if (bmwait) {
		/*
		 * if anyone is waiting on the bounce-map, then wakeup
		 */
			wakeup((caddr_t) io_map);
			bmwait = 0;
		}
	}
	splx(s);
}

/*
 * free count bounce buffer pages
 */
static void
vm_bounce_page_free(pa, count)
	vm_offset_t pa;
	int count;
{
	int allocindex;
	int index;
	int bit;

	if (count != 1)
		panic("vm_bounce_page_free -- no support for > 1 page yet!!!");

	for(index=0;index<bouncepages;index++) {
		if( pa == bouncepa[index])
			break;
	}

	if( index == bouncepages)
		panic("vm_bounce_page_free: invalid bounce buffer");

	allocindex = index / BITS_IN_UNSIGNED;
	bit = index % BITS_IN_UNSIGNED;

	bounceallocarray[allocindex] &= ~(1 << bit);

	bouncefree += count;
	if (bpwait) {
		bpwait = 0;
		wakeup((caddr_t) &bounceallocarray);
	}
}

/*
 * allocate bounce buffer kva of the given size (in bytes);
 * a size of zero just drains the deferred kva free list
 */
static vm_offset_t
vm_bounce_kva(size, waitok)
	int size;
	int waitok;
{
	int i;
	vm_offset_t kva = 0;
	vm_offset_t off;
	int s = splbio();
more:
	if (!bmfreeing && kvasfreecnt) {
		bmfreeing = 1;
		for (i = 0; i < kvasfreecnt; i++) {
			for(off=0;off<kvaf[i].size;off+=NBPG) {
				pmap_kremove( kvaf[i].addr + off);
			}
			kmem_free_wakeup(io_map, kvaf[i].addr,
				kvaf[i].size);
		}
		kvasfreecnt = 0;
		bmfreeing = 0;
		if( bmwait) {
			bmwait = 0;
			wakeup( (caddr_t) io_map);
		}
	}

	if( size == 0) {
		splx(s);
		return NULL;
	}

	if ((kva = kmem_alloc_pageable(io_map, size)) == 0) {
		if( !waitok) {
			splx(s);
			return NULL;
		}
		bmwait = 1;
		tsleep((caddr_t) io_map, PRIBIO, "bmwait", 0);
		goto more;
	}
	splx(s);
	return kva;
}

/*
 * like vm_bounce_kva, but also allocates backing bounce pages
 * (takes a page count as its argument)
 */
vm_offset_t
vm_bounce_kva_alloc(count)
int count;
{
	int i;
	vm_offset_t kva;
	vm_offset_t pa;
	if( bouncepages == 0) {
		kva = (vm_offset_t) malloc(count*NBPG, M_TEMP, M_WAITOK);
		return kva;
	}
	kva = vm_bounce_kva(count*NBPG, 1);
	for(i=0;i<count;i++) {
		pa = vm_bounce_page_find(1);
		pmap_kenter(kva + i * NBPG, pa);
	}
	return kva;
}

/*
 * like vm_bounce_kva_free, but also releases the backing bounce pages
 */
void
vm_bounce_kva_alloc_free(kva, count)
	vm_offset_t kva;
	int count;
{
	int i;
	vm_offset_t pa;
	if( bouncepages == 0) {
		free((caddr_t) kva, M_TEMP);
		return;
	}
	for(i = 0; i < count; i++) {
		pa = pmap_kextract(kva + i * NBPG);
		vm_bounce_page_free(pa, 1);
	}
	vm_bounce_kva_free(kva, count*NBPG, 0);
}

/*
 * do the things necessary to the struct buf to implement
 * bounce buffers...  inserted before the disk sort
 */
void
vm_bounce_alloc(bp)
	struct buf *bp;
{
	int countvmpg;
	vm_offset_t vastart, vaend;
	vm_offset_t vapstart, vapend;
	vm_offset_t va, kva;
	vm_offset_t pa;
	int dobounceflag = 0;
	int i;

	if (bouncepages == 0)
		return;

	if (bp->b_flags & B_BOUNCE) {
		printf("vm_bounce_alloc: called recursively???\n");
		return;
	}

	if (bp->b_bufsize < bp->b_bcount) {
		printf(
		    "vm_bounce_alloc: b_bufsize(0x%lx) < b_bcount(0x%lx) !!\n",
			bp->b_bufsize, bp->b_bcount);
		panic("vm_bounce_alloc");
	}

/*
 *  This is not really necessary
 *	if( bp->b_bufsize != bp->b_bcount) {
 *		printf("size: %d, count: %d\n", bp->b_bufsize, bp->b_bcount);
 *	}
 */


	vastart = (vm_offset_t) bp->b_data;
	vaend = (vm_offset_t) bp->b_data + bp->b_bufsize;

	vapstart = trunc_page(vastart);
	vapend = round_page(vaend);
	countvmpg = (vapend - vapstart) / NBPG;

/*
 * if any page is above 16MB, then go into bounce-buffer mode
 */
	va = vapstart;
	for (i = 0; i < countvmpg; i++) {
		pa = pmap_kextract(va);
		if (pa >= SIXTEENMEG)
			++dobounceflag;
		if( pa == 0)
			panic("vm_bounce_alloc: Unmapped page");
		va += NBPG;
	}
	if (dobounceflag == 0)
		return;

	if (bouncepages < dobounceflag)
		panic("Not enough bounce buffers!!!");

/*
 * allocate a replacement kva for b_addr
 */
	kva = vm_bounce_kva(countvmpg*NBPG, 1);
#if 0
	printf("%s: vapstart: %x, vapend: %x, countvmpg: %d, kva: %x ",
		(bp->b_flags & B_READ) ? "read":"write",
			vapstart, vapend, countvmpg, kva);
#endif
	va = vapstart;
	for (i = 0; i < countvmpg; i++) {
		pa = pmap_kextract(va);
		if (pa >= SIXTEENMEG) {
			/*
			 * allocate a replacement page
			 */
			vm_offset_t bpa = vm_bounce_page_find(1);
			pmap_kenter(kva + (NBPG * i), bpa);
#if 0
			printf("r(%d): (%x,%x,%x) ", i, va, pa, bpa);
#endif
			/*
			 * if we are writing, then copy the data into the bounce page
			 */
			if ((bp->b_flags & B_READ) == 0) {
				bcopy((caddr_t) va, (caddr_t) kva + (NBPG * i), NBPG);
			}
		} else {
			/*
			 * use original page
			 */
			pmap_kenter(kva + (NBPG * i), pa);
		}
		va += NBPG;
	}

/*
 * flag the buffer as being bounced
 */
	bp->b_flags |= B_BOUNCE;
/*
 * save the original buffer kva
 */
	bp->b_savekva = bp->b_data;
/*
 * put our new kva into the buffer (offset by original offset)
 */
	bp->b_data = (caddr_t) (((vm_offset_t) kva) |
				((vm_offset_t) bp->b_savekva & (NBPG - 1)));
#if 0
	printf("b_savekva: %x, newva: %x\n", bp->b_savekva, bp->b_data);
#endif
	return;
}

/*
 * hook into biodone to free bounce buffer
 */
void
vm_bounce_free(bp)
	struct buf *bp;
{
	int i;
	vm_offset_t origkva, bouncekva, bouncekvaend;

/*
 * if this isn't a bounced buffer, then just return
 */
	if ((bp->b_flags & B_BOUNCE) == 0)
		return;

/*
 *  This check is not necessary
 *	if (bp->b_bufsize != bp->b_bcount) {
 *		printf("vm_bounce_free: b_bufsize=%d, b_bcount=%d\n",
 *			bp->b_bufsize, bp->b_bcount);
 *	}
 */

	origkva = (vm_offset_t) bp->b_savekva;
	bouncekva = (vm_offset_t) bp->b_data;
/*
	printf("free: %d ", bp->b_bufsize);
*/

/*
 * check every page in the kva space for b_addr
 */
	for (i = 0; i < bp->b_bufsize; ) {
		vm_offset_t mybouncepa;
		vm_offset_t copycount;

		copycount = round_page(bouncekva + 1) - bouncekva;
		mybouncepa = pmap_kextract(trunc_page(bouncekva));

/*
 * if this is a bounced pa, then process as one
 */
		if ( mybouncepa != pmap_kextract( trunc_page( origkva))) {
			vm_offset_t tocopy = copycount;
			if (i + tocopy > bp->b_bufsize)
				tocopy = bp->b_bufsize - i;
/*
 * if this is a read, then copy from bounce buffer into original buffer
 */
			if (bp->b_flags & B_READ)
				bcopy((caddr_t) bouncekva, (caddr_t) origkva, tocopy);
/*
 * free the bounce allocation
 */

/*
			printf("(kva: %x, pa: %x)", bouncekva, mybouncepa);
*/
			vm_bounce_page_free(mybouncepa, 1);
		}

		origkva += copycount;
		bouncekva += copycount;
		i += copycount;
	}

/*
	printf("\n");
*/
/*
 * add the old kva into the "to free" list
 */

	bouncekva= trunc_page((vm_offset_t) bp->b_data);
	bouncekvaend= round_page((vm_offset_t)bp->b_data + bp->b_bufsize);

/*
	printf("freeva: %d\n", (bouncekvaend - bouncekva) / NBPG);
*/
	vm_bounce_kva_free( bouncekva, (bouncekvaend - bouncekva), 0);
	bp->b_data = bp->b_savekva;
	bp->b_savekva = 0;
	bp->b_flags &= ~B_BOUNCE;

	return;
}


/*
 * init the bounce buffer system
 */
void
vm_bounce_init()
{
	int i;

	kvasfreecnt = 0;

	if (bouncepages == 0)
		return;

	bounceallocarraysize = (bouncepages + BITS_IN_UNSIGNED - 1) / BITS_IN_UNSIGNED;
	bounceallocarray = malloc(bounceallocarraysize * sizeof(unsigned), M_TEMP, M_NOWAIT);

	if (!bounceallocarray)
		panic("Cannot allocate bounce resource array");

	bouncepa = malloc(bouncepages * sizeof(vm_offset_t), M_TEMP, M_NOWAIT);
	if (!bouncepa)
		panic("Cannot allocate physical memory array");

	for(i=0;i<bounceallocarraysize;i++) {
		bounceallocarray[i] = 0xffffffff;
	}

	for(i=0;i<bouncepages;i++) {
		vm_offset_t pa;
		if( (pa = pmap_kextract((vm_offset_t) bouncememory + i * NBPG)) >= SIXTEENMEG)
			panic("bounce memory out of range");
		if( pa == 0)
			panic("bounce memory not resident");
		bouncepa[i] = pa;
		bounceallocarray[i/(8*sizeof(int))] &= ~(1<<(i%(8*sizeof(int))));
	}
	bouncefree = bouncepages;

}
#endif /* BOUNCE_BUFFERS */

/*
 * quick version of vm_fault -- fault a page in by touching it, rewriting
 * the byte in place when write access is required
 */
void
vm_fault_quick(v, prot)
	caddr_t v;
	int prot;
{
	if (prot & VM_PROT_WRITE)
		subyte(v, fubyte(v));
	else
		fubyte(v);
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
int
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	struct pcb *pcb2 = &p2->p_addr->u_pcb;
	int sp, offset;

	/*
	 * Copy pcb and stack from proc p1 to p2.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree;
	 * this is tricky, as the final pcb is constructed by savectx,
	 * but its frame isn't yet on the stack when the stack is copied.
	 * This should be done differently, with a single call
	 * that copies and updates the pcb+stack,
	 * replacing the bcopy and savectx.
	 */

	__asm __volatile("movl %%esp,%0" : "=r" (sp));
	offset = sp - (int)kstack;

	bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
	    (unsigned) ctob(UPAGES) - offset);
	p2->p_md.md_regs = p1->p_md.md_regs;

	*pcb2 = p1->p_addr->u_pcb;
	pcb2->pcb_cr3 = vtophys(p2->p_vmspace->vm_pmap.pm_pdir);

	/*
	 * Returns (0) in parent, (1) in child.
	 */
	return (savectx(pcb2));
}

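/*
 * cpu_exit is called during process exit: release any coprocessor (npx)
 * state, charge a context switch, and switch away for the last time.
 * This routine never returns.
 */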
void
cpu_exit(p)
	register struct proc *p;
{

#if NNPX > 0
	npxexit(p);
#endif	/* NNPX */
	cnt.v_swtch++;
	cpu_switch(p);
	panic("cpu_exit");
}

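/*
 * cpu_wait reclaims what cpu_exit could not free while the exiting process
 * was still running on its kernel stack: unmap and release the u-area and
 * free the dead process's address space.
 */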
void
cpu_wait(p)
	struct proc *p;
{
	/* drop per-process resources */
	pmap_qremove((vm_offset_t) p->p_addr, UPAGES);
	kmem_free(u_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
	vmspace_free(p->p_vmspace);
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
int
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{

	return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
	    (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL,
	    p));
}

#ifdef notyet
static void
setredzone(pte, vaddr)
	u_short *pte;
	caddr_t vaddr;
{
/* eventually do this by setting up an expand-down stack segment
   for ss0: selector, allowing stack access down to top of u.
   this means though that protection violations need to be handled
   thru a double fault exception that must do an integral task
   switch to a known good context, within which a dump can be
   taken. a sensible scheme might be to save the initial context
   used by sched (that has physical memory mapped 1:1 at bottom)
   and take the dump while still in mapped mode */
}
#endif

/*
 * Convert kernel VA to physical address
 */
u_long
kvtop(void *addr)
{
	vm_offset_t va;

	va = pmap_kextract((vm_offset_t)addr);
	if (va == 0)
		panic("kvtop: zero page frame");
	return((int)va);
}

/*
 * Map an IO request into kernel virtual address space.
 *
 * All requests are (re)mapped into kernel VA space.
 * Notice that we use b_bufsize for the size of the buffer
 * to be mapped.  b_bcount might be modified by the driver.
 */
void
vmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr;
	int off;
	vm_offset_t kva;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	/*
	 * this is the kva that is to be used for
	 * the temporary kernel mapping
	 */
	kva = (vm_offset_t) bp->b_saveaddr;

	for (addr = (caddr_t)trunc_page(bp->b_data);
		addr < bp->b_data + bp->b_bufsize;
		addr += PAGE_SIZE) {

/*
 * do the vm_fault if needed, do the copy-on-write thing when
 * reading stuff off device into memory.
 */
		vm_fault_quick(addr,
			(bp->b_flags&B_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
		pa = pmap_kextract((vm_offset_t) addr);
		if (pa == 0)
			panic("vmapbuf: page not present");
/*
 * hold the data page
 */
#ifdef DIAGNOSTIC
		if( VM_PAGE_TO_PHYS(PHYS_TO_VM_PAGE(pa)) != pa)
			panic("vmapbuf: confused PHYS_TO_VM_PAGE mapping");
#endif
		vm_page_hold(PHYS_TO_VM_PAGE(pa));
	}

	addr = bp->b_saveaddr = bp->b_data;
	off = (int)addr & PGOFSET;
	npf = btoc(round_page(bp->b_bufsize + off));
	bp->b_data = (caddr_t) (kva + off);
	while (npf--) {
		pa = pmap_kextract((vm_offset_t)addr);
		if (pa == 0)
			panic("vmapbuf: null page frame");
		pmap_kenter(kva, trunc_page(pa));
		addr += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
}

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_addr.
 */
void
vunmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	for (addr = (caddr_t)trunc_page((vm_offset_t) bp->b_data);
		addr < bp->b_data + bp->b_bufsize;
		addr += NBPG)
		pmap_kremove((vm_offset_t) addr);

	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;

/*
 * unhold the pde, and data pages
 */
	for (addr = (caddr_t)trunc_page((vm_offset_t) bp->b_data);
		addr < bp->b_data + bp->b_bufsize;
		addr += NBPG) {
	/*
	 * release the data page
	 */
		pa = pmap_kextract((vm_offset_t) addr);
		vm_page_unhold(PHYS_TO_VM_PAGE(pa));
	}
}

/*
 * Force reset the processor by invalidating the entire address space!
 */
void
cpu_reset() {

	/*
	 * Attempt to do a CPU reset via the keyboard controller,
	 * do not turn off the GateA20, as any machine that fails
	 * to do the reset here would then end up in no man's land.
	 */

#ifndef BROKEN_KEYBOARD_RESET
	outb(IO_KBD + 4, 0xFE);
	DELAY(500000);	/* wait 0.5 sec to see if that did it */
	printf("Keyboard reset did not work, attempting CPU shutdown\n");
	DELAY(1000000);	/* wait 1 sec for printf to complete */
#endif

	/* force a shutdown by unmapping entire address space ! */
	bzero((caddr_t) PTD, NBPG);

	/* "good night, sweet prince .... <THUNK!>" */
	pmap_update();
	/* NOTREACHED */
	while(1);
}

/*
 * Grow the user stack to allow for 'sp'. This version grows the stack in
 *	chunks of SGROWSIZ.
 */
int
grow(p, sp)
	struct proc *p;
	u_int sp;
{
	unsigned int nss;
	caddr_t v;
	struct vmspace *vm = p->p_vmspace;

	if ((caddr_t)sp <= vm->vm_maxsaddr || (unsigned)sp >= (unsigned)USRSTACK)
	    return (1);

	nss = roundup(USRSTACK - (unsigned)sp, PAGE_SIZE);

	if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur)
		return (0);

	if (vm->vm_ssize && roundup(vm->vm_ssize << PAGE_SHIFT,
	    SGROWSIZ) < nss) {
		int grow_amount;
		/*
		 * If necessary, grow the VM that the stack occupies
		 * to allow for the rlimit. This allows us to not have
		 * to allocate all of the VM up-front in execve (which
		 * is expensive).
		 * Grow the VM by the amount requested rounded up to
		 * the nearest SGROWSIZ to provide for some hysteresis.
		 */
		grow_amount = roundup((nss - (vm->vm_ssize << PAGE_SHIFT)), SGROWSIZ);
		v = (char *)USRSTACK - roundup(vm->vm_ssize << PAGE_SHIFT,
		    SGROWSIZ) - grow_amount;
		/*
		 * If there isn't enough room to extend by SGROWSIZ, then
		 * just extend to the maximum size
		 */
		if (v < vm->vm_maxsaddr) {
			v = vm->vm_maxsaddr;
			grow_amount = MAXSSIZ - (vm->vm_ssize << PAGE_SHIFT);
		}
		if ((grow_amount == 0) || (vm_map_find(&vm->vm_map, NULL, 0, (vm_offset_t *)&v,
		    grow_amount, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0) != KERN_SUCCESS)) {
			return (0);
		}
		vm->vm_ssize += grow_amount >> PAGE_SHIFT;
	}

	return (1);
}

/*
 * prototype routine to implement the pre-zeroed page mechanism;
 * this routine is called from the idle loop.
 */
int
vm_page_zero_idle() {
	vm_page_t m;
	if ((cnt.v_free_count > cnt.v_interrupt_free_min) &&
		(m = vm_page_queue_free.tqh_first)) {
		TAILQ_REMOVE(&vm_page_queue_free, m, pageq);
		/* zero the page with interrupts briefly re-enabled */
		enable_intr();
		pmap_zero_page(VM_PAGE_TO_PHYS(m));
		disable_intr();
		TAILQ_INSERT_HEAD(&vm_page_queue_zero, m, pageq);
		m->queue = PQ_ZERO;
		++vm_page_zero_count;
		return 1;
	}
	return 0;
}
