vm_machdep.c revision 41499
/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 *	$Id: vm_machdep.c,v 1.4 1998/10/15 09:53:27 dfr Exp $
 */
/*
 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/fpu.h>
#include <machine/md_var.h>
#include <machine/prom.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

/*
 * Quick version of vm_fault: touch the user address with fubyte(), and
 * for writes store the byte straight back with subyte(), so that any
 * fault needed to make the page resident (and writable) is taken here.
 */
void
vm_fault_quick(v, prot)
	caddr_t v;
	int prot;
{
	if (prot & VM_PROT_WRITE)
		subyte(v, fubyte(v));
	else
		fubyte(v);
}
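
/*
 * Illustrative only (hypothetical names, not part of this file): a caller
 * that wants a user buffer resident before wiring it can touch each page
 * through vm_fault_quick(), much as vmapbuf() does further below:
 *
 *	for (va = (caddr_t)trunc_page(uva); va < uva + len; va += PAGE_SIZE)
 *		vm_fault_quick(va, writing ? VM_PROT_WRITE : VM_PROT_READ);
 */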

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the kernel stack so that the child
 * is ready to run and return to user mode.
 */
void
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	struct user *up = p2->p_addr;
	int i;

	p2->p_md.md_tf = p1->p_md.md_tf;
	p2->p_md.md_flags = p1->p_md.md_flags & MDP_FPUSED;

	/*
	 * Cache the physical address of the pcb, so we can
	 * swap to it easily.
	 */
	p2->p_md.md_pcbpaddr = (void*) vtophys((vm_offset_t) &up->u_pcb);

	/*
	 * Copy floating point state from the FP chip to the PCB
	 * if this process has state stored there.
	 */
	if (p1 == fpcurproc) {
		alpha_pal_wrfen(1);
		savefpstate(&fpcurproc->p_addr->u_pcb.pcb_fp);
		alpha_pal_wrfen(0);
	}

	/*
	 * Copy the pcb from p1 to p2 and record the current user stack
	 * pointer.  Only the active part of the stack (the trapframe,
	 * copied below) is duplicated; the stack and pcb need to agree.
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	p2->p_addr->u_pcb.pcb_hw.apcb_usp = alpha_pal_rdusp();

	/*
	 * Set the child's floating point state: unless the parent asked
	 * for its IEEE control word to be inherited (IEEE_INHERIT), give
	 * the child the default control word, with traps enabled for
	 * invalid operation, division by zero and overflow, and set the
	 * FPCR to round-to-nearest with underflow and inexact disabled.
	 */
	if ((p2->p_addr->u_pcb.pcb_fp_control & IEEE_INHERIT) == 0) {
		p2->p_addr->u_pcb.pcb_fp_control = (IEEE_TRAP_ENABLE_INV
						    | IEEE_TRAP_ENABLE_DZE
						    | IEEE_TRAP_ENABLE_OVF);
		p2->p_addr->u_pcb.pcb_fp.fpr_cr = (FPCR_DYN_NORMAL
						   | FPCR_INED | FPCR_UNFD);
	}

	/*
	 * Sanity checks: cpu_fork() must be running on behalf of curproc,
	 * and the FP-enable bit must not already be set in the new pcb.
	 */
#ifdef DIAGNOSTIC
	if (p1 != curproc)
		panic("cpu_fork: curproc");
	if ((up->u_pcb.pcb_hw.apcb_flags & ALPHA_PCB_FLAGS_FEN) != 0)
		printf("DANGER WILL ROBINSON: FEN SET IN cpu_fork!\n");
#endif

	/*
	 * Create the child's kernel stack, from scratch.
	 */
	{
		struct trapframe *p2tf;

		/*
		 * Pick a stack pointer, leaving room for a trapframe;
		 * copy the trapframe from the parent so the return to
		 * user mode will be to the right address, with the
		 * correct registers.
		 */
		p2tf = p2->p_md.md_tf = (struct trapframe *)
		    ((char *)p2->p_addr + USPACE - sizeof(struct trapframe));
		bcopy(p1->p_md.md_tf, p2->p_md.md_tf,
		    sizeof(struct trapframe));

		/*
		 * Set up the return-value registers as the fork() libc
		 * stub expects them.
		 */
		p2tf->tf_regs[FRAME_V0] = p1->p_pid;	/* parent's pid */
		p2tf->tf_regs[FRAME_A3] = 0;		/* no error */
		p2tf->tf_regs[FRAME_A4] = 1;		/* is child */

		/*
		 * Arrange for continuation at child_return(), which
		 * will return to exception_return().  Note that the child
		 * process doesn't stay in the kernel for long!
		 *
		 * This is an inlined version of cpu_set_kpc.
		 */
		up->u_pcb.pcb_hw.apcb_ksp = (u_int64_t)p2tf;
		up->u_pcb.pcb_context[0] =
		    (u_int64_t)child_return;		/* s0: pc */
		up->u_pcb.pcb_context[1] =
		    (u_int64_t)exception_return;	/* s1: ra */
		up->u_pcb.pcb_context[2] = (u_long) p2;	/* s2: a0 */
		up->u_pcb.pcb_context[7] =
		    (u_int64_t)switch_trampoline;	/* ra: assembly magic */
	}
}
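
/*
 * A sketch of the child's first run, inferred from the pcb_context[] layout
 * above (the exact trampoline lives in the MD switch code, not in this file):
 * cpu_switch() restores s0-s6 and ra from pcb_context[], so the new process
 * resumes in switch_trampoline with s0 = child_return, s1 = exception_return
 * and s2 = p2.  The trampoline is expected to move s2 into a0 and call the
 * function in s0, i.e. roughly child_return(p2), which then returns through
 * exception_return() into user mode using the trapframe built above.
 */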

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(p, func, arg)
	struct proc *p;
	void (*func) __P((void *));
	void *arg;
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this:  func(arg, frame);
	 */
	p->p_addr->u_pcb.pcb_context[0] = (u_long) func;
	p->p_addr->u_pcb.pcb_context[2] = (u_long) arg;
}
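
/*
 * Usage sketch (hypothetical caller, not part of this file): after forking
 * a process p that should live entirely in the kernel, point its first
 * return at a kernel function instead of letting it head back to user mode:
 *
 *	cpu_set_fork_handler(p, kproc_main, NULL);
 *
 * where kproc_main is the thread's main loop; per the calling convention
 * noted above it is entered as kproc_main(NULL, frame).
 */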

/*
 * cpu_exit is called as the last action during exit.
 * We give up FPU ownership if this process held it, block interrupts,
 * count the context switch and call cpu_switch() to select another
 * process to run.  The exiting process never runs again, so the call
 * should not return; its address space and u-area are reclaimed later,
 * in cpu_wait().
 */
void
cpu_exit(p)
	register struct proc *p;
{
	if (p == fpcurproc)
		fpcurproc = NULL;

	(void) splhigh();
	cnt.v_swtch++;
	cpu_switch(p);
	panic("cpu_exit");
}

void
cpu_wait(p)
	struct proc *p;
{
	/* drop per-process resources */
	pmap_dispose_proc(p);

	/* and clean-out the vmspace */
	vmspace_free(p->p_vmspace);
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
int
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{

	return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
	    (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL,
	    p));
}
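
/*
 * The "machine specific header" here is simply the whole u-area: ctob(UPAGES)
 * bytes starting at p->p_addr, written at offset 0 of the core file.  For
 * example, assuming (illustratively) an 8K PAGE_SIZE and UPAGES == 2, this
 * writes 16K of pcb, kernel stack and trapframe ahead of the process's
 * memory segments.
 */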

#ifdef notyet
static void
setredzone(pte, vaddr)
	u_short *pte;
	caddr_t vaddr;
{
/* eventually do this by setting up an expand-down stack segment
   for ss0: selector, allowing stack access down to top of u.
   this means though that protection violations need to be handled
   thru a double fault exception that must do an integral task
   switch to a known good context, within which a dump can be
   taken. a sensible scheme might be to save the initial context
   used by sched (that has physical memory mapped 1:1 at bottom)
   and take the dump while still in mapped mode */
}
#endif

/*
 * Map an IO request into kernel virtual address space.
 *
 * All requests are (re)mapped into kernel VA space.
 * Notice that we use b_bufsize for the size of the buffer
 * to be mapped.  b_bcount might be modified by the driver.
 */
void
vmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr, v, kva;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page(bp->b_data);
	    addr < bp->b_data + bp->b_bufsize;
	    addr += PAGE_SIZE, v += PAGE_SIZE) {
		/*
		 * Do the vm_fault if needed; do the copy-on-write thing
		 * when reading stuff off device into memory.
		 */
		vm_fault_quick(addr,
			(bp->b_flags&B_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
		pa = trunc_page(pmap_kextract((vm_offset_t) addr));
		if (pa == 0)
			panic("vmapbuf: page not present");
		vm_page_hold(PHYS_TO_VM_PAGE(pa));
		pmap_kenter((vm_offset_t) v, pa);
	}

	kva = bp->b_saveaddr;
	bp->b_saveaddr = bp->b_data;
	bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
}

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_data.
 */
void
vunmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	for (addr = (caddr_t)trunc_page(bp->b_data);
	    addr < bp->b_data + bp->b_bufsize;
	    addr += PAGE_SIZE) {
		pa = trunc_page(pmap_kextract((vm_offset_t) addr));
		pmap_kremove((vm_offset_t) addr);
		vm_page_unhold(PHYS_TO_VM_PAGE(pa));
	}

	bp->b_data = bp->b_saveaddr;
}
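
/*
 * Typical pairing, as a sketch (the real caller is the physio path, which
 * is not in this file; the strategy call below is illustrative):
 *
 *	vmapbuf(bp);		map the user buffer into kernel VA
 *	(*strategy)(bp);	hand the buffer to the driver
 *	biowait(bp);		wait for the I/O to complete
 *	vunmapbuf(bp);		unmap and restore bp->b_data
 */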

/*
 * Force a machine reset by re-entering the console PROM.
 */
void
cpu_reset()
{
	prom_halt(0);
}

/*
 * Grow the user stack to allow for 'sp'. This version grows the stack in
 *	chunks of SGROWSIZ.
 */
int
grow(p, sp)
	struct proc *p;
	size_t sp;
{
	unsigned int nss;
	caddr_t v;
	struct vmspace *vm = p->p_vmspace;

	if ((caddr_t)sp <= vm->vm_maxsaddr || sp >= (size_t) USRSTACK)
	    return (1);

	nss = roundup(USRSTACK - (vm_offset_t)sp, PAGE_SIZE);

	if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur)
		return (0);

	if (vm->vm_ssize && roundup(vm->vm_ssize << PAGE_SHIFT,
	    SGROWSIZ) < nss) {
		int grow_amount;
		/*
		 * If necessary, grow the VM that the stack occupies
		 * to allow for the rlimit. This allows us to not have
		 * to allocate all of the VM up-front in execve (which
		 * is expensive).
		 * Grow the VM by the amount requested rounded up to
		 * the nearest SGROWSIZ to provide for some hysteresis.
		 */
		grow_amount = roundup((nss - (vm->vm_ssize << PAGE_SHIFT)), SGROWSIZ);
		v = (char *)USRSTACK - roundup(vm->vm_ssize << PAGE_SHIFT,
		    SGROWSIZ) - grow_amount;
		/*
		 * If there isn't enough room to extend by SGROWSIZ, then
		 * just extend to the maximum size
		 */
		if (v < vm->vm_maxsaddr) {
			v = vm->vm_maxsaddr;
			grow_amount = MAXSSIZ - (vm->vm_ssize << PAGE_SHIFT);
		}
		if ((grow_amount == 0) || (vm_map_find(&vm->vm_map, NULL, 0, (vm_offset_t *)&v,
		    grow_amount, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0) != KERN_SUCCESS)) {
			return (0);
		}
		vm->vm_ssize += grow_amount >> PAGE_SHIFT;
	}

	return (1);
}
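
/*
 * Worked example with illustrative values (assume an 8K PAGE_SIZE and a
 * 128K SGROWSIZ): if sp lies 9000 bytes below USRSTACK, then
 * nss = roundup(9000, 8192) = 16384.  With vm_ssize at one page,
 * roundup(8192, 131072) = 131072 >= nss, so the existing reservation
 * already covers the fault and no vm_map_find() call is made.  If instead
 * nss were 200000, grow_amount = roundup(200000 - 8192, 131072) = 262144,
 * and a new anonymous mapping of that size would be added below the stack.
 */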

static int cnt_prezero;

SYSCTL_INT(_machdep, OID_AUTO, cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");

/*
 * Implement the pre-zeroed page mechanism.
 * This routine is called from the idle loop.
 */
int
vm_page_zero_idle()
{
	static int free_rover;
	vm_page_t m;
	int s;

	/*
	 * XXX
	 * We stop zeroing pages when there are sufficient prezeroed pages.
	 * This threshold isn't really needed, except we want to
	 * bypass unneeded calls to vm_page_list_find, and the
	 * associated cache flush and latency.  The pre-zero will
	 * still be called when there are significantly more
	 * non-prezeroed pages than zeroed pages.  The threshold
	 * of half the number of reserved pages is arbitrary, but
	 * approximately the right amount.  Eventually, we should
	 * perhaps interrupt the zero operation when a process
	 * is found to be ready to run.
	 */
	if (cnt.v_free_count - vm_page_zero_count <= cnt.v_free_reserved / 2)
		return (0);
#ifdef SMP
	if (try_mplock()) {
#endif
		s = splvm();
		m = vm_page_list_find(PQ_FREE, free_rover);
		if (m != NULL) {
			--(*vm_page_queues[m->queue].lcnt);
			TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
			m->queue = PQ_NONE;
			splx(s);
#if 0
			rel_mplock();
#endif
			pmap_zero_page(VM_PAGE_TO_PHYS(m));
#if 0
			get_mplock();
#endif
			(void)splvm();
			m->queue = PQ_ZERO + m->pc;
			++(*vm_page_queues[m->queue].lcnt);
			TAILQ_INSERT_HEAD(vm_page_queues[m->queue].pl, m,
			    pageq);
			free_rover = (free_rover + PQ_PRIME3) & PQ_L2_MASK;
			++vm_page_zero_count;
			++cnt_prezero;
		}
		splx(s);
#ifdef SMP
		rel_mplock();
#endif
		return (1);
#ifdef SMP
	}
#endif
	return (0);
}
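
/*
 * Intended caller, as pseudocode (the actual idle loop is in the MD idle
 * code, not in this file): while nothing is runnable, offer spare cycles
 * to the pre-zeroing machinery and back off once it declines:
 *
 *	while (nothing runnable)
 *		if (vm_page_zero_idle() == 0)
 *			wait for an interrupt;
 */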

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm()
{
#if 0
	if (busdma_swi_pending != 0)
		busdma_swi();
#endif
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */

int
is_physical_memory(addr)
	vm_offset_t addr;
{
	/*
	 * stuff other tests for known memory-mapped devices (PCI?)
	 * here
	 */

	return 1;
}