vm_machdep.c revision 54207
/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 * $FreeBSD: head/sys/powerpc/aim/vm_machdep.c 54207 1999-12-06 18:12:29Z peter $
 */
/*
 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/fpu.h>
#include <machine/md_var.h>
#include <machine/prom.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

/*
 * Quick version of vm_fault: touch the given page directly, forcing
 * it to become resident (and copy-on-write resolved, for writes)
 * without going through vm_fault() proper.  Returns -1 on failure.
 */
int
vm_fault_quick(v, prot)
	caddr_t v;
	int prot;
{
	int r;

	if (prot & VM_PROT_WRITE)
		r = subyte(v, fubyte(v));
	else
		r = fubyte(v);
	return(r);
}
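
/*
 * Illustrative use (a hedged sketch; vmapbuf() below is the real
 * caller in this file): fault in each page of a user buffer before
 * wiring it for raw I/O.  The variable name is hypothetical.
 *
 *	if (vm_fault_quick(va, VM_PROT_READ | VM_PROT_WRITE) < 0)
 *		return (EFAULT);
 */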

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the kernel stack so that
 * the child is ready to run and to return to user mode.
 */
void
cpu_fork(p1, p2, flags)
	register struct proc *p1, *p2;
	int flags;
{
	struct user *up = p2->p_addr;

	if ((flags & RFPROC) == 0)
		return;

	p2->p_md.md_tf = p1->p_md.md_tf;
	p2->p_md.md_flags = p1->p_md.md_flags & MDP_FPUSED;

	/*
	 * Cache the physical address of the pcb, so we can
	 * swap to it easily.
	 */
	p2->p_md.md_pcbpaddr = (void*) vtophys((vm_offset_t) &up->u_pcb);

	/*
	 * Copy floating point state from the FP chip to the PCB
	 * if this process has state stored there.
	 */
	alpha_fpstate_save(p1, 0);

	/*
	 * Copy pcb and stack from proc p1 to p2.  We do this as
	 * cheaply as possible, copying only the active part of the
	 * stack.  The stack and pcb need to agree.  Make sure that the
	 * new process has FEN disabled.
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	p2->p_addr->u_pcb.pcb_hw.apcb_usp = alpha_pal_rdusp();
	p2->p_addr->u_pcb.pcb_hw.apcb_flags &= ~ALPHA_PCB_FLAGS_FEN;

	/*
	 * Set the floating point state.
	 */
	if ((p2->p_addr->u_pcb.pcb_fp_control & IEEE_INHERIT) == 0) {
		p2->p_addr->u_pcb.pcb_fp_control = 0;
		p2->p_addr->u_pcb.pcb_fp.fpr_cr = (FPCR_DYN_NORMAL
						   | FPCR_INVD | FPCR_DZED
						   | FPCR_OVFD | FPCR_INED
						   | FPCR_UNFD);
	}
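
	/*
	 * The FPCR_*D bits disable the corresponding IEEE traps, so a
	 * child that does not inherit its parent's FP control word
	 * starts with all IEEE traps off and round-to-nearest dynamic
	 * rounding.
	 */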

	/*
	 * The rest of this function arranges a non-local goto for the
	 * new process: when it is first scheduled, it will resume in
	 * child_return() via the context saved below.
	 */
#ifdef DIAGNOSTIC
	if (p1 != curproc)
		panic("cpu_fork: curproc");
	alpha_fpstate_check(p1);
#endif

	/*
	 * create the child's kernel stack, from scratch.
	 */
	{
		struct trapframe *p2tf;

		/*
		 * Pick a stack pointer, leaving room for a trapframe;
		 * copy trapframe from parent so return to user mode
		 * will be to right address, with correct registers.
		 */
		p2tf = p2->p_md.md_tf = (struct trapframe *)
		    ((char *)p2->p_addr + USPACE - sizeof(struct trapframe));
		bcopy(p1->p_md.md_tf, p2->p_md.md_tf,
		    sizeof(struct trapframe));

		/*
		 * Set up return-value registers as fork() libc stub expects.
		 */
		p2tf->tf_regs[FRAME_V0] = 0; 	/* child's pid (linux) 	*/
		p2tf->tf_regs[FRAME_A3] = 0;	/* no error 		*/
		p2tf->tf_regs[FRAME_A4] = 1;	/* is child (FreeBSD) 	*/
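
		/*
		 * Roughly, the userland fork stub can then tell the two
		 * returns apart like this (an illustrative sketch, not
		 * the verbatim libc source):
		 *
		 *	v0 = syscall(SYS_fork);	   child pid, in the parent
		 *	if (a3 != 0)		   error: v0 holds errno
		 *		goto err;
		 *	if (a4 != 0)		   child side of the fork
		 *		v0 = 0;
		 *	return (v0);
		 */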

		/*
		 * Arrange for continuation at child_return(), which
		 * will return to exception_return().  Note that the child
		 * process doesn't stay in the kernel for long!
		 *
		 * This is an inlined version of cpu_set_kpc.
		 */
		up->u_pcb.pcb_hw.apcb_ksp = (u_int64_t)p2tf;
		up->u_pcb.pcb_context[0] =
		    (u_int64_t)child_return;		/* s0: pc */
		up->u_pcb.pcb_context[1] =
		    (u_int64_t)exception_return;	/* s1: ra */
		up->u_pcb.pcb_context[2] = (u_long) p2;	/* s2: a0 */
		up->u_pcb.pcb_context[7] =
		    (u_int64_t)switch_trampoline;	/* ra: assembly magic */
	}
}
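
/*
 * For reference, the child's first steps once cpu_switch() picks it up
 * look roughly like this (a sketch of the control flow set up above):
 *
 *	cpu_switch() restores pcb_context[] and returns via ra
 *	  -> switch_trampoline: moves s0/s2 into pc/a0 and jumps
 *	    -> child_return(p2): finishes fork bookkeeping
 *	      -> exception_return: restores the trapframe and drops to
 *	         user mode with v0/a3/a4 set as above
 */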

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(p, func, arg)
	struct proc *p;
	void (*func) __P((void *));
	void *arg;
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this:  func(arg, frame);
	 */
	p->p_addr->u_pcb.pcb_context[0] = (u_long) func;
	p->p_addr->u_pcb.pcb_context[2] = (u_long) arg;
}
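
/*
 * A sketch of the usual client, kernel thread creation: fork a process
 * that never returns to user mode, then redirect its startup.  (The
 * helper names below are illustrative, not an API defined here.)
 *
 *	p = fork_kernel_process();	 e.g. via fork1() and friends
 *	cpu_set_fork_handler(p, kthread_main, softc);
 *
 * When the child is first scheduled, switch_trampoline starts it in
 * kthread_main(softc, frame) instead of child_return().
 */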

/*
 * cpu_exit is called as the last action during exit.
 * We block interrupts and switch away for good; cpu_switch() never
 * returns here (hence the panic), and the dead process' remaining
 * resources are reclaimed later by cpu_wait().
 */
void
cpu_exit(p)
	register struct proc *p;
{
	alpha_fpstate_drop(p);

	(void) splhigh();
	cnt.v_swtch++;
	cpu_switch(p);
	panic("cpu_exit");
}

void
cpu_wait(p)
	struct proc *p;
{
	/* drop per-process resources */
	pmap_dispose_proc(p);

	/* and clean-out the vmspace */
	vmspace_free(p->p_vmspace);
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
int
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{

	return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
	    (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL,
	    p));
}
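
/*
 * For scale: ctob(UPAGES) is the u-area size in bytes, so with the
 * usual alpha values (UPAGES == 2, PAGE_SIZE == 8K; both are config
 * assumptions, not set in this file) the core file begins with a 16K
 * header holding the user struct and kernel stack.
 */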

#ifdef notyet
static void
setredzone(pte, vaddr)
	u_short *pte;
	caddr_t vaddr;
{
/* eventually do this by setting up an expand-down stack segment
   for ss0: selector, allowing stack access down to top of u.
   this means though that protection violations need to be handled
   thru a double fault exception that must do an integral task
   switch to a known good context, within which a dump can be
   taken. a sensible scheme might be to save the initial context
   used by sched (that has physical memory mapped 1:1 at bottom)
   and take the dump while still in mapped mode */
}
#endif

/*
 * Map an IO request into kernel virtual address space.
 *
 * All requests are (re)mapped into kernel VA space.
 * Notice that we use b_bufsize for the size of the buffer
 * to be mapped.  b_bcount might be modified by the driver.
 */
void
vmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr, v, kva;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page(bp->b_data);
	    addr < bp->b_data + bp->b_bufsize;
	    addr += PAGE_SIZE, v += PAGE_SIZE) {
		/*
		 * Do the vm_fault if needed; do the copy-on-write thing
		 * when reading stuff off device into memory.
		 */
		vm_fault_quick(addr,
			(bp->b_flags&B_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
		pa = trunc_page(pmap_kextract((vm_offset_t) addr));
		if (pa == 0)
			panic("vmapbuf: page not present");
		vm_page_hold(PHYS_TO_VM_PAGE(pa));
		pmap_kenter((vm_offset_t) v, pa);
	}

	kva = bp->b_saveaddr;
	bp->b_saveaddr = bp->b_data;
	bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
}
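
/*
 * vmapbuf()/vunmapbuf() bracket raw ("physical") I/O.  The canonical
 * caller is physio(), whose core loop looks roughly like this (a
 * simplified sketch, not the exact kern_physio.c source):
 *
 *	bp->b_flags |= B_PHYS | B_READ;
 *	bp->b_data = uio->uio_iov[i].iov_base;
 *	bp->b_bufsize = bp->b_bcount = iov_len;
 *	vmapbuf(bp);			 remap user pages into the kernel
 *	(*strategy)(bp);		 hand the buffer to the driver
 *	biowait(bp);			 sleep until the I/O completes
 *	vunmapbuf(bp);			 unhold pages, restore b_data
 */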

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_addr.
 */
void
vunmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	for (addr = (caddr_t)trunc_page(bp->b_data);
	    addr < bp->b_data + bp->b_bufsize;
	    addr += PAGE_SIZE) {
		pa = trunc_page(pmap_kextract((vm_offset_t) addr));
		pmap_kremove((vm_offset_t) addr);
		vm_page_unhold(PHYS_TO_VM_PAGE(pa));
	}

	bp->b_data = bp->b_saveaddr;
}

/*
 * Force a reset of the processor by dropping into the console firmware.
 * (prom_halt(0) requests a reboot rather than a halt.)
 */
void
cpu_reset()
{
	prom_halt(0);
}

/*
 * Grow the user stack to include the given stack pointer.  Returns 1
 * on success, 0 if the stack could not be grown.
 */
int
grow_stack(p, sp)
	struct proc *p;
	size_t sp;
{
	int rv;

	rv = vm_map_growstack(p, sp);
	if (rv != KERN_SUCCESS)
		return (0);

	return (1);
}
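
/*
 * The expected caller is the page fault path, roughly (a sketch of the
 * trap() logic, not copied from trap.c):
 *
 *	if (map != kernel_map && grow_stack(p, (size_t)va) == 0) {
 *		rv = KERN_FAILURE;	 fault address cannot be covered
 *		goto nogo;
 *	}
 */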

static int cnt_prezero;

SYSCTL_INT(_machdep, OID_AUTO, cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");

/*
 * Implement the pre-zeroed page mechanism.
 * This routine is called from the idle loop.
 */

#define ZIDLE_LO(v)    ((v) * 2 / 3)
#define ZIDLE_HI(v)    ((v) * 4 / 5)
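
/*
 * Worked example of the hysteresis: with cnt.v_free_count == 3000,
 * ZIDLE_LO is 2000 and ZIDLE_HI is 2400.  Once 2400 free pages are
 * pre-zeroed we stop (zero_state is set), and we do not start zeroing
 * again until the count falls below 2000, so the idle loop does not
 * flip-flop around a single threshold.
 */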

int
vm_page_zero_idle()
{
	static int free_rover;
	static int zero_state;
	vm_page_t m;
	int s;

	/*
	 * Attempt to maintain roughly 2/3 to 4/5 of our free pages in a
	 * PG_ZERO'd state.  Add some hysteresis to (attempt to) avoid
	 * generally zeroing a page when the system is near steady-state.
	 * Otherwise we might get 'flutter' during disk I/O / IPC or
	 * fast sleeps.  We also do not want to be continuously zeroing
	 * pages because doing so may flush our L1 and L2 caches too much.
	 */

	if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
		return(0);
	if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
		return(0);

#ifdef SMP
	if (try_mplock()) {
#endif
		s = splvm();
		m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
		zero_state = 0;
		if (m != NULL && (m->flags & PG_ZERO) == 0) {
			vm_page_queues[m->queue].lcnt--;
			TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
			m->queue = PQ_NONE;
			splx(s);
#if 0
			rel_mplock();
#endif
			pmap_zero_page(VM_PAGE_TO_PHYS(m));
#if 0
			get_mplock();
#endif
			(void)splvm();
			vm_page_flag_set(m, PG_ZERO);
			m->queue = PQ_FREE + m->pc;
			vm_page_queues[m->queue].lcnt++;
			TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m,
			    pageq);
			++vm_page_zero_count;
			++cnt_prezero;
			if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
				zero_state = 1;
		}
		free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
		splx(s);
#ifdef SMP
		rel_mplock();
#endif
		return (1);
#ifdef SMP
	}
#endif
	return (0);
}
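
/*
 * The idle loop is expected to poll this when there is nothing to run,
 * along these lines (an illustrative sketch; procrunnable() stands in
 * for the real runnable-process check in the MD idle code):
 *
 *	while (procrunnable() == 0)
 *		if (vm_page_zero_idle() == 0)
 *			;	 nothing worth zeroing; just spin/wait
 *
 * A return of 1 means progress was made (or at least attempted) and
 * the scheduler should be rechecked before zeroing another page.
 */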

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm()
{
#if 0
	if (busdma_swi_pending != 0)
		busdma_swi();
#endif
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */

int
is_physical_memory(addr)
	vm_offset_t addr;
{
	/*
	 * stuff other tests for known memory-mapped devices (PCI?)
	 * here
	 */

	return 1;
}