vm_machdep.c revision 48391
/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 *	$Id: vm_machdep.c,v 1.16 1999/06/10 20:40:59 dt Exp $
 */
/*
 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/fpu.h>
#include <machine/md_var.h>
#include <machine/prom.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/user.h>

/*
 * Quick version of vm_fault: touch the page at v so that the normal
 * fault path maps it in and, when write access is requested, any
 * copy-on-write is resolved.
 */
void
vm_fault_quick(v, prot)
	caddr_t v;
	int prot;
{
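	/*
	 * Reading a byte faults the page in for read access; writing
	 * the same byte back forces a write fault as well.
	 */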
	if (prot & VM_PROT_WRITE)
		subyte(v, fubyte(v));
	else
		fubyte(v);
}

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the kernel stack so that the
 * child is ready to run and return to user mode.
 */
void
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	struct user *up = p2->p_addr;

	p2->p_md.md_tf = p1->p_md.md_tf;
	p2->p_md.md_flags = p1->p_md.md_flags & MDP_FPUSED;

	/*
	 * Cache the physical address of the pcb, so we can
	 * swap to it easily.
	 */
	p2->p_md.md_pcbpaddr = (void*) vtophys((vm_offset_t) &up->u_pcb);

	/*
	 * Copy floating point state from the FP chip to the PCB
	 * if this process has state stored there.
	 */
	if (p1 == fpcurproc) {
		alpha_pal_wrfen(1);
		savefpstate(&fpcurproc->p_addr->u_pcb.pcb_fp);
		alpha_pal_wrfen(0);
	}

	/*
	 * Copy the pcb and stack from proc p1 to p2.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree.
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
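	/*
	 * p1 must be curproc (see the DIAGNOSTIC check below), so its
	 * live user stack pointer is held in the PALcode-maintained
	 * USP rather than in the pcb image just copied; fetch it
	 * explicitly for the child.
	 */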
	p2->p_addr->u_pcb.pcb_hw.apcb_usp = alpha_pal_rdusp();

	/*
	 * Set up the child's floating point state.  Unless the parent
	 * asked (via IEEE_INHERIT) for its IEEE trap settings to be
	 * inherited, give the child the default FP environment.
	 */
	if ((p2->p_addr->u_pcb.pcb_fp_control & IEEE_INHERIT) == 0) {
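		/*
		 * The FPCR_*D bits disable the corresponding FP traps
		 * (invalid operation, division by zero, overflow,
		 * inexact and underflow); FPCR_DYN_NORMAL selects the
		 * default dynamic rounding mode.
		 */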
		p2->p_addr->u_pcb.pcb_fp_control = 0;
		p2->p_addr->u_pcb.pcb_fp.fpr_cr = (FPCR_DYN_NORMAL
						   | FPCR_INVD | FPCR_DZED
						   | FPCR_OVFD | FPCR_INED
						   | FPCR_UNFD);
	}

	/*
	 * Arrange for a non-local goto when the new process is started:
	 * cpu_switch() will load the context set up below, so the child
	 * resumes in switch_trampoline() rather than here.
	 */
#ifdef DIAGNOSTIC
	if (p1 != curproc)
		panic("cpu_fork: curproc");
	if ((up->u_pcb.pcb_hw.apcb_flags & ALPHA_PCB_FLAGS_FEN) != 0)
		printf("DANGER WILL ROBINSON: FEN SET IN cpu_fork!\n");
#endif

	/*
	 * create the child's kernel stack, from scratch.
	 */
	{
		struct trapframe *p2tf;

		/*
		 * Pick a stack pointer, leaving room for a trapframe;
		 * copy trapframe from parent so return to user mode
		 * will be to right address, with correct registers.
		 */
		p2tf = p2->p_md.md_tf = (struct trapframe *)
		    ((char *)p2->p_addr + USPACE - sizeof(struct trapframe));
		bcopy(p1->p_md.md_tf, p2->p_md.md_tf,
		    sizeof(struct trapframe));

		/*
		 * Set up return-value registers as fork() libc stub expects.
		 */
		p2tf->tf_regs[FRAME_V0] = p1->p_pid;	/* parent's pid */
		p2tf->tf_regs[FRAME_A3] = 0;		/* no error */
		p2tf->tf_regs[FRAME_A4] = 1;		/* is child */

		/*
		 * Arrange for continuation at child_return(), which
		 * will return to exception_return().  Note that the child
		 * process doesn't stay in the kernel for long!
		 *
		 * This is an inlined version of cpu_set_kpc.
		 */
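		/*
		 * cpu_switch() reloads s0-s6 from pcb_context[0..6] and
		 * ra from pcb_context[7]; switch_trampoline() then jumps
		 * via s0 with s2 as the argument and s1 as the return
		 * address, so the child runs child_return(p2) and
		 * "returns" through exception_return().
		 */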
		up->u_pcb.pcb_hw.apcb_ksp = (u_int64_t)p2tf;
		up->u_pcb.pcb_context[0] =
		    (u_int64_t)child_return;		/* s0: pc */
		up->u_pcb.pcb_context[1] =
		    (u_int64_t)exception_return;	/* s1: ra */
		up->u_pcb.pcb_context[2] = (u_long) p2;	/* s2: a0 */
		up->u_pcb.pcb_context[7] =
		    (u_int64_t)switch_trampoline;	/* ra: assembly magic */
	}
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(p, func, arg)
	struct proc *p;
	void (*func) __P((void *));
	void *arg;
{
	/*
	 * Note that the trap frame follows the args, so the function
	 * is really called like this:  func(arg, frame);
	 */
	p->p_addr->u_pcb.pcb_context[0] = (u_long) func;
	p->p_addr->u_pcb.pcb_context[2] = (u_long) arg;
}
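
/*
 * Example (illustrative sketch only): after fork1() has created a
 * process p2 that is never to return to user mode, a kernel-thread
 * creator could redirect it with
 *
 *	cpu_set_fork_handler(p2, mythread_main, sc);
 *
 * where mythread_main() and sc are hypothetical names; p2 then starts
 * life in mythread_main(sc, frame) instead of returning to user mode.
 */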

/*
 * cpu_exit is called as the last action during exit.
 * We block interrupts and switch away for the last time; the dead
 * process's remaining resources are reclaimed later via cpu_wait().
 * cpu_switch() never returns here, so the panic is only a safety net.
 */
void
cpu_exit(p)
	register struct proc *p;
{
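	/* Let go of any FP state the exiting process still owns. */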
	if (p == fpcurproc)
		fpcurproc = NULL;

	(void) splhigh();
	cnt.v_swtch++;
	cpu_switch(p);
	panic("cpu_exit");
}

void
cpu_wait(p)
	struct proc *p;
{
	/* drop per-process resources */
	pmap_dispose_proc(p);

	/* and clean-out the vmspace */
	vmspace_free(p->p_vmspace);
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
int
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{

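	/*
	 * Write the whole u-area (struct user, including the pcb and
	 * kernel stack: UPAGES pages at p_addr) at the front of the
	 * core file.
	 */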
	return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
	    (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL,
	    p));
}

#ifdef notyet
static void
setredzone(pte, vaddr)
	u_short *pte;
	caddr_t vaddr;
{
/* eventually do this by setting up an expand-down stack segment
   for ss0: selector, allowing stack access down to top of u.
   this means though that protection violations need to be handled
   thru a double fault exception that must do an integral task
   switch to a known good context, within which a dump can be
   taken. a sensible scheme might be to save the initial context
   used by sched (that has physical memory mapped 1:1 at bottom)
   and take the dump while still in mapped mode */
}
#endif

/*
 * Map an IO request into kernel virtual address space.
 *
 * All requests are (re)mapped into kernel VA space.
 * Notice that we use b_bufsize for the size of the buffer
 * to be mapped.  b_bcount might be modified by the driver.
 */
void
vmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr, v, kva;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page(bp->b_data);
	    addr < bp->b_data + bp->b_bufsize;
	    addr += PAGE_SIZE, v += PAGE_SIZE) {
		/*
		 * Do the vm_fault if needed; do the copy-on-write thing
		 * when reading stuff off device into memory.
		 */
		vm_fault_quick(addr,
			(bp->b_flags&B_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
		pa = trunc_page(pmap_kextract((vm_offset_t) addr));
		if (pa == 0)
			panic("vmapbuf: page not present");
		vm_page_hold(PHYS_TO_VM_PAGE(pa));
		pmap_kenter((vm_offset_t) v, pa);
	}

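	/*
	 * On entry b_saveaddr held the kernel VA reserved for the
	 * mapping (set up by the caller); swap it with b_data,
	 * preserving the page offset, so the driver sees the kernel
	 * mapping while vunmapbuf() can still restore the original
	 * user address.
	 */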
	kva = bp->b_saveaddr;
	bp->b_saveaddr = bp->b_data;
	bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
}

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_data.
 */
void
vunmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr;
	vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	for (addr = (caddr_t)trunc_page(bp->b_data);
	    addr < bp->b_data + bp->b_bufsize;
	    addr += PAGE_SIZE) {
		pa = trunc_page(pmap_kextract((vm_offset_t) addr));
		pmap_kremove((vm_offset_t) addr);
		vm_page_unhold(PHYS_TO_VM_PAGE(pa));
	}

	bp->b_data = bp->b_saveaddr;
}

/*
 * Force a reset of the machine by dropping back into the console PROM;
 * prom_halt(0) requests a reboot rather than a halt.
 */
void
cpu_reset()
{
	prom_halt(0);
}

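/*
 * Grow the user stack to include the address sp.  Returns 1 on success
 * and 0 on failure, converting vm_map_growstack()'s KERN_* status into
 * a boolean.
 */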
int
grow_stack(p, sp)
	struct proc *p;
	size_t sp;
{
	int rv;

	rv = vm_map_growstack(p, sp);
	if (rv != KERN_SUCCESS)
		return (0);

	return (1);
}

static int cnt_prezero;

SYSCTL_INT(_machdep, OID_AUTO, cnt_prezero, CTLFLAG_RD, &cnt_prezero, 0, "");

/*
 * Implement the pre-zeroed page mechanism.
 * This routine is called from the idle loop.
 */

#define ZIDLE_LO(v)    ((v) * 2 / 3)
#define ZIDLE_HI(v)    ((v) * 4 / 5)
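
/*
 * Hysteresis: once started, keep zeroing until ZIDLE_HI (4/5) of the
 * free pages are PG_ZERO'd, then stop until the zeroed pool drains
 * below ZIDLE_LO (2/3), so the idle loop does not flip in and out of
 * zeroing around a single threshold.
 */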

int
vm_page_zero_idle()
{
	static int free_rover;
	static int zero_state;
	vm_page_t m;
	int s;

	/*
	 * Attempt to maintain approximately 1/2 of our free pages in a
	 * PG_ZERO'd state.   Add some hysteresis to (attempt to) avoid
	 * generally zeroing a page when the system is near steady-state.
	 * Otherwise we might get 'flutter' during disk I/O / IPC or
	 * fast sleeps.  We also do not want to be continuously zeroing
	 * pages because doing so may flush our L1 and L2 caches too much.
	 */

	if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
		return(0);
	if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
		return(0);

#ifdef SMP
	if (try_mplock()) {
#endif
		s = splvm();
		m = vm_page_list_find(PQ_FREE, free_rover, FALSE);
		zero_state = 0;
		if (m != NULL && (m->flags & PG_ZERO) == 0) {
			--(*vm_page_queues[m->queue].lcnt);
			TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
			m->queue = PQ_NONE;
			splx(s);
#if 0
			rel_mplock();
#endif
			pmap_zero_page(VM_PAGE_TO_PHYS(m));
#if 0
			get_mplock();
#endif
			(void)splvm();
			vm_page_flag_set(m, PG_ZERO);
			m->queue = PQ_FREE + m->pc;
			++(*vm_page_queues[m->queue].lcnt);
			TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m,
			    pageq);
			++vm_page_zero_count;
			++cnt_prezero;
			if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
				zero_state = 1;
		}
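		/*
		 * Advance the rover by a prime stride so successive
		 * calls walk the PQ_FREE queues (page colors) evenly.
		 */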
		free_rover = (free_rover + PQ_PRIME3) & PQ_L2_MASK;
		splx(s);
#ifdef SMP
		rel_mplock();
#endif
		return (1);
#ifdef SMP
	}
#endif
	return (0);
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm()
{
#if 0
	if (busdma_swi_pending != 0)
		busdma_swi();
#endif
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */
int
is_physical_memory(addr)
	vm_offset_t addr;
{
	/*
	 * stuff other tests for known memory-mapped devices (PCI?)
	 * here
	 */

	return 1;
48736865Sdfr}
488