mmu_oea.c revision 131808
177957Sbenno/*
290643Sbenno * Copyright (c) 2001 The NetBSD Foundation, Inc.
390643Sbenno * All rights reserved.
490643Sbenno *
590643Sbenno * This code is derived from software contributed to The NetBSD Foundation
690643Sbenno * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
790643Sbenno *
890643Sbenno * Redistribution and use in source and binary forms, with or without
990643Sbenno * modification, are permitted provided that the following conditions
1090643Sbenno * are met:
1190643Sbenno * 1. Redistributions of source code must retain the above copyright
1290643Sbenno *    notice, this list of conditions and the following disclaimer.
1390643Sbenno * 2. Redistributions in binary form must reproduce the above copyright
1490643Sbenno *    notice, this list of conditions and the following disclaimer in the
1590643Sbenno *    documentation and/or other materials provided with the distribution.
1690643Sbenno * 3. All advertising materials mentioning features or use of this software
1790643Sbenno *    must display the following acknowledgement:
1890643Sbenno *        This product includes software developed by the NetBSD
1990643Sbenno *        Foundation, Inc. and its contributors.
2090643Sbenno * 4. Neither the name of The NetBSD Foundation nor the names of its
2190643Sbenno *    contributors may be used to endorse or promote products derived
2290643Sbenno *    from this software without specific prior written permission.
2390643Sbenno *
2490643Sbenno * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
2590643Sbenno * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
2690643Sbenno * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2790643Sbenno * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
2890643Sbenno * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2990643Sbenno * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
3090643Sbenno * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
3190643Sbenno * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
3290643Sbenno * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
3390643Sbenno * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
3490643Sbenno * POSSIBILITY OF SUCH DAMAGE.
3590643Sbenno */
3690643Sbenno/*
3777957Sbenno * Copyright (C) 1995, 1996 Wolfgang Solfrank.
3877957Sbenno * Copyright (C) 1995, 1996 TooLs GmbH.
3977957Sbenno * All rights reserved.
4077957Sbenno *
4177957Sbenno * Redistribution and use in source and binary forms, with or without
4277957Sbenno * modification, are permitted provided that the following conditions
4377957Sbenno * are met:
4477957Sbenno * 1. Redistributions of source code must retain the above copyright
4577957Sbenno *    notice, this list of conditions and the following disclaimer.
4677957Sbenno * 2. Redistributions in binary form must reproduce the above copyright
4777957Sbenno *    notice, this list of conditions and the following disclaimer in the
4877957Sbenno *    documentation and/or other materials provided with the distribution.
4977957Sbenno * 3. All advertising materials mentioning features or use of this software
5077957Sbenno *    must display the following acknowledgement:
5177957Sbenno *	This product includes software developed by TooLs GmbH.
5277957Sbenno * 4. The name of TooLs GmbH may not be used to endorse or promote products
5377957Sbenno *    derived from this software without specific prior written permission.
5477957Sbenno *
5577957Sbenno * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
5677957Sbenno * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
5777957Sbenno * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
5877957Sbenno * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
5977957Sbenno * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
6077957Sbenno * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
6177957Sbenno * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
6277957Sbenno * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
6377957Sbenno * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
6477957Sbenno * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6577957Sbenno *
6678880Sbenno * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
6777957Sbenno */
6877957Sbenno/*
6977957Sbenno * Copyright (C) 2001 Benno Rice.
7077957Sbenno * All rights reserved.
7177957Sbenno *
7277957Sbenno * Redistribution and use in source and binary forms, with or without
7377957Sbenno * modification, are permitted provided that the following conditions
7477957Sbenno * are met:
7577957Sbenno * 1. Redistributions of source code must retain the above copyright
7677957Sbenno *    notice, this list of conditions and the following disclaimer.
7777957Sbenno * 2. Redistributions in binary form must reproduce the above copyright
7877957Sbenno *    notice, this list of conditions and the following disclaimer in the
7977957Sbenno *    documentation and/or other materials provided with the distribution.
8077957Sbenno *
8177957Sbenno * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
8277957Sbenno * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
8377957Sbenno * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
8477957Sbenno * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
8577957Sbenno * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
8677957Sbenno * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
8777957Sbenno * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
8877957Sbenno * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
8977957Sbenno * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
9077957Sbenno * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9177957Sbenno */
9277957Sbenno
93113038Sobrien#include <sys/cdefs.h>
94113038Sobrien__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea.c 131808 2004-07-08 12:47:36Z grehan $");
9577957Sbenno
9690643Sbenno/*
9790643Sbenno * Manages physical address maps.
9890643Sbenno *
9990643Sbenno * In addition to hardware address maps, this module is called upon to
10090643Sbenno * provide software-use-only maps which may or may not be stored in the
10190643Sbenno * same form as hardware maps.  These pseudo-maps are used to store
10290643Sbenno * intermediate results from copy operations to and from address spaces.
10390643Sbenno *
10490643Sbenno * Since the information managed by this module is also stored by the
10590643Sbenno * logical address mapping module, this module may throw away valid virtual
10690643Sbenno * to physical mappings at almost any time.  However, invalidations of
10790643Sbenno * mappings must be done as requested.
10890643Sbenno *
10990643Sbenno * In order to cope with hardware architectures which make virtual to
11090643Sbenno * physical map invalidates expensive, this module may delay invalidate
11190643Sbenno * or reduced protection operations until such time as they are actually
11290643Sbenno * necessary.  This module is given full information as to which processors
11390643Sbenno * are currently using which maps, and to when physical maps must be made
11490643Sbenno * correct.
11590643Sbenno */
11690643Sbenno
117118239Speter#include "opt_kstack_pages.h"
118118239Speter
11977957Sbenno#include <sys/param.h>
12080431Speter#include <sys/kernel.h>
12190643Sbenno#include <sys/ktr.h>
12290643Sbenno#include <sys/lock.h>
12390643Sbenno#include <sys/msgbuf.h>
12490643Sbenno#include <sys/mutex.h>
12577957Sbenno#include <sys/proc.h>
12690643Sbenno#include <sys/sysctl.h>
12790643Sbenno#include <sys/systm.h>
12877957Sbenno#include <sys/vmmeter.h>
12977957Sbenno
13090643Sbenno#include <dev/ofw/openfirm.h>
13190643Sbenno
13290643Sbenno#include <vm/vm.h>
13377957Sbenno#include <vm/vm_param.h>
13477957Sbenno#include <vm/vm_kern.h>
13577957Sbenno#include <vm/vm_page.h>
13677957Sbenno#include <vm/vm_map.h>
13777957Sbenno#include <vm/vm_object.h>
13877957Sbenno#include <vm/vm_extern.h>
13977957Sbenno#include <vm/vm_pageout.h>
14077957Sbenno#include <vm/vm_pager.h>
14192847Sjeff#include <vm/uma.h>
14277957Sbenno
143125687Sgrehan#include <machine/cpu.h>
14497346Sbenno#include <machine/powerpc.h>
14583730Smp#include <machine/bat.h>
14690643Sbenno#include <machine/frame.h>
14790643Sbenno#include <machine/md_var.h>
14890643Sbenno#include <machine/psl.h>
14977957Sbenno#include <machine/pte.h>
15090643Sbenno#include <machine/sr.h>
15177957Sbenno
15290643Sbenno#define	PMAP_DEBUG
15377957Sbenno
15490643Sbenno#define TODO	panic("%s: not implemented", __func__);
15577957Sbenno
15690643Sbenno#define	PMAP_LOCK(pm)
15790643Sbenno#define	PMAP_UNLOCK(pm)
15890643Sbenno
15990643Sbenno#define	TLBIE(va)	__asm __volatile("tlbie %0" :: "r"(va))
16090643Sbenno#define	TLBSYNC()	__asm __volatile("tlbsync");
16190643Sbenno#define	SYNC()		__asm __volatile("sync");
16290643Sbenno#define	EIEIO()		__asm __volatile("eieio");
16390643Sbenno
16490643Sbenno#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
16590643Sbenno#define	VSID_TO_SR(vsid)	((vsid) & 0xf)
16690643Sbenno#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
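/*
 * The VSID layout above packs the segment register number into the low
 * four bits and the 20-bit hash above it.  For example, VSID_MAKE(3, 0x12345)
 * yields 0x123453, from which VSID_TO_SR() recovers 3 and VSID_TO_HASH()
 * recovers 0x12345.
 */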
16790643Sbenno
16890643Sbenno#define	PVO_PTEGIDX_MASK	0x0007		/* which PTEG slot */
16990643Sbenno#define	PVO_PTEGIDX_VALID	0x0008		/* slot is valid */
17090643Sbenno#define	PVO_WIRED		0x0010		/* PVO entry is wired */
17190643Sbenno#define	PVO_MANAGED		0x0020		/* PVO entry is managed */
17290643Sbenno#define	PVO_EXECUTABLE		0x0040		/* PVO entry is executable */
17394835Sbenno#define	PVO_BOOTSTRAP		0x0080		/* PVO entry allocated during
17492521Sbenno						   bootstrap */
17590643Sbenno#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
17690643Sbenno#define	PVO_ISEXECUTABLE(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
17790643Sbenno#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
17890643Sbenno#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
17990643Sbenno#define	PVO_PTEGIDX_CLR(pvo)	\
18090643Sbenno	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
18190643Sbenno#define	PVO_PTEGIDX_SET(pvo, i)	\
18290643Sbenno	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
18390643Sbenno
18490643Sbenno#define	PMAP_PVO_CHECK(pvo)
18590643Sbenno
18690643Sbennostruct ofw_map {
18790643Sbenno	vm_offset_t	om_va;
18890643Sbenno	vm_size_t	om_len;
18990643Sbenno	vm_offset_t	om_pa;
19090643Sbenno	u_int		om_mode;
19190643Sbenno};
19277957Sbenno
19390643Sbennoint	pmap_bootstrapped = 0;
19477957Sbenno
19590643Sbenno/*
19690643Sbenno * Virtual and physical address of message buffer.
19790643Sbenno */
19890643Sbennostruct		msgbuf *msgbufp;
19990643Sbennovm_offset_t	msgbuf_phys;
20077957Sbenno
201110172Sgrehanint pmap_pagedaemon_waken;
202110172Sgrehan
20390643Sbenno/*
20490643Sbenno * Map of physical memory regions.
20590643Sbenno */
20690643Sbennovm_offset_t	phys_avail[128];
20790643Sbennou_int		phys_avail_count;
20897346Sbennostatic struct	mem_region *regions;
20997346Sbennostatic struct	mem_region *pregions;
21097346Sbennoint		regions_sz, pregions_sz;
211100319Sbennostatic struct	ofw_map *translations;
21277957Sbenno
21390643Sbenno/*
21490643Sbenno * First and last available kernel virtual addresses.
21590643Sbenno */
21690643Sbennovm_offset_t virtual_avail;
21790643Sbennovm_offset_t virtual_end;
21890643Sbennovm_offset_t kernel_vm_end;
21977957Sbenno
22090643Sbenno/*
22190643Sbenno * Kernel pmap.
22290643Sbenno */
22390643Sbennostruct pmap kernel_pmap_store;
22490643Sbennoextern struct pmap ofw_pmap;
22577957Sbenno
22690643Sbenno/*
22790643Sbenno * PTEG data.
22890643Sbenno */
22990643Sbennostatic struct	pteg *pmap_pteg_table;
23090643Sbennou_int		pmap_pteg_count;
23190643Sbennou_int		pmap_pteg_mask;
23277957Sbenno
23390643Sbenno/*
23490643Sbenno * PVO data.
23590643Sbenno */
23690643Sbennostruct	pvo_head *pmap_pvo_table;		/* pvo entries by pteg index */
23790643Sbennostruct	pvo_head pmap_pvo_kunmanaged =
23890643Sbenno    LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged);	/* list of unmanaged pages */
23990643Sbennostruct	pvo_head pmap_pvo_unmanaged =
24090643Sbenno    LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);	/* list of unmanaged pages */
24177957Sbenno
24292847Sjeffuma_zone_t	pmap_upvo_zone;	/* zone for pvo entries for unmanaged pages */
24392847Sjeffuma_zone_t	pmap_mpvo_zone;	/* zone for pvo entries for managed pages */
24477957Sbenno
24599037Sbenno#define	BPVO_POOL_SIZE	32768
24692521Sbennostatic struct	pvo_entry *pmap_bpvo_pool;
24799037Sbennostatic int	pmap_bpvo_pool_index = 0;
24877957Sbenno
24990643Sbenno#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
25090643Sbennostatic u_int	pmap_vsid_bitmap[NPMAPS / VSID_NBPW];
25177957Sbenno
25290643Sbennostatic boolean_t pmap_initialized = FALSE;
25377957Sbenno
25490643Sbenno/*
25590643Sbenno * Statistics.
25690643Sbenno */
25790643Sbennou_int	pmap_pte_valid = 0;
25890643Sbennou_int	pmap_pte_overflow = 0;
25990643Sbennou_int	pmap_pte_replacements = 0;
26090643Sbennou_int	pmap_pvo_entries = 0;
26190643Sbennou_int	pmap_pvo_enter_calls = 0;
26290643Sbennou_int	pmap_pvo_remove_calls = 0;
26390643Sbennou_int	pmap_pte_spills = 0;
26490643SbennoSYSCTL_INT(_machdep, OID_AUTO, pmap_pte_valid, CTLFLAG_RD, &pmap_pte_valid,
26590643Sbenno    0, "");
26690643SbennoSYSCTL_INT(_machdep, OID_AUTO, pmap_pte_overflow, CTLFLAG_RD,
26790643Sbenno    &pmap_pte_overflow, 0, "");
26890643SbennoSYSCTL_INT(_machdep, OID_AUTO, pmap_pte_replacements, CTLFLAG_RD,
26990643Sbenno    &pmap_pte_replacements, 0, "");
27090643SbennoSYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_entries, CTLFLAG_RD, &pmap_pvo_entries,
27190643Sbenno    0, "");
27290643SbennoSYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_enter_calls, CTLFLAG_RD,
27390643Sbenno    &pmap_pvo_enter_calls, 0, "");
27490643SbennoSYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_remove_calls, CTLFLAG_RD,
27590643Sbenno    &pmap_pvo_remove_calls, 0, "");
27690643SbennoSYSCTL_INT(_machdep, OID_AUTO, pmap_pte_spills, CTLFLAG_RD,
27790643Sbenno    &pmap_pte_spills, 0, "");
27877957Sbenno
27990643Sbennostruct	pvo_entry *pmap_pvo_zeropage;
28077957Sbenno
28190643Sbennovm_offset_t	pmap_rkva_start = VM_MIN_KERNEL_ADDRESS;
28290643Sbennou_int		pmap_rkva_count = 4;
28377957Sbenno
28490643Sbenno/*
28590643Sbenno * Allocate physical memory for use in pmap_bootstrap.
28690643Sbenno */
28790643Sbennostatic vm_offset_t	pmap_bootstrap_alloc(vm_size_t, u_int);
28877957Sbenno
28990643Sbenno/*
29090643Sbenno * PTE calls.
29190643Sbenno */
29290643Sbennostatic int		pmap_pte_insert(u_int, struct pte *);
29377957Sbenno
29477957Sbenno/*
29590643Sbenno * PVO calls.
29677957Sbenno */
29792847Sjeffstatic int	pmap_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
29890643Sbenno		    vm_offset_t, vm_offset_t, u_int, int);
29990643Sbennostatic void	pmap_pvo_remove(struct pvo_entry *, int);
30090643Sbennostatic struct	pvo_entry *pmap_pvo_find_va(pmap_t, vm_offset_t, int *);
30190643Sbennostatic struct	pte *pmap_pvo_to_pte(const struct pvo_entry *, int);
30290643Sbenno
30390643Sbenno/*
30490643Sbenno * Utility routines.
30590643Sbenno */
30690643Sbennostatic struct		pvo_entry *pmap_rkva_alloc(void);
30790643Sbennostatic void		pmap_pa_map(struct pvo_entry *, vm_offset_t,
30890643Sbenno			    struct pte *, int *);
30990643Sbennostatic void		pmap_pa_unmap(struct pvo_entry *, struct pte *, int *);
31090643Sbennostatic void		pmap_syncicache(vm_offset_t, vm_size_t);
31190643Sbennostatic boolean_t	pmap_query_bit(vm_page_t, int);
312110172Sgrehanstatic u_int		pmap_clear_bit(vm_page_t, int, int *);
31390643Sbennostatic void		tlbia(void);
31490643Sbenno
31590643Sbennostatic __inline int
31690643Sbennova_to_sr(u_int *sr, vm_offset_t va)
31777957Sbenno{
31890643Sbenno	return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
31990643Sbenno}
32077957Sbenno
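/*
 * Compute the primary PTEG index for a virtual address: the VSID taken
 * from the segment register is XORed with the page index bits of the
 * address, and the result is masked to the size of the PTEG table.
 */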
32190643Sbennostatic __inline u_int
32290643Sbennova_to_pteg(u_int sr, vm_offset_t addr)
32390643Sbenno{
32490643Sbenno	u_int hash;
32590643Sbenno
32690643Sbenno	hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
32790643Sbenno	    ADDR_PIDX_SHFT);
32890643Sbenno	return (hash & pmap_pteg_mask);
32977957Sbenno}
33077957Sbenno
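/*
 * Look up the PVO list head for a physical address.  Managed pages use the
 * list hung off their vm_page; anything else falls back to the shared
 * "unmanaged" list.  The vm_page pointer is optionally returned via pg_p.
 */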
33190643Sbennostatic __inline struct pvo_head *
33296250Sbennopa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p)
33377957Sbenno{
33490643Sbenno	struct	vm_page *pg;
33577957Sbenno
33690643Sbenno	pg = PHYS_TO_VM_PAGE(pa);
33790643Sbenno
33896250Sbenno	if (pg_p != NULL)
33996250Sbenno		*pg_p = pg;
34096250Sbenno
34190643Sbenno	if (pg == NULL)
34290643Sbenno		return (&pmap_pvo_unmanaged);
34390643Sbenno
34490643Sbenno	return (&pg->md.mdpg_pvoh);
34577957Sbenno}
34677957Sbenno
34790643Sbennostatic __inline struct pvo_head *
34890643Sbennovm_page_to_pvoh(vm_page_t m)
34990643Sbenno{
35090643Sbenno
35190643Sbenno	return (&m->md.mdpg_pvoh);
35290643Sbenno}
35390643Sbenno
35477957Sbennostatic __inline void
35590643Sbennopmap_attr_clear(vm_page_t m, int ptebit)
35677957Sbenno{
35790643Sbenno
35890643Sbenno	m->md.mdpg_attrs &= ~ptebit;
35977957Sbenno}
36077957Sbenno
36177957Sbennostatic __inline int
36290643Sbennopmap_attr_fetch(vm_page_t m)
36377957Sbenno{
36477957Sbenno
36590643Sbenno	return (m->md.mdpg_attrs);
36677957Sbenno}
36777957Sbenno
36890643Sbennostatic __inline void
36990643Sbennopmap_attr_save(vm_page_t m, int ptebit)
37090643Sbenno{
37190643Sbenno
37290643Sbenno	m->md.mdpg_attrs |= ptebit;
37390643Sbenno}
37490643Sbenno
37577957Sbennostatic __inline int
37690643Sbennopmap_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
37777957Sbenno{
37890643Sbenno	if (pt->pte_hi == pvo_pt->pte_hi)
37990643Sbenno		return (1);
38090643Sbenno
38190643Sbenno	return (0);
38277957Sbenno}
38377957Sbenno
38477957Sbennostatic __inline int
38590643Sbennopmap_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
38677957Sbenno{
38790643Sbenno	return (pt->pte_hi & ~PTE_VALID) ==
38890643Sbenno	    (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
38990643Sbenno	    ((va >> ADDR_API_SHFT) & PTE_API) | which);
39090643Sbenno}
39177957Sbenno
39290643Sbennostatic __inline void
39390643Sbennopmap_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
39490643Sbenno{
39590643Sbenno	/*
39690643Sbenno	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
39790643Sbenno	 * set when the real pte is set in memory.
39890643Sbenno	 *
39990643Sbenno	 * Note: Don't set the valid bit for correct operation of tlb update.
40090643Sbenno	 */
40190643Sbenno	pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
40290643Sbenno	    (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
40390643Sbenno	pt->pte_lo = pte_lo;
40477957Sbenno}
40577957Sbenno
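/*
 * Pull the referenced and changed bits out of the hardware PTE and
 * accumulate them into the cached copy held in the PVO.
 */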
40690643Sbennostatic __inline void
40790643Sbennopmap_pte_synch(struct pte *pt, struct pte *pvo_pt)
40877957Sbenno{
40977957Sbenno
41090643Sbenno	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
41177957Sbenno}
41277957Sbenno
41390643Sbennostatic __inline void
41490643Sbennopmap_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
41577957Sbenno{
41677957Sbenno
41790643Sbenno	/*
41890643Sbenno	 * As shown in Section 7.6.3.2.3
41990643Sbenno	 */
42090643Sbenno	pt->pte_lo &= ~ptebit;
42190643Sbenno	TLBIE(va);
42290643Sbenno	EIEIO();
42390643Sbenno	TLBSYNC();
42490643Sbenno	SYNC();
42577957Sbenno}
42677957Sbenno
42790643Sbennostatic __inline void
42890643Sbennopmap_pte_set(struct pte *pt, struct pte *pvo_pt)
42977957Sbenno{
43077957Sbenno
43190643Sbenno	pvo_pt->pte_hi |= PTE_VALID;
43290643Sbenno
43377957Sbenno	/*
43490643Sbenno	 * Update the PTE as defined in section 7.6.3.1.
43590643Sbenno	 * Note that the REF/CHG bits are from pvo_pt and thus should have
43690643Sbenno	 * been saved so this routine can restore them (if desired).
43777957Sbenno	 */
43890643Sbenno	pt->pte_lo = pvo_pt->pte_lo;
43990643Sbenno	EIEIO();
44090643Sbenno	pt->pte_hi = pvo_pt->pte_hi;
44190643Sbenno	SYNC();
44290643Sbenno	pmap_pte_valid++;
44390643Sbenno}
44477957Sbenno
44590643Sbennostatic __inline void
44690643Sbennopmap_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
44790643Sbenno{
44890643Sbenno
44990643Sbenno	pvo_pt->pte_hi &= ~PTE_VALID;
45090643Sbenno
45177957Sbenno	/*
45290643Sbenno	 * Force the ref & chg bits back into the PTEs.
45377957Sbenno	 */
45490643Sbenno	SYNC();
45577957Sbenno
45690643Sbenno	/*
45790643Sbenno	 * Invalidate the pte.
45890643Sbenno	 */
45990643Sbenno	pt->pte_hi &= ~PTE_VALID;
46077957Sbenno
46190643Sbenno	SYNC();
46290643Sbenno	TLBIE(va);
46390643Sbenno	EIEIO();
46490643Sbenno	TLBSYNC();
46590643Sbenno	SYNC();
46677957Sbenno
46790643Sbenno	/*
46890643Sbenno	 * Save the ref & chg bits.
46990643Sbenno	 */
47090643Sbenno	pmap_pte_synch(pt, pvo_pt);
47190643Sbenno	pmap_pte_valid--;
47277957Sbenno}
47377957Sbenno
47490643Sbennostatic __inline void
47590643Sbennopmap_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
47690643Sbenno{
47790643Sbenno
47890643Sbenno	/*
47990643Sbenno	 * Invalidate the PTE
48090643Sbenno	 */
48190643Sbenno	pmap_pte_unset(pt, pvo_pt, va);
48290643Sbenno	pmap_pte_set(pt, pvo_pt);
48390643Sbenno}
48490643Sbenno
48577957Sbenno/*
48690643Sbenno * Quick sort callout for comparing memory regions.
48777957Sbenno */
48890643Sbennostatic int	mr_cmp(const void *a, const void *b);
48990643Sbennostatic int	om_cmp(const void *a, const void *b);
49090643Sbenno
49190643Sbennostatic int
49290643Sbennomr_cmp(const void *a, const void *b)
49377957Sbenno{
49490643Sbenno	const struct	mem_region *regiona;
49590643Sbenno	const struct	mem_region *regionb;
49677957Sbenno
49790643Sbenno	regiona = a;
49890643Sbenno	regionb = b;
49990643Sbenno	if (regiona->mr_start < regionb->mr_start)
50090643Sbenno		return (-1);
50190643Sbenno	else if (regiona->mr_start > regionb->mr_start)
50290643Sbenno		return (1);
50390643Sbenno	else
50490643Sbenno		return (0);
50590643Sbenno}
50677957Sbenno
50790643Sbennostatic int
50890643Sbennoom_cmp(const void *a, const void *b)
50990643Sbenno{
51090643Sbenno	const struct	ofw_map *mapa;
51190643Sbenno	const struct	ofw_map *mapb;
51290643Sbenno
51390643Sbenno	mapa = a;
51490643Sbenno	mapb = b;
51590643Sbenno	if (mapa->om_pa < mapb->om_pa)
51690643Sbenno		return (-1);
51790643Sbenno	else if (mapa->om_pa > mapb->om_pa)
51890643Sbenno		return (1);
51990643Sbenno	else
52090643Sbenno		return (0);
52177957Sbenno}
52277957Sbenno
52377957Sbennovoid
52490643Sbennopmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
52577957Sbenno{
52697346Sbenno	ihandle_t	mmui;
52790643Sbenno	phandle_t	chosen, mmu;
52890643Sbenno	int		sz;
52990643Sbenno	int		i, j;
530103604Sgrehan	int		ofw_mappings;
53191793Sbenno	vm_size_t	size, physsz;
53290643Sbenno	vm_offset_t	pa, va, off;
53390643Sbenno	u_int		batl, batu;
53477957Sbenno
53599037Sbenno        /*
536103604Sgrehan         * Set up BAT0 to map the lowest 256 MB area
53799037Sbenno         */
53899037Sbenno        battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
53999037Sbenno        battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
54099037Sbenno
54199037Sbenno        /*
54299037Sbenno         * Map PCI memory space.
54399037Sbenno         */
54499037Sbenno        battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
54599037Sbenno        battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
54699037Sbenno
54799037Sbenno        battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
54899037Sbenno        battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);
54999037Sbenno
55099037Sbenno        battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW);
55199037Sbenno        battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs);
55299037Sbenno
55399037Sbenno        battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW);
55499037Sbenno        battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs);
55599037Sbenno
55699037Sbenno        /*
55799037Sbenno         * Map obio devices.
55899037Sbenno         */
55999037Sbenno        battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW);
56099037Sbenno        battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs);
56199037Sbenno
56277957Sbenno	/*
56390643Sbenno	 * Use an IBAT and a DBAT to map the bottom segment of memory
56490643Sbenno	 * where we are.
56577957Sbenno	 */
56690643Sbenno	batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
56790643Sbenno	batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
568131808Sgrehan	__asm ("mtibatu 0,%0; mtibatl 0,%1; isync; \n"
569131808Sgrehan	       "mtdbatu 0,%0; mtdbatl 0,%1; isync"
57090643Sbenno	    :: "r"(batu), "r"(batl));
57199037Sbenno
57290643Sbenno#if 0
57399037Sbenno	/* map frame buffer */
57499037Sbenno	batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);
57599037Sbenno	batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
576131808Sgrehan	__asm ("mtdbatu 1,%0; mtdbatl 1,%1; isync"
57799037Sbenno	    :: "r"(batu), "r"(batl));
57899037Sbenno#endif
57999037Sbenno
58099037Sbenno#if 1
58199037Sbenno	/* map pci space */
58290643Sbenno	batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
58399037Sbenno	batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
584131808Sgrehan	__asm ("mtdbatu 1,%0; mtdbatl 1,%1; isync"
58590643Sbenno	    :: "r"(batu), "r"(batl));
58690643Sbenno#endif
58777957Sbenno
58877957Sbenno	/*
58990643Sbenno	 * Set the start and end of kva.
59077957Sbenno	 */
59190643Sbenno	virtual_avail = VM_MIN_KERNEL_ADDRESS;
59290643Sbenno	virtual_end = VM_MAX_KERNEL_ADDRESS;
59390643Sbenno
59497346Sbenno	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
59597346Sbenno	CTR0(KTR_PMAP, "pmap_bootstrap: physical memory");
59697346Sbenno
59797346Sbenno	qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp);
59897346Sbenno	for (i = 0; i < pregions_sz; i++) {
599103604Sgrehan		vm_offset_t pa;
600103604Sgrehan		vm_offset_t end;
601103604Sgrehan
60297346Sbenno		CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)",
60397346Sbenno			pregions[i].mr_start,
60497346Sbenno			pregions[i].mr_start + pregions[i].mr_size,
60597346Sbenno			pregions[i].mr_size);
606103604Sgrehan		/*
607103604Sgrehan		 * Install entries into the BAT table to allow all
608103604Sgrehan		 * of physmem to be covered by on-demand BAT entries.
609103604Sgrehan		 * The loop will sometimes set the same battable element
610103604Sgrehan		 * twice, but that's fine since they won't be used for
611103604Sgrehan		 * a while yet.
612103604Sgrehan		 */
613103604Sgrehan		pa = pregions[i].mr_start & 0xf0000000;
614103604Sgrehan		end = pregions[i].mr_start + pregions[i].mr_size;
615103604Sgrehan		do {
616103604Sgrehan                        u_int n = pa >> ADDR_SR_SHFT;
617103604Sgrehan
618103604Sgrehan			battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW);
619103604Sgrehan			battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs);
620103604Sgrehan			pa += SEGMENT_LENGTH;
621103604Sgrehan		} while (pa < end);
62297346Sbenno	}
62397346Sbenno
62497346Sbenno	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
62590643Sbenno		panic("pmap_bootstrap: phys_avail too small");
62697346Sbenno	qsort(regions, regions_sz, sizeof(*regions), mr_cmp);
62790643Sbenno	phys_avail_count = 0;
62891793Sbenno	physsz = 0;
62997346Sbenno	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
63090643Sbenno		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
63190643Sbenno		    regions[i].mr_start + regions[i].mr_size,
63290643Sbenno		    regions[i].mr_size);
63390643Sbenno		phys_avail[j] = regions[i].mr_start;
63490643Sbenno		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
63590643Sbenno		phys_avail_count++;
63691793Sbenno		physsz += regions[i].mr_size;
63777957Sbenno	}
63891793Sbenno	physmem = btoc(physsz);
63977957Sbenno
64077957Sbenno	/*
64190643Sbenno	 * Allocate PTEG table.
64277957Sbenno	 */
64390643Sbenno#ifdef PTEGCOUNT
64490643Sbenno	pmap_pteg_count = PTEGCOUNT;
64590643Sbenno#else
64690643Sbenno	pmap_pteg_count = 0x1000;
64777957Sbenno
64890643Sbenno	while (pmap_pteg_count < physmem)
64990643Sbenno		pmap_pteg_count <<= 1;
65077957Sbenno
65190643Sbenno	pmap_pteg_count >>= 1;
65290643Sbenno#endif /* PTEGCOUNT */
65377957Sbenno
65490643Sbenno	size = pmap_pteg_count * sizeof(struct pteg);
65590643Sbenno	CTR2(KTR_PMAP, "pmap_bootstrap: %d PTEGs, %d bytes", pmap_pteg_count,
65690643Sbenno	    size);
65790643Sbenno	pmap_pteg_table = (struct pteg *)pmap_bootstrap_alloc(size, size);
65890643Sbenno	CTR1(KTR_PMAP, "pmap_bootstrap: PTEG table at %p", pmap_pteg_table);
65990643Sbenno	bzero((void *)pmap_pteg_table, pmap_pteg_count * sizeof(struct pteg));
66090643Sbenno	pmap_pteg_mask = pmap_pteg_count - 1;
66177957Sbenno
66290643Sbenno	/*
66394839Sbenno	 * Allocate pv/overflow lists.
66490643Sbenno	 */
66590643Sbenno	size = sizeof(struct pvo_head) * pmap_pteg_count;
66690643Sbenno	pmap_pvo_table = (struct pvo_head *)pmap_bootstrap_alloc(size,
66790643Sbenno	    PAGE_SIZE);
66890643Sbenno	CTR1(KTR_PMAP, "pmap_bootstrap: PVO table at %p", pmap_pvo_table);
66990643Sbenno	for (i = 0; i < pmap_pteg_count; i++)
67090643Sbenno		LIST_INIT(&pmap_pvo_table[i]);
67177957Sbenno
67290643Sbenno	/*
67390643Sbenno	 * Allocate the message buffer.
67490643Sbenno	 */
67590643Sbenno	msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE, 0);
67677957Sbenno
67790643Sbenno	/*
67890643Sbenno	 * Initialise the unmanaged pvo pool.
67990643Sbenno	 */
68099037Sbenno	pmap_bpvo_pool = (struct pvo_entry *)pmap_bootstrap_alloc(
68199037Sbenno		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
68292521Sbenno	pmap_bpvo_pool_index = 0;
68377957Sbenno
68477957Sbenno	/*
68590643Sbenno	 * Make sure kernel vsid is allocated as well as VSID 0.
68677957Sbenno	 */
68790643Sbenno	pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
68890643Sbenno		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
68990643Sbenno	pmap_vsid_bitmap[0] |= 1;
69077957Sbenno
69190643Sbenno	/*
69290643Sbenno	 * Set up the OpenFirmware pmap and add its mappings.
69390643Sbenno	 */
69490643Sbenno	pmap_pinit(&ofw_pmap);
69590643Sbenno	ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
696126478Sgrehan	ofw_pmap.pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT;
69790643Sbenno	if ((chosen = OF_finddevice("/chosen")) == -1)
69890643Sbenno		panic("pmap_bootstrap: can't find /chosen");
69990643Sbenno	OF_getprop(chosen, "mmu", &mmui, 4);
70090643Sbenno	if ((mmu = OF_instance_to_package(mmui)) == -1)
70190643Sbenno		panic("pmap_bootstrap: can't get mmu package");
70290643Sbenno	if ((sz = OF_getproplen(mmu, "translations")) == -1)
70390643Sbenno		panic("pmap_bootstrap: can't get ofw translation count");
704100319Sbenno	translations = NULL;
705131401Sgrehan	for (i = 0; phys_avail[i] != 0; i += 2) {
706131401Sgrehan		if (phys_avail[i + 1] >= sz) {
707100319Sbenno			translations = (struct ofw_map *)phys_avail[i];
708131401Sgrehan			break;
709131401Sgrehan		}
710100319Sbenno	}
711100319Sbenno	if (translations == NULL)
712100319Sbenno		panic("pmap_bootstrap: no space to copy translations");
71390643Sbenno	bzero(translations, sz);
71490643Sbenno	if (OF_getprop(mmu, "translations", translations, sz) == -1)
71590643Sbenno		panic("pmap_bootstrap: can't get ofw translations");
71690643Sbenno	CTR0(KTR_PMAP, "pmap_bootstrap: translations");
71797346Sbenno	sz /= sizeof(*translations);
71890643Sbenno	qsort(translations, sz, sizeof (*translations), om_cmp);
719103604Sgrehan	for (i = 0, ofw_mappings = 0; i < sz; i++) {
72090643Sbenno		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
72190643Sbenno		    translations[i].om_pa, translations[i].om_va,
72290643Sbenno		    translations[i].om_len);
72377957Sbenno
724103604Sgrehan		/*
725103604Sgrehan		 * If the mapping is 1:1, let the RAM and device on-demand
726103604Sgrehan		 * BAT tables take care of the translation.
727103604Sgrehan		 */
728103604Sgrehan		if (translations[i].om_va == translations[i].om_pa)
729103604Sgrehan			continue;
73077957Sbenno
731103604Sgrehan		/* Enter the pages */
73290643Sbenno		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
73390643Sbenno			struct	vm_page m;
73477957Sbenno
73590643Sbenno			m.phys_addr = translations[i].om_pa + off;
73690643Sbenno			pmap_enter(&ofw_pmap, translations[i].om_va + off, &m,
737103604Sgrehan				   VM_PROT_ALL, 1);
738103604Sgrehan			ofw_mappings++;
73977957Sbenno		}
74077957Sbenno	}
74190643Sbenno#ifdef SMP
74290643Sbenno	TLBSYNC();
74390643Sbenno#endif
74477957Sbenno
74590643Sbenno	/*
74690643Sbenno	 * Initialize the kernel pmap (which is statically allocated).
74790643Sbenno	 */
74890643Sbenno	for (i = 0; i < 16; i++) {
74990643Sbenno		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
75077957Sbenno	}
75190643Sbenno	kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
752126478Sgrehan	kernel_pmap->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT;
75390643Sbenno	kernel_pmap->pm_active = ~0;
75477957Sbenno
75577957Sbenno	/*
75690643Sbenno	 * Allocate a kernel stack with a guard page for thread0 and map it
75790643Sbenno	 * into the kernel page map.
75877957Sbenno	 */
75990643Sbenno	pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0);
76090643Sbenno	kstack0_phys = pa;
76190643Sbenno	kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE);
76290643Sbenno	CTR2(KTR_PMAP, "pmap_bootstrap: kstack0 at %#x (%#x)", kstack0_phys,
76390643Sbenno	    kstack0);
76490643Sbenno	virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE;
76590643Sbenno	for (i = 0; i < KSTACK_PAGES; i++) {
76690643Sbenno		pa = kstack0_phys + i * PAGE_SIZE;
76790643Sbenno		va = kstack0 + i * PAGE_SIZE;
76890643Sbenno		pmap_kenter(va, pa);
76990643Sbenno		TLBIE(va);
77077957Sbenno	}
77177957Sbenno
77290643Sbenno	/*
773127875Salc	 * Calculate the last available physical address.
77490643Sbenno	 */
77590643Sbenno	for (i = 0; phys_avail[i + 2] != 0; i += 2)
77690643Sbenno		;
777128103Salc	Maxmem = powerpc_btop(phys_avail[i + 1]);
77877957Sbenno
77977957Sbenno	/*
78090643Sbenno	 * Allocate virtual address space for the message buffer.
78177957Sbenno	 */
78290643Sbenno	msgbufp = (struct msgbuf *)virtual_avail;
78390643Sbenno	virtual_avail += round_page(MSGBUF_SIZE);
78477957Sbenno
78577957Sbenno	/*
78690643Sbenno	 * Initialize hardware.
78777957Sbenno	 */
78877957Sbenno	for (i = 0; i < 16; i++) {
78994836Sbenno		mtsrin(i << ADDR_SR_SHFT, EMPTY_SEGMENT);
79077957Sbenno	}
79177957Sbenno	__asm __volatile ("mtsr %0,%1"
79290643Sbenno	    :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
79377957Sbenno	__asm __volatile ("sync; mtsdr1 %0; isync"
79490643Sbenno	    :: "r"((u_int)pmap_pteg_table | (pmap_pteg_mask >> 10)));
79577957Sbenno	tlbia();
79677957Sbenno
79790643Sbenno	pmap_bootstrapped++;
79877957Sbenno}
79977957Sbenno
80077957Sbenno/*
80190643Sbenno * Activate a user pmap.  The pmap must be activated before its address
80290643Sbenno * space can be accessed in any way.
80377957Sbenno */
80477957Sbennovoid
80590643Sbennopmap_activate(struct thread *td)
80677957Sbenno{
80796250Sbenno	pmap_t	pm, pmr;
80877957Sbenno
80977957Sbenno	/*
810103604Sgrehan	 * Load all the data we need up front to encourage the compiler to
81190643Sbenno	 * not issue any loads while we have interrupts disabled below.
81277957Sbenno	 */
81390643Sbenno	pm = &td->td_proc->p_vmspace->vm_pmap;
81477957Sbenno
81596250Sbenno	if ((pmr = (pmap_t)pmap_kextract((vm_offset_t)pm)) == NULL)
81696250Sbenno		pmr = pm;
81796250Sbenno
81890643Sbenno	pm->pm_active |= PCPU_GET(cpumask);
81996250Sbenno	PCPU_SET(curpmap, pmr);
82077957Sbenno}
82177957Sbenno
82291483Sbennovoid
82391483Sbennopmap_deactivate(struct thread *td)
82491483Sbenno{
82591483Sbenno	pmap_t	pm;
82691483Sbenno
82791483Sbenno	pm = &td->td_proc->p_vmspace->vm_pmap;
82891483Sbenno	pm->pm_active &= ~(PCPU_GET(cpumask));
82996250Sbenno	PCPU_SET(curpmap, NULL);
83091483Sbenno}
83191483Sbenno
83290643Sbennovm_offset_t
83390643Sbennopmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size)
83477957Sbenno{
83596353Sbenno
83696353Sbenno	return (va);
83777957Sbenno}
83877957Sbenno
83977957Sbennovoid
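/*
 * Set or clear the wired attribute on the mapping for the given virtual
 * address, keeping the pmap's wired-page count in step.  Addresses with
 * no existing PVO are silently ignored.
 */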
84096353Sbennopmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired)
84177957Sbenno{
84296353Sbenno	struct	pvo_entry *pvo;
84396353Sbenno
84496353Sbenno	pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
84596353Sbenno
84696353Sbenno	if (pvo != NULL) {
84796353Sbenno		if (wired) {
84896353Sbenno			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
84996353Sbenno				pm->pm_stats.wired_count++;
85096353Sbenno			pvo->pvo_vaddr |= PVO_WIRED;
85196353Sbenno		} else {
85296353Sbenno			if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
85396353Sbenno				pm->pm_stats.wired_count--;
85496353Sbenno			pvo->pvo_vaddr &= ~PVO_WIRED;
85596353Sbenno		}
85696353Sbenno	}
85777957Sbenno}
85877957Sbenno
85977957Sbennovoid
86090643Sbennopmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
86190643Sbenno	  vm_size_t len, vm_offset_t src_addr)
86277957Sbenno{
86397385Sbenno
86497385Sbenno	/*
86597385Sbenno	 * This is not needed as it's mainly an optimisation.
86697385Sbenno	 * It may want to be implemented later though.
86797385Sbenno	 */
86877957Sbenno}
86977957Sbenno
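/*
 * Copy one physical page to another.  The physical addresses are used
 * directly as kcopy() source and destination, which appears to rely on the
 * 1:1 BAT coverage of physical memory established in pmap_bootstrap().
 */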
87077957Sbennovoid
87197385Sbennopmap_copy_page(vm_page_t msrc, vm_page_t mdst)
87277957Sbenno{
87397385Sbenno	vm_offset_t	dst;
87497385Sbenno	vm_offset_t	src;
87597385Sbenno
87697385Sbenno	dst = VM_PAGE_TO_PHYS(mdst);
87797385Sbenno	src = VM_PAGE_TO_PHYS(msrc);
87897385Sbenno
87997385Sbenno	kcopy((void *)src, (void *)dst, PAGE_SIZE);
88077957Sbenno}
88177957Sbenno
88277957Sbenno/*
88390643Sbenno * Zero a page of physical memory by temporarily mapping it into the tlb.
88477957Sbenno */
88577957Sbennovoid
88694777Speterpmap_zero_page(vm_page_t m)
88777957Sbenno{
88894777Speter	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
889110172Sgrehan	caddr_t va;
89077957Sbenno
89190643Sbenno	if (pa < SEGMENT_LENGTH) {
89290643Sbenno		va = (caddr_t) pa;
89390643Sbenno	} else if (pmap_initialized) {
89490643Sbenno		if (pmap_pvo_zeropage == NULL)
89590643Sbenno			pmap_pvo_zeropage = pmap_rkva_alloc();
89690643Sbenno		pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL);
89790643Sbenno		va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage);
89890643Sbenno	} else {
89990643Sbenno		panic("pmap_zero_page: can't zero pa %#x", pa);
90077957Sbenno	}
90190643Sbenno
90290643Sbenno	bzero(va, PAGE_SIZE);
90390643Sbenno
90490643Sbenno	if (pa >= SEGMENT_LENGTH)
90590643Sbenno		pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL);
90677957Sbenno}
90777957Sbenno
90877957Sbennovoid
90994777Speterpmap_zero_page_area(vm_page_t m, int off, int size)
91077957Sbenno{
91199666Sbenno	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
912103604Sgrehan	caddr_t va;
91399666Sbenno
91499666Sbenno	if (pa < SEGMENT_LENGTH) {
91599666Sbenno		va = (caddr_t) pa;
91699666Sbenno	} else if (pmap_initialized) {
91799666Sbenno		if (pmap_pvo_zeropage == NULL)
91899666Sbenno			pmap_pvo_zeropage = pmap_rkva_alloc();
91999666Sbenno		pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL);
92099666Sbenno		va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage);
92199666Sbenno	} else {
92299666Sbenno		panic("pmap_zero_page: can't zero pa %#x", pa);
92399666Sbenno	}
92499666Sbenno
925103604Sgrehan	bzero(va + off, size);
92699666Sbenno
92799666Sbenno	if (pa >= SEGMENT_LENGTH)
92899666Sbenno		pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL);
92977957Sbenno}
93077957Sbenno
93199571Spetervoid
93299571Speterpmap_zero_page_idle(vm_page_t m)
93399571Speter{
93499571Speter
93599571Speter	/* XXX this is called outside of Giant, is pmap_zero_page safe? */
93699571Speter	/* XXX maybe have a dedicated mapping for this to avoid the problem? */
93799571Speter	mtx_lock(&Giant);
93899571Speter	pmap_zero_page(m);
93999571Speter	mtx_unlock(&Giant);
94099571Speter}
94199571Speter
94277957Sbenno/*
94390643Sbenno * Map the given physical page at the specified virtual address in the
94490643Sbenno * target pmap with the protection requested.  If specified the page
94590643Sbenno * will be wired down.
94677957Sbenno */
94777957Sbennovoid
94890643Sbennopmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
94990643Sbenno	   boolean_t wired)
95077957Sbenno{
95190643Sbenno	struct		pvo_head *pvo_head;
95292847Sjeff	uma_zone_t	zone;
95396250Sbenno	vm_page_t	pg;
95496250Sbenno	u_int		pte_lo, pvo_flags, was_exec, i;
95590643Sbenno	int		error;
95677957Sbenno
95790643Sbenno	if (!pmap_initialized) {
95890643Sbenno		pvo_head = &pmap_pvo_kunmanaged;
95990643Sbenno		zone = pmap_upvo_zone;
96090643Sbenno		pvo_flags = 0;
96196250Sbenno		pg = NULL;
96296250Sbenno		was_exec = PTE_EXEC;
96390643Sbenno	} else {
964110172Sgrehan		pvo_head = vm_page_to_pvoh(m);
965110172Sgrehan		pg = m;
96690643Sbenno		zone = pmap_mpvo_zone;
96790643Sbenno		pvo_flags = PVO_MANAGED;
96896250Sbenno		was_exec = 0;
96990643Sbenno	}
97077957Sbenno
97196250Sbenno	/*
97296250Sbenno	 * If this is a managed page, and it's the first reference to the page,
97396250Sbenno	 * clear the executable attribute of the page.  Otherwise fetch it.
97496250Sbenno	 */
97596250Sbenno	if (pg != NULL) {
97696250Sbenno		if (LIST_EMPTY(pvo_head)) {
97796250Sbenno			pmap_attr_clear(pg, PTE_EXEC);
97896250Sbenno		} else {
97996250Sbenno			was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
98096250Sbenno		}
98196250Sbenno	}
98296250Sbenno
98396250Sbenno
98496250Sbenno	/*
98596250Sbenno	 * Assume the page is cache inhibited and access is guarded unless
98696250Sbenno	 * it's in our available memory array.
98796250Sbenno	 */
98890643Sbenno	pte_lo = PTE_I | PTE_G;
98997346Sbenno	for (i = 0; i < pregions_sz; i++) {
99097346Sbenno		if ((VM_PAGE_TO_PHYS(m) >= pregions[i].mr_start) &&
99197346Sbenno		    (VM_PAGE_TO_PHYS(m) <
99297346Sbenno			(pregions[i].mr_start + pregions[i].mr_size))) {
99396250Sbenno			pte_lo &= ~(PTE_I | PTE_G);
99496250Sbenno			break;
99596250Sbenno		}
99696250Sbenno	}
99777957Sbenno
99890643Sbenno	if (prot & VM_PROT_WRITE)
99990643Sbenno		pte_lo |= PTE_BW;
100090643Sbenno	else
100190643Sbenno		pte_lo |= PTE_BR;
100277957Sbenno
100396250Sbenno	pvo_flags |= (prot & VM_PROT_EXECUTE);
100477957Sbenno
100590643Sbenno	if (wired)
100690643Sbenno		pvo_flags |= PVO_WIRED;
100777957Sbenno
100896250Sbenno	error = pmap_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
100996250Sbenno	    pte_lo, pvo_flags);
101090643Sbenno
101196250Sbenno	/*
101296250Sbenno	 * Flush the real page from the instruction cache if this page is
101396250Sbenno	 * mapped executable and cacheable and was not previously mapped (or
101496250Sbenno	 * was not mapped executable).
101596250Sbenno	 */
101696250Sbenno	if (error == 0 && (pvo_flags & PVO_EXECUTABLE) &&
101796250Sbenno	    (pte_lo & PTE_I) == 0 && was_exec == 0) {
101877957Sbenno		/*
101990643Sbenno		 * Flush the real memory from the cache.
102077957Sbenno		 */
102196250Sbenno		pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
102296250Sbenno		if (pg != NULL)
102396250Sbenno			pmap_attr_save(pg, PTE_EXEC);
102477957Sbenno	}
1025103604Sgrehan
1026103604Sgrehan	/* XXX syncicache always until problems are sorted */
1027103604Sgrehan	pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
102877957Sbenno}
102977957Sbenno
1030117045Salcvm_page_t
1031117045Salcpmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
1032117045Salc{
1033117045Salc
1034117045Salc	pmap_enter(pm, va, m, VM_PROT_READ | VM_PROT_EXECUTE, FALSE);
1035117045Salc	return (NULL);
1036117045Salc}
1037117045Salc
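/*
 * Return the physical address backing a virtual address in the given pmap,
 * or 0 if the address is not mapped.
 */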
1038131658Salcvm_paddr_t
103996353Sbennopmap_extract(pmap_t pm, vm_offset_t va)
104077957Sbenno{
104196353Sbenno	struct	pvo_entry *pvo;
104296353Sbenno
104396353Sbenno	pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
104496353Sbenno
104596353Sbenno	if (pvo != NULL) {
104696353Sbenno		return ((pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF));
104796353Sbenno	}
104896353Sbenno
104990643Sbenno	return (0);
105077957Sbenno}
105177957Sbenno
105277957Sbenno/*
1053120336Sgrehan * Atomically extract and hold the physical page with the given
1054120336Sgrehan * pmap and virtual address pair if that mapping permits the given
1055120336Sgrehan * protection.
1056120336Sgrehan */
1057120336Sgrehanvm_page_t
1058120336Sgrehanpmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1059120336Sgrehan{
1060120336Sgrehan	vm_paddr_t pa;
1061120336Sgrehan	vm_page_t m;
1062120336Sgrehan
1063120336Sgrehan	m = NULL;
1064120336Sgrehan	mtx_lock(&Giant);
1065120336Sgrehan	if ((pa = pmap_extract(pmap, va)) != 0) {
1066120336Sgrehan		m = PHYS_TO_VM_PAGE(pa);
1067120336Sgrehan		vm_page_lock_queues();
1068120336Sgrehan		vm_page_hold(m);
1069120336Sgrehan		vm_page_unlock_queues();
1070120336Sgrehan	}
1071120336Sgrehan	mtx_unlock(&Giant);
1072120336Sgrehan	return (m);
1073120336Sgrehan}
1074120336Sgrehan
1075120336Sgrehan/*
107690643Sbenno * Grow the number of kernel page table entries.  Unneeded.
107777957Sbenno */
107890643Sbennovoid
107990643Sbennopmap_growkernel(vm_offset_t addr)
108077957Sbenno{
108190643Sbenno}
108277957Sbenno
108390643Sbennovoid
1084127869Salcpmap_init(void)
108590643Sbenno{
108677957Sbenno
108794753Sbenno	CTR0(KTR_PMAP, "pmap_init");
108877957Sbenno
108992847Sjeff	pmap_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
1090125442Sgrehan	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1091125442Sgrehan	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
109292847Sjeff	pmap_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
1093125442Sgrehan	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1094125442Sgrehan	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
109590643Sbenno	pmap_initialized = TRUE;
109677957Sbenno}
109777957Sbenno
109899037Sbennovoid
109999037Sbennopmap_init2(void)
110099037Sbenno{
110199037Sbenno
110299037Sbenno	CTR0(KTR_PMAP, "pmap_init2");
110399037Sbenno}
110499037Sbenno
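/*
 * A page is considered modified if any of its mappings has the changed
 * (PTE_CHG) bit set.  Fictitious and unmanaged pages are never reported
 * as modified.
 */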
110590643Sbennoboolean_t
110690643Sbennopmap_is_modified(vm_page_t m)
110790643Sbenno{
110896353Sbenno
1109110172Sgrehan	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
111096353Sbenno		return (FALSE);
111196353Sbenno
111296353Sbenno	return (pmap_query_bit(m, PTE_CHG));
111390643Sbenno}
111490643Sbenno
1115120722Salc/*
1116120722Salc *	pmap_is_prefaultable:
1117120722Salc *
1118120722Salc *	Return whether or not the specified virtual address is eligible
1119120722Salc *	for prefault.
1120120722Salc */
1121120722Salcboolean_t
1122120722Salcpmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
1123120722Salc{
1124120722Salc
1125120722Salc	return (FALSE);
1126120722Salc}
1127120722Salc
112890643Sbennovoid
112990643Sbennopmap_clear_reference(vm_page_t m)
113090643Sbenno{
1131110172Sgrehan
1132110172Sgrehan	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1133110172Sgrehan		return;
1134110172Sgrehan	pmap_clear_bit(m, PTE_REF, NULL);
113590643Sbenno}
113690643Sbenno
1137110172Sgrehanvoid
1138110172Sgrehanpmap_clear_modify(vm_page_t m)
1139110172Sgrehan{
1140110172Sgrehan
1141110172Sgrehan	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1142110172Sgrehan		return;
1143110172Sgrehan	pmap_clear_bit(m, PTE_CHG, NULL);
1144110172Sgrehan}
1145110172Sgrehan
114691403Ssilby/*
114791403Ssilby *	pmap_ts_referenced:
114891403Ssilby *
114991403Ssilby *	Return a count of reference bits for a page, clearing those bits.
115091403Ssilby *	It is not necessary for every reference bit to be cleared, but it
115191403Ssilby *	is necessary that 0 only be returned when there are truly no
115291403Ssilby *	reference bits set.
115391403Ssilby *
115491403Ssilby *	XXX: The exact number of bits to check and clear is a matter that
115591403Ssilby *	should be tested and standardized at some point in the future for
115691403Ssilby *	optimal aging of shared pages.
115791403Ssilby */
115890643Sbennoint
115990643Sbennopmap_ts_referenced(vm_page_t m)
116090643Sbenno{
1161110172Sgrehan	int count;
1162110172Sgrehan
1163110172Sgrehan	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1164110172Sgrehan		return (0);
1165110172Sgrehan
1166110172Sgrehan	count = pmap_clear_bit(m, PTE_REF, NULL);
1167110172Sgrehan
1168110172Sgrehan	return (count);
116990643Sbenno}
117090643Sbenno
117177957Sbenno/*
117290643Sbenno * Map a wired page into kernel virtual address space.
117377957Sbenno */
117477957Sbennovoid
117590643Sbennopmap_kenter(vm_offset_t va, vm_offset_t pa)
117677957Sbenno{
117790643Sbenno	u_int		pte_lo;
117890643Sbenno	int		error;
117990643Sbenno	int		i;
118077957Sbenno
118190643Sbenno#if 0
118290643Sbenno	if (va < VM_MIN_KERNEL_ADDRESS)
118390643Sbenno		panic("pmap_kenter: attempt to enter non-kernel address %#x",
118490643Sbenno		    va);
118590643Sbenno#endif
118677957Sbenno
1187103604Sgrehan	pte_lo = PTE_I | PTE_G;
1188103604Sgrehan	for (i = 0; i < pregions_sz; i++) {
1189103604Sgrehan		if ((pa >= pregions[i].mr_start) &&
1190103604Sgrehan		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
119190643Sbenno			pte_lo &= ~(PTE_I | PTE_G);
119277957Sbenno			break;
119377957Sbenno		}
1194103604Sgrehan	}
119577957Sbenno
119690643Sbenno	error = pmap_pvo_enter(kernel_pmap, pmap_upvo_zone,
119790643Sbenno	    &pmap_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);
119890643Sbenno
119990643Sbenno	if (error != 0 && error != ENOENT)
120090643Sbenno		panic("pmap_kenter: failed to enter va %#x pa %#x: %d", va,
120190643Sbenno		    pa, error);
120290643Sbenno
120377957Sbenno	/*
120490643Sbenno	 * Flush the real memory from the instruction cache.
120577957Sbenno	 */
120690643Sbenno	if ((pte_lo & (PTE_I | PTE_G)) == 0) {
120790643Sbenno		pmap_syncicache(pa, PAGE_SIZE);
120877957Sbenno	}
120977957Sbenno}
121077957Sbenno
121194838Sbenno/*
121294838Sbenno * Extract the physical page address associated with the given kernel virtual
121394838Sbenno * address.
121494838Sbenno */
121590643Sbennovm_offset_t
121690643Sbennopmap_kextract(vm_offset_t va)
121777957Sbenno{
121894838Sbenno	struct		pvo_entry *pvo;
121994838Sbenno
1220125185Sgrehan#ifdef UMA_MD_SMALL_ALLOC
1221125185Sgrehan	/*
1222125185Sgrehan	 * Allow direct mappings
1223125185Sgrehan	 */
1224125185Sgrehan	if (va < VM_MIN_KERNEL_ADDRESS) {
1225125185Sgrehan		return (va);
1226125185Sgrehan	}
1227125185Sgrehan#endif
1228125185Sgrehan
122994838Sbenno	pvo = pmap_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
1230125185Sgrehan	KASSERT(pvo != NULL, ("pmap_kextract: no addr found"));
123194838Sbenno	if (pvo == NULL) {
123294838Sbenno		return (0);
123394838Sbenno	}
123494838Sbenno
123594838Sbenno	return ((pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF));
123677957Sbenno}
123777957Sbenno
123891456Sbenno/*
123991456Sbenno * Remove a wired page from kernel virtual address space.
124091456Sbenno */
124177957Sbennovoid
124277957Sbennopmap_kremove(vm_offset_t va)
124377957Sbenno{
124491456Sbenno
1245103604Sgrehan	pmap_remove(kernel_pmap, va, va + PAGE_SIZE);
124677957Sbenno}
124777957Sbenno
124877957Sbenno/*
124990643Sbenno * Map a range of physical addresses into kernel virtual address space.
125090643Sbenno *
125190643Sbenno * The value passed in *virt is a suggested virtual address for the mapping.
125290643Sbenno * Architectures which can support a direct-mapped physical to virtual region
125390643Sbenno * can return the appropriate address within that region, leaving '*virt'
125490643Sbenno * unchanged.  We cannot and therefore do not; *virt is updated with the
125590643Sbenno * first usable address after the mapped region.
125677957Sbenno */
125790643Sbennovm_offset_t
125890643Sbennopmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot)
125977957Sbenno{
126090643Sbenno	vm_offset_t	sva, va;
126177957Sbenno
126290643Sbenno	sva = *virt;
126390643Sbenno	va = sva;
126490643Sbenno	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
126590643Sbenno		pmap_kenter(va, pa_start);
126690643Sbenno	*virt = va;
126790643Sbenno	return (sva);
126877957Sbenno}
126977957Sbenno
127090643Sbennoint
127190643Sbennopmap_mincore(pmap_t pmap, vm_offset_t addr)
127277957Sbenno{
127390643Sbenno	TODO;
127490643Sbenno	return (0);
127577957Sbenno}
127677957Sbenno
127777957Sbennovoid
127894838Sbennopmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
1279117206Salc		    vm_pindex_t pindex, vm_size_t size)
128090643Sbenno{
128194838Sbenno
1282117206Salc	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1283117206Salc	KASSERT(object->type == OBJT_DEVICE,
1284117206Salc	    ("pmap_object_init_pt: non-device object"));
128594838Sbenno	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1286117206Salc	    ("pmap_object_init_pt: non current pmap"));
128777957Sbenno}
128877957Sbenno
128977957Sbenno/*
129090643Sbenno * Lower the permission for all mappings to a given page.
129177957Sbenno */
129277957Sbennovoid
129377957Sbennopmap_page_protect(vm_page_t m, vm_prot_t prot)
129477957Sbenno{
129590643Sbenno	struct	pvo_head *pvo_head;
129690643Sbenno	struct	pvo_entry *pvo, *next_pvo;
129790643Sbenno	struct	pte *pt;
129877957Sbenno
129990643Sbenno	/*
130090643Sbenno	 * Since the routine only downgrades protection, if the
130190643Sbenno	 * maximal protection is desired, there isn't any change
130290643Sbenno	 * to be made.
130390643Sbenno	 */
130490643Sbenno	if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) ==
130590643Sbenno	    (VM_PROT_READ|VM_PROT_WRITE))
130677957Sbenno		return;
130777957Sbenno
130890643Sbenno	pvo_head = vm_page_to_pvoh(m);
130990643Sbenno	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
131090643Sbenno		next_pvo = LIST_NEXT(pvo, pvo_vlink);
131190643Sbenno		PMAP_PVO_CHECK(pvo);	/* sanity check */
131290643Sbenno
131390643Sbenno		/*
131490643Sbenno		 * Downgrading to no mapping at all, we just remove the entry.
131590643Sbenno		 */
131690643Sbenno		if ((prot & VM_PROT_READ) == 0) {
131790643Sbenno			pmap_pvo_remove(pvo, -1);
131890643Sbenno			continue;
131977957Sbenno		}
132090643Sbenno
132190643Sbenno		/*
132290643Sbenno		 * If EXEC permission is being revoked, just clear the flag
132390643Sbenno		 * in the PVO.
132490643Sbenno		 */
132590643Sbenno		if ((prot & VM_PROT_EXECUTE) == 0)
132690643Sbenno			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
132790643Sbenno
132890643Sbenno		/*
132990643Sbenno		 * If this entry is already RO, don't diddle with the page
133090643Sbenno		 * table.
133190643Sbenno		 */
133290643Sbenno		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
133390643Sbenno			PMAP_PVO_CHECK(pvo);
133490643Sbenno			continue;
133577957Sbenno		}
133690643Sbenno
133790643Sbenno		/*
133890643Sbenno		 * Grab the PTE before we diddle the bits so pvo_to_pte can
133990643Sbenno		 * verify the pte contents are as expected.
134090643Sbenno		 */
134190643Sbenno		pt = pmap_pvo_to_pte(pvo, -1);
134290643Sbenno		pvo->pvo_pte.pte_lo &= ~PTE_PP;
134390643Sbenno		pvo->pvo_pte.pte_lo |= PTE_BR;
134490643Sbenno		if (pt != NULL)
134590643Sbenno			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
134690643Sbenno		PMAP_PVO_CHECK(pvo);	/* sanity check */
134777957Sbenno	}
134877957Sbenno}
134977957Sbenno
135077957Sbenno/*
135191403Ssilby * Returns true if the pmap's pv is one of the first
135291403Ssilby * 16 pvs linked to from this page.  This count may
135391403Ssilby * be changed upwards or downwards in the future; it
135491403Ssilby * is only necessary that true be returned for a small
135591403Ssilby * subset of pmaps for proper page aging.
135691403Ssilby */
135790643Sbennoboolean_t
135891403Ssilbypmap_page_exists_quick(pmap_t pmap, vm_page_t m)
135990643Sbenno{
1360110172Sgrehan        int loops;
1361110172Sgrehan	struct pvo_entry *pvo;
1362110172Sgrehan
1363110172Sgrehan	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
1364110172Sgrehan		return (FALSE);
1365110172Sgrehan
1366110172Sgrehan	loops = 0;
1367110172Sgrehan	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1368110172Sgrehan		if (pvo->pvo_pmap == pmap)
1369110172Sgrehan			return (TRUE);
1370110172Sgrehan		if (++loops >= 16)
1371110172Sgrehan			break;
1372110172Sgrehan	}
1373110172Sgrehan
1374110172Sgrehan	return (FALSE);
137590643Sbenno}
137677957Sbenno
137790643Sbennostatic u_int	pmap_vsidcontext;
137877957Sbenno
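/*
 * Allocate a VSID group for a new pmap.  The timebase is mixed into
 * pmap_vsidcontext to randomize the hash; on a collision the first free
 * bit in that bucket of pmap_vsid_bitmap is taken instead, and the 16
 * segment registers are then derived from the hash with VSID_MAKE().
 */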
137990643Sbennovoid
138090643Sbennopmap_pinit(pmap_t pmap)
138190643Sbenno{
138290643Sbenno	int	i, mask;
138390643Sbenno	u_int	entropy;
138477957Sbenno
1385126478Sgrehan	KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("pmap_pinit: virt pmap"));
1386126478Sgrehan
138790643Sbenno	entropy = 0;
138890643Sbenno	__asm __volatile("mftb %0" : "=r"(entropy));
138977957Sbenno
139090643Sbenno	/*
139190643Sbenno	 * Allocate some segment registers for this pmap.
139290643Sbenno	 */
139390643Sbenno	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
139490643Sbenno		u_int	hash, n;
139577957Sbenno
139677957Sbenno		/*
139790643Sbenno		 * Create a new value by multiplying by a prime and adding in
139890643Sbenno		 * entropy from the timebase register.  This is to make the
139990643Sbenno		 * VSID more random so that the PT hash function collides
140090643Sbenno		 * less often.  (Note that the prime causes gcc to do shifts
140190643Sbenno		 * instead of a multiply.)
140277957Sbenno		 */
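		/*
		 * (0x1105 is the prime 4357, i.e.
		 * (1 << 12) + (1 << 8) + (1 << 2) + 1, which is what lets the
		 * compiler strength-reduce the multiply into shifts and adds.)
		 */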
140390643Sbenno		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
140490643Sbenno		hash = pmap_vsidcontext & (NPMAPS - 1);
140590643Sbenno		if (hash == 0)		/* 0 is special, avoid it */
140690643Sbenno			continue;
140790643Sbenno		n = hash >> 5;
140890643Sbenno		mask = 1 << (hash & (VSID_NBPW - 1));
140990643Sbenno		hash = (pmap_vsidcontext & 0xfffff);
141090643Sbenno		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
141190643Sbenno			/* anything free in this bucket? */
141290643Sbenno			if (pmap_vsid_bitmap[n] == 0xffffffff) {
141390643Sbenno				entropy = (pmap_vsidcontext >> 20);
141490643Sbenno				continue;
141590643Sbenno			}
141690643Sbenno			i = ffs(~pmap_vsid_bitmap[n]) - 1;
141790643Sbenno			mask = 1 << i;
141890643Sbenno			hash &= 0xfffff & ~(VSID_NBPW - 1);
141990643Sbenno			hash |= i;
142077957Sbenno		}
142190643Sbenno		pmap_vsid_bitmap[n] |= mask;
142290643Sbenno		for (i = 0; i < 16; i++)
142390643Sbenno			pmap->pm_sr[i] = VSID_MAKE(i, hash);
142490643Sbenno		return;
142590643Sbenno	}
142677957Sbenno
142790643Sbenno	panic("pmap_pinit: out of segments");
142877957Sbenno}
142977957Sbenno
143077957Sbenno/*
143190643Sbenno * Initialize the pmap associated with process 0.
143277957Sbenno */
143377957Sbennovoid
143490643Sbennopmap_pinit0(pmap_t pm)
143577957Sbenno{
143677957Sbenno
143790643Sbenno	pmap_pinit(pm);
143890643Sbenno	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
143977957Sbenno}
144077957Sbenno
144194838Sbenno/*
144294838Sbenno * Set the physical protection on the specified range of this map as requested.
144394838Sbenno */
144490643Sbennovoid
144594838Sbennopmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
144690643Sbenno{
144794838Sbenno	struct	pvo_entry *pvo;
144894838Sbenno	struct	pte *pt;
144994838Sbenno	int	pteidx;
145094838Sbenno
145194838Sbenno	CTR4(KTR_PMAP, "pmap_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva,
145294838Sbenno	    eva, prot);
145394838Sbenno
145594838Sbenno	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
145694838Sbenno	    ("pmap_protect: non current pmap"));
145794838Sbenno
145894838Sbenno	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
145994838Sbenno		pmap_remove(pm, sva, eva);
146094838Sbenno		return;
146194838Sbenno	}
146294838Sbenno
146394838Sbenno	for (; sva < eva; sva += PAGE_SIZE) {
146494838Sbenno		pvo = pmap_pvo_find_va(pm, sva, &pteidx);
146594838Sbenno		if (pvo == NULL)
146694838Sbenno			continue;
146794838Sbenno
146894838Sbenno		if ((prot & VM_PROT_EXECUTE) == 0)
146994838Sbenno			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
147094838Sbenno
147194838Sbenno		/*
147294838Sbenno		 * Grab the PTE pointer before we diddle with the cached PTE
147394838Sbenno		 * copy.
147494838Sbenno		 */
147594838Sbenno		pt = pmap_pvo_to_pte(pvo, pteidx);
147694838Sbenno		/*
147794838Sbenno		 * Change the protection of the page.
147894838Sbenno		 */
147994838Sbenno		pvo->pvo_pte.pte_lo &= ~PTE_PP;
148094838Sbenno		pvo->pvo_pte.pte_lo |= PTE_BR;
148194838Sbenno
148294838Sbenno		/*
148394838Sbenno		 * If the PVO is in the page table, update that pte as well.
148494838Sbenno		 */
148594838Sbenno		if (pt != NULL)
148694838Sbenno			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
148794838Sbenno	}
148877957Sbenno}
148977957Sbenno
149091456Sbenno/*
149191456Sbenno * Map a list of wired pages into kernel virtual address space.  This is
149291456Sbenno * intended for temporary mappings which do not need page modification or
149391456Sbenno * references recorded.  Existing mappings in the region are overwritten.
149491456Sbenno */
149590643Sbennovoid
1496110172Sgrehanpmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
149777957Sbenno{
1498110172Sgrehan	vm_offset_t va;
149977957Sbenno
1500110172Sgrehan	va = sva;
1501110172Sgrehan	while (count-- > 0) {
1502110172Sgrehan		pmap_kenter(va, VM_PAGE_TO_PHYS(*m));
1503110172Sgrehan		va += PAGE_SIZE;
1504110172Sgrehan		m++;
1505110172Sgrehan	}
150690643Sbenno}
150777957Sbenno
150891456Sbenno/*
150991456Sbenno * Remove page mappings from kernel virtual address space.  Intended for
151091456Sbenno * temporary mappings entered by pmap_qenter.
151191456Sbenno */
151290643Sbennovoid
1513110172Sgrehanpmap_qremove(vm_offset_t sva, int count)
151490643Sbenno{
1515110172Sgrehan	vm_offset_t va;
151691456Sbenno
1517110172Sgrehan	va = sva;
1518110172Sgrehan	while (count-- > 0) {
151991456Sbenno		pmap_kremove(va);
1520110172Sgrehan		va += PAGE_SIZE;
1521110172Sgrehan	}
152277957Sbenno}
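
/*
 * Illustrative sketch (not compiled) of the intended pmap_qenter/pmap_qremove
 * pairing: map an array of wired pages at a caller-supplied KVA range, use
 * the mapping, then tear it down.  The "va" and "pages" names below are
 * hypothetical and exist only for the example.
 */
#if 0
	vm_page_t	pages[4];	/* wired pages obtained elsewhere */
	vm_offset_t	va;		/* KVA range reserved by the caller */

	pmap_qenter(va, pages, 4);
	/* ... access the pages through va ... */
	pmap_qremove(va, 4);
#endif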
152377957Sbenno
152490643Sbennovoid
152590643Sbennopmap_release(pmap_t pmap)
152690643Sbenno{
1527103604Sgrehan	int idx, mask;
1528103604Sgrehan
1529103604Sgrehan	/*
1530103604Sgrehan	 * Free the VSID allocated to this pmap's segment registers.
1531103604Sgrehan	 */
1532103604Sgrehan	if (pmap->pm_sr[0] == 0)
1533103604Sgrehan		panic("pmap_release: pm_sr[0] == 0");
1534103604Sgrehan
1535103604Sgrehan	idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
1536103604Sgrehan	mask = 1 << (idx % VSID_NBPW);
1537103604Sgrehan	idx /= VSID_NBPW;
1538103604Sgrehan	pmap_vsid_bitmap[idx] &= ~mask;
153977957Sbenno}
154077957Sbenno
154191456Sbenno/*
154291456Sbenno * Remove the given range of addresses from the specified map.
154391456Sbenno */
154490643Sbennovoid
154591456Sbennopmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
154677957Sbenno{
154791456Sbenno	struct	pvo_entry *pvo;
154891456Sbenno	int	pteidx;
154991456Sbenno
155091456Sbenno	for (; sva < eva; sva += PAGE_SIZE) {
155191456Sbenno		pvo = pmap_pvo_find_va(pm, sva, &pteidx);
155291456Sbenno		if (pvo != NULL) {
155391456Sbenno			pmap_pvo_remove(pvo, pteidx);
155491456Sbenno		}
155591456Sbenno	}
155677957Sbenno}
155777957Sbenno
155894838Sbenno/*
1559110172Sgrehan * Remove the physical page from all pmaps in which it resides.  pmap_pvo_remove()
1560110172Sgrehan * will reflect changes in the PTEs back to the vm_page.
1561110172Sgrehan */
1562110172Sgrehanvoid
1563110172Sgrehanpmap_remove_all(vm_page_t m)
1564110172Sgrehan{
1565110172Sgrehan	struct  pvo_head *pvo_head;
1566110172Sgrehan	struct	pvo_entry *pvo, *next_pvo;
1567110172Sgrehan
1568120336Sgrehan	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1569120336Sgrehan
1570110172Sgrehan	pvo_head = vm_page_to_pvoh(m);
1571110172Sgrehan	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
1572110172Sgrehan		next_pvo = LIST_NEXT(pvo, pvo_vlink);
1573110172Sgrehan
1574110172Sgrehan		PMAP_PVO_CHECK(pvo);	/* sanity check */
1575110172Sgrehan		pmap_pvo_remove(pvo, -1);
1576110172Sgrehan	}
1577110172Sgrehan	vm_page_flag_clear(m, PG_WRITEABLE);
1578110172Sgrehan}
1579110172Sgrehan
1580110172Sgrehan/*
158194838Sbenno * Remove all pages from the specified address space; this aids process exit
158294838Sbenno * speed.  This is much faster than pmap_remove when running down an entire
158394838Sbenno * address space.  Only works for the current pmap.
158494838Sbenno */
158590643Sbennovoid
158694838Sbennopmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
158777957Sbenno{
158894838Sbenno
158994838Sbenno	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
159094838Sbenno	    ("pmap_remove_pages: non current pmap"));
159194838Sbenno	pmap_remove(pm, sva, eva);
159277957Sbenno}
159377957Sbenno
159477957Sbenno/*
159590643Sbenno * Allocate a physical page of memory directly from the phys_avail map.
159690643Sbenno * Can only be called from pmap_bootstrap before avail start and end are
159790643Sbenno * calculated.
159883682Smp */
159990643Sbennostatic vm_offset_t
160090643Sbennopmap_bootstrap_alloc(vm_size_t size, u_int align)
160183682Smp{
160290643Sbenno	vm_offset_t	s, e;
160390643Sbenno	int		i, j;
160483682Smp
160590643Sbenno	size = round_page(size);
160690643Sbenno	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
160790643Sbenno		if (align != 0)
160890643Sbenno			s = (phys_avail[i] + align - 1) & ~(align - 1);
160990643Sbenno		else
161090643Sbenno			s = phys_avail[i];
161190643Sbenno		e = s + size;
161290643Sbenno
161390643Sbenno		if (s < phys_avail[i] || e > phys_avail[i + 1])
161490643Sbenno			continue;
161590643Sbenno
161690643Sbenno		if (s == phys_avail[i]) {
161790643Sbenno			phys_avail[i] += size;
161890643Sbenno		} else if (e == phys_avail[i + 1]) {
161990643Sbenno			phys_avail[i + 1] -= size;
162090643Sbenno		} else {
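			/*
			 * The allocation splits this range in two: shift the
			 * later entries up to open a slot, keep [start, s) in
			 * place and record [e, end) as a new range.
			 */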
162190643Sbenno			for (j = phys_avail_count * 2; j > i; j -= 2) {
162290643Sbenno				phys_avail[j] = phys_avail[j - 2];
162390643Sbenno				phys_avail[j + 1] = phys_avail[j - 1];
162490643Sbenno			}
162590643Sbenno
162690643Sbenno			phys_avail[i + 3] = phys_avail[i + 1];
162790643Sbenno			phys_avail[i + 1] = s;
162890643Sbenno			phys_avail[i + 2] = e;
162990643Sbenno			phys_avail_count++;
163090643Sbenno		}
163190643Sbenno
163290643Sbenno		return (s);
163383682Smp	}
163490643Sbenno	panic("pmap_bootstrap_alloc: could not allocate memory");
163583682Smp}
163683682Smp
163783682Smp/*
163890643Sbenno * Return an unmapped pvo for a kernel virtual address.
163990643Sbenno * Used by pmap functions that operate on physical pages.
164083682Smp */
164190643Sbennostatic struct pvo_entry *
164290643Sbennopmap_rkva_alloc(void)
164383682Smp{
164490643Sbenno	struct		pvo_entry *pvo;
164590643Sbenno	struct		pte *pt;
164690643Sbenno	vm_offset_t	kva;
164790643Sbenno	int		pteidx;
164883682Smp
164990643Sbenno	if (pmap_rkva_count == 0)
165090643Sbenno		panic("pmap_rkva_alloc: no more reserved KVAs");
165190643Sbenno
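	/*
	 * Enter a dummy mapping (physical page 0) just to instantiate a pvo
	 * and page table slot for this KVA; the PTE is torn back out below so
	 * that pmap_pa_map() can later point the pvo at arbitrary physical
	 * pages.
	 */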
165290643Sbenno	kva = pmap_rkva_start + (PAGE_SIZE * --pmap_rkva_count);
165390643Sbenno	pmap_kenter(kva, 0);
165490643Sbenno
165590643Sbenno	pvo = pmap_pvo_find_va(kernel_pmap, kva, &pteidx);
165690643Sbenno
165790643Sbenno	if (pvo == NULL)
165890643Sbenno		panic("pmap_rkva_alloc: pmap_pvo_find_va failed");
165990643Sbenno
166090643Sbenno	pt = pmap_pvo_to_pte(pvo, pteidx);
166190643Sbenno
166290643Sbenno	if (pt == NULL)
166390643Sbenno		panic("pmap_rkva_alloc: pmap_pvo_to_pte failed");
166490643Sbenno
166590643Sbenno	pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
166690643Sbenno	PVO_PTEGIDX_CLR(pvo);
166790643Sbenno
166890643Sbenno	pmap_pte_overflow++;
166990643Sbenno
167090643Sbenno	return (pvo);
167190643Sbenno}
167290643Sbenno
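/*
 * Temporarily wire the given physical address into the reserved-KVA pvo
 * (normally one handed out by pmap_rkva_alloc).  If the pvo already maps
 * something, the old PTE image is stashed in *saved_pt so that
 * pmap_pa_unmap() can restore it; *depth_p, when supplied, counts the
 * nesting.
 */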
167390643Sbennostatic void
167490643Sbennopmap_pa_map(struct pvo_entry *pvo, vm_offset_t pa, struct pte *saved_pt,
167590643Sbenno    int *depth_p)
167690643Sbenno{
167790643Sbenno	struct	pte *pt;
167890643Sbenno
167990643Sbenno	/*
168090643Sbenno	 * If this pvo already has a valid pte, we need to save it so it can
168190643Sbenno	 * be restored later.  We then just reload the new PTE over the old
168290643Sbenno	 * slot.
168390643Sbenno	 */
168490643Sbenno	if (saved_pt != NULL) {
168590643Sbenno		pt = pmap_pvo_to_pte(pvo, -1);
168690643Sbenno
168790643Sbenno		if (pt != NULL) {
168890643Sbenno			pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
168990643Sbenno			PVO_PTEGIDX_CLR(pvo);
169090643Sbenno			pmap_pte_overflow++;
169183682Smp		}
169290643Sbenno
169390643Sbenno		*saved_pt = pvo->pvo_pte;
169490643Sbenno
169590643Sbenno		pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
169683682Smp	}
169790643Sbenno
169890643Sbenno	pvo->pvo_pte.pte_lo |= pa;
169990643Sbenno
170090643Sbenno	if (!pmap_pte_spill(pvo->pvo_vaddr))
170190643Sbenno		panic("pmap_pa_map: could not spill pvo %p", pvo);
170290643Sbenno
170390643Sbenno	if (depth_p != NULL)
170490643Sbenno		(*depth_p)++;
170583682Smp}
170683682Smp
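/*
 * Undo a pmap_pa_map(): evict the temporary PTE and, if a previous mapping
 * was saved, re-establish it.
 */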
170790643Sbennostatic void
170890643Sbennopmap_pa_unmap(struct pvo_entry *pvo, struct pte *saved_pt, int *depth_p)
170977957Sbenno{
171090643Sbenno	struct	pte *pt;
171177957Sbenno
171290643Sbenno	pt = pmap_pvo_to_pte(pvo, -1);
171390643Sbenno
171490643Sbenno	if (pt != NULL) {
171590643Sbenno		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
171690643Sbenno		PVO_PTEGIDX_CLR(pvo);
171790643Sbenno		pmap_pte_overflow++;
171890643Sbenno	}
171990643Sbenno
172090643Sbenno	pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
172190643Sbenno
172290643Sbenno	/*
172390643Sbenno	 * If there is a saved PTE and it's valid, restore it and return.
172490643Sbenno	 */
172590643Sbenno	if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) {
172690643Sbenno		if (depth_p != NULL && --(*depth_p) == 0)
172790643Sbenno			panic("pmap_pa_unmap: restoring but depth == 0");
172890643Sbenno
172990643Sbenno		pvo->pvo_pte = *saved_pt;
173090643Sbenno
173190643Sbenno		if (!pmap_pte_spill(pvo->pvo_vaddr))
173290643Sbenno			panic("pmap_pa_unmap: could not spill pvo %p", pvo);
173390643Sbenno	}
173477957Sbenno}
173577957Sbenno
173690643Sbennostatic void
173790643Sbennopmap_syncicache(vm_offset_t pa, vm_size_t len)
173877957Sbenno{
173990643Sbenno	__syncicache((void *)pa, len);
174090643Sbenno}
174177957Sbenno
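/*
 * Invalidate the TLB.  The TLB set index is taken from low-order effective
 * address bits on the 32-bit OEA processors this pmap supports, so issuing a
 * tlbie for each 4KB page in the first 256KB of the address space touches
 * every congruence class.
 */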
174290643Sbennostatic void
174390643Sbennotlbia(void)
174490643Sbenno{
174590643Sbenno	caddr_t	i;
174690643Sbenno
174790643Sbenno	SYNC();
174890643Sbenno	for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) {
174990643Sbenno		TLBIE(i);
175090643Sbenno		EIEIO();
175190643Sbenno	}
175290643Sbenno	TLBSYNC();
175390643Sbenno	SYNC();
175477957Sbenno}
175577957Sbenno
175690643Sbennostatic int
175792847Sjeffpmap_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
175890643Sbenno    vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags)
175977957Sbenno{
176090643Sbenno	struct	pvo_entry *pvo;
176190643Sbenno	u_int	sr;
176290643Sbenno	int	first;
176390643Sbenno	u_int	ptegidx;
176490643Sbenno	int	i;
1765103604Sgrehan	int     bootstrap;
176677957Sbenno
176790643Sbenno	pmap_pvo_enter_calls++;
176896250Sbenno	first = 0;
1769103604Sgrehan
1770103604Sgrehan	bootstrap = 0;
177190643Sbenno
177290643Sbenno	/*
177390643Sbenno	 * Compute the PTE Group index.
177490643Sbenno	 */
177590643Sbenno	va &= ~ADDR_POFF;
177690643Sbenno	sr = va_to_sr(pm->pm_sr, va);
177790643Sbenno	ptegidx = va_to_pteg(sr, va);
177890643Sbenno
177990643Sbenno	/*
178090643Sbenno	 * Remove any existing mapping for this page.  Reuse the pvo entry if
178190643Sbenno	 * there is a mapping.
178290643Sbenno	 */
178390643Sbenno	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
178490643Sbenno		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
178596334Sbenno			if ((pvo->pvo_pte.pte_lo & PTE_RPGN) == pa &&
178696334Sbenno			    (pvo->pvo_pte.pte_lo & PTE_PP) ==
178796334Sbenno			    (pte_lo & PTE_PP)) {
178892521Sbenno				return (0);
178996334Sbenno			}
179090643Sbenno			pmap_pvo_remove(pvo, -1);
179190643Sbenno			break;
179290643Sbenno		}
179390643Sbenno	}
179490643Sbenno
179590643Sbenno	/*
179690643Sbenno	 * If we aren't overwriting a mapping, try to allocate.
179790643Sbenno	 */
179892521Sbenno	if (pmap_initialized) {
179992847Sjeff		pvo = uma_zalloc(zone, M_NOWAIT);
180092521Sbenno	} else {
180199037Sbenno		if (pmap_bpvo_pool_index >= BPVO_POOL_SIZE) {
180299037Sbenno			panic("pmap_enter: bpvo pool exhausted, %d, %d, %d",
180399037Sbenno			      pmap_bpvo_pool_index, BPVO_POOL_SIZE,
180499037Sbenno			      BPVO_POOL_SIZE * sizeof(struct pvo_entry));
180592521Sbenno		}
180692521Sbenno		pvo = &pmap_bpvo_pool[pmap_bpvo_pool_index];
180792521Sbenno		pmap_bpvo_pool_index++;
1808103604Sgrehan		bootstrap = 1;
180992521Sbenno	}
181090643Sbenno
181190643Sbenno	if (pvo == NULL) {
181290643Sbenno		return (ENOMEM);
181390643Sbenno	}
181490643Sbenno
181590643Sbenno	pmap_pvo_entries++;
181690643Sbenno	pvo->pvo_vaddr = va;
181790643Sbenno	pvo->pvo_pmap = pm;
181890643Sbenno	LIST_INSERT_HEAD(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
181990643Sbenno	pvo->pvo_vaddr &= ~ADDR_POFF;
182090643Sbenno	if (flags & VM_PROT_EXECUTE)
182190643Sbenno		pvo->pvo_vaddr |= PVO_EXECUTABLE;
182290643Sbenno	if (flags & PVO_WIRED)
182390643Sbenno		pvo->pvo_vaddr |= PVO_WIRED;
182490643Sbenno	if (pvo_head != &pmap_pvo_kunmanaged)
182590643Sbenno		pvo->pvo_vaddr |= PVO_MANAGED;
1826103604Sgrehan	if (bootstrap)
1827103604Sgrehan		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
182890643Sbenno	pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo);
182990643Sbenno
183090643Sbenno	/*
183190643Sbenno	 * Remember if the list was empty and therefore will be the first
183290643Sbenno	 * item.
183390643Sbenno	 */
183496250Sbenno	if (LIST_FIRST(pvo_head) == NULL)
183596250Sbenno		first = 1;
183690643Sbenno
183790643Sbenno	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
183890643Sbenno	if (pvo->pvo_vaddr & PVO_WIRED)
183990643Sbenno		pvo->pvo_pmap->pm_stats.wired_count++;
184090643Sbenno	pvo->pvo_pmap->pm_stats.resident_count++;
184190643Sbenno
184290643Sbenno	/*
184390643Sbenno	 * We hope this succeeds but it isn't required.
184490643Sbenno	 */
184590643Sbenno	i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
184690643Sbenno	if (i >= 0) {
184790643Sbenno		PVO_PTEGIDX_SET(pvo, i);
184890643Sbenno	} else {
184990643Sbenno		panic("pmap_pvo_enter: overflow");
185090643Sbenno		pmap_pte_overflow++;
185190643Sbenno	}
185290643Sbenno
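	/*
	 * Tell the caller whether this was the first mapping entered for the
	 * page; ENOENT is used here as that indication, not as an error.
	 */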
185390643Sbenno	return (first ? ENOENT : 0);
185477957Sbenno}
185577957Sbenno
185690643Sbennostatic void
185790643Sbennopmap_pvo_remove(struct pvo_entry *pvo, int pteidx)
185877957Sbenno{
185990643Sbenno	struct	pte *pt;
186077957Sbenno
186190643Sbenno	/*
186290643Sbenno	 * If there is an active pte entry, we need to deactivate it (and
186390643Sbenno	 * save the ref & chg bits).
186490643Sbenno	 */
186590643Sbenno	pt = pmap_pvo_to_pte(pvo, pteidx);
186690643Sbenno	if (pt != NULL) {
186790643Sbenno		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
186890643Sbenno		PVO_PTEGIDX_CLR(pvo);
186990643Sbenno	} else {
187090643Sbenno		pmap_pte_overflow--;
1871110172Sgrehan	}
187290643Sbenno
187390643Sbenno	/*
187490643Sbenno	 * Update our statistics.
187590643Sbenno	 */
187690643Sbenno	pvo->pvo_pmap->pm_stats.resident_count--;
187790643Sbenno	if (pvo->pvo_vaddr & PVO_WIRED)
187890643Sbenno		pvo->pvo_pmap->pm_stats.wired_count--;
187990643Sbenno
188090643Sbenno	/*
188190643Sbenno	 * Save the REF/CHG bits into their cache if the page is managed.
188290643Sbenno	 */
188390643Sbenno	if (pvo->pvo_vaddr & PVO_MANAGED) {
188490643Sbenno		struct	vm_page *pg;
188590643Sbenno
188692067Sbenno		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN);
188790643Sbenno		if (pg != NULL) {
188890643Sbenno			pmap_attr_save(pg, pvo->pvo_pte.pte_lo &
188990643Sbenno			    (PTE_REF | PTE_CHG));
189090643Sbenno		}
189190643Sbenno	}
189290643Sbenno
189390643Sbenno	/*
189490643Sbenno	 * Remove this PVO from the PV list.
189590643Sbenno	 */
189690643Sbenno	LIST_REMOVE(pvo, pvo_vlink);
189790643Sbenno
189890643Sbenno	/*
189990643Sbenno	 * Remove this from the overflow list and return it to the pool
190090643Sbenno	 * if we aren't going to reuse it.
190190643Sbenno	 */
190290643Sbenno	LIST_REMOVE(pvo, pvo_olink);
190392521Sbenno	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
190492847Sjeff		uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? pmap_mpvo_zone :
190592521Sbenno		    pmap_upvo_zone, pvo);
190690643Sbenno	pmap_pvo_entries--;
190790643Sbenno	pmap_pvo_remove_calls++;
190877957Sbenno}
190977957Sbenno
191090643Sbennostatic __inline int
191190643Sbennopmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
191277957Sbenno{
191390643Sbenno	int	pteidx;
191477957Sbenno
191590643Sbenno	/*
191690643Sbenno	 * We can find the actual pte entry without searching by grabbing
191790643Sbenno	 * the PTEG index from 3 unused bits in pte_lo[11:9] and by
191890643Sbenno	 * noticing the HID bit.
191990643Sbenno	 */
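	/*
	 * For example, a mapping that hashed to PTEG 5 and was installed in
	 * slot 3 yields pteidx 5 * 8 + 3 = 43; if it was installed via the
	 * secondary hash (PTE_HID set), the index is reflected into the
	 * alternate half of the table by the XOR below.
	 */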
192090643Sbenno	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
192190643Sbenno	if (pvo->pvo_pte.pte_hi & PTE_HID)
192290643Sbenno		pteidx ^= pmap_pteg_mask * 8;
192390643Sbenno
192490643Sbenno	return (pteidx);
192577957Sbenno}
192677957Sbenno
192790643Sbennostatic struct pvo_entry *
192890643Sbennopmap_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
192977957Sbenno{
193090643Sbenno	struct	pvo_entry *pvo;
193190643Sbenno	int	ptegidx;
193290643Sbenno	u_int	sr;
193377957Sbenno
193490643Sbenno	va &= ~ADDR_POFF;
193590643Sbenno	sr = va_to_sr(pm->pm_sr, va);
193690643Sbenno	ptegidx = va_to_pteg(sr, va);
193790643Sbenno
193890643Sbenno	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
193990643Sbenno		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
194090643Sbenno			if (pteidx_p)
194190643Sbenno				*pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
194290643Sbenno			return (pvo);
194390643Sbenno		}
194490643Sbenno	}
194590643Sbenno
194690643Sbenno	return (NULL);
194777957Sbenno}
194877957Sbenno
194990643Sbennostatic struct pte *
195090643Sbennopmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
195177957Sbenno{
195290643Sbenno	struct	pte *pt;
195377957Sbenno
195490643Sbenno	/*
195590643Sbenno	 * If we haven't been supplied the ptegidx, calculate it.
195690643Sbenno	 */
195790643Sbenno	if (pteidx == -1) {
195890643Sbenno		int	ptegidx;
195990643Sbenno		u_int	sr;
196077957Sbenno
196190643Sbenno		sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
196290643Sbenno		ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
196390643Sbenno		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
196490643Sbenno	}
196590643Sbenno
196690643Sbenno	pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];
196790643Sbenno
196890643Sbenno	if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
196990643Sbenno		panic("pmap_pvo_to_pte: pvo %p has valid pte in pvo but no "
197090643Sbenno		    "valid pte index", pvo);
197190643Sbenno	}
197290643Sbenno
197390643Sbenno	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
197490643Sbenno		panic("pmap_pvo_to_pte: pvo %p has valid pte index in pvo "
197590643Sbenno		    "pvo but no valid pte", pvo);
197690643Sbenno	}
197790643Sbenno
197890643Sbenno	if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
197990643Sbenno		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
198090643Sbenno			panic("pmap_pvo_to_pte: pvo %p has valid pte in "
198190643Sbenno			    "pmap_pteg_table %p but invalid in pvo", pvo, pt);
198277957Sbenno		}
198390643Sbenno
198490643Sbenno		if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF))
198590643Sbenno		    != 0) {
198690643Sbenno			panic("pmap_pvo_to_pte: pvo %p pte does not match "
198790643Sbenno			    "pte %p in pmap_pteg_table", pvo, pt);
198890643Sbenno		}
198990643Sbenno
199090643Sbenno		return (pt);
199177957Sbenno	}
199277957Sbenno
199390643Sbenno	if (pvo->pvo_pte.pte_hi & PTE_VALID) {
199490643Sbenno		panic("pmap_pvo_to_pte: pvo %p has invalid pte %p in "
199590643Sbenno		    "pmap_pteg_table but valid in pvo", pvo, pt);
199690643Sbenno	}
199777957Sbenno
199890643Sbenno	return (NULL);
199977957Sbenno}
200078880Sbenno
200178880Sbenno/*
200290643Sbenno * XXX: THIS STUFF SHOULD BE IN pte.c?
200378880Sbenno */
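/*
 * Bring the mapping for 'addr' (if one exists) into the hardware page table.
 * If a free slot is available the PTE is simply inserted; otherwise a
 * pseudo-randomly chosen victim in the primary PTEG is evicted, with its
 * REF/CHG state saved in the victim's pvo, and the new PTE takes its place.
 * Returns 1 if a PTE was installed, 0 if no pvo maps the address.
 */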
200490643Sbennoint
200590643Sbennopmap_pte_spill(vm_offset_t addr)
200678880Sbenno{
200790643Sbenno	struct	pvo_entry *source_pvo, *victim_pvo;
200890643Sbenno	struct	pvo_entry *pvo;
200990643Sbenno	int	ptegidx, i, j;
201090643Sbenno	u_int	sr;
201190643Sbenno	struct	pteg *pteg;
201290643Sbenno	struct	pte *pt;
201378880Sbenno
201490643Sbenno	pmap_pte_spills++;
201590643Sbenno
201694836Sbenno	sr = mfsrin(addr);
201790643Sbenno	ptegidx = va_to_pteg(sr, addr);
201890643Sbenno
201978880Sbenno	/*
202090643Sbenno	 * Have to substitute some entry.  Use the primary hash for this.
202190643Sbenno	 * Use low bits of timebase as random generator.
202278880Sbenno	 */
202390643Sbenno	pteg = &pmap_pteg_table[ptegidx];
202490643Sbenno	__asm __volatile("mftb %0" : "=r"(i));
202590643Sbenno	i &= 7;
202690643Sbenno	pt = &pteg->pt[i];
202778880Sbenno
202890643Sbenno	source_pvo = NULL;
202990643Sbenno	victim_pvo = NULL;
203090643Sbenno	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
203178880Sbenno		/*
203290643Sbenno		 * We need to find a pvo entry for this address.
203378880Sbenno		 */
203490643Sbenno		PMAP_PVO_CHECK(pvo);
203590643Sbenno		if (source_pvo == NULL &&
203690643Sbenno		    pmap_pte_match(&pvo->pvo_pte, sr, addr,
203790643Sbenno		    pvo->pvo_pte.pte_hi & PTE_HID)) {
203890643Sbenno			/*
203990643Sbenno			 * Now we have found an entry to be spilled into the pteg.
204090643Sbenno			 * The PTE is now valid, so we know it's active.
204190643Sbenno			 */
204290643Sbenno			j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
204378880Sbenno
204490643Sbenno			if (j >= 0) {
204590643Sbenno				PVO_PTEGIDX_SET(pvo, j);
204690643Sbenno				pmap_pte_overflow--;
204790643Sbenno				PMAP_PVO_CHECK(pvo);
204890643Sbenno				return (1);
204990643Sbenno			}
205090643Sbenno
205190643Sbenno			source_pvo = pvo;
205290643Sbenno
205390643Sbenno			if (victim_pvo != NULL)
205490643Sbenno				break;
205590643Sbenno		}
205690643Sbenno
205778880Sbenno		/*
205890643Sbenno		 * We also need the pvo entry of the victim we are replacing
205990643Sbenno		 * so save the R & C bits of the PTE.
206078880Sbenno		 */
206190643Sbenno		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
206290643Sbenno		    pmap_pte_compare(pt, &pvo->pvo_pte)) {
206390643Sbenno			victim_pvo = pvo;
206490643Sbenno			if (source_pvo != NULL)
206590643Sbenno				break;
206690643Sbenno		}
206790643Sbenno	}
206878880Sbenno
206990643Sbenno	if (source_pvo == NULL)
207090643Sbenno		return (0);
207190643Sbenno
207290643Sbenno	if (victim_pvo == NULL) {
207390643Sbenno		if ((pt->pte_hi & PTE_HID) == 0)
207490643Sbenno			panic("pmap_pte_spill: victim p-pte (%p) has no pvo"
207590643Sbenno			    " entry", pt);
207690643Sbenno
207778880Sbenno		/*
207890643Sbenno		 * If this is a secondary PTE, we need to search it's primary
207990643Sbenno		 * If this is a secondary PTE, we need to search its primary
208078880Sbenno		 */
208190643Sbenno		LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx ^ pmap_pteg_mask],
208290643Sbenno		    pvo_olink) {
208390643Sbenno			PMAP_PVO_CHECK(pvo);
208490643Sbenno			/*
208590643Sbenno			 * We also need the pvo entry of the victim we are
208690643Sbenno			 * replacing so save the R & C bits of the PTE.
208790643Sbenno			 */
208890643Sbenno			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
208990643Sbenno				victim_pvo = pvo;
209090643Sbenno				break;
209190643Sbenno			}
209290643Sbenno		}
209378880Sbenno
209490643Sbenno		if (victim_pvo == NULL)
209590643Sbenno			panic("pmap_pte_spill: victim s-pte (%p) has no pvo"
209690643Sbenno			    " entry", pt);
209790643Sbenno	}
209878880Sbenno
209990643Sbenno	/*
210090643Sbenno	 * We are invalidating the TLB entry for the EA we are replacing even
210190643Sbenno	 * though it's valid.  If we don't, we lose any ref/chg bit changes
210290643Sbenno	 * contained in the TLB entry.
210390643Sbenno	 */
210490643Sbenno	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;
210578880Sbenno
210690643Sbenno	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
210790643Sbenno	pmap_pte_set(pt, &source_pvo->pvo_pte);
210890643Sbenno
210990643Sbenno	PVO_PTEGIDX_CLR(victim_pvo);
211090643Sbenno	PVO_PTEGIDX_SET(source_pvo, i);
211190643Sbenno	pmap_pte_replacements++;
211290643Sbenno
211390643Sbenno	PMAP_PVO_CHECK(victim_pvo);
211490643Sbenno	PMAP_PVO_CHECK(source_pvo);
211590643Sbenno
211690643Sbenno	return (1);
211790643Sbenno}
211890643Sbenno
211990643Sbennostatic int
212090643Sbennopmap_pte_insert(u_int ptegidx, struct pte *pvo_pt)
212190643Sbenno{
212290643Sbenno	struct	pte *pt;
212390643Sbenno	int	i;
212490643Sbenno
212590643Sbenno	/*
212690643Sbenno	 * First try primary hash.
212790643Sbenno	 */
212890643Sbenno	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
212990643Sbenno		if ((pt->pte_hi & PTE_VALID) == 0) {
213090643Sbenno			pvo_pt->pte_hi &= ~PTE_HID;
213190643Sbenno			pmap_pte_set(pt, pvo_pt);
213290643Sbenno			return (i);
213378880Sbenno		}
213490643Sbenno	}
213578880Sbenno
213690643Sbenno	/*
213790643Sbenno	 * Now try secondary hash.
213890643Sbenno	 */
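	/*
	 * The secondary PTEG is located by complementing the hash, i.e. by
	 * XORing the group index with pmap_pteg_mask; PTEs placed there carry
	 * the HID bit so later lookups know which hash function was used.
	 */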
213990643Sbenno	ptegidx ^= pmap_pteg_mask;
214190643Sbenno	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
214290643Sbenno		if ((pt->pte_hi & PTE_VALID) == 0) {
214390643Sbenno			pvo_pt->pte_hi |= PTE_HID;
214490643Sbenno			pmap_pte_set(pt, pvo_pt);
214590643Sbenno			return (i);
214690643Sbenno		}
214790643Sbenno	}
214878880Sbenno
214990643Sbenno	panic("pmap_pte_insert: overflow");
215090643Sbenno	return (-1);
215178880Sbenno}
215284921Sbenno
215390643Sbennostatic boolean_t
215490643Sbennopmap_query_bit(vm_page_t m, int ptebit)
215584921Sbenno{
215690643Sbenno	struct	pvo_entry *pvo;
215790643Sbenno	struct	pte *pt;
215884921Sbenno
2159123560Sgrehan#if 0
216090643Sbenno	if (pmap_attr_fetch(m) & ptebit)
216190643Sbenno		return (TRUE);
2162123560Sgrehan#endif
216384921Sbenno
216490643Sbenno	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
216590643Sbenno		PMAP_PVO_CHECK(pvo);	/* sanity check */
216684921Sbenno
216790643Sbenno		/*
216890643Sbenno		 * See if we saved the bit off.  If so, cache it and return
216990643Sbenno		 * success.
217090643Sbenno		 */
217190643Sbenno		if (pvo->pvo_pte.pte_lo & ptebit) {
217290643Sbenno			pmap_attr_save(m, ptebit);
217390643Sbenno			PMAP_PVO_CHECK(pvo);	/* sanity check */
217490643Sbenno			return (TRUE);
217590643Sbenno		}
217690643Sbenno	}
217784921Sbenno
217890643Sbenno	/*
217990643Sbenno	 * No luck, now go through the hard part of looking at the PTEs
218090643Sbenno	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
218190643Sbenno	 * the PTEs.
218290643Sbenno	 */
218390643Sbenno	SYNC();
218490643Sbenno	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
218590643Sbenno		PMAP_PVO_CHECK(pvo);	/* sanity check */
218690643Sbenno
218790643Sbenno		/*
218890643Sbenno		 * See if this pvo has a valid PTE.  If so, fetch the
218990643Sbenno		 * REF/CHG bits from the valid PTE.  If the appropriate
219090643Sbenno		 * ptebit is set, cache it and return success.
219190643Sbenno		 */
219290643Sbenno		pt = pmap_pvo_to_pte(pvo, -1);
219390643Sbenno		if (pt != NULL) {
219490643Sbenno			pmap_pte_synch(pt, &pvo->pvo_pte);
219590643Sbenno			if (pvo->pvo_pte.pte_lo & ptebit) {
219690643Sbenno				pmap_attr_save(m, ptebit);
219790643Sbenno				PMAP_PVO_CHECK(pvo);	/* sanity check */
219890643Sbenno				return (TRUE);
219990643Sbenno			}
220090643Sbenno		}
220184921Sbenno	}
220284921Sbenno
2203123354Sgallatin	return (FALSE);
220484921Sbenno}
220590643Sbenno
2206110172Sgrehanstatic u_int
2207110172Sgrehanpmap_clear_bit(vm_page_t m, int ptebit, int *origbit)
220890643Sbenno{
2209110172Sgrehan	u_int	count;
221090643Sbenno	struct	pvo_entry *pvo;
221190643Sbenno	struct	pte *pt;
221290643Sbenno	int	rv;
221390643Sbenno
221490643Sbenno	/*
221590643Sbenno	 * Clear the cached value.
221690643Sbenno	 */
221790643Sbenno	rv = pmap_attr_fetch(m);
221890643Sbenno	pmap_attr_clear(m, ptebit);
221990643Sbenno
222090643Sbenno	/*
222190643Sbenno	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
222290643Sbenno	 * we can reset the right ones).  note that since the pvo entries and
222390643Sbenno	 * list heads are accessed via BAT0 and are never placed in the page
222490643Sbenno	 * table, we don't have to worry about further accesses setting the
222590643Sbenno	 * REF/CHG bits.
222690643Sbenno	 */
222790643Sbenno	SYNC();
222890643Sbenno
222990643Sbenno	/*
223090643Sbenno	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
223190643Sbenno	 * valid pte, clear the ptebit from the valid pte.
223290643Sbenno	 */
2233110172Sgrehan	count = 0;
223490643Sbenno	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
223590643Sbenno		PMAP_PVO_CHECK(pvo);	/* sanity check */
223690643Sbenno		pt = pmap_pvo_to_pte(pvo, -1);
223790643Sbenno		if (pt != NULL) {
223890643Sbenno			pmap_pte_synch(pt, &pvo->pvo_pte);
2239110172Sgrehan			if (pvo->pvo_pte.pte_lo & ptebit) {
2240110172Sgrehan				count++;
224190643Sbenno				pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
2242110172Sgrehan			}
224390643Sbenno		}
224490643Sbenno		rv |= pvo->pvo_pte.pte_lo;
224590643Sbenno		pvo->pvo_pte.pte_lo &= ~ptebit;
224690643Sbenno		PMAP_PVO_CHECK(pvo);	/* sanity check */
224790643Sbenno	}
224890643Sbenno
2249110172Sgrehan	if (origbit != NULL) {
2250110172Sgrehan		*origbit = rv;
2251110172Sgrehan	}
2252110172Sgrehan
2253110172Sgrehan	return (count);
225490643Sbenno}
225599038Sbenno
225699038Sbenno/*
2257103604Sgrehan * Return 0 if the physical range is encompassed by battable[idx], else an errno.
2258103604Sgrehan */
2259103604Sgrehanstatic int
2260103604Sgrehanpmap_bat_mapped(int idx, vm_offset_t pa, vm_size_t size)
2261103604Sgrehan{
2262103604Sgrehan	u_int prot;
2263103604Sgrehan	u_int32_t start;
2264103604Sgrehan	u_int32_t end;
2265103604Sgrehan	u_int32_t bat_ble;
2266103604Sgrehan
2267103604Sgrehan	/*
2268103604Sgrehan	 * Return immediately if not a valid mapping
2269103604Sgrehan	 */
2270103604Sgrehan	if ((battable[idx].batu & BAT_Vs) == 0)
2271103604Sgrehan		return (EINVAL);
2272103604Sgrehan
2273103604Sgrehan	/*
2274103604Sgrehan	 * The BAT entry must be cache-inhibited, guarded, and r/w
2275103604Sgrehan	 * so it can function as an i/o page
2276103604Sgrehan	 */
2277103604Sgrehan	prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW);
2278103604Sgrehan	if (prot != (BAT_I|BAT_G|BAT_PP_RW))
2279103604Sgrehan		return (EPERM);
2280103604Sgrehan
2281103604Sgrehan	/*
2282103604Sgrehan	 * The address should be within the BAT range. Assume that the
2283103604Sgrehan	 * start address in the BAT has the correct alignment (thus
2284103604Sgrehan	 * not requiring masking)
2285103604Sgrehan	 */
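	/*
	 * The BL field of the upper BAT word encodes the block length; once
	 * shifted into place and filled with ones below it, it gives the
	 * offset of the last byte of the block (BAT blocks span 128KB to
	 * 256MB).
	 */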
2286103604Sgrehan	start = battable[idx].batl & BAT_PBS;
2287103604Sgrehan	bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03;
2288103604Sgrehan	end = start | (bat_ble << 15) | 0x7fff;
2289103604Sgrehan
2290103604Sgrehan	if ((pa < start) || ((pa + size) > end))
2291103604Sgrehan		return (ERANGE);
2292103604Sgrehan
2293103604Sgrehan	return (0);
2294103604Sgrehan}
2295103604Sgrehan
2296103604Sgrehan
2297103604Sgrehan/*
229899038Sbenno * Map a set of physical memory pages into the kernel virtual
229999038Sbenno * address space. Return a pointer to where it is mapped. This
230099038Sbenno * routine is intended to be used for mapping device memory,
230199038Sbenno * NOT real memory.
230299038Sbenno */
230399038Sbennovoid *
230499038Sbennopmap_mapdev(vm_offset_t pa, vm_size_t size)
230599038Sbenno{
2306103604Sgrehan	vm_offset_t va, tmpva, ppa, offset;
2307103604Sgrehan	int i;
2308103604Sgrehan
2309103604Sgrehan	ppa = trunc_page(pa);
231099038Sbenno	offset = pa & PAGE_MASK;
231199038Sbenno	size = roundup(offset + size, PAGE_SIZE);
231299038Sbenno
231399038Sbenno	GIANT_REQUIRED;
231499038Sbenno
2315103604Sgrehan	/*
2316103604Sgrehan	 * If the physical address lies within a valid BAT table entry,
2317103604Sgrehan	 * return the 1:1 mapping. This currently doesn't work
2318103604Sgrehan	 * for regions that overlap 256M BAT segments.
2319103604Sgrehan	 */
2320103604Sgrehan	for (i = 0; i < 16; i++) {
2321103604Sgrehan		if (pmap_bat_mapped(i, pa, size) == 0)
2322103604Sgrehan			return ((void *) pa);
2323103604Sgrehan	}
2324103604Sgrehan
2325118365Salc	va = kmem_alloc_nofault(kernel_map, size);
232699038Sbenno	if (!va)
232799038Sbenno		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
232899038Sbenno
232999038Sbenno	for (tmpva = va; size > 0;) {
2330103604Sgrehan		pmap_kenter(tmpva, ppa);
233199038Sbenno		TLBIE(tmpva); /* XXX or should it be invalidate-all ? */
233299038Sbenno		size -= PAGE_SIZE;
233399038Sbenno		tmpva += PAGE_SIZE;
2334103604Sgrehan		ppa += PAGE_SIZE;
233599038Sbenno	}
233699038Sbenno
233799038Sbenno	return ((void *)(va + offset));
233899038Sbenno}
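
/*
 * Illustrative sketch (not compiled) of how a caller is expected to use
 * pmap_mapdev()/pmap_unmapdev(); the physical address and size below are
 * made up for the example.
 */
#if 0
	void *regs;

	regs = pmap_mapdev(0xf8000000, PAGE_SIZE);	/* hypothetical device */
	/* ... access the device registers through 'regs' ... */
	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
#endif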
233999038Sbenno
234099038Sbennovoid
234199038Sbennopmap_unmapdev(vm_offset_t va, vm_size_t size)
234299038Sbenno{
234399038Sbenno	vm_offset_t base, offset;
234499038Sbenno
2345103604Sgrehan	/*
2346103604Sgrehan	 * If this is outside kernel virtual space, then it's a
2347103604Sgrehan	 * battable entry and doesn't require unmapping
2348103604Sgrehan	 */
2349103604Sgrehan	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
2350103604Sgrehan		base = trunc_page(va);
2351103604Sgrehan		offset = va & PAGE_MASK;
2352103604Sgrehan		size = roundup(offset + size, PAGE_SIZE);
2353103604Sgrehan		kmem_free(kernel_map, base, size);
2354103604Sgrehan	}
235599038Sbenno}
2356