/*
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea.c 126478 2004-03-02 06:49:21Z grehan $");

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this module is called upon to
 * provide software-use-only maps which may or may not be stored in the
 * same form as hardware maps.  These pseudo-maps are used to store
 * intermediate results from copy operations to and from address spaces.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * or reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/powerpc.h>
#include <machine/bat.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/sr.h>

#define	PMAP_DEBUG

#define TODO	panic("%s: not implemented", __func__);

#define	PMAP_LOCK(pm)
#define	PMAP_UNLOCK(pm)

#define	TLBIE(va)	__asm __volatile("tlbie %0" :: "r"(va))
#define	TLBSYNC()	__asm __volatile("tlbsync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");
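
/*
 * TLB shootdown recipe used throughout this file: tlbie invalidates the
 * old translation, eieio orders the page table store against the
 * invalidation, tlbsync waits for the invalidation to complete on other
 * processors, and the final sync makes the whole sequence globally
 * visible.  This follows the table-update synchronization steps of the
 * PowerPC OEA (see the Section 7.6.3 references below).
 */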

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_SR(vsid)	((vsid) & 0xf)
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
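
/*
 * A VSID is composed of a 4-bit segment register number in the low bits
 * with up to 20 bits of hash above it: VSID_MAKE(0x3, 0x12345) yields
 * 0x123453, from which VSID_TO_SR() recovers 0x3 and VSID_TO_HASH()
 * recovers 0x12345.
 */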

#define	PVO_PTEGIDX_MASK	0x0007		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x0008		/* slot is valid */
#define	PVO_WIRED		0x0010		/* PVO entry is wired */
#define	PVO_MANAGED		0x0020		/* PVO entry is managed */
#define	PVO_EXECUTABLE		0x0040		/* PVO entry is executable */
#define	PVO_BOOTSTRAP		0x0080		/* PVO entry allocated during
						   bootstrap */
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define	PVO_ISEXECUTABLE(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo, i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
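
/*
 * The PVO flags above live in the low bits of pvo_vaddr: mapped
 * addresses are page aligned, so the low 12 bits are otherwise unused.
 * PVO_VADDR() masks the flags back off to recover the virtual address.
 */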

#define	PMAP_PVO_CHECK(pvo)

struct ofw_map {
	vm_offset_t	om_va;
	vm_size_t	om_len;
	vm_offset_t	om_pa;
	u_int		om_mode;
};
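
/*
 * Each ofw_map entry corresponds to one element of the OpenFirmware
 * "translations" property: a virtual range, its length, the physical
 * address it maps to, and the mode (WIMG) bits.
 */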

int	pmap_bootstrapped = 0;

/*
 * Virtual and physical address of message buffer.
 */
struct		msgbuf *msgbufp;
vm_offset_t	msgbuf_phys;

/*
 * Physical addresses of first and last available physical page.
 */
vm_offset_t avail_start;
vm_offset_t avail_end;

int pmap_pagedaemon_waken;

/*
 * Map of physical memory regions.
 */
vm_offset_t	phys_avail[128];
u_int		phys_avail_count;
static struct	mem_region *regions;
static struct	mem_region *pregions;
int		regions_sz, pregions_sz;
static struct	ofw_map *translations;

/*
 * First and last available kernel virtual addresses.
 */
vm_offset_t virtual_avail;
vm_offset_t virtual_end;
vm_offset_t kernel_vm_end;

/*
 * Kernel pmap.
 */
struct pmap kernel_pmap_store;
extern struct pmap ofw_pmap;

/*
 * PTEG data.
 */
static struct	pteg *pmap_pteg_table;
u_int		pmap_pteg_count;
u_int		pmap_pteg_mask;

/*
 * PVO data.
 */
struct	pvo_head *pmap_pvo_table;		/* pvo entries by pteg index */
struct	pvo_head pmap_pvo_kunmanaged =
    LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged);	/* list of unmanaged pages */
struct	pvo_head pmap_pvo_unmanaged =
    LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);	/* list of unmanaged pages */

uma_zone_t	pmap_upvo_zone;	/* zone for pvo entries for unmanaged pages */
uma_zone_t	pmap_mpvo_zone;	/* zone for pvo entries for managed pages */

#define	BPVO_POOL_SIZE	32768
static struct	pvo_entry *pmap_bpvo_pool;
static int	pmap_bpvo_pool_index = 0;

#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
static u_int	pmap_vsid_bitmap[NPMAPS / VSID_NBPW];

static boolean_t pmap_initialized = FALSE;

/*
 * Statistics.
 */
u_int	pmap_pte_valid = 0;
u_int	pmap_pte_overflow = 0;
u_int	pmap_pte_replacements = 0;
u_int	pmap_pvo_entries = 0;
u_int	pmap_pvo_enter_calls = 0;
u_int	pmap_pvo_remove_calls = 0;
u_int	pmap_pte_spills = 0;
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_valid, CTLFLAG_RD, &pmap_pte_valid,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_overflow, CTLFLAG_RD,
    &pmap_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_replacements, CTLFLAG_RD,
    &pmap_pte_replacements, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_entries, CTLFLAG_RD, &pmap_pvo_entries,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_enter_calls, CTLFLAG_RD,
    &pmap_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_remove_calls, CTLFLAG_RD,
    &pmap_pvo_remove_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_spills, CTLFLAG_RD,
    &pmap_pte_spills, 0, "");

struct	pvo_entry *pmap_pvo_zeropage;

vm_offset_t	pmap_rkva_start = VM_MIN_KERNEL_ADDRESS;
u_int		pmap_rkva_count = 4;

/*
 * Allocate physical memory for use in pmap_bootstrap.
 */
static vm_offset_t	pmap_bootstrap_alloc(vm_size_t, u_int);

/*
 * PTE calls.
 */
static int		pmap_pte_insert(u_int, struct pte *);

/*
 * PVO calls.
 */
static int	pmap_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
		    vm_offset_t, vm_offset_t, u_int, int);
static void	pmap_pvo_remove(struct pvo_entry *, int);
static struct	pvo_entry *pmap_pvo_find_va(pmap_t, vm_offset_t, int *);
static struct	pte *pmap_pvo_to_pte(const struct pvo_entry *, int);

/*
 * Utility routines.
 */
static struct		pvo_entry *pmap_rkva_alloc(void);
static void		pmap_pa_map(struct pvo_entry *, vm_offset_t,
			    struct pte *, int *);
static void		pmap_pa_unmap(struct pvo_entry *, struct pte *, int *);
static void		pmap_syncicache(vm_offset_t, vm_size_t);
static boolean_t	pmap_query_bit(vm_page_t, int);
static u_int		pmap_clear_bit(vm_page_t, int, int *);
static void		tlbia(void);

static __inline int
va_to_sr(u_int *sr, vm_offset_t va)
{
	return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
}

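/*
 * Compute the primary PTEG index for a virtual address by XORing the
 * VSID from the segment register with the page index of the address and
 * masking the result with pmap_pteg_count - 1; this is the primary hash
 * function defined for the OEA hashed page table.
 */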
static __inline u_int
va_to_pteg(u_int sr, vm_offset_t addr)
{
	u_int hash;

	hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
	    ADDR_PIDX_SHFT);
	return (hash & pmap_pteg_mask);
}

static __inline struct pvo_head *
pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p)
{
	struct	vm_page *pg;

	pg = PHYS_TO_VM_PAGE(pa);

	if (pg_p != NULL)
		*pg_p = pg;

	if (pg == NULL)
		return (&pmap_pvo_unmanaged);

	return (&pg->md.mdpg_pvoh);
}

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	return (&m->md.mdpg_pvoh);
}

static __inline void
pmap_attr_clear(vm_page_t m, int ptebit)
{

	m->md.mdpg_attrs &= ~ptebit;
}

static __inline int
pmap_attr_fetch(vm_page_t m)
{

	return (m->md.mdpg_attrs);
}

static __inline void
pmap_attr_save(vm_page_t m, int ptebit)
{

	m->md.mdpg_attrs |= ptebit;
}

static __inline int
pmap_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
{
	if (pt->pte_hi == pvo_pt->pte_hi)
		return (1);

	return (0);
}

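/*
 * Check whether an in-memory PTE matches a given segment register,
 * virtual address and primary/secondary hash selector ("which") by
 * rebuilding the expected pte_hi word from the VSID and the abbreviated
 * page index, ignoring only the valid bit.
 */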
static __inline int
pmap_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
{
	return (pt->pte_hi & ~PTE_VALID) ==
	    (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    ((va >> ADDR_API_SHFT) & PTE_API) | which);
}

static __inline void
pmap_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
{
	/*
	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
	 * set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
	pt->pte_lo = pte_lo;
}

static __inline void
pmap_pte_synch(struct pte *pt, struct pte *pvo_pt)
{

	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
}

static __inline void
pmap_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
{

	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	TLBIE(va);
	EIEIO();
	TLBSYNC();
	SYNC();
}

static __inline void
pmap_pte_set(struct pte *pt, struct pte *pvo_pt)
{

	pvo_pt->pte_hi |= PTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1.
	 * Note that the REF/CHG bits are from pvo_pt and thus should have
	 * been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	SYNC();
	pmap_pte_valid++;
}

static __inline void
pmap_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	pvo_pt->pte_hi &= ~PTE_VALID;

	/*
	 * Force the reg & chg bits back into the PTEs.
	 */
	SYNC();

	/*
	 * Invalidate the pte.
	 */
	pt->pte_hi &= ~PTE_VALID;

	SYNC();
	TLBIE(va);
	EIEIO();
	TLBSYNC();
	SYNC();

	/*
	 * Save the reg & chg bits.
	 */
	pmap_pte_synch(pt, pvo_pt);
	pmap_pte_valid--;
}

static __inline void
pmap_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	/*
	 * Invalidate the PTE
	 */
	pmap_pte_unset(pt, pvo_pt, va);
	pmap_pte_set(pt, pvo_pt);
}

/*
 * Quick sort callout for comparing memory regions.
 */
static int	mr_cmp(const void *a, const void *b);
static int	om_cmp(const void *a, const void *b);

static int
mr_cmp(const void *a, const void *b)
{
	const struct	mem_region *regiona;
	const struct	mem_region *regionb;

	regiona = a;
	regionb = b;
	if (regiona->mr_start < regionb->mr_start)
		return (-1);
	else if (regiona->mr_start > regionb->mr_start)
		return (1);
	else
		return (0);
}

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa < mapb->om_pa)
		return (-1);
	else if (mapa->om_pa > mapb->om_pa)
		return (1);
	else
		return (0);
}

void
pmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	mmui;
	phandle_t	chosen, mmu;
	int		sz;
	int		i, j;
	int		ofw_mappings;
	vm_size_t	size, physsz;
	vm_offset_t	pa, va, off;
	u_int		batl, batu;

	/*
	 * Set up BAT0 to map the lowest 256 MB area.
	 */
	battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
	battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Map PCI memory space.
	 */
	battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);

	battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);

	battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs);

	battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Map obio devices.
	 */
	battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Use an IBAT and a DBAT to map the bottom segment of memory
	 * where we are.
	 */
	batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
	__asm ("mtibatu 0,%0; mtibatl 0,%1; mtdbatu 0,%0; mtdbatl 0,%1"
	    :: "r"(batu), "r"(batl));

#if 0
	/* map frame buffer */
	batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
	__asm ("mtdbatu 1,%0; mtdbatl 1,%1"
	    :: "r"(batu), "r"(batl));
#endif

#if 1
	/* map pci space */
	batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
	__asm ("mtdbatu 1,%0; mtdbatl 1,%1"
	    :: "r"(batu), "r"(batl));
#endif

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "pmap_bootstrap: physical memory");

	qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp);
	for (i = 0; i < pregions_sz; i++) {
		vm_offset_t pa;
		vm_offset_t end;

		CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)",
			pregions[i].mr_start,
			pregions[i].mr_start + pregions[i].mr_size,
			pregions[i].mr_size);
		/*
		 * Install entries into the BAT table to allow all
		 * of physmem to be covered by on-demand BAT entries.
		 * The loop will sometimes set the same battable element
		 * twice, but that's fine since they won't be used for
		 * a while yet.
		 */
		pa = pregions[i].mr_start & 0xf0000000;
		end = pregions[i].mr_start + pregions[i].mr_size;
		do {
			u_int n = pa >> ADDR_SR_SHFT;

			battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW);
			battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs);
			pa += SEGMENT_LENGTH;
		} while (pa < end);
	}

	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
		panic("pmap_bootstrap: phys_avail too small");
	qsort(regions, regions_sz, sizeof(*regions), mr_cmp);
	phys_avail_count = 0;
	physsz = 0;
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
		    regions[i].mr_start + regions[i].mr_size,
		    regions[i].mr_size);
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}
	physmem = btoc(physsz);

	/*
	 * Allocate PTEG table.
	 */
#ifdef PTEGCOUNT
	pmap_pteg_count = PTEGCOUNT;
#else
	pmap_pteg_count = 0x1000;

	while (pmap_pteg_count < physmem)
		pmap_pteg_count <<= 1;

	pmap_pteg_count >>= 1;
#endif /* PTEGCOUNT */
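
	/*
	 * The default sizing above allocates roughly one PTEG (eight
	 * PTEs) for every two physical pages.  For example, with 128MB
	 * of RAM (physmem == 32768 pages) the loop stops at 0x8000 and
	 * the final shift leaves 0x4000 PTEGs: a 1MB table holding 128K
	 * mappings.
	 */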

	size = pmap_pteg_count * sizeof(struct pteg);
	CTR2(KTR_PMAP, "pmap_bootstrap: %d PTEGs, %d bytes", pmap_pteg_count,
	    size);
	pmap_pteg_table = (struct pteg *)pmap_bootstrap_alloc(size, size);
	CTR1(KTR_PMAP, "pmap_bootstrap: PTEG table at %p", pmap_pteg_table);
	bzero((void *)pmap_pteg_table, pmap_pteg_count * sizeof(struct pteg));
	pmap_pteg_mask = pmap_pteg_count - 1;

	/*
	 * Allocate pv/overflow lists.
	 */
	size = sizeof(struct pvo_head) * pmap_pteg_count;
	pmap_pvo_table = (struct pvo_head *)pmap_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "pmap_bootstrap: PVO table at %p", pmap_pvo_table);
	for (i = 0; i < pmap_pteg_count; i++)
		LIST_INIT(&pmap_pvo_table[i]);

	/*
	 * Allocate the message buffer.
	 */
	msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE, 0);

	/*
	 * Initialise the unmanaged pvo pool.
	 */
	pmap_bpvo_pool = (struct pvo_entry *)pmap_bootstrap_alloc(
		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
	pmap_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	pmap_vsid_bitmap[0] |= 1;

	/*
	 * Set up the OpenFirmware pmap and add its mappings.
	 */
	pmap_pinit(&ofw_pmap);
	ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
	ofw_pmap.pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT;
	if ((chosen = OF_finddevice("/chosen")) == -1)
		panic("pmap_bootstrap: can't find /chosen");
	OF_getprop(chosen, "mmu", &mmui, 4);
	if ((mmu = OF_instance_to_package(mmui)) == -1)
		panic("pmap_bootstrap: can't get mmu package");
	if ((sz = OF_getproplen(mmu, "translations")) == -1)
		panic("pmap_bootstrap: can't get ofw translation count");
	translations = NULL;
	for (i = 0; phys_avail[i + 2] != 0; i += 2) {
		if (phys_avail[i + 1] >= sz)
			translations = (struct ofw_map *)phys_avail[i];
	}
	if (translations == NULL)
		panic("pmap_bootstrap: no space to copy translations");
	bzero(translations, sz);
	if (OF_getprop(mmu, "translations", translations, sz) == -1)
		panic("pmap_bootstrap: can't get ofw translations");
	CTR0(KTR_PMAP, "pmap_bootstrap: translations");
	sz /= sizeof(*translations);
	qsort(translations, sz, sizeof (*translations), om_cmp);
	for (i = 0, ofw_mappings = 0; i < sz; i++) {
		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
		    translations[i].om_pa, translations[i].om_va,
		    translations[i].om_len);

		/*
		 * If the mapping is 1:1, let the RAM and device on-demand
		 * BAT tables take care of the translation.
		 */
		if (translations[i].om_va == translations[i].om_pa)
			continue;

		/* Enter the pages */
		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
			struct	vm_page m;

			m.phys_addr = translations[i].om_pa + off;
			pmap_enter(&ofw_pmap, translations[i].om_va + off, &m,
				   VM_PROT_ALL, 1);
			ofw_mappings++;
		}
	}
#ifdef SMP
	TLBSYNC();
#endif

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	for (i = 0; i < 16; i++) {
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
	}
	kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
	kernel_pmap->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT;
	kernel_pmap->pm_active = ~0;

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0);
	kstack0_phys = pa;
	kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE);
	CTR2(KTR_PMAP, "pmap_bootstrap: kstack0 at %#x (%#x)", kstack0_phys,
	    kstack0);
	virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE;
	for (i = 0; i < KSTACK_PAGES; i++) {
		pa = kstack0_phys + i * PAGE_SIZE;
		va = kstack0 + i * PAGE_SIZE;
		pmap_kenter(va, pa);
		TLBIE(va);
	}

	/*
	 * Calculate the first and last available physical addresses.
	 */
	avail_start = phys_avail[0];
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	avail_end = phys_avail[i + 1];
	Maxmem = powerpc_btop(avail_end);

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	msgbufp = (struct msgbuf *)virtual_avail;
	virtual_avail += round_page(MSGBUF_SIZE);

	/*
	 * Initialize hardware.
	 */
	for (i = 0; i < 16; i++) {
		mtsrin(i << ADDR_SR_SHFT, EMPTY_SEGMENT);
	}
	__asm __volatile ("mtsr %0,%1"
	    :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
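
	/*
	 * SDR1 takes the physical base address of the page table in its
	 * upper bits and HTABMASK in its low 9 bits.  With 64-byte PTEGs,
	 * a table of pmap_pteg_count PTEGs spans pmap_pteg_count / 1024
	 * naturally-aligned 64KB blocks, so pmap_pteg_mask >> 10 produces
	 * the required mask.
	 */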
	__asm __volatile ("sync; mtsdr1 %0; isync"
	    :: "r"((u_int)pmap_pteg_table | (pmap_pteg_mask >> 10)));
	tlbia();

	pmap_bootstrapped++;
}

/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
pmap_activate(struct thread *td)
{
	pmap_t	pm, pmr;

	/*
	 * Load all the data we need up front to encourage the compiler to
	 * not issue any loads while we have interrupts disabled below.
	 */
	pm = &td->td_proc->p_vmspace->vm_pmap;

	if ((pmr = (pmap_t)pmap_kextract((vm_offset_t)pm)) == NULL)
		pmr = pm;

	pm->pm_active |= PCPU_GET(cpumask);
	PCPU_SET(curpmap, pmr);
}

void
pmap_deactivate(struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	pm->pm_active &= ~(PCPU_GET(cpumask));
	PCPU_SET(curpmap, NULL);
}

vm_offset_t
pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size)
{

	return (va);
}

void
pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired)
{
	struct	pvo_entry *pvo;

	pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);

	if (pvo != NULL) {
		if (wired) {
			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
				pm->pm_stats.wired_count++;
			pvo->pvo_vaddr |= PVO_WIRED;
		} else {
			if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
				pm->pm_stats.wired_count--;
			pvo->pvo_vaddr &= ~PVO_WIRED;
		}
	}
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
	  vm_size_t len, vm_offset_t src_addr)
{

	/*
	 * This is not needed as it's mainly an optimisation.
	 * It may want to be implemented later though.
	 */
}

void
pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t	dst;
	vm_offset_t	src;

	dst = VM_PAGE_TO_PHYS(mdst);
	src = VM_PAGE_TO_PHYS(msrc);

	kcopy((void *)src, (void *)dst, PAGE_SIZE);
}

/*
 * Zero a page of physical memory by temporarily mapping it into the tlb.
 */
void
pmap_zero_page(vm_page_t m)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	caddr_t va;

	if (pa < SEGMENT_LENGTH) {
		va = (caddr_t) pa;
	} else if (pmap_initialized) {
		if (pmap_pvo_zeropage == NULL)
			pmap_pvo_zeropage = pmap_rkva_alloc();
		pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL);
		va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage);
	} else {
		panic("pmap_zero_page: can't zero pa %#x", pa);
	}

	bzero(va, PAGE_SIZE);

	if (pa >= SEGMENT_LENGTH)
		pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL);
}

void
pmap_zero_page_area(vm_page_t m, int off, int size)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	caddr_t va;

	if (pa < SEGMENT_LENGTH) {
		va = (caddr_t) pa;
	} else if (pmap_initialized) {
		if (pmap_pvo_zeropage == NULL)
			pmap_pvo_zeropage = pmap_rkva_alloc();
		pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL);
		va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage);
	} else {
		panic("pmap_zero_page_area: can't zero pa %#x", pa);
	}

	bzero(va + off, size);

	if (pa >= SEGMENT_LENGTH)
		pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL);
}

void
pmap_zero_page_idle(vm_page_t m)
{

	/* XXX this is called outside of Giant, is pmap_zero_page safe? */
	/* XXX maybe have a dedicated mapping for this to avoid the problem? */
	mtx_lock(&Giant);
	pmap_zero_page(m);
	mtx_unlock(&Giant);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
	   boolean_t wired)
{
	struct		pvo_head *pvo_head;
	uma_zone_t	zone;
	vm_page_t	pg;
	u_int		pte_lo, pvo_flags, was_exec, i;
	int		error;

	if (!pmap_initialized) {
		pvo_head = &pmap_pvo_kunmanaged;
		zone = pmap_upvo_zone;
		pvo_flags = 0;
		pg = NULL;
		was_exec = PTE_EXEC;
	} else {
		pvo_head = vm_page_to_pvoh(m);
		pg = m;
		zone = pmap_mpvo_zone;
		pvo_flags = PVO_MANAGED;
		was_exec = 0;
	}

	/*
	 * If this is a managed page, and it's the first reference to the page,
	 * clear the execness of the page.  Otherwise fetch the execness.
	 */
	if (pg != NULL) {
		if (LIST_EMPTY(pvo_head)) {
			pmap_attr_clear(pg, PTE_EXEC);
		} else {
			was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	pte_lo = PTE_I | PTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((VM_PAGE_TO_PHYS(m) >= pregions[i].mr_start) &&
		    (VM_PAGE_TO_PHYS(m) <
			(pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(PTE_I | PTE_G);
			break;
		}
	}

	if (prot & VM_PROT_WRITE)
		pte_lo |= PTE_BW;
	else
		pte_lo |= PTE_BR;

	pvo_flags |= (prot & VM_PROT_EXECUTE);

	if (wired)
		pvo_flags |= PVO_WIRED;

	error = pmap_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
	    pte_lo, pvo_flags);

	/*
	 * Flush the real page from the instruction cache if this page is
	 * mapped executable and cacheable and was not previously mapped (or
	 * was not mapped executable).
	 */
	if (error == 0 && (pvo_flags & PVO_EXECUTABLE) &&
	    (pte_lo & PTE_I) == 0 && was_exec == 0) {
		/*
		 * Flush the real memory from the cache.
		 */
		pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
		if (pg != NULL)
			pmap_attr_save(pg, PTE_EXEC);
	}

	/* XXX syncicache always until problems are sorted */
	pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
}

vm_page_t
pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
{

	pmap_enter(pm, va, m, VM_PROT_READ | VM_PROT_EXECUTE, FALSE);
	return (NULL);
}

vm_offset_t
pmap_extract(pmap_t pm, vm_offset_t va)
{
	struct	pvo_entry *pvo;

	pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);

	if (pvo != NULL) {
		return ((pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF));
	}

	return (0);
}

/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	vm_paddr_t pa;
	vm_page_t m;

	m = NULL;
	mtx_lock(&Giant);
	if ((pa = pmap_extract(pmap, va)) != 0) {
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_lock_queues();
		vm_page_hold(m);
		vm_page_unlock_queues();
	}
	mtx_unlock(&Giant);
	return (m);
}

/*
 * Grow the number of kernel page table entries.  Unneeded.
 */
void
pmap_growkernel(vm_offset_t addr)
{
}

void
pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
{

	CTR0(KTR_PMAP, "pmap_init");

	pmap_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	pmap_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	pmap_initialized = TRUE;
}

void
pmap_init2(void)
{

	CTR0(KTR_PMAP, "pmap_init2");
}

boolean_t
pmap_is_modified(vm_page_t m)
{

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return (FALSE);

	return (pmap_query_bit(m, PTE_CHG));
}

/*
 *	pmap_is_prefaultable:
 *
 *	Return whether or not the specified virtual address is eligible
 *	for prefault.
 */
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{

	return (FALSE);
}

void
pmap_clear_reference(vm_page_t m)
{

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return;
	pmap_clear_bit(m, PTE_REF, NULL);
}

void
pmap_clear_modify(vm_page_t m)
{

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return;
	pmap_clear_bit(m, PTE_CHG, NULL);
}

/*
 *	pmap_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	XXX: The exact number of bits to check and clear is a matter that
 *	should be tested and standardized at some point in the future for
 *	optimal aging of shared pages.
 */
int
pmap_ts_referenced(vm_page_t m)
{
	int count;

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return (0);

	count = pmap_clear_bit(m, PTE_REF, NULL);

	return (count);
}

/*
 * Map a wired page into kernel virtual address space.
 */
void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{
	u_int		pte_lo;
	int		error;
	int		i;

#if 0
	if (va < VM_MIN_KERNEL_ADDRESS)
		panic("pmap_kenter: attempt to enter non-kernel address %#x",
		    va);
#endif

	pte_lo = PTE_I | PTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(PTE_I | PTE_G);
			break;
		}
	}

	error = pmap_pvo_enter(kernel_pmap, pmap_upvo_zone,
	    &pmap_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);

	if (error != 0 && error != ENOENT)
		panic("pmap_kenter: failed to enter va %#x pa %#x: %d", va,
		    pa, error);

	/*
	 * Flush the real memory from the instruction cache.
	 */
	if ((pte_lo & (PTE_I | PTE_G)) == 0) {
		pmap_syncicache(pa, PAGE_SIZE);
	}
}

/*
 * Extract the physical page address associated with the given kernel virtual
 * address.
 */
vm_offset_t
pmap_kextract(vm_offset_t va)
{
	struct		pvo_entry *pvo;

#ifdef UMA_MD_SMALL_ALLOC
	/*
	 * Allow direct mappings
	 */
	if (va < VM_MIN_KERNEL_ADDRESS) {
		return (va);
	}
#endif

	pvo = pmap_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
	KASSERT(pvo != NULL, ("pmap_kextract: no addr found"));
	if (pvo == NULL) {
		return (0);
	}

	return ((pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF));
}

/*
 * Remove a wired page from kernel virtual address space.
 */
void
pmap_kremove(vm_offset_t va)
{

	pmap_remove(kernel_pmap, va, va + PAGE_SIZE);
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 *
 * The value passed in *virt is a suggested virtual address for the mapping.
 * Architectures which can support a direct-mapped physical to virtual region
 * can return the appropriate address within that region, leaving '*virt'
 * unchanged.  We cannot and therefore do not; *virt is updated with the
 * first usable address after the mapped region.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot)
{
	vm_offset_t	sva, va;

	sva = *virt;
	va = sva;
	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
		pmap_kenter(va, pa_start);
	*virt = va;
	return (sva);
}

int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{
	TODO;
	return (0);
}

void
pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
		    vm_pindex_t pindex, vm_size_t size)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_DEVICE,
	    ("pmap_object_init_pt: non-device object"));
	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("pmap_object_init_pt: non current pmap"));
}

/*
 * Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
	struct	pvo_head *pvo_head;
	struct	pvo_entry *pvo, *next_pvo;
	struct	pte *pt;

	/*
	 * Since the routine only downgrades protection, if the
	 * maximal protection is desired, there isn't any change
	 * to be made.
	 */
	if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) ==
	    (VM_PROT_READ|VM_PROT_WRITE))
		return;

	pvo_head = vm_page_to_pvoh(m);
	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
		next_pvo = LIST_NEXT(pvo, pvo_vlink);
		PMAP_PVO_CHECK(pvo);	/* sanity check */

		/*
		 * Downgrading to no mapping at all, we just remove the entry.
		 */
		if ((prot & VM_PROT_READ) == 0) {
			pmap_pvo_remove(pvo, -1);
			continue;
		}

		/*
		 * If EXEC permission is being revoked, just clear the flag
		 * in the PVO.
		 */
		if ((prot & VM_PROT_EXECUTE) == 0)
			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;

		/*
		 * If this entry is already RO, don't diddle with the page
		 * table.
		 */
		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
			PMAP_PVO_CHECK(pvo);
			continue;
		}

		/*
		 * Grab the PTE before we diddle the bits so pvo_to_pte can
		 * verify the pte contents are as expected.
		 */
		pt = pmap_pvo_to_pte(pvo, -1);
		pvo->pvo_pte.pte_lo &= ~PTE_PP;
		pvo->pvo_pte.pte_lo |= PTE_BR;
		if (pt != NULL)
			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
		PMAP_PVO_CHECK(pvo);	/* sanity check */
	}
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
	int loops;
	struct pvo_entry *pvo;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return FALSE;

	loops = 0;
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if (pvo->pvo_pmap == pmap)
			return (TRUE);
		if (++loops >= 16)
			break;
	}

	return (FALSE);
}

static u_int	pmap_vsidcontext;

void
pmap_pinit(pmap_t pmap)
{
	int	i, mask;
	u_int	entropy;

	KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("pmap_pinit: virt pmap"));

	entropy = 0;
	__asm __volatile("mftb %0" : "=r"(entropy));

	/*
	 * Allocate some segment registers for this pmap.
	 */
	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
		u_int	hash, n;

		/*
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT hash function collides
		 * less often.  (Note that the prime causes gcc to do shifts
		 * instead of a multiply.)
		 */
140890643Sbenno		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
140990643Sbenno		hash = pmap_vsidcontext & (NPMAPS - 1);
141090643Sbenno		if (hash == 0)		/* 0 is special, avoid it */
141190643Sbenno			continue;
141290643Sbenno		n = hash >> 5;
141390643Sbenno		mask = 1 << (hash & (VSID_NBPW - 1));
141490643Sbenno		hash = (pmap_vsidcontext & 0xfffff);
141590643Sbenno		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
141690643Sbenno			/* anything free in this bucket? */
141790643Sbenno			if (pmap_vsid_bitmap[n] == 0xffffffff) {
141890643Sbenno				entropy = (pmap_vsidcontext >> 20);
141990643Sbenno				continue;
142090643Sbenno			}
142190643Sbenno			i = ffs(~pmap_vsid_bitmap[n]) - 1;
142290643Sbenno			mask = 1 << i;
142390643Sbenno			hash &= 0xfffff & ~(VSID_NBPW - 1);
142490643Sbenno			hash |= i;
142577957Sbenno		}
142690643Sbenno		pmap_vsid_bitmap[n] |= mask;
142790643Sbenno		for (i = 0; i < 16; i++)
142890643Sbenno			pmap->pm_sr[i] = VSID_MAKE(i, hash);
142990643Sbenno		return;
143090643Sbenno	}
143177957Sbenno
143290643Sbenno	panic("pmap_pinit: out of segments");
143377957Sbenno}
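
/*
 * The sketch below is a hedged, userland-only model of the VSID
 * allocation loop in pmap_pinit() above: multiply-and-add hashing into
 * a bitmap of 32-slot buckets, with ffs() claiming a free slot when the
 * hashed slot collides.  All sk_* and SK_* names are stand-ins invented
 * for the example, not kernel symbols, and the constants are assumptions.
 */
#if 0	/* illustrative sketch, not compiled into the kernel */
#include <stdio.h>
#include <strings.h>

#define SK_NPMAPS	32768		/* assumed VSID pool size */
#define SK_NBPW		32		/* bits per bitmap word */

static unsigned sk_bitmap[SK_NPMAPS / SK_NBPW];
static unsigned sk_ctx;			/* models pmap_vsidcontext */

static int
sk_alloc_vsid(unsigned entropy)
{
	unsigned hash, n, mask;
	int	i;

	for (i = 0; i < SK_NPMAPS; i += SK_NBPW) {
		sk_ctx = (sk_ctx * 0x1105) + entropy;
		hash = sk_ctx & (SK_NPMAPS - 1);
		if (hash == 0)		/* 0 is reserved */
			continue;
		n = hash / SK_NBPW;
		mask = 1u << (hash & (SK_NBPW - 1));
		hash = sk_ctx & 0xfffff;
		if (sk_bitmap[n] & mask) {		/* collision? */
			if (sk_bitmap[n] == 0xffffffff) {
				entropy = sk_ctx >> 20;	/* bucket full */
				continue;
			}
			/* claim any free slot in this bucket instead */
			i = ffs(~sk_bitmap[n]) - 1;
			mask = 1u << i;
			hash = (hash & ~(SK_NBPW - 1u)) | (unsigned)i;
		}
		sk_bitmap[n] |= mask;
		return ((int)hash);	/* 20-bit VSID base */
	}
	return (-1);			/* pool exhausted */
}

int
main(void)
{
	printf("vsid %#x\n", sk_alloc_vsid(0x12345));
	return (0);
}
#endif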
143477957Sbenno
143577957Sbenno/*
143690643Sbenno * Initialize the pmap associated with process 0.
143777957Sbenno */
143877957Sbennovoid
143990643Sbennopmap_pinit0(pmap_t pm)
144077957Sbenno{
144177957Sbenno
144290643Sbenno	pmap_pinit(pm);
144390643Sbenno	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
144477957Sbenno}
144577957Sbenno
144677957Sbennovoid
144790643Sbennopmap_pinit2(pmap_t pmap)
144877957Sbenno{
144990643Sbenno	/* XXX: Remove this stub when no longer called */
145090643Sbenno}
145177957Sbenno
145294838Sbenno/*
145394838Sbenno * Set the physical protection on the specified range of this map as requested.
145494838Sbenno */
145590643Sbennovoid
145694838Sbennopmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
145790643Sbenno{
145894838Sbenno	struct	pvo_entry *pvo;
145994838Sbenno	struct	pte *pt;
146094838Sbenno	int	pteidx;
146194838Sbenno
146294838Sbenno	CTR4(KTR_PMAP, "pmap_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva,
146394838Sbenno	    eva, prot);
146494838Sbenno
146694838Sbenno	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
146794838Sbenno	    ("pmap_protect: non current pmap"));
146894838Sbenno
146994838Sbenno	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
147094838Sbenno		pmap_remove(pm, sva, eva);
147194838Sbenno		return;
147294838Sbenno	}
147394838Sbenno
147494838Sbenno	for (; sva < eva; sva += PAGE_SIZE) {
147594838Sbenno		pvo = pmap_pvo_find_va(pm, sva, &pteidx);
147694838Sbenno		if (pvo == NULL)
147794838Sbenno			continue;
147894838Sbenno
147994838Sbenno		if ((prot & VM_PROT_EXECUTE) == 0)
148094838Sbenno			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
148194838Sbenno
148294838Sbenno		/*
148394838Sbenno		 * Grab the PTE pointer before we diddle with the cached PTE
148494838Sbenno		 * copy.
148594838Sbenno		 */
148694838Sbenno		pt = pmap_pvo_to_pte(pvo, pteidx);
148794838Sbenno		/*
148894838Sbenno		 * Change the protection of the page.
148994838Sbenno		 */
149094838Sbenno		pvo->pvo_pte.pte_lo &= ~PTE_PP;
149194838Sbenno		pvo->pvo_pte.pte_lo |= PTE_BR;
149294838Sbenno
149394838Sbenno		/*
149494838Sbenno		 * If the PVO is in the page table, update that pte as well.
149594838Sbenno		 */
149694838Sbenno		if (pt != NULL)
149794838Sbenno			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
149894838Sbenno	}
149977957Sbenno}
150077957Sbenno
150191456Sbenno/*
150291456Sbenno * Map a list of wired pages into kernel virtual address space.  This is
150391456Sbenno * intended for temporary mappings which do not need page modification or
150491456Sbenno * references recorded.  Existing mappings in the region are overwritten.
150591456Sbenno */
150690643Sbennovoid
1507110172Sgrehanpmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
150877957Sbenno{
1509110172Sgrehan	vm_offset_t va;
151077957Sbenno
1511110172Sgrehan	va = sva;
1512110172Sgrehan	while (count-- > 0) {
1513110172Sgrehan		pmap_kenter(va, VM_PAGE_TO_PHYS(*m));
1514110172Sgrehan		va += PAGE_SIZE;
1515110172Sgrehan		m++;
1516110172Sgrehan	}
151790643Sbenno}
151877957Sbenno
151991456Sbenno/*
152091456Sbenno * Remove page mappings from kernel virtual address space.  Intended for
152191456Sbenno * temporary mappings entered by pmap_qenter.
152291456Sbenno */
152390643Sbennovoid
1524110172Sgrehanpmap_qremove(vm_offset_t sva, int count)
152590643Sbenno{
1526110172Sgrehan	vm_offset_t va;
152791456Sbenno
1528110172Sgrehan	va = sva;
1529110172Sgrehan	while (count-- > 0) {
153091456Sbenno		pmap_kremove(va);
1531110172Sgrehan		va += PAGE_SIZE;
1532110172Sgrehan	}
153377957Sbenno}
153477957Sbenno
153590643Sbennovoid
153690643Sbennopmap_release(pmap_t pmap)
153790643Sbenno{
1538103604Sgrehan	int	idx, mask;
1539103604Sgrehan
1540103604Sgrehan	/*
1541103604Sgrehan	 * Free the VSID allocated to this pmap's segment registers.
1542103604Sgrehan	 */
1543103604Sgrehan	if (pmap->pm_sr[0] == 0)
1544103604Sgrehan		panic("pmap_release");
1545103604Sgrehan
1546103604Sgrehan	idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
1547103604Sgrehan	mask = 1 << (idx % VSID_NBPW);
1548103604Sgrehan	idx /= VSID_NBPW;
1549103604Sgrehan	pmap_vsid_bitmap[idx] &= ~mask;
155077957Sbenno}
155177957Sbenno
155291456Sbenno/*
155391456Sbenno * Remove the given range of addresses from the specified map.
155491456Sbenno */
155590643Sbennovoid
155691456Sbennopmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
155777957Sbenno{
155891456Sbenno	struct	pvo_entry *pvo;
155991456Sbenno	int	pteidx;
156091456Sbenno
156191456Sbenno	for (; sva < eva; sva += PAGE_SIZE) {
156291456Sbenno		pvo = pmap_pvo_find_va(pm, sva, &pteidx);
156391456Sbenno		if (pvo != NULL) {
156491456Sbenno			pmap_pvo_remove(pvo, pteidx);
156591456Sbenno		}
156691456Sbenno	}
156777957Sbenno}
156877957Sbenno
156994838Sbenno/*
1570110172Sgrehan * Remove physical page from all pmaps in which it resides. pmap_pvo_remove()
1571110172Sgrehan * will reflect changes in pte's back to the vm_page.
1572110172Sgrehan */
1573110172Sgrehanvoid
1574110172Sgrehanpmap_remove_all(vm_page_t m)
1575110172Sgrehan{
1576110172Sgrehan	struct  pvo_head *pvo_head;
1577110172Sgrehan	struct	pvo_entry *pvo, *next_pvo;
1578110172Sgrehan
1579120336Sgrehan	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1580120336Sgrehan
1581110172Sgrehan	pvo_head = vm_page_to_pvoh(m);
1582110172Sgrehan	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
1583110172Sgrehan		next_pvo = LIST_NEXT(pvo, pvo_vlink);
1584110172Sgrehan
1585110172Sgrehan		PMAP_PVO_CHECK(pvo);	/* sanity check */
1586110172Sgrehan		pmap_pvo_remove(pvo, -1);
1587110172Sgrehan	}
1588110172Sgrehan	vm_page_flag_clear(m, PG_WRITEABLE);
1589110172Sgrehan}
1590110172Sgrehan
1591110172Sgrehan/*
159294838Sbenno * Remove all pages from the specified address space; this aids process
159394838Sbenno * exit speed.  This is much faster than pmap_remove in the case of
159494838Sbenno * running down an entire address space.  Only works for the current pmap.
159594838Sbenno */
159690643Sbennovoid
159794838Sbennopmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
159877957Sbenno{
159994838Sbenno
160094838Sbenno	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
160194838Sbenno	    ("pmap_remove_pages: non current pmap"));
160294838Sbenno	pmap_remove(pm, sva, eva);
160377957Sbenno}
160477957Sbenno
160577957Sbenno/*
160690643Sbenno * Allocate a physical page of memory directly from the phys_avail map.
160790643Sbenno * Can only be called from pmap_bootstrap before avail start and end are
160890643Sbenno * calculated.
160983682Smp */
161090643Sbennostatic vm_offset_t
161190643Sbennopmap_bootstrap_alloc(vm_size_t size, u_int align)
161283682Smp{
161390643Sbenno	vm_offset_t	s, e;
161490643Sbenno	int		i, j;
161583682Smp
161690643Sbenno	size = round_page(size);
161790643Sbenno	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
161890643Sbenno		if (align != 0)
161990643Sbenno			s = (phys_avail[i] + align - 1) & ~(align - 1);
162090643Sbenno		else
162190643Sbenno			s = phys_avail[i];
162290643Sbenno		e = s + size;
162390643Sbenno
162490643Sbenno		if (s < phys_avail[i] || e > phys_avail[i + 1])
162590643Sbenno			continue;
162690643Sbenno
162790643Sbenno		if (s == phys_avail[i]) {
162890643Sbenno			phys_avail[i] += size;
162990643Sbenno		} else if (e == phys_avail[i + 1]) {
163090643Sbenno			phys_avail[i + 1] -= size;
163190643Sbenno		} else {
163290643Sbenno			for (j = phys_avail_count * 2; j > i; j -= 2) {
163390643Sbenno				phys_avail[j] = phys_avail[j - 2];
163490643Sbenno				phys_avail[j + 1] = phys_avail[j - 1];
163590643Sbenno			}
163690643Sbenno
163790643Sbenno			phys_avail[i + 3] = phys_avail[i + 1];
163890643Sbenno			phys_avail[i + 1] = s;
163990643Sbenno			phys_avail[i + 2] = e;
164090643Sbenno			phys_avail_count++;
164190643Sbenno		}
164290643Sbenno
164390643Sbenno		return (s);
164483682Smp	}
164590643Sbenno	panic("pmap_bootstrap_alloc: could not allocate memory");
164683682Smp}
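
/*
 * A hedged, userland-only model of the carving logic above: an aligned
 * block is taken from a {start, end} range list by trimming the front,
 * trimming the back, or splitting the range in two.  The sk_* names,
 * sizes, and addresses are invented for the example.
 */
#if 0	/* illustrative sketch, not compiled into the kernel */
#include <stdio.h>

#define SK_PAGE	4096

static unsigned long sk_avail[18] = {
	0x00003000, 0x00100000,		/* one free range to start */
};
static int sk_avail_count = 1;

static unsigned long
sk_carve(unsigned long size, unsigned long align)
{
	unsigned long s, e;
	int i, j;

	size = (size + SK_PAGE - 1) & ~(SK_PAGE - 1ul);
	for (i = 0; sk_avail[i + 1] != 0; i += 2) {
		s = align != 0 ?
		    (sk_avail[i] + align - 1) & ~(align - 1) : sk_avail[i];
		e = s + size;
		if (s < sk_avail[i] || e > sk_avail[i + 1])
			continue;
		if (s == sk_avail[i]) {			/* trim the front */
			sk_avail[i] += size;
		} else if (e == sk_avail[i + 1]) {	/* trim the back */
			sk_avail[i + 1] -= size;
		} else {				/* split in two */
			for (j = sk_avail_count * 2; j > i; j -= 2) {
				sk_avail[j] = sk_avail[j - 2];
				sk_avail[j + 1] = sk_avail[j - 1];
			}
			sk_avail[i + 3] = sk_avail[i + 1];
			sk_avail[i + 1] = s;
			sk_avail[i + 2] = e;
			sk_avail_count++;
		}
		return (s);
	}
	return (0);		/* no range could satisfy the request */
}

int
main(void)
{
	/* 8KB aligned to 64KB splits the range; expect 0x10000. */
	printf("%#lx\n", sk_carve(2 * SK_PAGE, 0x10000));
	return (0);
}
#endif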
164783682Smp
164883682Smp/*
164990643Sbenno * Return an unmapped pvo for a kernel virtual address.
165090643Sbenno * Used by pmap functions that operate on physical pages.
165183682Smp */
165290643Sbennostatic struct pvo_entry *
165390643Sbennopmap_rkva_alloc(void)
165483682Smp{
165590643Sbenno	struct		pvo_entry *pvo;
165690643Sbenno	struct		pte *pt;
165790643Sbenno	vm_offset_t	kva;
165890643Sbenno	int		pteidx;
165983682Smp
166090643Sbenno	if (pmap_rkva_count == 0)
166190643Sbenno		panic("pmap_rkva_alloc: no more reserved KVAs");
166290643Sbenno
166390643Sbenno	kva = pmap_rkva_start + (PAGE_SIZE * --pmap_rkva_count);
166490643Sbenno	pmap_kenter(kva, 0);
166590643Sbenno
166690643Sbenno	pvo = pmap_pvo_find_va(kernel_pmap, kva, &pteidx);
166790643Sbenno
166890643Sbenno	if (pvo == NULL)
166990643Sbenno		panic("pmap_rkva_alloc: pmap_pvo_find_va failed");
167090643Sbenno
167190643Sbenno	pt = pmap_pvo_to_pte(pvo, pteidx);
167290643Sbenno
167390643Sbenno	if (pt == NULL)
167490643Sbenno		panic("pmap_rkva_alloc: pmap_pvo_to_pte failed");
167590643Sbenno
167690643Sbenno	pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
167790643Sbenno	PVO_PTEGIDX_CLR(pvo);
167890643Sbenno
167990643Sbenno	pmap_pte_overflow++;
168090643Sbenno
168190643Sbenno	return (pvo);
168290643Sbenno}
168390643Sbenno
168490643Sbennostatic void
168590643Sbennopmap_pa_map(struct pvo_entry *pvo, vm_offset_t pa, struct pte *saved_pt,
168690643Sbenno    int *depth_p)
168790643Sbenno{
168890643Sbenno	struct	pte *pt;
168990643Sbenno
169090643Sbenno	/*
169190643Sbenno	 * If this pvo already has a valid pte, we need to save it so it can
169290643Sbenno	 * be restored later.  We then just reload the new PTE over the old
169390643Sbenno	 * slot.
169490643Sbenno	 */
169590643Sbenno	if (saved_pt != NULL) {
169690643Sbenno		pt = pmap_pvo_to_pte(pvo, -1);
169790643Sbenno
169890643Sbenno		if (pt != NULL) {
169990643Sbenno			pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
170090643Sbenno			PVO_PTEGIDX_CLR(pvo);
170190643Sbenno			pmap_pte_overflow++;
170283682Smp		}
170390643Sbenno
170490643Sbenno		*saved_pt = pvo->pvo_pte;
170590643Sbenno
170690643Sbenno		pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
170783682Smp	}
170890643Sbenno
170990643Sbenno	pvo->pvo_pte.pte_lo |= pa;
171090643Sbenno
171190643Sbenno	if (!pmap_pte_spill(pvo->pvo_vaddr))
171290643Sbenno		panic("pmap_pa_map: could not spill pvo %p", pvo);
171390643Sbenno
171490643Sbenno	if (depth_p != NULL)
171590643Sbenno		(*depth_p)++;
171683682Smp}
171783682Smp
171890643Sbennostatic void
171990643Sbennopmap_pa_unmap(struct pvo_entry *pvo, struct pte *saved_pt, int *depth_p)
172077957Sbenno{
172190643Sbenno	struct	pte *pt;
172277957Sbenno
172390643Sbenno	pt = pmap_pvo_to_pte(pvo, -1);
172490643Sbenno
172590643Sbenno	if (pt != NULL) {
172690643Sbenno		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
172790643Sbenno		PVO_PTEGIDX_CLR(pvo);
172890643Sbenno		pmap_pte_overflow++;
172990643Sbenno	}
173090643Sbenno
173190643Sbenno	pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
173290643Sbenno
173390643Sbenno	/*
173490643Sbenno	 * If there is a saved PTE and it's valid, restore it and return.
173590643Sbenno	 */
173690643Sbenno	if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) {
173790643Sbenno		if (depth_p != NULL && --(*depth_p) == 0)
173890643Sbenno			panic("pmap_pa_unmap: restoring but depth == 0");
173990643Sbenno
174090643Sbenno		pvo->pvo_pte = *saved_pt;
174190643Sbenno
174290643Sbenno		if (!pmap_pte_spill(pvo->pvo_vaddr))
174390643Sbenno			panic("pmap_pa_unmap: could not spill pvo %p", pvo);
174490643Sbenno	}
174577957Sbenno}
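
/*
 * Hedged usage sketch for the pair above: briefly point a reserved
 * kernel pvo (from pmap_rkva_alloc()) at a physical page, use the
 * mapping, then tear it down.  Passing NULL for the saved pte and
 * depth skips the save/restore path, which only matters for nested
 * use.  sk_with_page() is a hypothetical caller, not kernel code.
 */
#if 0	/* illustrative sketch, not compiled into the kernel */
static void
sk_with_page(struct pvo_entry *rkva_pvo, vm_offset_t pa)
{

	pmap_pa_map(rkva_pvo, pa, NULL, NULL);
	/* ... access the page through PVO_VADDR(rkva_pvo) ... */
	pmap_pa_unmap(rkva_pvo, NULL, NULL);
}
#endif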
174677957Sbenno
174790643Sbennostatic void
174890643Sbennopmap_syncicache(vm_offset_t pa, vm_size_t len)
174977957Sbenno{
175090643Sbenno	__syncicache((void *)pa, len);
175190643Sbenno}
175277957Sbenno
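/*
 * Invalidate all TLB entries.  Issuing tlbie across the first 256KB of
 * effective addresses touches every TLB congruence class, which serves
 * as a stand-in on CPUs without a usable tlbia instruction.
 */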
175390643Sbennostatic void
175490643Sbennotlbia(void)
175590643Sbenno{
175690643Sbenno	caddr_t	i;
175790643Sbenno
175890643Sbenno	SYNC();
175990643Sbenno	for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) {
176090643Sbenno		TLBIE(i);
176190643Sbenno		EIEIO();
176290643Sbenno	}
176390643Sbenno	TLBSYNC();
176490643Sbenno	SYNC();
176577957Sbenno}
176677957Sbenno
176790643Sbennostatic int
176892847Sjeffpmap_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
176990643Sbenno    vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags)
177077957Sbenno{
177190643Sbenno	struct	pvo_entry *pvo;
177290643Sbenno	u_int	sr;
177390643Sbenno	int	first;
177490643Sbenno	u_int	ptegidx;
177590643Sbenno	int	i;
1776103604Sgrehan	int     bootstrap;
177777957Sbenno
177890643Sbenno	pmap_pvo_enter_calls++;
177996250Sbenno	first = 0;
1780103604Sgrehan
1781103604Sgrehan	bootstrap = 0;
178290643Sbenno
178390643Sbenno	/*
178490643Sbenno	 * Compute the PTE Group index.
178590643Sbenno	 */
178690643Sbenno	va &= ~ADDR_POFF;
178790643Sbenno	sr = va_to_sr(pm->pm_sr, va);
178890643Sbenno	ptegidx = va_to_pteg(sr, va);
178990643Sbenno
179090643Sbenno	/*
179190643Sbenno	 * Remove any existing mapping for this page.  Reuse the pvo entry if
179290643Sbenno	 * there is a mapping.
179390643Sbenno	 */
179490643Sbenno	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
179590643Sbenno		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
179696334Sbenno			if ((pvo->pvo_pte.pte_lo & PTE_RPGN) == pa &&
179796334Sbenno			    (pvo->pvo_pte.pte_lo & PTE_PP) ==
179896334Sbenno			    (pte_lo & PTE_PP)) {
179992521Sbenno				return (0);
180096334Sbenno			}
180190643Sbenno			pmap_pvo_remove(pvo, -1);
180290643Sbenno			break;
180390643Sbenno		}
180490643Sbenno	}
180590643Sbenno
180690643Sbenno	/*
180790643Sbenno	 * If we aren't overwriting a mapping, try to allocate.
180890643Sbenno	 */
180992521Sbenno	if (pmap_initialized) {
181092847Sjeff		pvo = uma_zalloc(zone, M_NOWAIT);
181192521Sbenno	} else {
181299037Sbenno		if (pmap_bpvo_pool_index >= BPVO_POOL_SIZE) {
181399037Sbenno			panic("pmap_enter: bpvo pool exhausted, %d, %d, %d",
181499037Sbenno			      pmap_bpvo_pool_index, BPVO_POOL_SIZE,
181599037Sbenno			      BPVO_POOL_SIZE * sizeof(struct pvo_entry));
181692521Sbenno		}
181792521Sbenno		pvo = &pmap_bpvo_pool[pmap_bpvo_pool_index];
181892521Sbenno		pmap_bpvo_pool_index++;
1819103604Sgrehan		bootstrap = 1;
182092521Sbenno	}
182190643Sbenno
182290643Sbenno	if (pvo == NULL) {
182390643Sbenno		return (ENOMEM);
182490643Sbenno	}
182590643Sbenno
182690643Sbenno	pmap_pvo_entries++;
182790643Sbenno	pvo->pvo_vaddr = va;
182890643Sbenno	pvo->pvo_pmap = pm;
182990643Sbenno	LIST_INSERT_HEAD(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
183090643Sbenno	pvo->pvo_vaddr &= ~ADDR_POFF;
183190643Sbenno	if (flags & VM_PROT_EXECUTE)
183290643Sbenno		pvo->pvo_vaddr |= PVO_EXECUTABLE;
183390643Sbenno	if (flags & PVO_WIRED)
183490643Sbenno		pvo->pvo_vaddr |= PVO_WIRED;
183590643Sbenno	if (pvo_head != &pmap_pvo_kunmanaged)
183690643Sbenno		pvo->pvo_vaddr |= PVO_MANAGED;
1837103604Sgrehan	if (bootstrap)
1838103604Sgrehan		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
183990643Sbenno	pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo);
184090643Sbenno
184190643Sbenno	/*
184290643Sbenno	 * Remember if the list was empty and therefore will be the first
184390643Sbenno	 * item.
184490643Sbenno	 */
184596250Sbenno	if (LIST_FIRST(pvo_head) == NULL)
184696250Sbenno		first = 1;
184790643Sbenno
184890643Sbenno	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
184990643Sbenno	if (pvo->pvo_vaddr & PVO_WIRED)
185090643Sbenno		pvo->pvo_pmap->pm_stats.wired_count++;
185190643Sbenno	pvo->pvo_pmap->pm_stats.resident_count++;
185290643Sbenno
185390643Sbenno	/*
185490643Sbenno	 * We hope this succeeds but it isn't required.
185590643Sbenno	 */
185690643Sbenno	i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
185790643Sbenno	if (i >= 0) {
185890643Sbenno		PVO_PTEGIDX_SET(pvo, i);
185990643Sbenno	} else {
186090643Sbenno		panic("pmap_pvo_enter: overflow");
186190643Sbenno		pmap_pte_overflow++;
186290643Sbenno	}
186390643Sbenno
186490643Sbenno	return (first ? ENOENT : 0);
186577957Sbenno}
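
/*
 * Hedged sketch of the return convention above: 0 is an ordinary
 * (re)mapping, ENOMEM means no pvo could be allocated, and ENOENT
 * flags the first mapping of the page, which a caller might use to
 * decide when an icache sync is needed.  sk_enter_example() and its
 * policy are invented for the example.
 */
#if 0	/* illustrative sketch, not compiled into the kernel */
static void
sk_enter_example(pmap_t pm, vm_offset_t va, vm_offset_t pa)
{
	int error;

	error = pmap_pvo_enter(pm, pmap_upvo_zone, &pmap_pvo_kunmanaged,
	    va, pa, PTE_M, PVO_WIRED);
	if (error == ENOMEM)
		panic("sk_enter_example: no pvo");	/* example policy */
	if (error == ENOENT)
		pmap_syncicache(pa, PAGE_SIZE);		/* first mapping */
}
#endif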
186677957Sbenno
186790643Sbennostatic void
186890643Sbennopmap_pvo_remove(struct pvo_entry *pvo, int pteidx)
186977957Sbenno{
187090643Sbenno	struct	pte *pt;
187177957Sbenno
187290643Sbenno	/*
187390643Sbenno	 * If there is an active pte entry, we need to deactivate it (and
187490643Sbenno	 * save the ref & chg bits).
187590643Sbenno	 */
187690643Sbenno	pt = pmap_pvo_to_pte(pvo, pteidx);
187790643Sbenno	if (pt != NULL) {
187890643Sbenno		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
187990643Sbenno		PVO_PTEGIDX_CLR(pvo);
188090643Sbenno	} else {
188190643Sbenno		pmap_pte_overflow--;
1882110172Sgrehan	}
188390643Sbenno
188490643Sbenno	/*
188590643Sbenno	 * Update our statistics.
188690643Sbenno	 */
188790643Sbenno	pvo->pvo_pmap->pm_stats.resident_count--;
188890643Sbenno	if (pvo->pvo_vaddr & PVO_WIRED)
188990643Sbenno		pvo->pvo_pmap->pm_stats.wired_count--;
189090643Sbenno
189190643Sbenno	/*
189290643Sbenno	 * Save the REF/CHG bits into their cache if the page is managed.
189390643Sbenno	 */
189490643Sbenno	if (pvo->pvo_vaddr & PVO_MANAGED) {
189590643Sbenno		struct	vm_page *pg;
189690643Sbenno
189792067Sbenno		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN);
189890643Sbenno		if (pg != NULL) {
189990643Sbenno			pmap_attr_save(pg, pvo->pvo_pte.pte_lo &
190090643Sbenno			    (PTE_REF | PTE_CHG));
190190643Sbenno		}
190290643Sbenno	}
190390643Sbenno
190490643Sbenno	/*
190590643Sbenno	 * Remove this PVO from the PV list.
190690643Sbenno	 */
190790643Sbenno	LIST_REMOVE(pvo, pvo_vlink);
190890643Sbenno
190990643Sbenno	/*
191090643Sbenno	 * Remove this from the overflow list and return it to the pool
191190643Sbenno	 * if we aren't going to reuse it.
191290643Sbenno	 */
191390643Sbenno	LIST_REMOVE(pvo, pvo_olink);
191492521Sbenno	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
191592847Sjeff		uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? pmap_mpvo_zone :
191692521Sbenno		    pmap_upvo_zone, pvo);
191790643Sbenno	pmap_pvo_entries--;
191890643Sbenno	pmap_pvo_remove_calls++;
191977957Sbenno}
192077957Sbenno
192190643Sbennostatic __inline int
192290643Sbennopmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
192377957Sbenno{
192490643Sbenno	int	pteidx;
192577957Sbenno
192690643Sbenno	/*
192790643Sbenno	 * We can find the actual pte entry without searching by grabbing
192890643Sbenno	 * the PTEG index from 3 unused bits in pvo_vaddr and by
192990643Sbenno	 * noticing the HID bit.
193090643Sbenno	 */
193190643Sbenno	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
193290643Sbenno	if (pvo->pvo_pte.pte_hi & PTE_HID)
193390643Sbenno		pteidx ^= pmap_pteg_mask * 8;
193490643Sbenno
193590643Sbenno	return (pteidx);
193677957Sbenno}
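
/*
 * Standalone model of the index arithmetic above.  With 8 PTEs per
 * PTEG, a (group, slot) pair flattens to group * 8 + slot; because the
 * secondary group is the primary group XORed with the PTEG mask, the
 * flat index is likewise XORed with mask * 8.  sk_pteg_mask is an
 * assumed example value (1024 groups), not the kernel's.
 */
#if 0	/* illustrative sketch, not compiled into the kernel */
static const unsigned sk_pteg_mask = 1024 - 1;

static unsigned
sk_pte_index(unsigned ptegidx, unsigned slot, int hid)
{
	unsigned pteidx;

	pteidx = ptegidx * 8 + slot;		/* 8 slots per group */
	if (hid)				/* lives in secondary group */
		pteidx ^= sk_pteg_mask * 8;
	return (pteidx);
}
#endif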
193777957Sbenno
193890643Sbennostatic struct pvo_entry *
193990643Sbennopmap_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
194077957Sbenno{
194190643Sbenno	struct	pvo_entry *pvo;
194290643Sbenno	int	ptegidx;
194390643Sbenno	u_int	sr;
194477957Sbenno
194590643Sbenno	va &= ~ADDR_POFF;
194690643Sbenno	sr = va_to_sr(pm->pm_sr, va);
194790643Sbenno	ptegidx = va_to_pteg(sr, va);
194890643Sbenno
194990643Sbenno	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
195090643Sbenno		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
195190643Sbenno			if (pteidx_p)
195290643Sbenno				*pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
195390643Sbenno			return (pvo);
195490643Sbenno		}
195590643Sbenno	}
195690643Sbenno
195790643Sbenno	return (NULL);
195877957Sbenno}
195977957Sbenno
196090643Sbennostatic struct pte *
196190643Sbennopmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
196277957Sbenno{
196390643Sbenno	struct	pte *pt;
196477957Sbenno
196590643Sbenno	/*
196690643Sbenno	 * If we haven't been supplied the pte index, calculate it.
196790643Sbenno	 */
196890643Sbenno	if (pteidx == -1) {
196990643Sbenno		int	ptegidx;
197090643Sbenno		u_int	sr;
197177957Sbenno
197290643Sbenno		sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
197390643Sbenno		ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
197490643Sbenno		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
197590643Sbenno	}
197690643Sbenno
197790643Sbenno	pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];
197890643Sbenno
197990643Sbenno	if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
198090643Sbenno		panic("pmap_pvo_to_pte: pvo %p has valid pte in pvo but no "
198190643Sbenno		    "valid pte index", pvo);
198290643Sbenno	}
198390643Sbenno
198490643Sbenno	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
198590643Sbenno		panic("pmap_pvo_to_pte: pvo %p has valid pte index in pvo "
198690643Sbenno		    "but no valid pte", pvo);
198790643Sbenno	}
198890643Sbenno
198990643Sbenno	if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
199090643Sbenno		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
199190643Sbenno			panic("pmap_pvo_to_pte: pvo %p has valid pte in "
199290643Sbenno			    "pmap_pteg_table %p but invalid in pvo", pvo, pt);
199377957Sbenno		}
199490643Sbenno
199590643Sbenno		if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF))
199690643Sbenno		    != 0) {
199790643Sbenno			panic("pmap_pvo_to_pte: pvo %p pte does not match "
199890643Sbenno			    "pte %p in pmap_pteg_table", pvo, pt);
199990643Sbenno		}
200090643Sbenno
200190643Sbenno		return (pt);
200277957Sbenno	}
200377957Sbenno
200490643Sbenno	if (pvo->pvo_pte.pte_hi & PTE_VALID) {
200590643Sbenno		panic("pmap_pvo_to_pte: pvo %p has invalid pte %p in "
200690643Sbenno		    "pmap_pteg_table but valid in pvo", pvo, pt);
200790643Sbenno	}
200877957Sbenno
200990643Sbenno	return (NULL);
201077957Sbenno}
201178880Sbenno
201278880Sbenno/*
201390643Sbenno * XXX: THIS STUFF SHOULD BE IN pte.c?
201478880Sbenno */
201590643Sbennoint
201690643Sbennopmap_pte_spill(vm_offset_t addr)
201778880Sbenno{
201890643Sbenno	struct	pvo_entry *source_pvo, *victim_pvo;
201990643Sbenno	struct	pvo_entry *pvo;
202090643Sbenno	int	ptegidx, i, j;
202190643Sbenno	u_int	sr;
202290643Sbenno	struct	pteg *pteg;
202390643Sbenno	struct	pte *pt;
202478880Sbenno
202590643Sbenno	pmap_pte_spills++;
202690643Sbenno
202794836Sbenno	sr = mfsrin(addr);
202890643Sbenno	ptegidx = va_to_pteg(sr, addr);
202990643Sbenno
203078880Sbenno	/*
203190643Sbenno	 * Have to substitute some entry.  Use the primary hash for this.
203290643Sbenno	 * Use low bits of timebase as random generator.
203378880Sbenno	 */
203490643Sbenno	pteg = &pmap_pteg_table[ptegidx];
203590643Sbenno	__asm __volatile("mftb %0" : "=r"(i));
203690643Sbenno	i &= 7;
203790643Sbenno	pt = &pteg->pt[i];
203878880Sbenno
203990643Sbenno	source_pvo = NULL;
204090643Sbenno	victim_pvo = NULL;
204190643Sbenno	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
204278880Sbenno		/*
204390643Sbenno		 * We need to find a pvo entry for this address.
204478880Sbenno		 */
204590643Sbenno		PMAP_PVO_CHECK(pvo);
204690643Sbenno		if (source_pvo == NULL &&
204790643Sbenno		    pmap_pte_match(&pvo->pvo_pte, sr, addr,
204890643Sbenno		    pvo->pvo_pte.pte_hi & PTE_HID)) {
204990643Sbenno			/*
205090643Sbenno			 * We have now found an entry to be spilled into the pteg.
205190643Sbenno			 * The PTE is now valid, so we know it's active.
205290643Sbenno			 */
205390643Sbenno			j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
205478880Sbenno
205590643Sbenno			if (j >= 0) {
205690643Sbenno				PVO_PTEGIDX_SET(pvo, j);
205790643Sbenno				pmap_pte_overflow--;
205890643Sbenno				PMAP_PVO_CHECK(pvo);
205990643Sbenno				return (1);
206090643Sbenno			}
206190643Sbenno
206290643Sbenno			source_pvo = pvo;
206390643Sbenno
206490643Sbenno			if (victim_pvo != NULL)
206590643Sbenno				break;
206690643Sbenno		}
206790643Sbenno
206878880Sbenno		/*
206990643Sbenno		 * We also need the pvo entry of the victim we are replacing
207090643Sbenno		 * so save the R & C bits of the PTE.
207178880Sbenno		 */
207290643Sbenno		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
207390643Sbenno		    pmap_pte_compare(pt, &pvo->pvo_pte)) {
207490643Sbenno			victim_pvo = pvo;
207590643Sbenno			if (source_pvo != NULL)
207690643Sbenno				break;
207790643Sbenno		}
207890643Sbenno	}
207978880Sbenno
208090643Sbenno	if (source_pvo == NULL)
208190643Sbenno		return (0);
208290643Sbenno
208390643Sbenno	if (victim_pvo == NULL) {
208490643Sbenno		if ((pt->pte_hi & PTE_HID) == 0)
208590643Sbenno			panic("pmap_pte_spill: victim p-pte (%p) has no pvo"
208690643Sbenno			    " entry", pt);
208790643Sbenno
208878880Sbenno		/*
208990643Sbenno		 * If this is a secondary PTE, we need to search its primary
209090643Sbenno		 * pvo bucket for the matching PVO.
209178880Sbenno		 */
209290643Sbenno		LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx ^ pmap_pteg_mask],
209390643Sbenno		    pvo_olink) {
209490643Sbenno			PMAP_PVO_CHECK(pvo);
209590643Sbenno			/*
209690643Sbenno			 * We also need the pvo entry of the victim we are
209790643Sbenno			 * replacing so save the R & C bits of the PTE.
209890643Sbenno			 */
209990643Sbenno			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
210090643Sbenno				victim_pvo = pvo;
210190643Sbenno				break;
210290643Sbenno			}
210390643Sbenno		}
210478880Sbenno
210590643Sbenno		if (victim_pvo == NULL)
210690643Sbenno			    " entry", pt);
210790643Sbenno			    "entry", pt);
210890643Sbenno	}
210978880Sbenno
211090643Sbenno	/*
211190643Sbenno	 * We are invalidating the TLB entry for the EA we are replacing even
211290643Sbenno	 * though it's valid.  If we don't, we lose any ref/chg bit changes
211390643Sbenno	 * contained in the TLB entry.
211490643Sbenno	 */
211590643Sbenno	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;
211678880Sbenno
211790643Sbenno	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
211890643Sbenno	pmap_pte_set(pt, &source_pvo->pvo_pte);
211990643Sbenno
212090643Sbenno	PVO_PTEGIDX_CLR(victim_pvo);
212190643Sbenno	PVO_PTEGIDX_SET(source_pvo, i);
212290643Sbenno	pmap_pte_replacements++;
212390643Sbenno
212490643Sbenno	PMAP_PVO_CHECK(victim_pvo);
212590643Sbenno	PMAP_PVO_CHECK(source_pvo);
212690643Sbenno
212790643Sbenno	return (1);
212890643Sbenno}
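
/*
 * Hedged sketch of the victim selection above: the low three bits of
 * the timebase pick one of the eight slots in the primary PTEG, a
 * cheap pseudo-random replacement policy.  sk_pick_victim_slot() is a
 * name invented for the example.
 */
#if 0	/* illustrative sketch, not compiled into the kernel */
static int
sk_pick_victim_slot(void)
{
	int i;

	__asm __volatile("mftb %0" : "=r"(i));	/* timebase, low word */
	return (i & 7);				/* slot 0..7 */
}
#endif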
212990643Sbenno
213090643Sbennostatic int
213190643Sbennopmap_pte_insert(u_int ptegidx, struct pte *pvo_pt)
213290643Sbenno{
213390643Sbenno	struct	pte *pt;
213490643Sbenno	int	i;
213590643Sbenno
213690643Sbenno	/*
213790643Sbenno	 * First try primary hash.
213890643Sbenno	 */
213990643Sbenno	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
214090643Sbenno		if ((pt->pte_hi & PTE_VALID) == 0) {
214190643Sbenno			pvo_pt->pte_hi &= ~PTE_HID;
214290643Sbenno			pmap_pte_set(pt, pvo_pt);
214390643Sbenno			return (i);
214478880Sbenno		}
214590643Sbenno	}
214678880Sbenno
214790643Sbenno	/*
214890643Sbenno	 * Now try secondary hash.
214990643Sbenno	 */
215090643Sbenno	ptegidx ^= pmap_pteg_mask;
215290643Sbenno	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
215390643Sbenno		if ((pt->pte_hi & PTE_VALID) == 0) {
215490643Sbenno			pvo_pt->pte_hi |= PTE_HID;
215590643Sbenno			pmap_pte_set(pt, pvo_pt);
215690643Sbenno			return (i);
215790643Sbenno		}
215890643Sbenno	}
215978880Sbenno
216090643Sbenno	panic("pmap_pte_insert: overflow");
216190643Sbenno	return (-1);
216278880Sbenno}
216384921Sbenno
216490643Sbennostatic boolean_t
216590643Sbennopmap_query_bit(vm_page_t m, int ptebit)
216684921Sbenno{
216790643Sbenno	struct	pvo_entry *pvo;
216890643Sbenno	struct	pte *pt;
216984921Sbenno
2170123560Sgrehan#if 0
217190643Sbenno	if (pmap_attr_fetch(m) & ptebit)
217290643Sbenno		return (TRUE);
2173123560Sgrehan#endif
217484921Sbenno
217590643Sbenno	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
217690643Sbenno		PMAP_PVO_CHECK(pvo);	/* sanity check */
217784921Sbenno
217890643Sbenno		/*
217990643Sbenno		 * See if we saved the bit off.  If so, cache it and return
218090643Sbenno		 * success.
218190643Sbenno		 */
218290643Sbenno		if (pvo->pvo_pte.pte_lo & ptebit) {
218390643Sbenno			pmap_attr_save(m, ptebit);
218490643Sbenno			PMAP_PVO_CHECK(pvo);	/* sanity check */
218590643Sbenno			return (TRUE);
218690643Sbenno		}
218790643Sbenno	}
218884921Sbenno
218990643Sbenno	/*
219090643Sbenno	 * No luck, now go through the hard part of looking at the PTEs
219190643Sbenno	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
219290643Sbenno	 * the PTEs.
219390643Sbenno	 */
219490643Sbenno	SYNC();
219590643Sbenno	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
219690643Sbenno		PMAP_PVO_CHECK(pvo);	/* sanity check */
219790643Sbenno
219890643Sbenno		/*
219990643Sbenno		 * See if this pvo has a valid PTE.  If so, fetch the
220090643Sbenno		 * REF/CHG bits from the valid PTE.  If the appropriate
220190643Sbenno		 * ptebit is set, cache it and return success.
220290643Sbenno		 */
220390643Sbenno		pt = pmap_pvo_to_pte(pvo, -1);
220490643Sbenno		if (pt != NULL) {
220590643Sbenno			pmap_pte_synch(pt, &pvo->pvo_pte);
220690643Sbenno			if (pvo->pvo_pte.pte_lo & ptebit) {
220790643Sbenno				pmap_attr_save(m, ptebit);
220890643Sbenno				PMAP_PVO_CHECK(pvo);	/* sanity check */
220990643Sbenno				return (TRUE);
221090643Sbenno			}
221190643Sbenno		}
221284921Sbenno	}
221384921Sbenno
2214123354Sgallatin	return (FALSE);
221584921Sbenno}
221690643Sbenno
2217110172Sgrehanstatic u_int
2218110172Sgrehanpmap_clear_bit(vm_page_t m, int ptebit, int *origbit)
221990643Sbenno{
2220110172Sgrehan	u_int	count;
222190643Sbenno	struct	pvo_entry *pvo;
222290643Sbenno	struct	pte *pt;
222390643Sbenno	int	rv;
222490643Sbenno
222590643Sbenno	/*
222690643Sbenno	 * Clear the cached value.
222790643Sbenno	 */
222890643Sbenno	rv = pmap_attr_fetch(m);
222990643Sbenno	pmap_attr_clear(m, ptebit);
223090643Sbenno
223190643Sbenno	/*
223290643Sbenno	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
223390643Sbenno	 * we can reset the right ones).  note that since the pvo entries and
223490643Sbenno	 * list heads are accessed via BAT0 and are never placed in the page
223590643Sbenno	 * table, we don't have to worry about further accesses setting the
223690643Sbenno	 * REF/CHG bits.
223790643Sbenno	 */
223890643Sbenno	SYNC();
223990643Sbenno
224090643Sbenno	/*
224190643Sbenno	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
224290643Sbenno	 * valid pte, clear the ptebit from the valid pte.
224390643Sbenno	 */
2244110172Sgrehan	count = 0;
224590643Sbenno	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
224690643Sbenno		PMAP_PVO_CHECK(pvo);	/* sanity check */
224790643Sbenno		pt = pmap_pvo_to_pte(pvo, -1);
224890643Sbenno		if (pt != NULL) {
224990643Sbenno			pmap_pte_synch(pt, &pvo->pvo_pte);
2250110172Sgrehan			if (pvo->pvo_pte.pte_lo & ptebit) {
2251110172Sgrehan				count++;
225290643Sbenno				pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
2253110172Sgrehan			}
225490643Sbenno		}
225590643Sbenno		rv |= pvo->pvo_pte.pte_lo;
225690643Sbenno		pvo->pvo_pte.pte_lo &= ~ptebit;
225790643Sbenno		PMAP_PVO_CHECK(pvo);	/* sanity check */
225890643Sbenno	}
225990643Sbenno
2260110172Sgrehan	if (origbit != NULL) {
2261110172Sgrehan		*origbit = rv;
2262110172Sgrehan	}
2263110172Sgrehan
2264110172Sgrehan	return (count);
226590643Sbenno}
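
/*
 * Hedged sketch of how the two helpers above might back the MI pmap
 * interface: queries consult the cached attributes first via
 * pmap_query_bit(), and clearing goes through pmap_clear_bit().  The
 * sk_* wrappers are invented examples, reduced to the essential calls.
 */
#if 0	/* illustrative sketch, not compiled into the kernel */
static boolean_t
sk_is_modified(vm_page_t m)
{

	return (pmap_query_bit(m, PTE_CHG));
}

static void
sk_clear_modify(vm_page_t m)
{

	pmap_clear_bit(m, PTE_CHG, NULL);
}
#endif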
226699038Sbenno
226799038Sbenno/*
2268103604Sgrehan * Return 0 if the physical range is encompassed by battable[idx], else an error
2269103604Sgrehan */
2270103604Sgrehanstatic int
2271103604Sgrehanpmap_bat_mapped(int idx, vm_offset_t pa, vm_size_t size)
2272103604Sgrehan{
2273103604Sgrehan	u_int prot;
2274103604Sgrehan	u_int32_t start;
2275103604Sgrehan	u_int32_t end;
2276103604Sgrehan	u_int32_t bat_ble;
2277103604Sgrehan
2278103604Sgrehan	/*
2279103604Sgrehan	 * Return immediately if not a valid mapping
2280103604Sgrehan	 */
2281103604Sgrehan	if ((battable[idx].batu & BAT_Vs) == 0)
2282103604Sgrehan		return (EINVAL);
2283103604Sgrehan
2284103604Sgrehan	/*
2285103604Sgrehan	 * The BAT entry must be cache-inhibited, guarded, and r/w
2286103604Sgrehan	 * so it can function as an i/o page
2287103604Sgrehan	 */
2288103604Sgrehan	prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW);
2289103604Sgrehan	if (prot != (BAT_I|BAT_G|BAT_PP_RW))
2290103604Sgrehan		return (EPERM);
2291103604Sgrehan
2292103604Sgrehan	/*
2293103604Sgrehan	 * The address should be within the BAT range. Assume that the
2294103604Sgrehan	 * start address in the BAT has the correct alignment (thus
2295103604Sgrehan	 * not requiring masking)
2296103604Sgrehan	 */
2297103604Sgrehan	start = battable[idx].batl & BAT_PBS;
2298103604Sgrehan	bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03;
2299103604Sgrehan	end = start | (bat_ble << 15) | 0x7fff;
2300103604Sgrehan
2301103604Sgrehan	if ((pa < start) || ((pa + size) > end))
2302103604Sgrehan		return (ERANGE);
2303103604Sgrehan
2304103604Sgrehan	return (0);
2305103604Sgrehan}
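
/*
 * Standalone model of the bounds trick above: with the BL field in
 * bits 12..2 of the upper BAT word, masking off the effective block
 * start, forcing the low two (Vs/Vp) bits on, shifting left 15, and
 * ORing 0x7fff produces the byte mask of the block.  The sk_* names
 * and the sample register values are invented for the example.
 */
#if 0	/* illustrative sketch, not compiled into the kernel */
#include <stdio.h>

static unsigned
sk_bat_end(unsigned start, unsigned bl_field)
{
	unsigned bat_ble;

	bat_ble = bl_field | 0x03;	/* keep BL, force Vs|Vp on */
	return (start | (bat_ble << 15) | 0x7fff);
}

int
main(void)
{
	/* 256MB block at 0x80000000: BL = 0x7ff in bits 12..2. */
	printf("%#x\n", sk_bat_end(0x80000000u, 0x7ffu << 2));
	/* prints 0x8fffffff */
	return (0);
}
#endif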
2306103604Sgrehan
2308103604Sgrehan/*
230999038Sbenno * Map a set of physical memory pages into the kernel virtual
231099038Sbenno * address space. Return a pointer to where it is mapped. This
231199038Sbenno * routine is intended to be used for mapping device memory,
231299038Sbenno * NOT real memory.
231399038Sbenno */
231499038Sbennovoid *
231599038Sbennopmap_mapdev(vm_offset_t pa, vm_size_t size)
231699038Sbenno{
2317103604Sgrehan	vm_offset_t va, tmpva, ppa, offset;
2318103604Sgrehan	int i;
2319103604Sgrehan
2320103604Sgrehan	ppa = trunc_page(pa);
232199038Sbenno	offset = pa & PAGE_MASK;
232299038Sbenno	size = roundup(offset + size, PAGE_SIZE);
232399038Sbenno
232499038Sbenno	GIANT_REQUIRED;
232599038Sbenno
2326103604Sgrehan	/*
2327103604Sgrehan	 * If the physical address lies within a valid BAT table entry,
2328103604Sgrehan	 * return the 1:1 mapping. This currently doesn't work
2329103604Sgrehan	 * for regions that overlap 256M BAT segments.
2330103604Sgrehan	 */
2331103604Sgrehan	for (i = 0; i < 16; i++) {
2332103604Sgrehan		if (pmap_bat_mapped(i, pa, size) == 0)
2333103604Sgrehan			return ((void *) pa);
2334103604Sgrehan	}
2335103604Sgrehan
2336118365Salc	va = kmem_alloc_nofault(kernel_map, size);
233799038Sbenno	if (!va)
233899038Sbenno		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
233999038Sbenno
234099038Sbenno	for (tmpva = va; size > 0;) {
2341103604Sgrehan		pmap_kenter(tmpva, ppa);
234299038Sbenno		TLBIE(tmpva); /* XXX or should it be invalidate-all ? */
234399038Sbenno		size -= PAGE_SIZE;
234499038Sbenno		tmpva += PAGE_SIZE;
2345103604Sgrehan		ppa += PAGE_SIZE;
234699038Sbenno	}
234799038Sbenno
234899038Sbenno	return ((void *)(va + offset));
234999038Sbenno}
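
/*
 * Hedged usage sketch for pmap_mapdev()/pmap_unmapdev() above: map a
 * hypothetical device register block, poke it, and unmap.  The
 * physical address and register layout are invented for the example.
 */
#if 0	/* illustrative sketch, not compiled into the kernel */
static void
sk_poke_device(void)
{
	volatile u_int32_t *regs;
	void *va;

	va = pmap_mapdev(0xf0001000, PAGE_SIZE);	/* assumed address */
	regs = (volatile u_int32_t *)va;
	regs[0] = 0x1;					/* device-specific */
	pmap_unmapdev((vm_offset_t)va, PAGE_SIZE);
}
#endif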
235099038Sbenno
235199038Sbennovoid
235299038Sbennopmap_unmapdev(vm_offset_t va, vm_size_t size)
235399038Sbenno{
235499038Sbenno	vm_offset_t base, offset;
235599038Sbenno
2356103604Sgrehan	/*
2357103604Sgrehan	 * If this is outside kernel virtual space, then it's a
2358103604Sgrehan	 * battable entry and doesn't require unmapping
2359103604Sgrehan	 */
2360103604Sgrehan	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
2361103604Sgrehan		base = trunc_page(va);
2362103604Sgrehan		offset = va & PAGE_MASK;
2363103604Sgrehan		size = roundup(offset + size, PAGE_SIZE);
2364103604Sgrehan		kmem_free(kernel_map, base, size);
2365103604Sgrehan	}
236699038Sbenno}
2367