mmu_oea.c revision 143234
/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea.c 143234 2005-03-07 07:31:20Z grehan $");

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this module is called upon to
 * provide software-use-only maps which may or may not be stored in the
 * same form as hardware maps.  These pseudo-maps are used to store
 * intermediate results from copy operations to and from address spaces.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * or reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/powerpc.h>
#include <machine/bat.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/sr.h>

#define	PMAP_DEBUG

#define TODO	panic("%s: not implemented", __func__);

#define	TLBIE(va)	__asm __volatile("tlbie %0" :: "r"(va))
#define	TLBSYNC()	__asm __volatile("tlbsync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_SR(vsid)	((vsid) & 0xf)
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
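
/*
 * Example of the encoding: VSID_MAKE(0x3, 0x12345) packs the 4-bit
 * segment register number into bits 0-3 and the 20-bit hash into bits
 * 4-23, yielding 0x123453; VSID_TO_SR() and VSID_TO_HASH() recover
 * 0x3 and 0x12345 respectively.
 */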
#define	PVO_PTEGIDX_MASK	0x007		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x008		/* slot is valid */
#define	PVO_WIRED		0x010		/* PVO entry is wired */
#define	PVO_MANAGED		0x020		/* PVO entry is managed */
#define	PVO_EXECUTABLE		0x040		/* PVO entry is executable */
#define	PVO_BOOTSTRAP		0x080		/* PVO entry allocated during
						   bootstrap */
#define PVO_FAKE		0x100		/* fictitious phys page */
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define	PVO_ISEXECUTABLE(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
#define PVO_ISFAKE(pvo)		((pvo)->pvo_vaddr & PVO_FAKE)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo, i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
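
/*
 * Since mapped virtual addresses are page aligned, the bits of pvo_vaddr
 * below ADDR_POFF are free to carry the flags above.  For example,
 * PVO_PTEGIDX_SET(pvo, 5) stores slot 5 plus PVO_PTEGIDX_VALID so that
 * (pvo_vaddr & 0xf) == 0xd, while PVO_VADDR() still recovers the clean VA.
 */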

#define	PMAP_PVO_CHECK(pvo)

struct ofw_map {
	vm_offset_t	om_va;
	vm_size_t	om_len;
	vm_offset_t	om_pa;
	u_int		om_mode;
};

int	pmap_bootstrapped = 0;

/*
 * Virtual and physical address of message buffer.
 */
struct		msgbuf *msgbufp;
vm_offset_t	msgbuf_phys;

int pmap_pagedaemon_waken;

/*
 * Map of physical memory regions.
 */
vm_offset_t	phys_avail[128];
u_int		phys_avail_count;
static struct	mem_region *regions;
static struct	mem_region *pregions;
int		regions_sz, pregions_sz;
static struct	ofw_map *translations;

/*
 * First and last available kernel virtual addresses.
 */
vm_offset_t virtual_avail;
vm_offset_t virtual_end;
vm_offset_t kernel_vm_end;

/*
 * Kernel pmap.
 */
struct pmap kernel_pmap_store;
extern struct pmap ofw_pmap;

/*
 * Lock for the pteg and pvo tables.
 */
struct mtx	pmap_table_mutex;

/*
 * PTEG data.
 */
static struct	pteg *pmap_pteg_table;
u_int		pmap_pteg_count;
u_int		pmap_pteg_mask;

/*
 * PVO data.
 */
struct	pvo_head *pmap_pvo_table;		/* pvo entries by pteg index */
struct	pvo_head pmap_pvo_kunmanaged =
    LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged);	/* list of unmanaged pages */
struct	pvo_head pmap_pvo_unmanaged =
    LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);	/* list of unmanaged pages */

uma_zone_t	pmap_upvo_zone;	/* zone for pvo entries for unmanaged pages */
uma_zone_t	pmap_mpvo_zone;	/* zone for pvo entries for managed pages */

#define	BPVO_POOL_SIZE	32768
static struct	pvo_entry *pmap_bpvo_pool;
static int	pmap_bpvo_pool_index = 0;

#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
static u_int	pmap_vsid_bitmap[NPMAPS / VSID_NBPW];

static boolean_t pmap_initialized = FALSE;

/*
 * Statistics.
 */
u_int	pmap_pte_valid = 0;
u_int	pmap_pte_overflow = 0;
u_int	pmap_pte_replacements = 0;
u_int	pmap_pvo_entries = 0;
u_int	pmap_pvo_enter_calls = 0;
u_int	pmap_pvo_remove_calls = 0;
u_int	pmap_pte_spills = 0;
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_valid, CTLFLAG_RD, &pmap_pte_valid,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_overflow, CTLFLAG_RD,
    &pmap_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_replacements, CTLFLAG_RD,
    &pmap_pte_replacements, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_entries, CTLFLAG_RD, &pmap_pvo_entries,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_enter_calls, CTLFLAG_RD,
    &pmap_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_remove_calls, CTLFLAG_RD,
    &pmap_pvo_remove_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_spills, CTLFLAG_RD,
    &pmap_pte_spills, 0, "");

struct	pvo_entry *pmap_pvo_zeropage;

vm_offset_t	pmap_rkva_start = VM_MIN_KERNEL_ADDRESS;
u_int		pmap_rkva_count = 4;

/*
 * Allocate physical memory for use in pmap_bootstrap.
 */
static vm_offset_t	pmap_bootstrap_alloc(vm_size_t, u_int);

/*
 * PTE calls.
 */
static int		pmap_pte_insert(u_int, struct pte *);

/*
 * PVO calls.
 */
static int	pmap_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
		    vm_offset_t, vm_offset_t, u_int, int);
static void	pmap_pvo_remove(struct pvo_entry *, int);
static struct	pvo_entry *pmap_pvo_find_va(pmap_t, vm_offset_t, int *);
static struct	pte *pmap_pvo_to_pte(const struct pvo_entry *, int);

/*
 * Utility routines.
 */
static struct		pvo_entry *pmap_rkva_alloc(void);
static void		pmap_pa_map(struct pvo_entry *, vm_offset_t,
			    struct pte *, int *);
static void		pmap_pa_unmap(struct pvo_entry *, struct pte *, int *);
static void		pmap_syncicache(vm_offset_t, vm_size_t);
static boolean_t	pmap_query_bit(vm_page_t, int);
static u_int		pmap_clear_bit(vm_page_t, int, int *);
static void		tlbia(void);

static __inline int
va_to_sr(u_int *sr, vm_offset_t va)
{
	return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
}

static __inline u_int
va_to_pteg(u_int sr, vm_offset_t addr)
{
	u_int hash;

	hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
	    ADDR_PIDX_SHFT);
	return (hash & pmap_pteg_mask);
}
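
/*
 * Worked example (hypothetical values): for a VSID of 0x123453 and
 * va 0xdeadb000, the page index is (va & ADDR_PIDX) >> ADDR_PIDX_SHFT
 * == 0xeadb, so the primary hash is 0x123453 ^ 0x0eadb == 0x12de88,
 * masked by pmap_pteg_mask (0x3fff with 16384 PTEGs) to group 0x1e88.
 */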

static __inline struct pvo_head *
pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p)
{
	struct	vm_page *pg;

	pg = PHYS_TO_VM_PAGE(pa);

	if (pg_p != NULL)
		*pg_p = pg;

	if (pg == NULL)
		return (&pmap_pvo_unmanaged);

	return (&pg->md.mdpg_pvoh);
}

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	return (&m->md.mdpg_pvoh);
}

static __inline void
pmap_attr_clear(vm_page_t m, int ptebit)
{

	m->md.mdpg_attrs &= ~ptebit;
}

static __inline int
pmap_attr_fetch(vm_page_t m)
{

	return (m->md.mdpg_attrs);
}

static __inline void
pmap_attr_save(vm_page_t m, int ptebit)
{

	m->md.mdpg_attrs |= ptebit;
}

static __inline int
pmap_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
{
	if (pt->pte_hi == pvo_pt->pte_hi)
		return (1);

	return (0);
}

static __inline int
pmap_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
{
	return (pt->pte_hi & ~PTE_VALID) ==
	    (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    ((va >> ADDR_API_SHFT) & PTE_API) | which);
}

static __inline void
pmap_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
{
	/*
	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
	 * set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
	pt->pte_lo = pte_lo;
}

static __inline void
pmap_pte_synch(struct pte *pt, struct pte *pvo_pt)
{

	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
}

static __inline void
pmap_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
{

	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	TLBIE(va);
	EIEIO();
	TLBSYNC();
	SYNC();
}
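
/*
 * For reference, the recipe above works as follows: tlbie invalidates
 * any cached translation for va, eieio orders it ahead of tlbsync,
 * which waits for the invalidate to complete on all processors, and
 * the final sync makes the PTE update globally performed.
 */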

static __inline void
pmap_pte_set(struct pte *pt, struct pte *pvo_pt)
{

	pvo_pt->pte_hi |= PTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1.
	 * Note that the REF/CHG bits are from pvo_pt and thus should have
	 * been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	SYNC();
	pmap_pte_valid++;
}
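
/*
 * Note the ordering above: pte_lo is written first and eieio is issued
 * before pte_hi (which carries PTE_VALID) is stored, so the hardware
 * table walker can never observe a valid PTE whose low word is stale.
 */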

static __inline void
pmap_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	pvo_pt->pte_hi &= ~PTE_VALID;

	/*
	 * Force the reg & chg bits back into the PTEs.
	 */
	SYNC();

	/*
	 * Invalidate the pte.
	 */
	pt->pte_hi &= ~PTE_VALID;

	SYNC();
	TLBIE(va);
	EIEIO();
	TLBSYNC();
	SYNC();

	/*
	 * Save the reg & chg bits.
	 */
	pmap_pte_synch(pt, pvo_pt);
	pmap_pte_valid--;
}

static __inline void
pmap_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	/*
	 * Invalidate the PTE
	 */
	pmap_pte_unset(pt, pvo_pt, va);
	pmap_pte_set(pt, pvo_pt);
}

/*
 * Quick sort callout for comparing memory regions.
 */
static int	mr_cmp(const void *a, const void *b);
static int	om_cmp(const void *a, const void *b);

static int
mr_cmp(const void *a, const void *b)
{
	const struct	mem_region *regiona;
	const struct	mem_region *regionb;

	regiona = a;
	regionb = b;
	if (regiona->mr_start < regionb->mr_start)
		return (-1);
	else if (regiona->mr_start > regionb->mr_start)
		return (1);
	else
		return (0);
}

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa < mapb->om_pa)
		return (-1);
	else if (mapa->om_pa > mapb->om_pa)
		return (1);
	else
		return (0);
}

void
pmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	mmui;
	phandle_t	chosen, mmu;
	int		sz;
	int		i, j;
	int		ofw_mappings;
	vm_size_t	size, physsz, hwphyssz;
	vm_offset_t	pa, va, off;
	u_int		batl, batu;

        /*
         * Set up BAT0 to map the lowest 256 MB area
         */
        battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
        battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);

        /*
         * Map PCI memory space.
         */
        battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
        battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);

        battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
        battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);

        battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW);
        battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs);

        battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW);
        battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs);

        /*
         * Map obio devices.
         */
        battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW);
        battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs);
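
        /*
         * A sketch of the encoding (see machine/bat.h for the exact
         * fields): BATU() holds the effective page index, the block
         * length (256MB here) and the supervisor-valid bit, while BATL()
         * holds the physical block number plus WIMG and protection bits;
         * BAT_M marks coherent RAM, BAT_I|BAT_G (cache-inhibited,
         * guarded) marks device space.
         */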

	/*
	 * Use an IBAT and a DBAT to map the bottom segment of memory
	 * where we are.
	 */
	batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
	__asm ("mtibatu 0,%0; mtibatl 0,%1; isync; \n"
	       "mtdbatu 0,%0; mtdbatl 0,%1; isync"
	    :: "r"(batu), "r"(batl));

#if 0
	/* map frame buffer */
	batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
	__asm ("mtdbatu 1,%0; mtdbatl 1,%1; isync"
	    :: "r"(batu), "r"(batl));
#endif

#if 1
	/* map pci space */
	batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
	__asm ("mtdbatu 1,%0; mtdbatl 1,%1; isync"
	    :: "r"(batu), "r"(batl));
#endif

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "pmap_bootstrap: physical memory");

	qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp);
	for (i = 0; i < pregions_sz; i++) {
		vm_offset_t pa;
		vm_offset_t end;

		CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)",
			pregions[i].mr_start,
			pregions[i].mr_start + pregions[i].mr_size,
			pregions[i].mr_size);
		/*
		 * Install entries into the BAT table to allow all
		 * of physmem to be covered by on-demand BAT entries.
		 * The loop will sometimes set the same battable element
		 * twice, but that's fine since they won't be used for
		 * a while yet.
		 */
		pa = pregions[i].mr_start & 0xf0000000;
		end = pregions[i].mr_start + pregions[i].mr_size;
		do {
                        u_int n = pa >> ADDR_SR_SHFT;

			battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW);
			battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs);
			pa += SEGMENT_LENGTH;
		} while (pa < end);
	}

	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
		panic("pmap_bootstrap: phys_avail too small");
	qsort(regions, regions_sz, sizeof(*regions), mr_cmp);
	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
		    regions[i].mr_start + regions[i].mr_size,
		    regions[i].mr_size);
		if (hwphyssz != 0 &&
		    (physsz + regions[i].mr_size) >= hwphyssz) {
			if (physsz < hwphyssz) {
				phys_avail[j] = regions[i].mr_start;
				phys_avail[j + 1] = regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}
	physmem = btoc(physsz);

	/*
	 * Allocate PTEG table.
	 */
#ifdef PTEGCOUNT
	pmap_pteg_count = PTEGCOUNT;
#else
	pmap_pteg_count = 0x1000;

	while (pmap_pteg_count < physmem)
		pmap_pteg_count <<= 1;

	pmap_pteg_count >>= 1;
#endif /* PTEGCOUNT */
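
	/*
	 * Worked example (hypothetical 128MB machine): physmem is 32768
	 * pages (0x8000), so the loop grows pmap_pteg_count from 0x1000
	 * to 0x8000 and the final shift halves it to 0x4000 -- 16384
	 * groups of 8 PTEs, i.e. a 1MB hash table, four PTEs per page.
	 */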

	size = pmap_pteg_count * sizeof(struct pteg);
	CTR2(KTR_PMAP, "pmap_bootstrap: %d PTEGs, %d bytes", pmap_pteg_count,
	    size);
	pmap_pteg_table = (struct pteg *)pmap_bootstrap_alloc(size, size);
	CTR1(KTR_PMAP, "pmap_bootstrap: PTEG table at %p", pmap_pteg_table);
	bzero((void *)pmap_pteg_table, pmap_pteg_count * sizeof(struct pteg));
	pmap_pteg_mask = pmap_pteg_count - 1;

	/*
	 * Allocate pv/overflow lists.
	 */
	size = sizeof(struct pvo_head) * pmap_pteg_count;
	pmap_pvo_table = (struct pvo_head *)pmap_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "pmap_bootstrap: PVO table at %p", pmap_pvo_table);
	for (i = 0; i < pmap_pteg_count; i++)
		LIST_INIT(&pmap_pvo_table[i]);

	/*
	 * Initialize the lock that synchronizes access to the pteg and pvo
	 * tables.
	 */
	mtx_init(&pmap_table_mutex, "pmap table", NULL, MTX_DEF);

	/*
	 * Allocate the message buffer.
	 */
	msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE, 0);

	/*
	 * Initialise the unmanaged pvo pool.
	 */
	pmap_bpvo_pool = (struct pvo_entry *)pmap_bootstrap_alloc(
		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
	pmap_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	pmap_vsid_bitmap[0] |= 1;
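
	/*
	 * Illustrative indexing: each word of pmap_vsid_bitmap tracks
	 * VSID_NBPW (32) hash values, so a hypothetical hash 0x45 would
	 * live at word 0x45 / 32 == 2, bit 0x45 % 32 == 5.
	 */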

	/*
	 * Set up the Open Firmware pmap and add its mappings.
	 */
	pmap_pinit(&ofw_pmap);
	ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
	ofw_pmap.pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT;
	if ((chosen = OF_finddevice("/chosen")) == -1)
		panic("pmap_bootstrap: can't find /chosen");
	OF_getprop(chosen, "mmu", &mmui, 4);
	if ((mmu = OF_instance_to_package(mmui)) == -1)
		panic("pmap_bootstrap: can't get mmu package");
	if ((sz = OF_getproplen(mmu, "translations")) == -1)
		panic("pmap_bootstrap: can't get ofw translation count");
	translations = NULL;
	for (i = 0; phys_avail[i] != 0; i += 2) {
		if (phys_avail[i + 1] >= sz) {
			translations = (struct ofw_map *)phys_avail[i];
			break;
		}
	}
	if (translations == NULL)
		panic("pmap_bootstrap: no space to copy translations");
	bzero(translations, sz);
	if (OF_getprop(mmu, "translations", translations, sz) == -1)
		panic("pmap_bootstrap: can't get ofw translations");
	CTR0(KTR_PMAP, "pmap_bootstrap: translations");
	sz /= sizeof(*translations);
	qsort(translations, sz, sizeof (*translations), om_cmp);
	for (i = 0, ofw_mappings = 0; i < sz; i++) {
		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
		    translations[i].om_pa, translations[i].om_va,
		    translations[i].om_len);

		/*
		 * If the mapping is 1:1, let the RAM and device on-demand
		 * BAT tables take care of the translation.
		 */
		if (translations[i].om_va == translations[i].om_pa)
			continue;

		/* Enter the pages */
		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
			struct	vm_page m;

			m.phys_addr = translations[i].om_pa + off;
			pmap_enter(&ofw_pmap, translations[i].om_va + off, &m,
				   VM_PROT_ALL, 1);
			ofw_mappings++;
		}
	}
#ifdef SMP
	TLBSYNC();
#endif

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	PMAP_LOCK_INIT(kernel_pmap);
	for (i = 0; i < 16; i++) {
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
	}
	kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
	kernel_pmap->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT;
	kernel_pmap->pm_active = ~0;

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0);
	kstack0_phys = pa;
	kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE);
	CTR2(KTR_PMAP, "pmap_bootstrap: kstack0 at %#x (%#x)", kstack0_phys,
	    kstack0);
	virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE;
	for (i = 0; i < KSTACK_PAGES; i++) {
		pa = kstack0_phys + i * PAGE_SIZE;
		va = kstack0 + i * PAGE_SIZE;
		pmap_kenter(va, pa);
		TLBIE(va);
	}

	/*
	 * Calculate the last available physical address.
	 */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	msgbufp = (struct msgbuf *)virtual_avail;
	virtual_avail += round_page(MSGBUF_SIZE);

	/*
	 * Initialize hardware.
	 */
	for (i = 0; i < 16; i++) {
		mtsrin(i << ADDR_SR_SHFT, EMPTY_SEGMENT);
	}
	__asm __volatile ("mtsr %0,%1"
	    :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
	__asm __volatile ("mtsr %0,%1"
	    :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT));
	__asm __volatile ("sync; mtsdr1 %0; isync"
	    :: "r"((u_int)pmap_pteg_table | (pmap_pteg_mask >> 10)));
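
	/*
	 * SDR1 packs the physical base of the hash table with HTABMASK in
	 * its low bits.  Continuing the hypothetical 16384-PTEG example:
	 * pmap_pteg_mask is 0x3fff, so pmap_pteg_mask >> 10 == 0xf, and
	 * the size-aligned pmap_bootstrap_alloc() above guarantees the
	 * table base has zeros in those low bits.
	 */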
	tlbia();

	pmap_bootstrapped++;
}

/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
pmap_activate(struct thread *td)
{
	pmap_t	pm, pmr;

	/*
	 * Load all the data we need up front to encourage the compiler to
	 * not issue any loads while we have interrupts disabled below.
	 */
	pm = &td->td_proc->p_vmspace->vm_pmap;

	if ((pmr = (pmap_t)pmap_kextract((vm_offset_t)pm)) == NULL)
		pmr = pm;

	pm->pm_active |= PCPU_GET(cpumask);
	PCPU_SET(curpmap, pmr);
}

void
pmap_deactivate(struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	pm->pm_active &= ~(PCPU_GET(cpumask));
	PCPU_SET(curpmap, NULL);
}

vm_offset_t
pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size)
{

	return (va);
}

void
pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired)
{
	struct	pvo_entry *pvo;

	PMAP_LOCK(pm);
	pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);

	if (pvo != NULL) {
		if (wired) {
			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
				pm->pm_stats.wired_count++;
			pvo->pvo_vaddr |= PVO_WIRED;
		} else {
			if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
				pm->pm_stats.wired_count--;
			pvo->pvo_vaddr &= ~PVO_WIRED;
		}
	}
	PMAP_UNLOCK(pm);
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
	  vm_size_t len, vm_offset_t src_addr)
{

	/*
	 * This is not needed as it's mainly an optimisation.
	 * It may want to be implemented later though.
	 */
}

void
pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t	dst;
	vm_offset_t	src;

	dst = VM_PAGE_TO_PHYS(mdst);
	src = VM_PAGE_TO_PHYS(msrc);

	kcopy((void *)src, (void *)dst, PAGE_SIZE);
}

/*
 * Zero a page of physical memory by temporarily mapping it into the tlb.
 */
void
pmap_zero_page(vm_page_t m)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	caddr_t va;

	if (pa < SEGMENT_LENGTH) {
		va = (caddr_t) pa;
	} else if (pmap_initialized) {
		if (pmap_pvo_zeropage == NULL)
			pmap_pvo_zeropage = pmap_rkva_alloc();
		pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL);
		va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage);
	} else {
		panic("pmap_zero_page: can't zero pa %#x", pa);
	}

	bzero(va, PAGE_SIZE);

	if (pa >= SEGMENT_LENGTH)
		pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL);
}

void
pmap_zero_page_area(vm_page_t m, int off, int size)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	caddr_t va;

	if (pa < SEGMENT_LENGTH) {
		va = (caddr_t) pa;
	} else if (pmap_initialized) {
		if (pmap_pvo_zeropage == NULL)
			pmap_pvo_zeropage = pmap_rkva_alloc();
		pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL);
		va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage);
	} else {
		panic("pmap_zero_page_area: can't zero pa %#x", pa);
	}

	bzero(va + off, size);

	if (pa >= SEGMENT_LENGTH)
		pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL);
}

void
pmap_zero_page_idle(vm_page_t m)
{

	/* XXX this is called outside of Giant, is pmap_zero_page safe? */
	/* XXX maybe have a dedicated mapping for this to avoid the problem? */
	mtx_lock(&Giant);
	pmap_zero_page(m);
	mtx_unlock(&Giant);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
	   boolean_t wired)
{
	struct		pvo_head *pvo_head;
	uma_zone_t	zone;
	vm_page_t	pg;
	u_int		pte_lo, pvo_flags, was_exec, i;
	int		error;

	if (!pmap_initialized) {
		pvo_head = &pmap_pvo_kunmanaged;
		zone = pmap_upvo_zone;
		pvo_flags = 0;
		pg = NULL;
		was_exec = PTE_EXEC;
	} else {
		pvo_head = vm_page_to_pvoh(m);
		pg = m;
		zone = pmap_mpvo_zone;
		pvo_flags = PVO_MANAGED;
		was_exec = 0;
	}
	if (pmap_bootstrapped)
		vm_page_lock_queues();
	PMAP_LOCK(pmap);

	/* XXX change the pvo head for fake pages */
	if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS)
		pvo_head = &pmap_pvo_kunmanaged;

	/*
	 * If this is a managed page, and it's the first reference to the page,
	 * clear the execness of the page.  Otherwise fetch the execness.
	 */
	if ((pg != NULL) && ((m->flags & PG_FICTITIOUS) == 0)) {
		if (LIST_EMPTY(pvo_head)) {
			pmap_attr_clear(pg, PTE_EXEC);
		} else {
			was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	pte_lo = PTE_I | PTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((VM_PAGE_TO_PHYS(m) >= pregions[i].mr_start) &&
		    (VM_PAGE_TO_PHYS(m) <
			(pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(PTE_I | PTE_G);
			break;
		}
	}

	if (prot & VM_PROT_WRITE)
		pte_lo |= PTE_BW;
	else
		pte_lo |= PTE_BR;

	if (prot & VM_PROT_EXECUTE)
		pvo_flags |= PVO_EXECUTABLE;

	if (wired)
		pvo_flags |= PVO_WIRED;

	if ((m->flags & PG_FICTITIOUS) != 0)
		pvo_flags |= PVO_FAKE;

	error = pmap_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
	    pte_lo, pvo_flags);

	/*
	 * Flush the real page from the instruction cache if this page is
	 * mapped executable and cacheable and was not previously mapped (or
	 * was not mapped executable).
	 */
	if (error == 0 && (pvo_flags & PVO_EXECUTABLE) &&
	    (pte_lo & PTE_I) == 0 && was_exec == 0) {
		/*
		 * Flush the real memory from the cache.
		 */
		pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
		if (pg != NULL)
			pmap_attr_save(pg, PTE_EXEC);
	}
	if (pmap_bootstrapped)
		vm_page_unlock_queues();

	/* XXX syncicache always until problems are sorted */
	pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
	PMAP_UNLOCK(pmap);
}

vm_page_t
pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
{

	vm_page_busy(m);
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(m->object);
	mtx_lock(&Giant);
	pmap_enter(pm, va, m, VM_PROT_READ | VM_PROT_EXECUTE, FALSE);
	mtx_unlock(&Giant);
	VM_OBJECT_LOCK(m->object);
	vm_page_lock_queues();
	vm_page_wakeup(m);
	return (NULL);
}

vm_paddr_t
pmap_extract(pmap_t pm, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

	PMAP_LOCK(pm);
	pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
	if (pvo == NULL)
		pa = 0;
	else
		pa = (pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
	PMAP_UNLOCK(pm);
	return (pa);
}

/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	struct	pvo_entry *pvo;
	vm_page_t m;

	m = NULL;
	mtx_lock(&Giant);
	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	pvo = pmap_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
	if (pvo != NULL && (pvo->pvo_pte.pte_hi & PTE_VALID) &&
	    ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_RW ||
	     (prot & VM_PROT_WRITE) == 0)) {
		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN);
		vm_page_hold(m);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
	mtx_unlock(&Giant);
	return (m);
}

/*
 * Grow the number of kernel page table entries.  Unneeded.
 */
void
pmap_growkernel(vm_offset_t addr)
{
}

void
pmap_init(void)
{

	CTR0(KTR_PMAP, "pmap_init");

	pmap_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	pmap_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	pmap_initialized = TRUE;
}

void
pmap_init2(void)
{

	CTR0(KTR_PMAP, "pmap_init2");
}

boolean_t
pmap_is_modified(vm_page_t m)
{

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return (FALSE);

	return (pmap_query_bit(m, PTE_CHG));
}

/*
 *	pmap_is_prefaultable:
 *
 *	Return whether or not the specified virtual address is eligible
 *	for prefault.
 */
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{

	return (FALSE);
}

void
pmap_clear_reference(vm_page_t m)
{

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return;
	pmap_clear_bit(m, PTE_REF, NULL);
}

void
pmap_clear_modify(vm_page_t m)
{

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return;
	pmap_clear_bit(m, PTE_CHG, NULL);
}

/*
 *	pmap_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	XXX: The exact number of bits to check and clear is a matter that
 *	should be tested and standardized at some point in the future for
 *	optimal aging of shared pages.
 */
int
pmap_ts_referenced(vm_page_t m)
{
	int count;

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return (0);

	count = pmap_clear_bit(m, PTE_REF, NULL);

	return (count);
}

/*
 * Map a wired page into kernel virtual address space.
 */
void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{
	u_int		pte_lo;
	int		error;
	int		i;

#if 0
	if (va < VM_MIN_KERNEL_ADDRESS)
		panic("pmap_kenter: attempt to enter non-kernel address %#x",
		    va);
#endif

	pte_lo = PTE_I | PTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(PTE_I | PTE_G);
			break;
		}
	}

	PMAP_LOCK(kernel_pmap);
	error = pmap_pvo_enter(kernel_pmap, pmap_upvo_zone,
	    &pmap_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);

	if (error != 0 && error != ENOENT)
		panic("pmap_kenter: failed to enter va %#x pa %#x: %d", va,
		    pa, error);

	/*
	 * Flush the real memory from the instruction cache.
	 */
	if ((pte_lo & (PTE_I | PTE_G)) == 0) {
		pmap_syncicache(pa, PAGE_SIZE);
	}
	PMAP_UNLOCK(kernel_pmap);
}

/*
 * Extract the physical page address associated with the given kernel virtual
 * address.
 */
vm_offset_t
pmap_kextract(vm_offset_t va)
{
	struct		pvo_entry *pvo;
	vm_paddr_t pa;

#ifdef UMA_MD_SMALL_ALLOC
	/*
	 * Allow direct mappings
	 */
	if (va < VM_MIN_KERNEL_ADDRESS) {
		return (va);
	}
#endif

	PMAP_LOCK(kernel_pmap);
	pvo = pmap_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
	KASSERT(pvo != NULL, ("pmap_kextract: no addr found"));
	pa = (pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
	PMAP_UNLOCK(kernel_pmap);
	return (pa);
}

/*
 * Remove a wired page from kernel virtual address space.
 */
void
pmap_kremove(vm_offset_t va)
{

	pmap_remove(kernel_pmap, va, va + PAGE_SIZE);
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 *
 * The value passed in *virt is a suggested virtual address for the mapping.
 * Architectures which can support a direct-mapped physical to virtual region
 * can return the appropriate address within that region, leaving '*virt'
 * unchanged.  We cannot and therefore do not; *virt is updated with the
 * first usable address after the mapped region.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot)
{
	vm_offset_t	sva, va;

	sva = *virt;
	va = sva;
	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
		pmap_kenter(va, pa_start);
	*virt = va;
	return (sva);
}
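
/*
 * Hypothetical usage sketch (names assumed, not from this file): a caller
 * mapping a physical range during early VM setup while advancing the KVA
 * cursor would do:
 *
 *	vm_offset_t kva = virtual_avail;
 *	vm_offset_t mapped = pmap_map(&kva, pa_start, pa_end, VM_PROT_ALL);
 *	virtual_avail = kva;
 *
 * pmap_map() returns the first VA of the new range and leaves kva
 * pointing just past it.
 */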
132777957Sbenno
132890643Sbennoint
132990643Sbennopmap_mincore(pmap_t pmap, vm_offset_t addr)
133077957Sbenno{
133190643Sbenno	TODO;
133290643Sbenno	return (0);
133377957Sbenno}
133477957Sbenno
133577957Sbennovoid
133694838Sbennopmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
1337117206Salc		    vm_pindex_t pindex, vm_size_t size)
133890643Sbenno{
133994838Sbenno
1340117206Salc	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1341117206Salc	KASSERT(object->type == OBJT_DEVICE,
1342117206Salc	    ("pmap_object_init_pt: non-device object"));
134394838Sbenno	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1344117206Salc	    ("pmap_object_init_pt: non current pmap"));
134577957Sbenno}
134677957Sbenno
134777957Sbenno/*
134890643Sbenno * Lower the permission for all mappings to a given page.
134977957Sbenno */
135077957Sbennovoid
135177957Sbennopmap_page_protect(vm_page_t m, vm_prot_t prot)
135277957Sbenno{
135390643Sbenno	struct	pvo_head *pvo_head;
135490643Sbenno	struct	pvo_entry *pvo, *next_pvo;
135590643Sbenno	struct	pte *pt;
1356134329Salc	pmap_t	pmap;
135777957Sbenno
135890643Sbenno	/*
135990643Sbenno	 * Since the routine only downgrades protection, if the
136090643Sbenno	 * maximal protection is desired, there isn't any change
136190643Sbenno	 * to be made.
136290643Sbenno	 */
136390643Sbenno	if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) ==
136490643Sbenno	    (VM_PROT_READ|VM_PROT_WRITE))
136577957Sbenno		return;
136677957Sbenno
136790643Sbenno	pvo_head = vm_page_to_pvoh(m);
136890643Sbenno	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
136990643Sbenno		next_pvo = LIST_NEXT(pvo, pvo_vlink);
137090643Sbenno		PMAP_PVO_CHECK(pvo);	/* sanity check */
1371134329Salc		pmap = pvo->pvo_pmap;
1372134329Salc		PMAP_LOCK(pmap);
137390643Sbenno
137490643Sbenno		/*
137590643Sbenno		 * Downgrading to no mapping at all, we just remove the entry.
137690643Sbenno		 */
137790643Sbenno		if ((prot & VM_PROT_READ) == 0) {
137890643Sbenno			pmap_pvo_remove(pvo, -1);
1379134329Salc			PMAP_UNLOCK(pmap);
138090643Sbenno			continue;
138177957Sbenno		}
138290643Sbenno
138390643Sbenno		/*
138490643Sbenno		 * If EXEC permission is being revoked, just clear the flag
138590643Sbenno		 * in the PVO.
138690643Sbenno		 */
138790643Sbenno		if ((prot & VM_PROT_EXECUTE) == 0)
138890643Sbenno			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
138990643Sbenno
139090643Sbenno		/*
139190643Sbenno		 * If this entry is already RO, don't diddle with the page
139290643Sbenno		 * table.
139390643Sbenno		 */
139490643Sbenno		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
1395134329Salc			PMAP_UNLOCK(pmap);
139690643Sbenno			PMAP_PVO_CHECK(pvo);
139790643Sbenno			continue;
139877957Sbenno		}
139990643Sbenno
140090643Sbenno		/*
140190643Sbenno		 * Grab the PTE before we diddle the bits so pvo_to_pte can
140290643Sbenno		 * verify the pte contents are as expected.
140390643Sbenno		 */
140490643Sbenno		pt = pmap_pvo_to_pte(pvo, -1);
140590643Sbenno		pvo->pvo_pte.pte_lo &= ~PTE_PP;
140690643Sbenno		pvo->pvo_pte.pte_lo |= PTE_BR;
140790643Sbenno		if (pt != NULL)
140890643Sbenno			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1409134329Salc		PMAP_UNLOCK(pmap);
141090643Sbenno		PMAP_PVO_CHECK(pvo);	/* sanity check */
141177957Sbenno	}
1412133166Sgrehan
1413133166Sgrehan	/*
1414133166Sgrehan	 * Downgrading from writeable: clear the VM page flag
1415133166Sgrehan	 */
1416133166Sgrehan	if ((prot & VM_PROT_WRITE) != VM_PROT_WRITE)
1417133166Sgrehan		vm_page_flag_clear(m, PG_WRITEABLE);
141877957Sbenno}
141977957Sbenno
142077957Sbenno/*
142191403Ssilby * Returns true if the pmap's pv is one of the first
142291403Ssilby * 16 pvs linked to from this page.  This count may
142391403Ssilby * be changed upwards or downwards in the future; it
142491403Ssilby * is only necessary that true be returned for a small
142591403Ssilby * subset of pmaps for proper page aging.
142691403Ssilby */
142790643Sbennoboolean_t
142891403Ssilbypmap_page_exists_quick(pmap_t pmap, vm_page_t m)
142990643Sbenno{
	int loops;
	struct pvo_entry *pvo;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return (FALSE);
1435110172Sgrehan
1436110172Sgrehan	loops = 0;
1437110172Sgrehan	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1438110172Sgrehan		if (pvo->pvo_pmap == pmap)
1439110172Sgrehan			return (TRUE);
1440110172Sgrehan		if (++loops >= 16)
1441110172Sgrehan			break;
1442110172Sgrehan	}
1443110172Sgrehan
1444110172Sgrehan	return (FALSE);
144590643Sbenno}
144677957Sbenno
144790643Sbennostatic u_int	pmap_vsidcontext;
144877957Sbenno
144990643Sbennovoid
145090643Sbennopmap_pinit(pmap_t pmap)
145190643Sbenno{
145290643Sbenno	int	i, mask;
145390643Sbenno	u_int	entropy;
145477957Sbenno
1455126478Sgrehan	KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("pmap_pinit: virt pmap"));
1456134329Salc	PMAP_LOCK_INIT(pmap);
1457126478Sgrehan
145890643Sbenno	entropy = 0;
145990643Sbenno	__asm __volatile("mftb %0" : "=r"(entropy));
146077957Sbenno
146190643Sbenno	/*
146290643Sbenno	 * Allocate some segment registers for this pmap.
146390643Sbenno	 */
146490643Sbenno	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
146590643Sbenno		u_int	hash, n;
146677957Sbenno
146777957Sbenno		/*
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT hash function collides
		 * less often.  (Note that the prime causes gcc to do shifts
		 * instead of a multiply.)
147377957Sbenno		 */
147490643Sbenno		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
147590643Sbenno		hash = pmap_vsidcontext & (NPMAPS - 1);
147690643Sbenno		if (hash == 0)		/* 0 is special, avoid it */
147790643Sbenno			continue;
147890643Sbenno		n = hash >> 5;
147990643Sbenno		mask = 1 << (hash & (VSID_NBPW - 1));
148090643Sbenno		hash = (pmap_vsidcontext & 0xfffff);
148190643Sbenno		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
148290643Sbenno			/* anything free in this bucket? */
148390643Sbenno			if (pmap_vsid_bitmap[n] == 0xffffffff) {
148490643Sbenno				entropy = (pmap_vsidcontext >> 20);
148590643Sbenno				continue;
148690643Sbenno			}
			i = ffs(~pmap_vsid_bitmap[n]) - 1;
148890643Sbenno			mask = 1 << i;
148990643Sbenno			hash &= 0xfffff & ~(VSID_NBPW - 1);
149090643Sbenno			hash |= i;
149177957Sbenno		}
149290643Sbenno		pmap_vsid_bitmap[n] |= mask;
149390643Sbenno		for (i = 0; i < 16; i++)
149490643Sbenno			pmap->pm_sr[i] = VSID_MAKE(i, hash);
149590643Sbenno		return;
149690643Sbenno	}
149777957Sbenno
149890643Sbenno	panic("pmap_pinit: out of segments");
149977957Sbenno}
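
/*
 * Worked example of the VSID bitmap arithmetic above (hypothetical values,
 * assuming NPMAPS is 32768 and VSID_NBPW is 32): if the low bits of
 * pmap_vsidcontext hash to 0x2b67, then
 *
 *	n    = 0x2b67 >> 5         = 0x15b	(word in pmap_vsid_bitmap[])
 *	mask = 1 << (0x2b67 & 31)  = 1 << 7	(bit within that word)
 *
 * On a collision, ffs() locates a free bit i in word n and the low bits of
 * the hash are replaced with i, so the final VSID still indexes the same
 * bitmap word.
 */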
150077957Sbenno
150177957Sbenno/*
150290643Sbenno * Initialize the pmap associated with process 0.
150377957Sbenno */
150477957Sbennovoid
150590643Sbennopmap_pinit0(pmap_t pm)
150677957Sbenno{
150777957Sbenno
150890643Sbenno	pmap_pinit(pm);
150990643Sbenno	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
151077957Sbenno}
151177957Sbenno
151294838Sbenno/*
151394838Sbenno * Set the physical protection on the specified range of this map as requested.
151494838Sbenno */
151590643Sbennovoid
151694838Sbennopmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
151790643Sbenno{
151894838Sbenno	struct	pvo_entry *pvo;
151994838Sbenno	struct	pte *pt;
152094838Sbenno	int	pteidx;
152194838Sbenno
152294838Sbenno	CTR4(KTR_PMAP, "pmap_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva,
152394838Sbenno	    eva, prot);
152694838Sbenno	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
152794838Sbenno	    ("pmap_protect: non current pmap"));
152894838Sbenno
152994838Sbenno	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1530132899Salc		mtx_lock(&Giant);
153194838Sbenno		pmap_remove(pm, sva, eva);
1532132899Salc		mtx_unlock(&Giant);
153394838Sbenno		return;
153494838Sbenno	}
153594838Sbenno
1536132899Salc	mtx_lock(&Giant);
1537132220Salc	vm_page_lock_queues();
1538134329Salc	PMAP_LOCK(pm);
153994838Sbenno	for (; sva < eva; sva += PAGE_SIZE) {
154094838Sbenno		pvo = pmap_pvo_find_va(pm, sva, &pteidx);
154194838Sbenno		if (pvo == NULL)
154294838Sbenno			continue;
154394838Sbenno
154494838Sbenno		if ((prot & VM_PROT_EXECUTE) == 0)
154594838Sbenno			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
154694838Sbenno
154794838Sbenno		/*
154894838Sbenno		 * Grab the PTE pointer before we diddle with the cached PTE
154994838Sbenno		 * copy.
155094838Sbenno		 */
155194838Sbenno		pt = pmap_pvo_to_pte(pvo, pteidx);
155294838Sbenno		/*
155394838Sbenno		 * Change the protection of the page.
155494838Sbenno		 */
155594838Sbenno		pvo->pvo_pte.pte_lo &= ~PTE_PP;
155694838Sbenno		pvo->pvo_pte.pte_lo |= PTE_BR;
155794838Sbenno
155894838Sbenno		/*
155994838Sbenno		 * If the PVO is in the page table, update that pte as well.
156094838Sbenno		 */
156194838Sbenno		if (pt != NULL)
156294838Sbenno			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
156394838Sbenno	}
1564132220Salc	vm_page_unlock_queues();
1565134329Salc	PMAP_UNLOCK(pm);
1566132899Salc	mtx_unlock(&Giant);
156777957Sbenno}
156877957Sbenno
156991456Sbenno/*
157091456Sbenno * Map a list of wired pages into kernel virtual address space.  This is
157191456Sbenno * intended for temporary mappings which do not need page modification or
157291456Sbenno * references recorded.  Existing mappings in the region are overwritten.
157391456Sbenno */
157490643Sbennovoid
1575110172Sgrehanpmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
157677957Sbenno{
1577110172Sgrehan	vm_offset_t va;
157877957Sbenno
1579110172Sgrehan	va = sva;
1580110172Sgrehan	while (count-- > 0) {
1581110172Sgrehan		pmap_kenter(va, VM_PAGE_TO_PHYS(*m));
1582110172Sgrehan		va += PAGE_SIZE;
1583110172Sgrehan		m++;
1584110172Sgrehan	}
158590643Sbenno}
158677957Sbenno
158791456Sbenno/*
158891456Sbenno * Remove page mappings from kernel virtual address space.  Intended for
158991456Sbenno * temporary mappings entered by pmap_qenter.
159091456Sbenno */
159190643Sbennovoid
1592110172Sgrehanpmap_qremove(vm_offset_t sva, int count)
159390643Sbenno{
1594110172Sgrehan	vm_offset_t va;
159591456Sbenno
1596110172Sgrehan	va = sva;
1597110172Sgrehan	while (count-- > 0) {
159891456Sbenno		pmap_kremove(va);
1599110172Sgrehan		va += PAGE_SIZE;
1600110172Sgrehan	}
160177957Sbenno}
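
/*
 * Illustrative pairing of the two routines above (a sketch only; 'pages'
 * and 'npages' are hypothetical):
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_nofault(kernel_map, npages * PAGE_SIZE);
 *	pmap_qenter(va, pages, npages);
 *	(... access the pages through va ...)
 *	pmap_qremove(va, npages);
 */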
160277957Sbenno
160390643Sbennovoid
160490643Sbennopmap_release(pmap_t pmap)
160590643Sbenno{
	int idx, mask;

	/*
	 * Free segment register's VSID.
	 */
	if (pmap->pm_sr[0] == 0)
		panic("pmap_release");

	idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;
	pmap_vsid_bitmap[idx] &= ~mask;
1618134329Salc	PMAP_LOCK_DESTROY(pmap);
161977957Sbenno}
162077957Sbenno
162191456Sbenno/*
162291456Sbenno * Remove the given range of addresses from the specified map.
162391456Sbenno */
162490643Sbennovoid
162591456Sbennopmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
162677957Sbenno{
162791456Sbenno	struct	pvo_entry *pvo;
162891456Sbenno	int	pteidx;
162991456Sbenno
1630132220Salc	vm_page_lock_queues();
1631134329Salc	PMAP_LOCK(pm);
163291456Sbenno	for (; sva < eva; sva += PAGE_SIZE) {
163391456Sbenno		pvo = pmap_pvo_find_va(pm, sva, &pteidx);
163491456Sbenno		if (pvo != NULL) {
163591456Sbenno			pmap_pvo_remove(pvo, pteidx);
163691456Sbenno		}
163791456Sbenno	}
1638140538Sgrehan	PMAP_UNLOCK(pm);
1639132220Salc	vm_page_unlock_queues();
164077957Sbenno}
164177957Sbenno
164294838Sbenno/*
 * Remove physical page from all pmaps in which it resides. pmap_pvo_remove()
 * will reflect changes in the PTEs back to the vm_page.
1645110172Sgrehan */
1646110172Sgrehanvoid
1647110172Sgrehanpmap_remove_all(vm_page_t m)
1648110172Sgrehan{
1649110172Sgrehan	struct  pvo_head *pvo_head;
1650110172Sgrehan	struct	pvo_entry *pvo, *next_pvo;
1651134329Salc	pmap_t	pmap;
1652110172Sgrehan
1653120336Sgrehan	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1654120336Sgrehan
1655110172Sgrehan	pvo_head = vm_page_to_pvoh(m);
1656110172Sgrehan	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
1657110172Sgrehan		next_pvo = LIST_NEXT(pvo, pvo_vlink);
1658133166Sgrehan
1659110172Sgrehan		PMAP_PVO_CHECK(pvo);	/* sanity check */
1660134329Salc		pmap = pvo->pvo_pmap;
1661134329Salc		PMAP_LOCK(pmap);
1662110172Sgrehan		pmap_pvo_remove(pvo, -1);
1663134329Salc		PMAP_UNLOCK(pmap);
1664110172Sgrehan	}
1665110172Sgrehan	vm_page_flag_clear(m, PG_WRITEABLE);
1666110172Sgrehan}
1667110172Sgrehan
1668110172Sgrehan/*
 * Remove all pages from the specified address space; this aids process exit
 * speed.  This is much faster than pmap_remove in the case of running down
 * an entire address space.  Only works for the current pmap.
167294838Sbenno */
167390643Sbennovoid
167494838Sbennopmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{

	/* XXX: not implemented. */
}
167777957Sbenno
167877957Sbenno/*
167990643Sbenno * Allocate a physical page of memory directly from the phys_avail map.
168090643Sbenno * Can only be called from pmap_bootstrap before avail start and end are
168190643Sbenno * calculated.
168283682Smp */
168390643Sbennostatic vm_offset_t
168490643Sbennopmap_bootstrap_alloc(vm_size_t size, u_int align)
168583682Smp{
168690643Sbenno	vm_offset_t	s, e;
168790643Sbenno	int		i, j;
168883682Smp
168990643Sbenno	size = round_page(size);
169090643Sbenno	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
169190643Sbenno		if (align != 0)
169290643Sbenno			s = (phys_avail[i] + align - 1) & ~(align - 1);
169390643Sbenno		else
169490643Sbenno			s = phys_avail[i];
169590643Sbenno		e = s + size;
169690643Sbenno
169790643Sbenno		if (s < phys_avail[i] || e > phys_avail[i + 1])
169890643Sbenno			continue;
169990643Sbenno
170090643Sbenno		if (s == phys_avail[i]) {
170190643Sbenno			phys_avail[i] += size;
170290643Sbenno		} else if (e == phys_avail[i + 1]) {
170390643Sbenno			phys_avail[i + 1] -= size;
170490643Sbenno		} else {
170590643Sbenno			for (j = phys_avail_count * 2; j > i; j -= 2) {
170690643Sbenno				phys_avail[j] = phys_avail[j - 2];
170790643Sbenno				phys_avail[j + 1] = phys_avail[j - 1];
170890643Sbenno			}
170990643Sbenno
171090643Sbenno			phys_avail[i + 3] = phys_avail[i + 1];
171190643Sbenno			phys_avail[i + 1] = s;
171290643Sbenno			phys_avail[i + 2] = e;
171390643Sbenno			phys_avail_count++;
171490643Sbenno		}
171590643Sbenno
171690643Sbenno		return (s);
171783682Smp	}
171890643Sbenno	panic("pmap_bootstrap_alloc: could not allocate memory");
171983682Smp}
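
/*
 * The three cases above, illustrated with a hypothetical phys_avail range
 * of [0x100000, 0x500000) and a one-page (0x1000) request:
 *
 *	s at range start:	range shrinks to [0x101000, 0x500000)
 *	e at range end:		range shrinks to [0x100000, 0x4ff000)
 *	allocation inside:	range splits; carving [0x200000, 0x201000)
 *				leaves [0x100000, 0x200000) and
 *				[0x201000, 0x500000), and phys_avail_count
 *				is bumped for the new entry
 */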
172083682Smp
172183682Smp/*
172290643Sbenno * Return an unmapped pvo for a kernel virtual address.
172390643Sbenno * Used by pmap functions that operate on physical pages.
172483682Smp */
172590643Sbennostatic struct pvo_entry *
172690643Sbennopmap_rkva_alloc(void)
172783682Smp{
172890643Sbenno	struct		pvo_entry *pvo;
172990643Sbenno	struct		pte *pt;
173090643Sbenno	vm_offset_t	kva;
173190643Sbenno	int		pteidx;
173283682Smp
173390643Sbenno	if (pmap_rkva_count == 0)
173490643Sbenno		panic("pmap_rkva_alloc: no more reserved KVAs");
173590643Sbenno
173690643Sbenno	kva = pmap_rkva_start + (PAGE_SIZE * --pmap_rkva_count);
173790643Sbenno	pmap_kenter(kva, 0);
173890643Sbenno
173990643Sbenno	pvo = pmap_pvo_find_va(kernel_pmap, kva, &pteidx);
174090643Sbenno
174190643Sbenno	if (pvo == NULL)
		panic("pmap_rkva_alloc: pmap_pvo_find_va failed");
174390643Sbenno
174490643Sbenno	pt = pmap_pvo_to_pte(pvo, pteidx);
174590643Sbenno
174690643Sbenno	if (pt == NULL)
		panic("pmap_rkva_alloc: pmap_pvo_to_pte failed");
174890643Sbenno
174990643Sbenno	pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
175090643Sbenno	PVO_PTEGIDX_CLR(pvo);
175190643Sbenno
175290643Sbenno	pmap_pte_overflow++;
175390643Sbenno
175490643Sbenno	return (pvo);
175590643Sbenno}
175690643Sbenno
175790643Sbennostatic void
175890643Sbennopmap_pa_map(struct pvo_entry *pvo, vm_offset_t pa, struct pte *saved_pt,
175990643Sbenno    int *depth_p)
176090643Sbenno{
176190643Sbenno	struct	pte *pt;
176290643Sbenno
176390643Sbenno	/*
176490643Sbenno	 * If this pvo already has a valid pte, we need to save it so it can
176590643Sbenno	 * be restored later.  We then just reload the new PTE over the old
176690643Sbenno	 * slot.
176790643Sbenno	 */
176890643Sbenno	if (saved_pt != NULL) {
176990643Sbenno		pt = pmap_pvo_to_pte(pvo, -1);
177090643Sbenno
177190643Sbenno		if (pt != NULL) {
177290643Sbenno			pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
177390643Sbenno			PVO_PTEGIDX_CLR(pvo);
177490643Sbenno			pmap_pte_overflow++;
177583682Smp		}
177690643Sbenno
177790643Sbenno		*saved_pt = pvo->pvo_pte;
177890643Sbenno
177990643Sbenno		pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
178083682Smp	}
178190643Sbenno
178290643Sbenno	pvo->pvo_pte.pte_lo |= pa;
178390643Sbenno
178490643Sbenno	if (!pmap_pte_spill(pvo->pvo_vaddr))
178590643Sbenno		panic("pmap_pa_map: could not spill pvo %p", pvo);
178690643Sbenno
178790643Sbenno	if (depth_p != NULL)
178890643Sbenno		(*depth_p)++;
178983682Smp}
179083682Smp
179190643Sbennostatic void
179290643Sbennopmap_pa_unmap(struct pvo_entry *pvo, struct pte *saved_pt, int *depth_p)
179377957Sbenno{
179490643Sbenno	struct	pte *pt;
179577957Sbenno
179690643Sbenno	pt = pmap_pvo_to_pte(pvo, -1);
179790643Sbenno
179890643Sbenno	if (pt != NULL) {
179990643Sbenno		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
180090643Sbenno		PVO_PTEGIDX_CLR(pvo);
180190643Sbenno		pmap_pte_overflow++;
180290643Sbenno	}
180390643Sbenno
180490643Sbenno	pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
180590643Sbenno
180690643Sbenno	/*
180790643Sbenno	 * If there is a saved PTE and it's valid, restore it and return.
180890643Sbenno	 */
180990643Sbenno	if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) {
181090643Sbenno		if (depth_p != NULL && --(*depth_p) == 0)
181190643Sbenno			panic("pmap_pa_unmap: restoring but depth == 0");
181290643Sbenno
181390643Sbenno		pvo->pvo_pte = *saved_pt;
181490643Sbenno
181590643Sbenno		if (!pmap_pte_spill(pvo->pvo_vaddr))
181690643Sbenno			panic("pmap_pa_unmap: could not spill pvo %p", pvo);
181790643Sbenno	}
181877957Sbenno}
181977957Sbenno
182090643Sbennostatic void
182190643Sbennopmap_syncicache(vm_offset_t pa, vm_size_t len)
182277957Sbenno{
182390643Sbenno	__syncicache((void *)pa, len);
182490643Sbenno}
182577957Sbenno
182690643Sbennostatic void
182790643Sbennotlbia(void)
182890643Sbenno{
182990643Sbenno	caddr_t	i;
183090643Sbenno
183190643Sbenno	SYNC();
	/*
	 * Step a tlbie through the first 256KB of the address space; with
	 * 4KB pages this touches every TLB congruence class and so
	 * invalidates the entire TLB.
	 */
	for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) {
183390643Sbenno		TLBIE(i);
183490643Sbenno		EIEIO();
183590643Sbenno	}
183690643Sbenno	TLBSYNC();
183790643Sbenno	SYNC();
183877957Sbenno}
183977957Sbenno
184090643Sbennostatic int
184192847Sjeffpmap_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
184290643Sbenno    vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags)
184377957Sbenno{
184490643Sbenno	struct	pvo_entry *pvo;
184590643Sbenno	u_int	sr;
184690643Sbenno	int	first;
184790643Sbenno	u_int	ptegidx;
184890643Sbenno	int	i;
1849103604Sgrehan	int     bootstrap;
185077957Sbenno
185190643Sbenno	pmap_pvo_enter_calls++;
185296250Sbenno	first = 0;
1853103604Sgrehan	bootstrap = 0;
185490643Sbenno
185590643Sbenno	/*
185690643Sbenno	 * Compute the PTE Group index.
185790643Sbenno	 */
185890643Sbenno	va &= ~ADDR_POFF;
185990643Sbenno	sr = va_to_sr(pm->pm_sr, va);
186090643Sbenno	ptegidx = va_to_pteg(sr, va);
186190643Sbenno
186290643Sbenno	/*
186390643Sbenno	 * Remove any existing mapping for this page.  Reuse the pvo entry if
186490643Sbenno	 * there is a mapping.
186590643Sbenno	 */
1866134535Salc	mtx_lock(&pmap_table_mutex);
186790643Sbenno	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
186890643Sbenno		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
186996334Sbenno			if ((pvo->pvo_pte.pte_lo & PTE_RPGN) == pa &&
187096334Sbenno			    (pvo->pvo_pte.pte_lo & PTE_PP) ==
187196334Sbenno			    (pte_lo & PTE_PP)) {
1872134535Salc				mtx_unlock(&pmap_table_mutex);
187392521Sbenno				return (0);
187496334Sbenno			}
187590643Sbenno			pmap_pvo_remove(pvo, -1);
187690643Sbenno			break;
187790643Sbenno		}
187890643Sbenno	}
187990643Sbenno
188090643Sbenno	/*
188190643Sbenno	 * If we aren't overwriting a mapping, try to allocate.
188290643Sbenno	 */
188392521Sbenno	if (pmap_initialized) {
188492847Sjeff		pvo = uma_zalloc(zone, M_NOWAIT);
188592521Sbenno	} else {
188699037Sbenno		if (pmap_bpvo_pool_index >= BPVO_POOL_SIZE) {
188799037Sbenno			panic("pmap_enter: bpvo pool exhausted, %d, %d, %d",
188899037Sbenno			      pmap_bpvo_pool_index, BPVO_POOL_SIZE,
188999037Sbenno			      BPVO_POOL_SIZE * sizeof(struct pvo_entry));
189092521Sbenno		}
189192521Sbenno		pvo = &pmap_bpvo_pool[pmap_bpvo_pool_index];
189292521Sbenno		pmap_bpvo_pool_index++;
1893103604Sgrehan		bootstrap = 1;
189492521Sbenno	}
189590643Sbenno
189690643Sbenno	if (pvo == NULL) {
1897134535Salc		mtx_unlock(&pmap_table_mutex);
189890643Sbenno		return (ENOMEM);
189990643Sbenno	}
190090643Sbenno
190190643Sbenno	pmap_pvo_entries++;
190290643Sbenno	pvo->pvo_vaddr = va;
190390643Sbenno	pvo->pvo_pmap = pm;
190490643Sbenno	LIST_INSERT_HEAD(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
190590643Sbenno	pvo->pvo_vaddr &= ~ADDR_POFF;
190690643Sbenno	if (flags & VM_PROT_EXECUTE)
190790643Sbenno		pvo->pvo_vaddr |= PVO_EXECUTABLE;
190890643Sbenno	if (flags & PVO_WIRED)
190990643Sbenno		pvo->pvo_vaddr |= PVO_WIRED;
191090643Sbenno	if (pvo_head != &pmap_pvo_kunmanaged)
191190643Sbenno		pvo->pvo_vaddr |= PVO_MANAGED;
1912103604Sgrehan	if (bootstrap)
1913103604Sgrehan		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
1914142416Sgrehan	if (flags & PVO_FAKE)
1915142416Sgrehan		pvo->pvo_vaddr |= PVO_FAKE;
1916142416Sgrehan
191790643Sbenno	pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo);
191890643Sbenno
191990643Sbenno	/*
192090643Sbenno	 * Remember if the list was empty and therefore will be the first
192190643Sbenno	 * item.
192290643Sbenno	 */
192396250Sbenno	if (LIST_FIRST(pvo_head) == NULL)
192496250Sbenno		first = 1;
1925142416Sgrehan	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
192690643Sbenno
	if (pvo->pvo_vaddr & PVO_WIRED)
1928134453Salc		pm->pm_stats.wired_count++;
1929134453Salc	pm->pm_stats.resident_count++;
193090643Sbenno
193190643Sbenno	/*
193290643Sbenno	 * We hope this succeeds but it isn't required.
193390643Sbenno	 */
193490643Sbenno	i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
193590643Sbenno	if (i >= 0) {
193690643Sbenno		PVO_PTEGIDX_SET(pvo, i);
193790643Sbenno	} else {
		panic("pmap_pvo_enter: overflow");
		pmap_pte_overflow++;	/* XXX: unreachable while the panic above remains */
194090643Sbenno	}
1941142416Sgrehan	mtx_unlock(&pmap_table_mutex);
194290643Sbenno
194390643Sbenno	return (first ? ENOENT : 0);
194477957Sbenno}
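
/*
 * Note on the return convention above: ENOENT is not an error here; it
 * signals that this was the first mapping entered for the page.  A caller
 * could use that, for example, to decide whether an executable page still
 * needs its instruction cache synchronized (an illustrative sketch, not a
 * quote of the actual caller):
 *
 *	error = pmap_pvo_enter(pm, zone, pvo_head, va, pa, pte_lo, flags);
 *	if (error == ENOENT && (flags & VM_PROT_EXECUTE))
 *		pmap_syncicache(pa, PAGE_SIZE);
 */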
194577957Sbenno
194690643Sbennostatic void
194790643Sbennopmap_pvo_remove(struct pvo_entry *pvo, int pteidx)
194877957Sbenno{
194990643Sbenno	struct	pte *pt;
195077957Sbenno
195190643Sbenno	/*
	 * If there is an active pte entry, we need to deactivate it (and
	 * save the ref & chg bits).
195490643Sbenno	 */
195590643Sbenno	pt = pmap_pvo_to_pte(pvo, pteidx);
195690643Sbenno	if (pt != NULL) {
195790643Sbenno		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
195890643Sbenno		PVO_PTEGIDX_CLR(pvo);
195990643Sbenno	} else {
196090643Sbenno		pmap_pte_overflow--;
1961142416Sgrehan	}
196290643Sbenno
196390643Sbenno	/*
196490643Sbenno	 * Update our statistics.
196590643Sbenno	 */
196690643Sbenno	pvo->pvo_pmap->pm_stats.resident_count--;
	if (pvo->pvo_vaddr & PVO_WIRED)
196890643Sbenno		pvo->pvo_pmap->pm_stats.wired_count--;
196990643Sbenno
197090643Sbenno	/*
197190643Sbenno	 * Save the REF/CHG bits into their cache if the page is managed.
197290643Sbenno	 */
1973142416Sgrehan	if ((pvo->pvo_vaddr & (PVO_MANAGED|PVO_FAKE)) == PVO_MANAGED) {
197490643Sbenno		struct	vm_page *pg;
197590643Sbenno
197692067Sbenno		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN);
197790643Sbenno		if (pg != NULL) {
197890643Sbenno			pmap_attr_save(pg, pvo->pvo_pte.pte_lo &
197990643Sbenno			    (PTE_REF | PTE_CHG));
198090643Sbenno		}
198190643Sbenno	}
198290643Sbenno
198390643Sbenno	/*
198490643Sbenno	 * Remove this PVO from the PV list.
198590643Sbenno	 */
198690643Sbenno	LIST_REMOVE(pvo, pvo_vlink);
198790643Sbenno
198890643Sbenno	/*
198990643Sbenno	 * Remove this from the overflow list and return it to the pool
199090643Sbenno	 * if we aren't going to reuse it.
199190643Sbenno	 */
199290643Sbenno	LIST_REMOVE(pvo, pvo_olink);
199392521Sbenno	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
199492847Sjeff		uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? pmap_mpvo_zone :
199592521Sbenno		    pmap_upvo_zone, pvo);
199690643Sbenno	pmap_pvo_entries--;
199790643Sbenno	pmap_pvo_remove_calls++;
199877957Sbenno}
199977957Sbenno
200090643Sbennostatic __inline int
200190643Sbennopmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
200277957Sbenno{
200390643Sbenno	int	pteidx;
200477957Sbenno
200590643Sbenno	/*
200690643Sbenno	 * We can find the actual pte entry without searching by grabbing
200790643Sbenno	 * the PTEG index from 3 unused bits in pte_lo[11:9] and by
200890643Sbenno	 * noticing the HID bit.
200990643Sbenno	 */
201090643Sbenno	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
201190643Sbenno	if (pvo->pvo_pte.pte_hi & PTE_HID)
201290643Sbenno		pteidx ^= pmap_pteg_mask * 8;
201390643Sbenno
201490643Sbenno	return (pteidx);
201577957Sbenno}
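
/*
 * Example of the index arithmetic above (hypothetical numbers): with eight
 * PTEs per group, a pvo in group 0x123 occupying slot 5 yields
 *
 *	pteidx = 0x123 * 8 + 5 = 0x91d
 *
 * If the entry was inserted via the secondary hash (PTE_HID set), the index
 * is flipped into the complement group by XORing with pmap_pteg_mask * 8,
 * mirroring the "ptegidx ^= pmap_pteg_mask" step in pmap_pte_insert().
 */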
201677957Sbenno
201790643Sbennostatic struct pvo_entry *
201890643Sbennopmap_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
201977957Sbenno{
202090643Sbenno	struct	pvo_entry *pvo;
202190643Sbenno	int	ptegidx;
202290643Sbenno	u_int	sr;
202377957Sbenno
202490643Sbenno	va &= ~ADDR_POFF;
202590643Sbenno	sr = va_to_sr(pm->pm_sr, va);
202690643Sbenno	ptegidx = va_to_pteg(sr, va);
202790643Sbenno
2028134535Salc	mtx_lock(&pmap_table_mutex);
202990643Sbenno	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
203090643Sbenno		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
203190643Sbenno			if (pteidx_p)
203290643Sbenno				*pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
2033134535Salc			break;
203490643Sbenno		}
203590643Sbenno	}
2036134535Salc	mtx_unlock(&pmap_table_mutex);
203790643Sbenno
2038134535Salc	return (pvo);
203977957Sbenno}
204077957Sbenno
204190643Sbennostatic struct pte *
204290643Sbennopmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
204377957Sbenno{
204490643Sbenno	struct	pte *pt;
204577957Sbenno
204690643Sbenno	/*
204790643Sbenno	 * If we haven't been supplied the ptegidx, calculate it.
204890643Sbenno	 */
204990643Sbenno	if (pteidx == -1) {
205090643Sbenno		int	ptegidx;
205190643Sbenno		u_int	sr;
205277957Sbenno
205390643Sbenno		sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
205490643Sbenno		ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
205590643Sbenno		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
205690643Sbenno	}
205790643Sbenno
205890643Sbenno	pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];
205990643Sbenno
206090643Sbenno	if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
206190643Sbenno		panic("pmap_pvo_to_pte: pvo %p has valid pte in pvo but no "
206290643Sbenno		    "valid pte index", pvo);
206390643Sbenno	}
206490643Sbenno
206590643Sbenno	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
		panic("pmap_pvo_to_pte: pvo %p has valid pte index in "
		    "pvo but no valid pte", pvo);
206890643Sbenno	}
206990643Sbenno
207090643Sbenno	if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
207190643Sbenno		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
207290643Sbenno			panic("pmap_pvo_to_pte: pvo %p has valid pte in "
207390643Sbenno			    "pmap_pteg_table %p but invalid in pvo", pvo, pt);
207477957Sbenno		}
207590643Sbenno
207690643Sbenno		if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF))
207790643Sbenno		    != 0) {
207890643Sbenno			panic("pmap_pvo_to_pte: pvo %p pte does not match "
207990643Sbenno			    "pte %p in pmap_pteg_table", pvo, pt);
208090643Sbenno		}
208190643Sbenno
208290643Sbenno		return (pt);
208377957Sbenno	}
208477957Sbenno
208590643Sbenno	if (pvo->pvo_pte.pte_hi & PTE_VALID) {
208690643Sbenno		panic("pmap_pvo_to_pte: pvo %p has invalid pte %p in "
208790643Sbenno		    "pmap_pteg_table but valid in pvo", pvo, pt);
208890643Sbenno	}
208977957Sbenno
209090643Sbenno	return (NULL);
209177957Sbenno}
209278880Sbenno
209378880Sbenno/*
209490643Sbenno * XXX: THIS STUFF SHOULD BE IN pte.c?
209578880Sbenno */
209690643Sbennoint
209790643Sbennopmap_pte_spill(vm_offset_t addr)
209878880Sbenno{
209990643Sbenno	struct	pvo_entry *source_pvo, *victim_pvo;
210090643Sbenno	struct	pvo_entry *pvo;
210190643Sbenno	int	ptegidx, i, j;
210290643Sbenno	u_int	sr;
210390643Sbenno	struct	pteg *pteg;
210490643Sbenno	struct	pte *pt;
210578880Sbenno
210690643Sbenno	pmap_pte_spills++;
210790643Sbenno
210894836Sbenno	sr = mfsrin(addr);
210990643Sbenno	ptegidx = va_to_pteg(sr, addr);
211090643Sbenno
211178880Sbenno	/*
211290643Sbenno	 * Have to substitute some entry.  Use the primary hash for this.
211390643Sbenno	 * Use low bits of timebase as random generator.
211478880Sbenno	 */
211590643Sbenno	pteg = &pmap_pteg_table[ptegidx];
2116134535Salc	mtx_lock(&pmap_table_mutex);
211790643Sbenno	__asm __volatile("mftb %0" : "=r"(i));
211890643Sbenno	i &= 7;
211990643Sbenno	pt = &pteg->pt[i];
212078880Sbenno
212190643Sbenno	source_pvo = NULL;
212290643Sbenno	victim_pvo = NULL;
212390643Sbenno	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
212478880Sbenno		/*
212590643Sbenno		 * We need to find a pvo entry for this address.
212678880Sbenno		 */
212790643Sbenno		PMAP_PVO_CHECK(pvo);
212890643Sbenno		if (source_pvo == NULL &&
212990643Sbenno		    pmap_pte_match(&pvo->pvo_pte, sr, addr,
213090643Sbenno		    pvo->pvo_pte.pte_hi & PTE_HID)) {
213190643Sbenno			/*
			 * We have now found an entry to be spilled into the
			 * pteg.  The PTE is now valid, so we know it's active.
213490643Sbenno			 */
213590643Sbenno			j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
213678880Sbenno
213790643Sbenno			if (j >= 0) {
213890643Sbenno				PVO_PTEGIDX_SET(pvo, j);
213990643Sbenno				pmap_pte_overflow--;
214090643Sbenno				PMAP_PVO_CHECK(pvo);
2141134535Salc				mtx_unlock(&pmap_table_mutex);
214290643Sbenno				return (1);
214390643Sbenno			}
214490643Sbenno
214590643Sbenno			source_pvo = pvo;
214690643Sbenno
214790643Sbenno			if (victim_pvo != NULL)
214890643Sbenno				break;
214990643Sbenno		}
215090643Sbenno
215178880Sbenno		/*
215290643Sbenno		 * We also need the pvo entry of the victim we are replacing
215390643Sbenno		 * so save the R & C bits of the PTE.
215478880Sbenno		 */
215590643Sbenno		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
215690643Sbenno		    pmap_pte_compare(pt, &pvo->pvo_pte)) {
215790643Sbenno			victim_pvo = pvo;
215890643Sbenno			if (source_pvo != NULL)
215990643Sbenno				break;
216090643Sbenno		}
216190643Sbenno	}
216278880Sbenno
2163134535Salc	if (source_pvo == NULL) {
2164134535Salc		mtx_unlock(&pmap_table_mutex);
216590643Sbenno		return (0);
2166134535Salc	}
216790643Sbenno
216890643Sbenno	if (victim_pvo == NULL) {
216990643Sbenno		if ((pt->pte_hi & PTE_HID) == 0)
			panic("pmap_pte_spill: victim p-pte (%p) has no pvo "
			    "entry", pt);
217290643Sbenno
217378880Sbenno		/*
		 * If this is a secondary PTE, we need to search its primary
		 * pvo bucket for the matching PVO.
217678880Sbenno		 */
217790643Sbenno		LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx ^ pmap_pteg_mask],
217890643Sbenno		    pvo_olink) {
217990643Sbenno			PMAP_PVO_CHECK(pvo);
218090643Sbenno			/*
218190643Sbenno			 * We also need the pvo entry of the victim we are
218290643Sbenno			 * replacing so save the R & C bits of the PTE.
218390643Sbenno			 */
218490643Sbenno			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
218590643Sbenno				victim_pvo = pvo;
218690643Sbenno				break;
218790643Sbenno			}
218890643Sbenno		}
218978880Sbenno
219090643Sbenno		if (victim_pvo == NULL)
			panic("pmap_pte_spill: victim s-pte (%p) has no pvo "
			    "entry", pt);
219390643Sbenno	}
219478880Sbenno
219590643Sbenno	/*
219690643Sbenno	 * We are invalidating the TLB entry for the EA we are replacing even
219790643Sbenno	 * though it's valid.  If we don't, we lose any ref/chg bit changes
219890643Sbenno	 * contained in the TLB entry.
219990643Sbenno	 */
220090643Sbenno	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;
220178880Sbenno
220290643Sbenno	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
220390643Sbenno	pmap_pte_set(pt, &source_pvo->pvo_pte);
220490643Sbenno
220590643Sbenno	PVO_PTEGIDX_CLR(victim_pvo);
220690643Sbenno	PVO_PTEGIDX_SET(source_pvo, i);
220790643Sbenno	pmap_pte_replacements++;
220890643Sbenno
220990643Sbenno	PMAP_PVO_CHECK(victim_pvo);
221090643Sbenno	PMAP_PVO_CHECK(source_pvo);
221190643Sbenno
2212134535Salc	mtx_unlock(&pmap_table_mutex);
221390643Sbenno	return (1);
221490643Sbenno}
221590643Sbenno
221690643Sbennostatic int
221790643Sbennopmap_pte_insert(u_int ptegidx, struct pte *pvo_pt)
221890643Sbenno{
221990643Sbenno	struct	pte *pt;
222090643Sbenno	int	i;
222190643Sbenno
222290643Sbenno	/*
222390643Sbenno	 * First try primary hash.
222490643Sbenno	 */
222590643Sbenno	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
222690643Sbenno		if ((pt->pte_hi & PTE_VALID) == 0) {
222790643Sbenno			pvo_pt->pte_hi &= ~PTE_HID;
222890643Sbenno			pmap_pte_set(pt, pvo_pt);
222990643Sbenno			return (i);
223078880Sbenno		}
223190643Sbenno	}
223278880Sbenno
	/*
	 * Now try secondary hash.  The secondary PTEG is the complement of
	 * the primary one (a bare XOR with pmap_pteg_mask), matching the
	 * lookup arithmetic in pmap_pvo_pte_index().
	 */
	ptegidx ^= pmap_pteg_mask;
223890643Sbenno	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
223990643Sbenno		if ((pt->pte_hi & PTE_VALID) == 0) {
224090643Sbenno			pvo_pt->pte_hi |= PTE_HID;
224190643Sbenno			pmap_pte_set(pt, pvo_pt);
224290643Sbenno			return (i);
224390643Sbenno		}
224490643Sbenno	}
224578880Sbenno
224690643Sbenno	panic("pmap_pte_insert: overflow");
224790643Sbenno	return (-1);
224878880Sbenno}
224984921Sbenno
225090643Sbennostatic boolean_t
225190643Sbennopmap_query_bit(vm_page_t m, int ptebit)
225284921Sbenno{
225390643Sbenno	struct	pvo_entry *pvo;
225490643Sbenno	struct	pte *pt;
225584921Sbenno
2256123560Sgrehan#if 0
225790643Sbenno	if (pmap_attr_fetch(m) & ptebit)
225890643Sbenno		return (TRUE);
2259123560Sgrehan#endif
226084921Sbenno
226190643Sbenno	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
226290643Sbenno		PMAP_PVO_CHECK(pvo);	/* sanity check */
226384921Sbenno
226490643Sbenno		/*
226590643Sbenno		 * See if we saved the bit off.  If so, cache it and return
226690643Sbenno		 * success.
226790643Sbenno		 */
226890643Sbenno		if (pvo->pvo_pte.pte_lo & ptebit) {
226990643Sbenno			pmap_attr_save(m, ptebit);
227090643Sbenno			PMAP_PVO_CHECK(pvo);	/* sanity check */
227190643Sbenno			return (TRUE);
227290643Sbenno		}
227390643Sbenno	}
227484921Sbenno
227590643Sbenno	/*
227690643Sbenno	 * No luck, now go through the hard part of looking at the PTEs
227790643Sbenno	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
227890643Sbenno	 * the PTEs.
227990643Sbenno	 */
228090643Sbenno	SYNC();
228190643Sbenno	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
228290643Sbenno		PMAP_PVO_CHECK(pvo);	/* sanity check */
228390643Sbenno
228490643Sbenno		/*
		 * See if this pvo has a valid PTE.  If so, fetch the
228690643Sbenno		 * REF/CHG bits from the valid PTE.  If the appropriate
228790643Sbenno		 * ptebit is set, cache it and return success.
228890643Sbenno		 */
228990643Sbenno		pt = pmap_pvo_to_pte(pvo, -1);
229090643Sbenno		if (pt != NULL) {
229190643Sbenno			pmap_pte_synch(pt, &pvo->pvo_pte);
229290643Sbenno			if (pvo->pvo_pte.pte_lo & ptebit) {
229390643Sbenno				pmap_attr_save(m, ptebit);
229490643Sbenno				PMAP_PVO_CHECK(pvo);	/* sanity check */
229590643Sbenno				return (TRUE);
229690643Sbenno			}
229790643Sbenno		}
229884921Sbenno	}
229984921Sbenno
2300123354Sgallatin	return (FALSE);
230184921Sbenno}
230290643Sbenno
2303110172Sgrehanstatic u_int
2304110172Sgrehanpmap_clear_bit(vm_page_t m, int ptebit, int *origbit)
230590643Sbenno{
2306110172Sgrehan	u_int	count;
230790643Sbenno	struct	pvo_entry *pvo;
230890643Sbenno	struct	pte *pt;
230990643Sbenno	int	rv;
231090643Sbenno
231190643Sbenno	/*
231290643Sbenno	 * Clear the cached value.
231390643Sbenno	 */
231490643Sbenno	rv = pmap_attr_fetch(m);
231590643Sbenno	pmap_attr_clear(m, ptebit);
231690643Sbenno
231790643Sbenno	/*
231890643Sbenno	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones).  Note that since the pvo entries and
232090643Sbenno	 * list heads are accessed via BAT0 and are never placed in the page
232190643Sbenno	 * table, we don't have to worry about further accesses setting the
232290643Sbenno	 * REF/CHG bits.
232390643Sbenno	 */
232490643Sbenno	SYNC();
232590643Sbenno
232690643Sbenno	/*
232790643Sbenno	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
232890643Sbenno	 * valid pte clear the ptebit from the valid pte.
232990643Sbenno	 */
2330110172Sgrehan	count = 0;
233190643Sbenno	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
233290643Sbenno		PMAP_PVO_CHECK(pvo);	/* sanity check */
233390643Sbenno		pt = pmap_pvo_to_pte(pvo, -1);
233490643Sbenno		if (pt != NULL) {
233590643Sbenno			pmap_pte_synch(pt, &pvo->pvo_pte);
2336110172Sgrehan			if (pvo->pvo_pte.pte_lo & ptebit) {
2337110172Sgrehan				count++;
233890643Sbenno				pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
2339110172Sgrehan			}
234090643Sbenno		}
234190643Sbenno		rv |= pvo->pvo_pte.pte_lo;
234290643Sbenno		pvo->pvo_pte.pte_lo &= ~ptebit;
234390643Sbenno		PMAP_PVO_CHECK(pvo);	/* sanity check */
234490643Sbenno	}
234590643Sbenno
2346110172Sgrehan	if (origbit != NULL) {
2347110172Sgrehan		*origbit = rv;
2348110172Sgrehan	}
2349110172Sgrehan
2350110172Sgrehan	return (count);
235190643Sbenno}
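
/*
 * Sketch of how the two helpers above are intended to be used by the
 * page-attribute interface elsewhere in this file (an illustrative
 * mapping, not a quote):
 *
 *	pmap_is_modified(m)	->	pmap_query_bit(m, PTE_CHG)
 *	pmap_clear_modify(m)	->	pmap_clear_bit(m, PTE_CHG, NULL)
 *	pmap_ts_referenced(m)	->	pmap_clear_bit(m, PTE_REF, NULL),
 *					returning the count of cleared bits
 */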
235299038Sbenno
235399038Sbenno/*
2354103604Sgrehan * Return true if the physical range is encompassed by the battable[idx]
2355103604Sgrehan */
2356103604Sgrehanstatic int
2357103604Sgrehanpmap_bat_mapped(int idx, vm_offset_t pa, vm_size_t size)
2358103604Sgrehan{
2359103604Sgrehan	u_int prot;
2360103604Sgrehan	u_int32_t start;
2361103604Sgrehan	u_int32_t end;
2362103604Sgrehan	u_int32_t bat_ble;
2363103604Sgrehan
2364103604Sgrehan	/*
2365103604Sgrehan	 * Return immediately if not a valid mapping
2366103604Sgrehan	 */
	if (!(battable[idx].batu & BAT_Vs))
2368103604Sgrehan		return (EINVAL);
2369103604Sgrehan
2370103604Sgrehan	/*
2371103604Sgrehan	 * The BAT entry must be cache-inhibited, guarded, and r/w
2372103604Sgrehan	 * so it can function as an i/o page
2373103604Sgrehan	 */
2374103604Sgrehan	prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW);
2375103604Sgrehan	if (prot != (BAT_I|BAT_G|BAT_PP_RW))
2376103604Sgrehan		return (EPERM);
2377103604Sgrehan
2378103604Sgrehan	/*
2379103604Sgrehan	 * The address should be within the BAT range. Assume that the
2380103604Sgrehan	 * start address in the BAT has the correct alignment (thus
2381103604Sgrehan	 * not requiring masking)
2382103604Sgrehan	 */
2383103604Sgrehan	start = battable[idx].batl & BAT_PBS;
2384103604Sgrehan	bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03;
2385103604Sgrehan	end = start | (bat_ble << 15) | 0x7fff;
2386103604Sgrehan
2387103604Sgrehan	if ((pa < start) || ((pa + size) > end))
2388103604Sgrehan		return (ERANGE);
2389103604Sgrehan
2390103604Sgrehan	return (0);
2391103604Sgrehan}
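
/*
 * Worked example of the range computation above (hypothetical BAT values):
 * for a 256MB BAT mapping at 0x80000000, batl & BAT_PBS gives start =
 * 0x80000000, and a fully-set block-length field yields bat_ble = 0x1fff,
 * so
 *
 *	end = 0x80000000 | (0x1fff << 15) | 0x7fff = 0x8fffffff
 *
 * i.e. the BAT covers [0x80000000, 0x8fffffff].
 */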
2392103604Sgrehan
2393133855Sssouhlalint
2394133855Sssouhlalpmap_dev_direct_mapped(vm_offset_t pa, vm_size_t size)
2395133855Sssouhlal{
2396133855Sssouhlal	int i;
2397103604Sgrehan
2398133855Sssouhlal	/*
2399133855Sssouhlal	 * This currently does not work for entries that
2400133855Sssouhlal	 * overlap 256M BAT segments.
2401133855Sssouhlal	 */
2402133855Sssouhlal
	for (i = 0; i < 16; i++)
2404133855Sssouhlal		if (pmap_bat_mapped(i, pa, size) == 0)
2405133855Sssouhlal			return (0);
2406133855Sssouhlal
2407133855Sssouhlal	return (EFAULT);
2408133855Sssouhlal}
2409133855Sssouhlal
2410103604Sgrehan/*
241199038Sbenno * Map a set of physical memory pages into the kernel virtual
241299038Sbenno * address space. Return a pointer to where it is mapped. This
241399038Sbenno * routine is intended to be used for mapping device memory,
241499038Sbenno * NOT real memory.
241599038Sbenno */
241699038Sbennovoid *
241799038Sbennopmap_mapdev(vm_offset_t pa, vm_size_t size)
241899038Sbenno{
2419103604Sgrehan	vm_offset_t va, tmpva, ppa, offset;
2420103604Sgrehan	int i;
2421103604Sgrehan
2422103604Sgrehan	ppa = trunc_page(pa);
242399038Sbenno	offset = pa & PAGE_MASK;
242499038Sbenno	size = roundup(offset + size, PAGE_SIZE);
242599038Sbenno
242699038Sbenno	GIANT_REQUIRED;
242799038Sbenno
2428103604Sgrehan	/*
2429103604Sgrehan	 * If the physical address lies within a valid BAT table entry,
2430103604Sgrehan	 * return the 1:1 mapping. This currently doesn't work
2431103604Sgrehan	 * for regions that overlap 256M BAT segments.
2432103604Sgrehan	 */
2433103604Sgrehan	for (i = 0; i < 16; i++) {
2434103604Sgrehan		if (pmap_bat_mapped(i, pa, size) == 0)
2435103604Sgrehan			return ((void *) pa);
2436103604Sgrehan	}
2437103604Sgrehan
2438118365Salc	va = kmem_alloc_nofault(kernel_map, size);
243999038Sbenno	if (!va)
244099038Sbenno		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
244199038Sbenno
244299038Sbenno	for (tmpva = va; size > 0;) {
2443103604Sgrehan		pmap_kenter(tmpva, ppa);
244499038Sbenno		TLBIE(tmpva); /* XXX or should it be invalidate-all ? */
244599038Sbenno		size -= PAGE_SIZE;
244699038Sbenno		tmpva += PAGE_SIZE;
2447103604Sgrehan		ppa += PAGE_SIZE;
244899038Sbenno	}
244999038Sbenno
245099038Sbenno	return ((void *)(va + offset));
245199038Sbenno}
245299038Sbenno
245399038Sbennovoid
245499038Sbennopmap_unmapdev(vm_offset_t va, vm_size_t size)
245599038Sbenno{
245699038Sbenno	vm_offset_t base, offset;
245799038Sbenno
2458103604Sgrehan	/*
2459103604Sgrehan	 * If this is outside kernel virtual space, then it's a
2460103604Sgrehan	 * battable entry and doesn't require unmapping
2461103604Sgrehan	 */
2462103604Sgrehan	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
2463103604Sgrehan		base = trunc_page(va);
2464103604Sgrehan		offset = va & PAGE_MASK;
2465103604Sgrehan		size = roundup(offset + size, PAGE_SIZE);
2466103604Sgrehan		kmem_free(kernel_map, base, size);
2467103604Sgrehan	}
246899038Sbenno}
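
/*
 * Typical (hypothetical) driver-side use of the pair above:
 *
 *	csr = pmap_mapdev(pa, size);
 *	(... program the device registers through csr ...)
 *	pmap_unmapdev((vm_offset_t)csr, size);
 *
 * If pa fell inside a valid BAT entry, pmap_mapdev() returned the 1:1
 * physical address and pmap_unmapdev() becomes a no-op, which is why the
 * same va/size must be handed back on unmap.
 */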