mmu_oea.c revision 92521
/*
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef lint
static const char rcsid[] =
  "$FreeBSD: head/sys/powerpc/aim/mmu_oea.c 92521 2002-03-17 23:58:12Z benno $";
#endif /* not lint */

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this module is called upon to
 * provide software-use-only maps which may or may not be stored in the
 * same form as hardware maps.  These pseudo-maps are used to store
 * intermediate results from copy operations to and from address spaces.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>

#include <machine/bat.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/sr.h>

#define	PMAP_DEBUG

#define TODO	panic("%s: not implemented", __func__);

#define	PMAP_LOCK(pm)
#define	PMAP_UNLOCK(pm)

#define	TLBIE(va)	__asm __volatile("tlbie %0" :: "r"(va))
#define	TLBSYNC()	__asm __volatile("tlbsync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_SR(vsid)	((vsid) & 0xf)
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
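
/*
 * Illustrative example of the VSID encoding above (not used by the code):
 * the segment register number occupies the low 4 bits and the 20-bit hash
 * sits above it, so VSID_MAKE(3, 0x12345) yields 0x123453, from which
 * VSID_TO_SR() recovers 3 and VSID_TO_HASH() recovers 0x12345.
 */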

#define	PVO_PTEGIDX_MASK	0x0007		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x0008		/* slot is valid */
#define	PVO_WIRED		0x0010		/* PVO entry is wired */
#define	PVO_MANAGED		0x0020		/* PVO entry is managed */
#define	PVO_EXECUTABLE		0x0040		/* PVO entry is executable */
#define	PVO_BOOTSTRAP		0x0004		/* PVO entry allocated during
						   bootstrap */
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define	PVO_ISEXECUTABLE(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo, i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
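
/*
 * Illustrative sketch of the flag packing (an observation drawn from the
 * macros above, not extra functionality): pages are at least 4KB aligned,
 * so the low bits of pvo_vaddr are free to carry state.  A wired mapping
 * that landed in PTEG slot 5 would be encoded as
 *
 *	pvo->pvo_vaddr = (va & ~ADDR_POFF) | PVO_WIRED;
 *	PVO_PTEGIDX_SET(pvo, 5);
 *
 * after which PVO_PTEGIDX_GET(pvo) returns 5 and PVO_VADDR(pvo) still
 * yields the page-aligned virtual address, since all of the flags fall
 * within ADDR_POFF.
 */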

#define	PMAP_PVO_CHECK(pvo)

struct mem_region {
	vm_offset_t	mr_start;
	vm_offset_t	mr_size;
};

struct ofw_map {
	vm_offset_t	om_va;
	vm_size_t	om_len;
	vm_offset_t	om_pa;
	u_int		om_mode;
};

int	pmap_bootstrapped = 0;

/*
 * Virtual and physical address of message buffer.
 */
struct		msgbuf *msgbufp;
vm_offset_t	msgbuf_phys;

/*
 * Physical addresses of first and last available physical page.
 */
vm_offset_t avail_start;
vm_offset_t avail_end;

/*
 * Map of physical memory regions.
 */
vm_offset_t	phys_avail[128];
u_int		phys_avail_count;
static struct	mem_region regions[128];
static struct	ofw_map translations[128];
static int	translations_size;

/*
 * First and last available kernel virtual addresses.
 */
vm_offset_t virtual_avail;
vm_offset_t virtual_end;
vm_offset_t kernel_vm_end;

/*
 * Kernel pmap.
 */
struct pmap kernel_pmap_store;
extern struct pmap ofw_pmap;

/*
 * PTEG data.
 */
static struct	pteg *pmap_pteg_table;
u_int		pmap_pteg_count;
u_int		pmap_pteg_mask;

/*
 * PVO data.
 */
struct	pvo_head *pmap_pvo_table;		/* pvo entries by pteg index */
struct	pvo_head pmap_pvo_kunmanaged =
    LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged);	/* list of unmanaged pages */
struct	pvo_head pmap_pvo_unmanaged =
    LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);	/* list of unmanaged pages */

vm_zone_t	pmap_upvo_zone;	/* zone for pvo entries for unmanaged pages */
vm_zone_t	pmap_mpvo_zone;	/* zone for pvo entries for managed pages */
struct		vm_object pmap_upvo_zone_obj;
struct		vm_object pmap_mpvo_zone_obj;

#define	PMAP_PVO_SIZE	1024
static struct	pvo_entry *pmap_bpvo_pool;
static int	pmap_bpvo_pool_index;
static int	pmap_bpvo_pool_count;

#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
static u_int	pmap_vsid_bitmap[NPMAPS / VSID_NBPW];

static boolean_t pmap_initialized = FALSE;

/*
 * Statistics.
 */
u_int	pmap_pte_valid = 0;
u_int	pmap_pte_overflow = 0;
u_int	pmap_pte_replacements = 0;
u_int	pmap_pvo_entries = 0;
u_int	pmap_pvo_enter_calls = 0;
u_int	pmap_pvo_remove_calls = 0;
u_int	pmap_pte_spills = 0;
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_valid, CTLFLAG_RD, &pmap_pte_valid,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_overflow, CTLFLAG_RD,
    &pmap_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_replacements, CTLFLAG_RD,
    &pmap_pte_replacements, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_entries, CTLFLAG_RD, &pmap_pvo_entries,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_enter_calls, CTLFLAG_RD,
    &pmap_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_remove_calls, CTLFLAG_RD,
    &pmap_pvo_remove_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_spills, CTLFLAG_RD,
    &pmap_pte_spills, 0, "");

struct	pvo_entry *pmap_pvo_zeropage;

vm_offset_t	pmap_rkva_start = VM_MIN_KERNEL_ADDRESS;
u_int		pmap_rkva_count = 4;

/*
 * Allocate physical memory for use in pmap_bootstrap.
 */
static vm_offset_t	pmap_bootstrap_alloc(vm_size_t, u_int);

/*
 * PTE calls.
 */
static int		pmap_pte_insert(u_int, struct pte *);

/*
 * PVO calls.
 */
static int	pmap_pvo_enter(pmap_t, vm_zone_t, struct pvo_head *,
		    vm_offset_t, vm_offset_t, u_int, int);
static void	pmap_pvo_remove(struct pvo_entry *, int);
static struct	pvo_entry *pmap_pvo_find_va(pmap_t, vm_offset_t, int *);
static struct	pte *pmap_pvo_to_pte(const struct pvo_entry *, int);

/*
 * Utility routines.
 */
static struct		pvo_entry *pmap_rkva_alloc(void);
static void		pmap_pa_map(struct pvo_entry *, vm_offset_t,
			    struct pte *, int *);
static void		pmap_pa_unmap(struct pvo_entry *, struct pte *, int *);
static void		pmap_syncicache(vm_offset_t, vm_size_t);
static boolean_t	pmap_query_bit(vm_page_t, int);
static boolean_t	pmap_clear_bit(vm_page_t, int);
static void		tlbia(void);

static __inline int
va_to_sr(u_int *sr, vm_offset_t va)
{
	return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
}

static __inline u_int
va_to_pteg(u_int sr, vm_offset_t addr)
{
	u_int hash;

	hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
	    ADDR_PIDX_SHFT);
	return (hash & pmap_pteg_mask);
}
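
/*
 * Worked example of the hash above (illustrative; assumes the usual OEA
 * values ADDR_PIDX == 0x0ffff000 and ADDR_PIDX_SHFT == 12): for a segment
 * with VSID 0x123450 and addr == 0xdeadb000, the page index is 0xeadb, so
 * hash == 0x123450 ^ 0xeadb == 0x12de8b, which is then wrapped into the
 * PTEG table by pmap_pteg_mask.
 */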

static __inline struct pvo_head *
pa_to_pvoh(vm_offset_t pa)
{
	struct	vm_page *pg;

	pg = PHYS_TO_VM_PAGE(pa);

	if (pg == NULL)
		return (&pmap_pvo_unmanaged);

	return (&pg->md.mdpg_pvoh);
}

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	return (&m->md.mdpg_pvoh);
}

static __inline void
pmap_attr_clear(vm_page_t m, int ptebit)
{

	m->md.mdpg_attrs &= ~ptebit;
}

static __inline int
pmap_attr_fetch(vm_page_t m)
{

	return (m->md.mdpg_attrs);
}

static __inline void
pmap_attr_save(vm_page_t m, int ptebit)
{

	m->md.mdpg_attrs |= ptebit;
}

static __inline int
pmap_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
{
	if (pt->pte_hi == pvo_pt->pte_hi)
		return (1);

	return (0);
}

static __inline int
pmap_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
{
	return (pt->pte_hi & ~PTE_VALID) ==
	    (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    ((va >> ADDR_API_SHFT) & PTE_API) | which);
}

static __inline void
pmap_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
{
	/*
	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
	 * set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
	pt->pte_lo = pte_lo;
}

static __inline void
pmap_pte_synch(struct pte *pt, struct pte *pvo_pt)
{

	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
}

static __inline void
pmap_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
{

	/*
	 * As shown in Section 7.6.3.2.3 of the PowerPC Programming
	 * Environments Manual.
	 */
	pt->pte_lo &= ~ptebit;
	TLBIE(va);
	EIEIO();
	TLBSYNC();
	SYNC();
}

static __inline void
pmap_pte_set(struct pte *pt, struct pte *pvo_pt)
{

	pvo_pt->pte_hi |= PTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1.
	 * Note that the REF/CHG bits are from pvo_pt and thus should have
	 * been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	SYNC();
	pmap_pte_valid++;
}

static __inline void
pmap_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	pvo_pt->pte_hi &= ~PTE_VALID;

	/*
	 * Force the ref & chg bits back into the PTEs.
	 */
	SYNC();

	/*
	 * Invalidate the pte.
	 */
	pt->pte_hi &= ~PTE_VALID;

	SYNC();
	TLBIE(va);
	EIEIO();
	TLBSYNC();
	SYNC();

	/*
	 * Save the ref & chg bits.
	 */
	pmap_pte_synch(pt, pvo_pt);
	pmap_pte_valid--;
}

static __inline void
pmap_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	/*
	 * Invalidate the PTE
	 */
	pmap_pte_unset(pt, pvo_pt, va);
	pmap_pte_set(pt, pvo_pt);
}

/*
 * Quicksort callbacks for comparing memory regions.
 */
static int	mr_cmp(const void *a, const void *b);
static int	om_cmp(const void *a, const void *b);

static int
mr_cmp(const void *a, const void *b)
{
	const struct	mem_region *regiona;
	const struct	mem_region *regionb;

	regiona = a;
	regionb = b;
	if (regiona->mr_start < regionb->mr_start)
		return (-1);
	else if (regiona->mr_start > regionb->mr_start)
		return (1);
	else
		return (0);
}

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa < mapb->om_pa)
		return (-1);
	else if (mapa->om_pa > mapb->om_pa)
		return (1);
	else
		return (0);
}

void
pmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	pmem, mmui;
	phandle_t	chosen, mmu;
	int		sz;
	int		i, j;
	vm_size_t	size, physsz;
	vm_offset_t	pa, va, off;
	u_int		batl, batu;

	/*
	 * Use an IBAT and a DBAT to map the bottom segment of memory
	 * where we are.
	 */
	batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
	__asm ("mtibatu 0,%0; mtibatl 0,%1; mtdbatu 0,%0; mtdbatl 0,%1"
	    :: "r"(batu), "r"(batl));
#if 0
	batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x80000000, BAT_M, BAT_PP_RW);
	__asm ("mtibatu 1,%0; mtibatl 1,%1; mtdbatu 1,%0; mtdbatl 1,%1"
	    :: "r"(batu), "r"(batl));
#endif

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	if ((pmem = OF_finddevice("/memory")) == -1)
		panic("pmap_bootstrap: can't locate memory device");
	if ((sz = OF_getproplen(pmem, "available")) == -1)
		panic("pmap_bootstrap: can't get length of available memory");
	if (sizeof(phys_avail) < sz)
		panic("pmap_bootstrap: phys_avail too small");
	if (sizeof(regions) < sz)
		panic("pmap_bootstrap: regions too small");
	bzero(regions, sz);
	if (OF_getprop(pmem, "available", regions, sz) == -1)
		panic("pmap_bootstrap: can't get available memory");
	sz /= sizeof(*regions);
	CTR0(KTR_PMAP, "pmap_bootstrap: physical memory");
	qsort(regions, sz, sizeof(*regions), mr_cmp);
	phys_avail_count = 0;
	physsz = 0;
	for (i = 0, j = 0; i < sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
		    regions[i].mr_start + regions[i].mr_size,
		    regions[i].mr_size);
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}
	physmem = btoc(physsz);

	/*
	 * Allocate PTEG table.
	 */
#ifdef PTEGCOUNT
	pmap_pteg_count = PTEGCOUNT;
#else
	pmap_pteg_count = 0x1000;

	while (pmap_pteg_count < physmem)
		pmap_pteg_count <<= 1;

	pmap_pteg_count >>= 1;
#endif /* PTEGCOUNT */

	size = pmap_pteg_count * sizeof(struct pteg);
	CTR2(KTR_PMAP, "pmap_bootstrap: %d PTEGs, %d bytes", pmap_pteg_count,
	    size);
	pmap_pteg_table = (struct pteg *)pmap_bootstrap_alloc(size, size);
	CTR1(KTR_PMAP, "pmap_bootstrap: PTEG table at %p", pmap_pteg_table);
	bzero((void *)pmap_pteg_table, pmap_pteg_count * sizeof(struct pteg));
	pmap_pteg_mask = pmap_pteg_count - 1;

	/*
	 * Allocate PTE overflow lists.
	 */
	size = sizeof(struct pvo_head) * pmap_pteg_count;
	pmap_pvo_table = (struct pvo_head *)pmap_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "pmap_bootstrap: PVO table at %p", pmap_pvo_table);
	for (i = 0; i < pmap_pteg_count; i++)
		LIST_INIT(&pmap_pvo_table[i]);

	/*
	 * Allocate the message buffer.
	 */
	msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE, 0);

	/*
	 * Initialise the bootstrap pvo pool.
	 */
	pmap_bpvo_pool = (struct pvo_entry *)pmap_bootstrap_alloc(PAGE_SIZE, 0);
	pmap_bpvo_pool_index = 0;
	pmap_bpvo_pool_count = (int)PAGE_SIZE / sizeof(struct pvo_entry);

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	pmap_vsid_bitmap[0] |= 1;

	/*
	 * Set up the OpenFirmware pmap and add its mappings.
	 */
	pmap_pinit(&ofw_pmap);
	ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
	if ((chosen = OF_finddevice("/chosen")) == -1)
		panic("pmap_bootstrap: can't find /chosen");
	OF_getprop(chosen, "mmu", &mmui, 4);
	if ((mmu = OF_instance_to_package(mmui)) == -1)
		panic("pmap_bootstrap: can't get mmu package");
	if ((sz = OF_getproplen(mmu, "translations")) == -1)
		panic("pmap_bootstrap: can't get ofw translation count");
	if (sizeof(translations) < sz)
		panic("pmap_bootstrap: translations too small");
	bzero(translations, sz);
	if (OF_getprop(mmu, "translations", translations, sz) == -1)
		panic("pmap_bootstrap: can't get ofw translations");
	CTR0(KTR_PMAP, "pmap_bootstrap: translations");
	sz /= sizeof(*translations);
	qsort(translations, sz, sizeof (*translations), om_cmp);
	for (i = 0; i < sz; i++) {
		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
		    translations[i].om_pa, translations[i].om_va,
		    translations[i].om_len);

		/* Drop stuff below something? */

		/* Enter the pages? */
		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
			struct	vm_page m;

			m.phys_addr = translations[i].om_pa + off;
			pmap_enter(&ofw_pmap, translations[i].om_va + off, &m,
			    VM_PROT_ALL, 1);
		}
	}
#ifdef SMP
	TLBSYNC();
#endif

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	for (i = 0; i < 16; i++) {
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
	}
	kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
	kernel_pmap->pm_active = ~0;
	kernel_pmap->pm_count = 1;

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0);
	kstack0_phys = pa;
	kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE);
	CTR2(KTR_PMAP, "pmap_bootstrap: kstack0 at %#x (%#x)", kstack0_phys,
	    kstack0);
	virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE;
	for (i = 0; i < KSTACK_PAGES; i++) {
		pa = kstack0_phys + i * PAGE_SIZE;
		va = kstack0 + i * PAGE_SIZE;
		pmap_kenter(va, pa);
		TLBIE(va);
	}

	/*
	 * Calculate the first and last available physical addresses.
	 */
	avail_start = phys_avail[0];
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	avail_end = phys_avail[i + 1];
	Maxmem = powerpc_btop(avail_end);

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	msgbufp = (struct msgbuf *)virtual_avail;
	virtual_avail += round_page(MSGBUF_SIZE);

	/*
	 * Initialize hardware.
	 */
	for (i = 0; i < 16; i++) {
		__asm __volatile("mtsrin %0,%1"
		    :: "r"(EMPTY_SEGMENT), "r"(i << ADDR_SR_SHFT));
	}
	__asm __volatile ("mtsr %0,%1"
	    :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
	__asm __volatile ("sync; mtsdr1 %0; isync"
	    :: "r"((u_int)pmap_pteg_table | (pmap_pteg_mask >> 10)));
	tlbia();

	pmap_bootstrapped++;
}

/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
pmap_activate(struct thread *td)
{
	pmap_t	pm;
	int	i;

	/*
	 * Load all the data we need up front to encourage the compiler to
	 * not issue any loads while we have interrupts disabled below.
	 */
	pm = &td->td_proc->p_vmspace->vm_pmap;

	KASSERT(pm->pm_active == 0, ("pmap_activate: pmap already active?"));

	pm->pm_active |= PCPU_GET(cpumask);

	/*
	 * XXX: Address this again later?
	 * NetBSD only changes the segment registers on return to userland.
	 */
#if 0
	critical_enter();

	for (i = 0; i < 16; i++) {
		__asm __volatile("mtsr %0,%1" :: "r"(i), "r"(pm->pm_sr[i]));
	}
	__asm __volatile("sync; isync");

	critical_exit();
#endif
}

void
pmap_deactivate(struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	pm->pm_active &= ~(PCPU_GET(cpumask));
}

vm_offset_t
pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size)
{
	TODO;
	return (0);
}

void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{
	TODO;
}

void
pmap_clear_modify(vm_page_t m)
{

	if (m->flags & PG_FICTITIOUS)
		return;
	pmap_clear_bit(m, PTE_CHG);
}

void
pmap_collect(void)
{
	TODO;
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
	  vm_size_t len, vm_offset_t src_addr)
{
	TODO;
}

void
pmap_copy_page(vm_offset_t src, vm_offset_t dst)
{
	TODO;
}

/*
 * Zero a page of physical memory by temporarily mapping it into the tlb.
 */
void
pmap_zero_page(vm_offset_t pa)
{
	caddr_t	va;
	int	i;

	if (pa < SEGMENT_LENGTH) {
		va = (caddr_t) pa;
	} else if (pmap_initialized) {
		if (pmap_pvo_zeropage == NULL)
			pmap_pvo_zeropage = pmap_rkva_alloc();
		pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL);
		va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage);
	} else {
		panic("pmap_zero_page: can't zero pa %#x", pa);
	}

	bzero(va, PAGE_SIZE);

	for (i = PAGE_SIZE / CACHELINESIZE; i > 0; i--) {
		__asm __volatile("dcbz 0,%0" :: "r"(va));
		va += CACHELINESIZE;
	}

	if (pa >= SEGMENT_LENGTH)
		pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL);
}

void
pmap_zero_page_area(vm_offset_t pa, int off, int size)
{
	TODO;
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
	   boolean_t wired)
{
	struct		pvo_head *pvo_head;
	vm_zone_t	zone;
	u_int		pte_lo, pvo_flags;
	int		error;

	if (!pmap_initialized) {
		pvo_head = &pmap_pvo_kunmanaged;
		zone = pmap_upvo_zone;
		pvo_flags = 0;
	} else {
		pvo_head = pa_to_pvoh(m->phys_addr);
		zone = pmap_mpvo_zone;
		pvo_flags = PVO_MANAGED;
	}

	pte_lo = PTE_I | PTE_G;

	if (prot & VM_PROT_WRITE)
		pte_lo |= PTE_BW;
	else
		pte_lo |= PTE_BR;

	if (prot & VM_PROT_EXECUTE)
		pvo_flags |= PVO_EXECUTABLE;

	if (wired)
		pvo_flags |= PVO_WIRED;

	error = pmap_pvo_enter(pmap, zone, pvo_head, va, m->phys_addr, pte_lo,
	    pvo_flags);

	if (error == ENOENT) {
		/*
		 * Flush the real memory from the cache.
		 */
		if ((pvo_flags & PVO_EXECUTABLE) && (pte_lo & PTE_I) == 0) {
			pmap_syncicache(m->phys_addr, PAGE_SIZE);
		}
	}
}

vm_offset_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
	TODO;
	return (0);
}

/*
 * Grow the number of kernel page table entries.  Unneeded.
 */
void
pmap_growkernel(vm_offset_t addr)
{
}

void
pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
{

	CTR(KTR_PMAP, "pmap_init");
}

void
pmap_init2(void)
{

	CTR(KTR_PMAP, "pmap_init2");

	pmap_upvo_zone = zinit("UPVO entry", sizeof (struct pvo_entry),
	    0, 0, 0);
	pmap_mpvo_zone = zinit("MPVO entry", sizeof(struct pvo_entry),
	    PMAP_PVO_SIZE, ZONE_INTERRUPT, 1);
	pmap_initialized = TRUE;
}

boolean_t
pmap_is_modified(vm_page_t m)
{
	TODO;
	return (0);
}

void
pmap_clear_reference(vm_page_t m)
{
	TODO;
}

/*
 *	pmap_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	XXX: The exact number of bits to check and clear is a matter that
 *	should be tested and standardized at some point in the future for
 *	optimal aging of shared pages.
 */

int
pmap_ts_referenced(vm_page_t m)
{
	TODO;
	return (0);
}

/*
 * Map a wired page into kernel virtual address space.
 */
void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{
	u_int		pte_lo;
	int		error;
	int		i;

#if 0
	if (va < VM_MIN_KERNEL_ADDRESS)
		panic("pmap_kenter: attempt to enter non-kernel address %#x",
		    va);
#endif

	pte_lo = PTE_I | PTE_G | PTE_BW;
	for (i = 0; phys_avail[i + 2] != 0; i += 2) {
		if (pa >= phys_avail[i] && pa < phys_avail[i + 1]) {
			pte_lo &= ~(PTE_I | PTE_G);
			break;
		}
	}

	error = pmap_pvo_enter(kernel_pmap, pmap_upvo_zone,
	    &pmap_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);

	if (error != 0 && error != ENOENT)
		panic("pmap_kenter: failed to enter va %#x pa %#x: %d", va,
		    pa, error);

	/*
	 * Flush the real memory from the instruction cache.
	 */
	if ((pte_lo & (PTE_I | PTE_G)) == 0) {
		pmap_syncicache(pa, PAGE_SIZE);
	}
}

vm_offset_t
pmap_kextract(vm_offset_t va)
{
	TODO;
	return (0);
}

/*
 * Remove a wired page from kernel virtual address space.
 */
void
pmap_kremove(vm_offset_t va)
{

	pmap_remove(kernel_pmap, va, va + PAGE_SIZE);
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 *
 * The value passed in *virt is a suggested virtual address for the mapping.
 * Architectures which can support a direct-mapped physical to virtual region
 * can return the appropriate address within that region, leaving '*virt'
 * unchanged.  We cannot and therefore do not; *virt is updated with the
 * first usable address after the mapped region.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot)
{
	vm_offset_t	sva, va;

	sva = *virt;
	va = sva;
	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
		pmap_kenter(va, pa_start);
	*virt = va;
	return (sva);
}
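
/*
 * Illustrative use of pmap_map() (a sketch, not a call made in this file):
 * mapping two pages of hypothetical device memory at pa into the next free
 * kernel VA would look like
 *
 *	vm_offset_t va = virtual_avail;
 *	vm_offset_t sva = pmap_map(&va, pa, pa + 2 * PAGE_SIZE, VM_PROT_ALL);
 *
 * after which sva holds the start of the new mapping and va (and thus the
 * caller's record of free KVA) has advanced past it by two pages.
 */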

int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{
	TODO;
	return (0);
}

/*
 * Create the uarea for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
pmap_new_proc(struct proc *p)
{
	vm_object_t	upobj;
	vm_offset_t	up;
	vm_page_t	m;
	u_int		i;

	/*
	 * Allocate the object for the upages.
	 */
	upobj = p->p_upages_obj;
	if (upobj == NULL) {
		upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
		p->p_upages_obj = upobj;
	}

	/*
	 * Get a kernel virtual address for the uarea for this process.
	 */
	up = (vm_offset_t)p->p_uarea;
	if (up == 0) {
		up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
		if (up == 0)
			panic("pmap_new_proc: upage allocation failed");
		p->p_uarea = (struct user *)up;
	}

	for (i = 0; i < UAREA_PAGES; i++) {
		/*
		 * Get a uarea page.
		 */
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

		/*
		 * Wire the page.
		 */
		m->wire_count++;

		/*
		 * Enter the page into the kernel address space.
		 */
		pmap_kenter(up + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));

		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
		m->valid = VM_PAGE_BITS_ALL;
	}
}

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
		    vm_pindex_t pindex, vm_size_t size, int limit)
{
	TODO;
}

/*
 * Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
	struct	pvo_head *pvo_head;
	struct	pvo_entry *pvo, *next_pvo;
	struct	pte *pt;

	/*
	 * Since the routine only downgrades protection, if the
	 * maximal protection is desired, there isn't any change
	 * to be made.
	 */
	if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) ==
	    (VM_PROT_READ|VM_PROT_WRITE))
		return;

	pvo_head = vm_page_to_pvoh(m);
	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
		next_pvo = LIST_NEXT(pvo, pvo_vlink);
		PMAP_PVO_CHECK(pvo);	/* sanity check */

		/*
		 * Downgrading to no mapping at all, we just remove the entry.
		 */
		if ((prot & VM_PROT_READ) == 0) {
			pmap_pvo_remove(pvo, -1);
			continue;
		}

		/*
		 * If EXEC permission is being revoked, just clear the flag
		 * in the PVO.
		 */
		if ((prot & VM_PROT_EXECUTE) == 0)
			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;

		/*
		 * If this entry is already RO, don't diddle with the page
		 * table.
		 */
		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
			PMAP_PVO_CHECK(pvo);
			continue;
		}

		/*
		 * Grab the PTE before we diddle the bits so pvo_to_pte can
		 * verify the pte contents are as expected.
		 */
		pt = pmap_pvo_to_pte(pvo, -1);
		pvo->pvo_pte.pte_lo &= ~PTE_PP;
		pvo->pvo_pte.pte_lo |= PTE_BR;
		if (pt != NULL)
			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
		PMAP_PVO_CHECK(pvo);	/* sanity check */
	}
}

/*
 * Make the specified page pageable (or not).  Unneeded.
 */
void
pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
	      boolean_t pageable)
{
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
	TODO;
	return (0);
}

static u_int	pmap_vsidcontext;

void
pmap_pinit(pmap_t pmap)
{
	int	i, mask;
	u_int	entropy;

	entropy = 0;
	__asm __volatile("mftb %0" : "=r"(entropy));

	/*
	 * Allocate some segment registers for this pmap.
	 */
	pmap->pm_count = 1;
	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
		u_int	hash, n;

		/*
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT hash function collides
		 * less often.  (Note that the prime causes gcc to do shifts
		 * instead of a multiply.)
		 */
		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
		hash = pmap_vsidcontext & (NPMAPS - 1);
		if (hash == 0)		/* 0 is special, avoid it */
			continue;
		n = hash >> 5;
		mask = 1 << (hash & (VSID_NBPW - 1));
		hash = (pmap_vsidcontext & 0xfffff);
		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
			if (pmap_vsid_bitmap[n] == 0xffffffff) {
				entropy = (pmap_vsidcontext >> 20);
				continue;
			}
			i = ffs(~pmap_vsid_bitmap[n]) - 1;
			mask = 1 << i;
			hash &= 0xfffff & ~(VSID_NBPW - 1);
			hash |= i;
		}
		pmap_vsid_bitmap[n] |= mask;
		for (i = 0; i < 16; i++)
			pmap->pm_sr[i] = VSID_MAKE(i, hash);
		return;
	}

	panic("pmap_pinit: out of segments");
}
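
/*
 * Worked example for the allocator above (illustrative only): with
 * VSID_NBPW == 32, a candidate hash of 0x4b selects bitmap word
 * n == 0x4b >> 5 == 2 and bit 0x4b & 31 == 11.  On a collision, the low
 * five bits of the hash are replaced with the index of a free bit found
 * in that same word, so the claimed VSID still falls in bitmap word 2.
 */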
125977957Sbenno
126077957Sbenno/*
126190643Sbenno * Initialize the pmap associated with process 0.
126277957Sbenno */
126377957Sbennovoid
126490643Sbennopmap_pinit0(pmap_t pm)
126577957Sbenno{
126677957Sbenno
126790643Sbenno	pmap_pinit(pm);
126890643Sbenno	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
126977957Sbenno}
127077957Sbenno
127177957Sbennovoid
127290643Sbennopmap_pinit2(pmap_t pmap)
127377957Sbenno{
127490643Sbenno	/* XXX: Remove this stub when no longer called */
127590643Sbenno}
127677957Sbenno
127790643Sbennovoid
127891802Sbennopmap_prefault(pmap_t pm, vm_offset_t va, vm_map_entry_t entry)
127990643Sbenno{
128091802Sbenno	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
128191802Sbenno	    ("pmap_prefault: non current pmap"));
128291802Sbenno	/* XXX */
128390643Sbenno}
128477957Sbenno
128590643Sbennovoid
128690643Sbennopmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
128790643Sbenno{
128890643Sbenno	TODO;
128977957Sbenno}
129077957Sbenno
129190643Sbennovm_offset_t
129290643Sbennopmap_phys_address(int ppn)
129377957Sbenno{
129490643Sbenno	TODO;
129577957Sbenno	return (0);
129677957Sbenno}
129777957Sbenno
129891456Sbenno/*
129991456Sbenno * Map a list of wired pages into kernel virtual address space.  This is
130091456Sbenno * intended for temporary mappings which do not need page modification or
130191456Sbenno * references recorded.  Existing mappings in the region are overwritten.
130291456Sbenno */
130390643Sbennovoid
130490643Sbennopmap_qenter(vm_offset_t va, vm_page_t *m, int count)
130577957Sbenno{
130690643Sbenno	int	i;
130777957Sbenno
130890643Sbenno	for (i = 0; i < count; i++, va += PAGE_SIZE)
130990643Sbenno		pmap_kenter(va, VM_PAGE_TO_PHYS(m[i]));
131090643Sbenno}
131177957Sbenno
131291456Sbenno/*
131391456Sbenno * Remove page mappings from kernel virtual address space.  Intended for
131491456Sbenno * temporary mappings entered by pmap_qenter.
131591456Sbenno */
131690643Sbennovoid
131790643Sbennopmap_qremove(vm_offset_t va, int count)
131890643Sbenno{
131991456Sbenno	int	i;
132091456Sbenno
132191456Sbenno	for (i = 0; i < count; i++, va += PAGE_SIZE)
132291456Sbenno		pmap_kremove(va);
132377957Sbenno}
132477957Sbenno
132577957Sbenno/*
132690643Sbenno * Add a reference to the specified pmap.
132777957Sbenno */
132890643Sbennovoid
132990643Sbennopmap_reference(pmap_t pm)
133077957Sbenno{
133177957Sbenno
133290643Sbenno	if (pm != NULL)
133390643Sbenno		pm->pm_count++;
133490643Sbenno}
133577957Sbenno
133690643Sbennovoid
133790643Sbennopmap_release(pmap_t pmap)
133890643Sbenno{
133990643Sbenno	TODO;
134077957Sbenno}
134177957Sbenno
134291456Sbenno/*
134391456Sbenno * Remove the given range of addresses from the specified map.
134491456Sbenno */
134590643Sbennovoid
134691456Sbennopmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
134777957Sbenno{
134891456Sbenno	struct	pvo_entry *pvo;
134991456Sbenno	int	pteidx;
135091456Sbenno
135191456Sbenno	for (; sva < eva; sva += PAGE_SIZE) {
135291456Sbenno		pvo = pmap_pvo_find_va(pm, sva, &pteidx);
135391456Sbenno		if (pvo != NULL) {
135491456Sbenno			pmap_pvo_remove(pvo, pteidx);
135591456Sbenno		}
135691456Sbenno	}
135777957Sbenno}
135877957Sbenno
135990643Sbennovoid
136090643Sbennopmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
136177957Sbenno{
136290643Sbenno	TODO;
136377957Sbenno}
136477957Sbenno
136577957Sbennovoid
136690643Sbennopmap_swapin_proc(struct proc *p)
136777957Sbenno{
136890643Sbenno	TODO;
136977957Sbenno}
137077957Sbenno
137177957Sbennovoid
137290643Sbennopmap_swapout_proc(struct proc *p)
137377957Sbenno{
137490643Sbenno	TODO;
137577957Sbenno}
137677957Sbenno
137777957Sbenno/*
137890643Sbenno * Create the kernel stack and pcb for a new thread.
137990643Sbenno * This routine directly affects the fork perf for a process and
138090643Sbenno * create performance for a thread.
138177957Sbenno */
138277957Sbennovoid
138390643Sbennopmap_new_thread(struct thread *td)
138477957Sbenno{
138590643Sbenno	vm_object_t	ksobj;
138690643Sbenno	vm_offset_t	ks;
138790643Sbenno	vm_page_t	m;
138890643Sbenno	u_int		i;
138980431Speter
139090643Sbenno	/*
139190643Sbenno	 * Allocate object for the kstack.
139290643Sbenno	 */
139390643Sbenno	ksobj = td->td_kstack_obj;
139490643Sbenno	if (ksobj == NULL) {
139590643Sbenno		ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES);
139690643Sbenno		td->td_kstack_obj = ksobj;
139790643Sbenno	}
139890643Sbenno
139990643Sbenno	/*
140090643Sbenno	 * Get a kernel virtual address for the kstack for this thread.
140190643Sbenno	 */
140290643Sbenno	ks = td->td_kstack;
140390643Sbenno	if (ks == 0) {
140490643Sbenno		ks = kmem_alloc_nofault(kernel_map,
140590643Sbenno		    (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE);
140690643Sbenno		if (ks == 0)
140790643Sbenno			panic("pmap_new_thread: kstack allocation failed");
140890643Sbenno		TLBIE(ks);
140990643Sbenno		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
141090643Sbenno		td->td_kstack = ks;
141190643Sbenno	}
141290643Sbenno
141390643Sbenno	for (i = 0; i < KSTACK_PAGES; i++) {
141490643Sbenno		/*
141590643Sbenno		 * Get a kernel stack page.
141690643Sbenno		 */
141790643Sbenno		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
141890643Sbenno
141990643Sbenno		/*
142090643Sbenno		 * Wire the page.
142190643Sbenno		 */
142290643Sbenno		m->wire_count++;
142390643Sbenno
142490643Sbenno		/*
142590643Sbenno		 * Enter the page into the kernel address space.
142690643Sbenno		 */
142790643Sbenno		pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
142890643Sbenno
142990643Sbenno		vm_page_wakeup(m);
143090643Sbenno		vm_page_flag_clear(m, PG_ZERO);
143190643Sbenno		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
143290643Sbenno		m->valid = VM_PAGE_BITS_ALL;
143390643Sbenno	}
143477957Sbenno}
143577957Sbenno
143677957Sbennovoid
143790643Sbennopmap_dispose_proc(struct proc *p)
143877957Sbenno{
143990643Sbenno	TODO;
144077957Sbenno}
144177957Sbenno
144277957Sbennovoid
144390643Sbennopmap_dispose_thread(struct thread *td)
144477957Sbenno{
144590643Sbenno	TODO;
144677957Sbenno}
144777957Sbenno
144877957Sbennovoid
144990643Sbennopmap_swapin_thread(struct thread *td)
145083682Smp{
145190643Sbenno	TODO;
145283682Smp}
145383682Smp
145483682Smpvoid
145590643Sbennopmap_swapout_thread(struct thread *td)
145683682Smp{
145790643Sbenno	TODO;
145883682Smp}
145983682Smp
146083682Smp/*
146190643Sbenno * Allocate a physical page of memory directly from the phys_avail map.
146290643Sbenno * Can only be called from pmap_bootstrap before avail start and end are
146390643Sbenno * calculated.
146483682Smp */
146590643Sbennostatic vm_offset_t
146690643Sbennopmap_bootstrap_alloc(vm_size_t size, u_int align)
146783682Smp{
146890643Sbenno	vm_offset_t	s, e;
146990643Sbenno	int		i, j;
147083682Smp
147190643Sbenno	size = round_page(size);
147290643Sbenno	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
147390643Sbenno		if (align != 0)
147490643Sbenno			s = (phys_avail[i] + align - 1) & ~(align - 1);
147590643Sbenno		else
147690643Sbenno			s = phys_avail[i];
147790643Sbenno		e = s + size;
147890643Sbenno
147990643Sbenno		if (s < phys_avail[i] || e > phys_avail[i + 1])
148090643Sbenno			continue;
148190643Sbenno
148290643Sbenno		if (s == phys_avail[i]) {
148390643Sbenno			phys_avail[i] += size;
148490643Sbenno		} else if (e == phys_avail[i + 1]) {
148590643Sbenno			phys_avail[i + 1] -= size;
148690643Sbenno		} else {
148790643Sbenno			for (j = phys_avail_count * 2; j > i; j -= 2) {
148890643Sbenno				phys_avail[j] = phys_avail[j - 2];
148990643Sbenno				phys_avail[j + 1] = phys_avail[j - 1];
149090643Sbenno			}
149190643Sbenno
149290643Sbenno			phys_avail[i + 3] = phys_avail[i + 1];
149390643Sbenno			phys_avail[i + 1] = s;
149490643Sbenno			phys_avail[i + 2] = e;
149590643Sbenno			phys_avail_count++;
149690643Sbenno		}
149790643Sbenno
149890643Sbenno		return (s);
149983682Smp	}
150090643Sbenno	panic("pmap_bootstrap_alloc: could not allocate memory");
150183682Smp}
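
/*
 * A worked example of the carving above, with illustrative numbers:
 * given phys_avail[] = { 0x3000, 0x800000, 0, 0 }, the call
 *
 *	pa = pmap_bootstrap_alloc(0x4000, 0x10000);
 *
 * rounds the start up to the 64KB boundary 0x10000, which lies
 * strictly inside the region, so the region is split in two:
 *
 *	phys_avail[] = { 0x3000, 0x10000, 0x14000, 0x800000, 0, 0 }
 *
 * and 0x10000 is returned.  An allocation that abuts either end of a
 * region merely shrinks that region instead of splitting it.
 */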
150283682Smp
150383682Smp/*
150490643Sbenno * Return an unmapped pvo for a kernel virtual address.
150590643Sbenno * Used by pmap functions that operate on physical pages.
150683682Smp */
150790643Sbennostatic struct pvo_entry *
150890643Sbennopmap_rkva_alloc(void)
150983682Smp{
151090643Sbenno	struct		pvo_entry *pvo;
151190643Sbenno	struct		pte *pt;
151290643Sbenno	vm_offset_t	kva;
151390643Sbenno	int		pteidx;
151483682Smp
151590643Sbenno	if (pmap_rkva_count == 0)
151690643Sbenno		panic("pmap_rkva_alloc: no more reserved KVAs");
151790643Sbenno
151890643Sbenno	kva = pmap_rkva_start + (PAGE_SIZE * --pmap_rkva_count);
151990643Sbenno	pmap_kenter(kva, 0);
152090643Sbenno
152190643Sbenno	pvo = pmap_pvo_find_va(kernel_pmap, kva, &pteidx);
152290643Sbenno
152390643Sbenno	if (pvo == NULL)
152490643Sbenno		panic("pmap_kva_alloc: pmap_pvo_find_va failed");
152590643Sbenno
152690643Sbenno	pt = pmap_pvo_to_pte(pvo, pteidx);
152790643Sbenno
152890643Sbenno	if (pt == NULL)
152990643Sbenno		panic("pmap_kva_alloc: pmap_pvo_to_pte failed");
153090643Sbenno
153190643Sbenno	pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
153290643Sbenno	PVO_PTEGIDX_CLR(pvo);
153390643Sbenno
153490643Sbenno	pmap_pte_overflow++;
153590643Sbenno
153690643Sbenno	return (pvo);
153790643Sbenno}
153890643Sbenno
153990643Sbennostatic void
154090643Sbennopmap_pa_map(struct pvo_entry *pvo, vm_offset_t pa, struct pte *saved_pt,
154190643Sbenno    int *depth_p)
154290643Sbenno{
154390643Sbenno	struct	pte *pt;
154490643Sbenno
154590643Sbenno	/*
154690643Sbenno	 * If this pvo already has a valid pte, we need to save it so it can
154790643Sbenno	 * be restored later.  We then just reload the new PTE over the old
154890643Sbenno	 * slot.
154990643Sbenno	 */
155090643Sbenno	if (saved_pt != NULL) {
155190643Sbenno		pt = pmap_pvo_to_pte(pvo, -1);
155290643Sbenno
155390643Sbenno		if (pt != NULL) {
155490643Sbenno			pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
155590643Sbenno			PVO_PTEGIDX_CLR(pvo);
155690643Sbenno			pmap_pte_overflow++;
155783682Smp		}
155890643Sbenno
155990643Sbenno		*saved_pt = pvo->pvo_pte;
156090643Sbenno
156190643Sbenno		pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
156283682Smp	}
156390643Sbenno
156490643Sbenno	pvo->pvo_pte.pte_lo |= pa;
156590643Sbenno
156690643Sbenno	if (!pmap_pte_spill(pvo->pvo_vaddr))
156790643Sbenno		panic("pmap_pa_map: could not spill pvo %p", pvo);
156890643Sbenno
156990643Sbenno	if (depth_p != NULL)
157090643Sbenno		(*depth_p)++;
157183682Smp}
157283682Smp
157390643Sbennostatic void
157490643Sbennopmap_pa_unmap(struct pvo_entry *pvo, struct pte *saved_pt, int *depth_p)
157577957Sbenno{
157690643Sbenno	struct	pte *pt;
157777957Sbenno
157890643Sbenno	pt = pmap_pvo_to_pte(pvo, -1);
157990643Sbenno
158090643Sbenno	if (pt != NULL) {
158190643Sbenno		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
158290643Sbenno		PVO_PTEGIDX_CLR(pvo);
158390643Sbenno		pmap_pte_overflow++;
158490643Sbenno	}
158590643Sbenno
158690643Sbenno	pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
158790643Sbenno
158890643Sbenno	/*
158990643Sbenno	 * If there is a saved PTE and it's valid, restore it and return.
159090643Sbenno	 */
159190643Sbenno	if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) {
159290643Sbenno		if (depth_p != NULL && --(*depth_p) == 0)
159390643Sbenno			panic("pmap_pa_unmap: restoring but depth == 0");
159490643Sbenno
159590643Sbenno		pvo->pvo_pte = *saved_pt;
159690643Sbenno
159790643Sbenno		if (!pmap_pte_spill(pvo->pvo_vaddr))
159890643Sbenno			panic("pmap_pa_unmap: could not spill pvo %p", pvo);
159990643Sbenno	}
160077957Sbenno}
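
/*
 * Taken together, pmap_rkva_alloc(), pmap_pa_map() and pmap_pa_unmap()
 * let a pmap function borrow a kernel mapping for an arbitrary
 * physical page.  A minimal sketch of the intended pattern (the names
 * and flow here are illustrative only, not a caller from this file):
 *
 *	static struct pvo_entry *rkva_pvo;
 *	struct pte saved;
 *
 *	if (rkva_pvo == NULL)
 *		rkva_pvo = pmap_rkva_alloc();
 *	pmap_pa_map(rkva_pvo, pa, &saved, NULL);
 *	... access the page through PVO_VADDR(rkva_pvo) ...
 *	pmap_pa_unmap(rkva_pvo, &saved, NULL);
 *
 * Saving the old pte allows nested use of the same reserved VA: the
 * inner mapping is loaded over the outer one and the outer pte is
 * restored on unmap, with the optional depth counter sanity-checking
 * the nesting.
 */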
160177957Sbenno
160290643Sbennostatic void
160390643Sbennopmap_syncicache(vm_offset_t pa, vm_size_t len)
160477957Sbenno{
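	/*
	 * pa is a physical address, but passing it to __syncicache()
	 * as an effective address should be safe here: bootstrap is
	 * expected to map physical memory 1:1 (via BAT entries), so
	 * the cache operations hit the intended lines.
	 */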
160590643Sbenno	__syncicache((void *)pa, len);
160690643Sbenno}
160777957Sbenno
160890643Sbennostatic void
160990643Sbennotlbia(void)
161090643Sbenno{
161190643Sbenno	caddr_t	i;
161290643Sbenno
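	/*
	 * Issue a tlbie for each 4KB page in a 256KB window (64
	 * pages).  On the classic 32-bit OEA parts this code targets,
	 * the TLB is indexed by low-order EA bits only, so walking the
	 * window should touch every TLB congruence class and thus
	 * flush the entire TLB.
	 */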
161390643Sbenno	SYNC();
161490643Sbenno	for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) {
161590643Sbenno		TLBIE(i);
161690643Sbenno		EIEIO();
161790643Sbenno	}
161890643Sbenno	TLBSYNC();
161990643Sbenno	SYNC();
162077957Sbenno}
162177957Sbenno
162290643Sbennostatic int
162390643Sbennopmap_pvo_enter(pmap_t pm, vm_zone_t zone, struct pvo_head *pvo_head,
162490643Sbenno    vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags)
162577957Sbenno{
162690643Sbenno	struct	pvo_entry *pvo;
162790643Sbenno	u_int	sr;
162890643Sbenno	int	first;
162990643Sbenno	u_int	ptegidx;
163090643Sbenno	int	i;
163177957Sbenno
163290643Sbenno	pmap_pvo_enter_calls++;
163390643Sbenno
163490643Sbenno	/*
163590643Sbenno	 * Compute the PTE Group index.
163690643Sbenno	 */
163790643Sbenno	va &= ~ADDR_POFF;
163890643Sbenno	sr = va_to_sr(pm->pm_sr, va);
163990643Sbenno	ptegidx = va_to_pteg(sr, va);
164090643Sbenno
164190643Sbenno	/*
164290643Sbenno	 * Remove any existing mapping for this page.  Reuse the pvo entry if
164390643Sbenno	 * there is a mapping.
164490643Sbenno	 */
164590643Sbenno	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
164690643Sbenno		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
164792521Sbenno			if ((pvo->pvo_pte.pte_lo & PTE_RPGN) == pa)
164892521Sbenno				return (0);
164990643Sbenno			pmap_pvo_remove(pvo, -1);
165090643Sbenno			break;
165190643Sbenno		}
165290643Sbenno	}
165390643Sbenno
165490643Sbenno	/*
165590643Sbenno	 * If we aren't overwriting a mapping, try to allocate.
165690643Sbenno	 */
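	/*
	 * Until the VM zones are initialized, pvo entries are carved
	 * from a static bootstrap pool, grown a page at a time with
	 * pmap_bootstrap_alloc().  Such entries are tagged
	 * PVO_BOOTSTRAP so that pmap_pvo_remove() never tries to
	 * zfree() them.
	 */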
165792521Sbenno	if (pmap_initialized) {
165892521Sbenno		pvo = zalloc(zone);
165992521Sbenno	} else {
166092521Sbenno		if (pmap_bpvo_pool_index >= pmap_bpvo_pool_count) {
166192521Sbenno			pmap_bpvo_pool = (struct pvo_entry *)
166292521Sbenno			    pmap_bootstrap_alloc(PAGE_SIZE, 0);
166392521Sbenno			pmap_bpvo_pool_index = 0;
166492521Sbenno		}
166592521Sbenno		pvo = &pmap_bpvo_pool[pmap_bpvo_pool_index];
166692521Sbenno		pmap_bpvo_pool_index++;
166792521Sbenno		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
166892521Sbenno	}
166990643Sbenno
167090643Sbenno	if (pvo == NULL) {
167190643Sbenno		return (ENOMEM);
167290643Sbenno	}
167390643Sbenno
167490643Sbenno	pmap_pvo_entries++;
167590643Sbenno	pvo->pvo_vaddr = va;
167690643Sbenno	pvo->pvo_pmap = pm;
167790643Sbenno	LIST_INSERT_HEAD(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
167890643Sbenno	pvo->pvo_vaddr &= ~ADDR_POFF;
167990643Sbenno	if (flags & VM_PROT_EXECUTE)
168090643Sbenno		pvo->pvo_vaddr |= PVO_EXECUTABLE;
168190643Sbenno	if (flags & PVO_WIRED)
168290643Sbenno		pvo->pvo_vaddr |= PVO_WIRED;
168390643Sbenno	if (pvo_head != &pmap_pvo_kunmanaged)
168490643Sbenno		pvo->pvo_vaddr |= PVO_MANAGED;
168590643Sbenno	pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo);
168690643Sbenno
168790643Sbenno	/*
168890643Sbenno	 * Remember if the list was empty and therefore will be the first
168990643Sbenno	 * item.
169090643Sbenno	 */
169190643Sbenno	first = LIST_FIRST(pvo_head) == NULL;
169290643Sbenno
169390643Sbenno	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
169490643Sbenno	if (pvo->pvo_vaddr & PVO_WIRED)
169590643Sbenno		pvo->pvo_pmap->pm_stats.wired_count++;
169690643Sbenno	pvo->pvo_pmap->pm_stats.resident_count++;
169790643Sbenno
169890643Sbenno	/*
169990643Sbenno	 * We hope this succeeds but it isn't required.
170090643Sbenno	 */
170190643Sbenno	i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
170290643Sbenno	if (i >= 0) {
170390643Sbenno		PVO_PTEGIDX_SET(pvo, i);
170490643Sbenno	} else {
170590643Sbenno		panic("pmap_pvo_enter: overflow");
170690643Sbenno		pmap_pte_overflow++;
170790643Sbenno	}
170890643Sbenno
170990643Sbenno	return (first ? ENOENT : 0);
171077957Sbenno}
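
/*
 * The return value of pmap_pvo_enter() encodes page state as well as
 * failure: ENOMEM means no pvo entry could be allocated, ENOENT means
 * the mapping was entered and was the first mapping of the page, and
 * 0 means the mapping was entered alongside existing ones.  A hedged
 * sketch of a caller (illustrative only; the real caller also computes
 * pte_lo and handles cache synchronization):
 *
 *	error = pmap_pvo_enter(pmap, pmap_mpvo_zone, pvo_head, va,
 *	    VM_PAGE_TO_PHYS(m), pte_lo, flags);
 *	if (error == ENOMEM)
 *		panic("pmap_enter: failed to allocate pvo entry");
 *	if (error == ENOENT)
 *		... first mapping of the page, e.g. sync the icache ...
 */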
171177957Sbenno
171290643Sbennostatic void
171390643Sbennopmap_pvo_remove(struct pvo_entry *pvo, int pteidx)
171477957Sbenno{
171590643Sbenno	struct	pte *pt;
171677957Sbenno
171790643Sbenno	/*
171890643Sbenno	 * If there is an active pte entry, we need to deactivate it (and
171990643Sbenno	 * save the ref & cfg bits).
172090643Sbenno	 */
172190643Sbenno	pt = pmap_pvo_to_pte(pvo, pteidx);
172290643Sbenno	if (pt != NULL) {
172390643Sbenno		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
172490643Sbenno		PVO_PTEGIDX_CLR(pvo);
172590643Sbenno	} else {
172690643Sbenno		pmap_pte_overflow--;
172790643Sbenno	}
172890643Sbenno
172990643Sbenno	/*
173090643Sbenno	 * Update our statistics.
173190643Sbenno	 */
173290643Sbenno	pvo->pvo_pmap->pm_stats.resident_count--;
173390643Sbenno	if (pvo->pvo_vaddr & PVO_WIRED)
173490643Sbenno		pvo->pvo_pmap->pm_stats.wired_count--;
173590643Sbenno
173690643Sbenno	/*
173790643Sbenno	 * Save the REF/CHG bits into their cache if the page is managed.
173890643Sbenno	 */
173990643Sbenno	if (pvo->pvo_vaddr & PVO_MANAGED) {
174090643Sbenno		struct	vm_page *pg;
174190643Sbenno
174292067Sbenno		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN);
174390643Sbenno		if (pg != NULL) {
174490643Sbenno			pmap_attr_save(pg, pvo->pvo_pte.pte_lo &
174590643Sbenno			    (PTE_REF | PTE_CHG));
174690643Sbenno		}
174790643Sbenno	}
174890643Sbenno
174990643Sbenno	/*
175090643Sbenno	 * Remove this PVO from the PV list.
175190643Sbenno	 */
175290643Sbenno	LIST_REMOVE(pvo, pvo_vlink);
175390643Sbenno
175490643Sbenno	/*
175590643Sbenno	 * Remove this from the overflow list and return it to the pool
175690643Sbenno	 * if we aren't going to reuse it.
175790643Sbenno	 */
175890643Sbenno	LIST_REMOVE(pvo, pvo_olink);
175992521Sbenno	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
176092521Sbenno		zfree(pvo->pvo_vaddr & PVO_MANAGED ? pmap_mpvo_zone :
176192521Sbenno		    pmap_upvo_zone, pvo);
176290643Sbenno	pmap_pvo_entries--;
176390643Sbenno	pmap_pvo_remove_calls++;
176477957Sbenno}
176577957Sbenno
176690643Sbennostatic __inline int
176790643Sbennopmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
176877957Sbenno{
176990643Sbenno	int	pteidx;
177077957Sbenno
177190643Sbenno	/*
177290643Sbenno	 * We can find the actual pte entry without searching by grabbing
177390643Sbenno	 * the PTE's slot within its PTEG from 3 unused low bits of
177490643Sbenno	 * pvo_vaddr (PVO_PTEGIDX_GET) and by noticing the HID bit.
177590643Sbenno	 */
177690643Sbenno	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
177790643Sbenno	if (pvo->pvo_pte.pte_hi & PTE_HID)
177890643Sbenno		pteidx ^= pmap_pteg_mask * 8;
177990643Sbenno
178090643Sbenno	return (pteidx);
178177957Sbenno}
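
/*
 * A worked example with illustrative numbers: for ptegidx 5 and saved
 * slot 3, the primary-hash pte index is 5 * 8 + 3 = 43.  If PTE_HID
 * is set the entry lives in the alternate PTEG instead, and XORing
 * with pmap_pteg_mask * 8 flips the PTEG portion of the index while
 * leaving the low three slot bits untouched.
 */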
178277957Sbenno
178390643Sbennostatic struct pvo_entry *
178490643Sbennopmap_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
178577957Sbenno{
178690643Sbenno	struct	pvo_entry *pvo;
178790643Sbenno	int	ptegidx;
178890643Sbenno	u_int	sr;
178977957Sbenno
179090643Sbenno	va &= ~ADDR_POFF;
179190643Sbenno	sr = va_to_sr(pm->pm_sr, va);
179290643Sbenno	ptegidx = va_to_pteg(sr, va);
179390643Sbenno
179490643Sbenno	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
179590643Sbenno		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
179690643Sbenno			if (pteidx_p)
179790643Sbenno				*pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
179890643Sbenno			return (pvo);
179990643Sbenno		}
180090643Sbenno	}
180190643Sbenno
180290643Sbenno	return (NULL);
180377957Sbenno}
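
/*
 * The lookup above depends on the OEA hashed page table scheme: the
 * segment register supplies the VSID for the top four EA bits, and
 * the primary PTEG index is the VSID XORed with the 16-bit page
 * index, masked to the table size.  A sketch of the va_to_pteg()
 * computation, assuming the usual OEA field definitions:
 *
 *	hash = (sr & SR_VSID_MASK) ^
 *	    ((va & ADDR_PIDX) >> ADDR_PIDX_SHFT);
 *	return (hash & pmap_pteg_mask);
 */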
180477957Sbenno
180590643Sbennostatic struct pte *
180690643Sbennopmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
180777957Sbenno{
180890643Sbenno	struct	pte *pt;
180977957Sbenno
181090643Sbenno	/*
181190643Sbenno	 * If we haven't been supplied the ptegidx, calculate it.
181290643Sbenno	 */
181390643Sbenno	if (pteidx == -1) {
181490643Sbenno		int	ptegidx;
181590643Sbenno		u_int	sr;
181677957Sbenno
181790643Sbenno		sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
181890643Sbenno		ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
181990643Sbenno		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
182090643Sbenno	}
182190643Sbenno
182290643Sbenno	pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];
182390643Sbenno
182490643Sbenno	if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
182590643Sbenno		panic("pmap_pvo_to_pte: pvo %p has valid pte in pvo but no "
182690643Sbenno		    "valid pte index", pvo);
182790643Sbenno	}
182890643Sbenno
182990643Sbenno	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
183090643Sbenno		panic("pmap_pvo_to_pte: pvo %p has valid pte index in pvo "
183190643Sbenno		    "pvo but no valid pte", pvo);
183290643Sbenno	}
183390643Sbenno
183490643Sbenno	if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
183590643Sbenno		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
183690643Sbenno			panic("pmap_pvo_to_pte: pvo %p has valid pte in "
183790643Sbenno			    "pmap_pteg_table %p but invalid in pvo", pvo, pt);
183877957Sbenno		}
183990643Sbenno
184090643Sbenno		if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF))
184190643Sbenno		    != 0) {
184290643Sbenno			panic("pmap_pvo_to_pte: pvo %p pte does not match "
184390643Sbenno			    "pte %p in pmap_pteg_table", pvo, pt);
184490643Sbenno		}
184590643Sbenno
184690643Sbenno		return (pt);
184777957Sbenno	}
184877957Sbenno
184990643Sbenno	if (pvo->pvo_pte.pte_hi & PTE_VALID) {
185090643Sbenno		panic("pmap_pvo_to_pte: pvo %p has invalid pte %p in "
185190643Sbenno		    "pmap_pteg_table but valid in pvo", pvo, pt);
185290643Sbenno	}
185377957Sbenno
185490643Sbenno	return (NULL);
185577957Sbenno}
185678880Sbenno
185778880Sbenno/*
185890643Sbenno * XXX: THIS STUFF SHOULD BE IN pte.c?
185978880Sbenno */
186090643Sbennoint
186190643Sbennopmap_pte_spill(vm_offset_t addr)
186278880Sbenno{
186390643Sbenno	struct	pvo_entry *source_pvo, *victim_pvo;
186490643Sbenno	struct	pvo_entry *pvo;
186590643Sbenno	int	ptegidx, i, j;
186690643Sbenno	u_int	sr;
186790643Sbenno	struct	pteg *pteg;
186890643Sbenno	struct	pte *pt;
186978880Sbenno
187090643Sbenno	pmap_pte_spills++;
187190643Sbenno
187290643Sbenno	__asm __volatile("mfsrin %0,%1" : "=r"(sr) : "r"(addr));
187390643Sbenno	ptegidx = va_to_pteg(sr, addr);
187490643Sbenno
187578880Sbenno	/*
187690643Sbenno	 * Have to substitute some entry.  Use the primary hash for this.
187790643Sbenno	 * Use low bits of timebase as random generator.
187878880Sbenno	 */
187990643Sbenno	pteg = &pmap_pteg_table[ptegidx];
188090643Sbenno	__asm __volatile("mftb %0" : "=r"(i));
188190643Sbenno	i &= 7;
188290643Sbenno	pt = &pteg->pt[i];
188378880Sbenno
188490643Sbenno	source_pvo = NULL;
188590643Sbenno	victim_pvo = NULL;
188690643Sbenno	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
188778880Sbenno		/*
188890643Sbenno		 * We need to find a pvo entry for this address.
188978880Sbenno		 */
189090643Sbenno		PMAP_PVO_CHECK(pvo);
189190643Sbenno		if (source_pvo == NULL &&
189290643Sbenno		    pmap_pte_match(&pvo->pvo_pte, sr, addr,
189390643Sbenno		    pvo->pvo_pte.pte_hi & PTE_HID)) {
189490643Sbenno			/*
189590643Sbenno			 * Now found an entry to be spilled into the pteg.
189690643Sbenno			 * The PTE is now valid, so we know it's active.
189790643Sbenno			 */
189890643Sbenno			j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
189978880Sbenno
190090643Sbenno			if (j >= 0) {
190190643Sbenno				PVO_PTEGIDX_SET(pvo, j);
190290643Sbenno				pmap_pte_overflow--;
190390643Sbenno				PMAP_PVO_CHECK(pvo);
190490643Sbenno				return (1);
190590643Sbenno			}
190690643Sbenno
190790643Sbenno			source_pvo = pvo;
190890643Sbenno
190990643Sbenno			if (victim_pvo != NULL)
191090643Sbenno				break;
191190643Sbenno		}
191290643Sbenno
191378880Sbenno		/*
191490643Sbenno		 * We also need the pvo entry of the victim we are replacing
191590643Sbenno		 * so save the R & C bits of the PTE.
191678880Sbenno		 */
191790643Sbenno		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
191890643Sbenno		    pmap_pte_compare(pt, &pvo->pvo_pte)) {
191990643Sbenno			victim_pvo = pvo;
192090643Sbenno			if (source_pvo != NULL)
192190643Sbenno				break;
192290643Sbenno		}
192390643Sbenno	}
192478880Sbenno
192590643Sbenno	if (source_pvo == NULL)
192690643Sbenno		return (0);
192790643Sbenno
192890643Sbenno	if (victim_pvo == NULL) {
192990643Sbenno		if ((pt->pte_hi & PTE_HID) == 0)
193090643Sbenno			panic("pmap_pte_spill: victim p-pte (%p) has no pvo"
193190643Sbenno			    "entry", pt);
193290643Sbenno
193378880Sbenno		/*
193490643Sbenno		 * If this is a secondary PTE, we need to search it's primary
193590643Sbenno		 * pvo bucket for the matching PVO.
193678880Sbenno		 */
193790643Sbenno		LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx ^ pmap_pteg_mask],
193890643Sbenno		    pvo_olink) {
193990643Sbenno			PMAP_PVO_CHECK(pvo);
194090643Sbenno			/*
194190643Sbenno			 * We also need the pvo entry of the victim we are
194290643Sbenno			 * replacing so save the R & C bits of the PTE.
194390643Sbenno			 */
194490643Sbenno			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
194590643Sbenno				victim_pvo = pvo;
194690643Sbenno				break;
194790643Sbenno			}
194890643Sbenno		}
194978880Sbenno
195090643Sbenno		if (victim_pvo == NULL)
195190643Sbenno			panic("pmap_pte_spill: victim s-pte (%p) has no pvo"
195290643Sbenno			    "entry", pt);
195390643Sbenno	}
195478880Sbenno
195590643Sbenno	/*
195690643Sbenno	 * We are invalidating the TLB entry for the EA we are replacing even
195790643Sbenno	 * though it's valid.  If we don't, we lose any ref/chg bit changes
195890643Sbenno	 * contained in the TLB entry.
195990643Sbenno	 */
196090643Sbenno	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;
196178880Sbenno
196290643Sbenno	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
196390643Sbenno	pmap_pte_set(pt, &source_pvo->pvo_pte);
196490643Sbenno
196590643Sbenno	PVO_PTEGIDX_CLR(victim_pvo);
196690643Sbenno	PVO_PTEGIDX_SET(source_pvo, i);
196790643Sbenno	pmap_pte_replacements++;
196890643Sbenno
196990643Sbenno	PMAP_PVO_CHECK(victim_pvo);
197090643Sbenno	PMAP_PVO_CHECK(source_pvo);
197190643Sbenno
197290643Sbenno	return (1);
197390643Sbenno}
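
/*
 * pmap_pte_spill() is the demand-fill half of the overflow scheme: a
 * mapping that was evicted from, or never inserted into, the page
 * table is reloaded from its pvo when the MMU next faults on the
 * address.  A hedged sketch of how a fault handler is expected to use
 * it (field names are illustrative):
 *
 *	case EXC_DSI:
 *		if (pmap_pte_spill(frame->dar))
 *			return;	(the faulting access is simply retried)
 *		(otherwise fall through to vm_fault())
 */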
197490643Sbenno
197590643Sbennostatic int
197690643Sbennopmap_pte_insert(u_int ptegidx, struct pte *pvo_pt)
197790643Sbenno{
197890643Sbenno	struct	pte *pt;
197990643Sbenno	int	i;
198090643Sbenno
198190643Sbenno	/*
198290643Sbenno	 * First try primary hash.
198390643Sbenno	 */
198490643Sbenno	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
198590643Sbenno		if ((pt->pte_hi & PTE_VALID) == 0) {
198690643Sbenno			pvo_pt->pte_hi &= ~PTE_HID;
198790643Sbenno			pmap_pte_set(pt, pvo_pt);
198890643Sbenno			return (i);
198978880Sbenno		}
199090643Sbenno	}
199178880Sbenno
199290643Sbenno	/*
199390643Sbenno	 * Now try secondary hash.
199490643Sbenno	 */
199590643Sbenno	ptegidx ^= pmap_pteg_mask;
199790643Sbenno	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
199890643Sbenno		if ((pt->pte_hi & PTE_VALID) == 0) {
199990643Sbenno			pvo_pt->pte_hi |= PTE_HID;
200090643Sbenno			pmap_pte_set(pt, pvo_pt);
200190643Sbenno			return (i);
200290643Sbenno		}
200390643Sbenno	}
200478880Sbenno
200590643Sbenno	panic("pmap_pte_insert: overflow");
200690643Sbenno	return (-1);
200778880Sbenno}
200884921Sbenno
200990643Sbennostatic boolean_t
201090643Sbennopmap_query_bit(vm_page_t m, int ptebit)
201184921Sbenno{
201290643Sbenno	struct	pvo_entry *pvo;
201390643Sbenno	struct	pte *pt;
201484921Sbenno
201590643Sbenno	if (pmap_attr_fetch(m) & ptebit)
201690643Sbenno		return (TRUE);
201784921Sbenno
201890643Sbenno	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
201990643Sbenno		PMAP_PVO_CHECK(pvo);	/* sanity check */
202084921Sbenno
202190643Sbenno		/*
202290643Sbenno		 * See if we saved the bit off.  If so, cache it and return
202390643Sbenno		 * success.
202490643Sbenno		 */
202590643Sbenno		if (pvo->pvo_pte.pte_lo & ptebit) {
202690643Sbenno			pmap_attr_save(m, ptebit);
202790643Sbenno			PMAP_PVO_CHECK(pvo);	/* sanity check */
202890643Sbenno			return (TRUE);
202990643Sbenno		}
203090643Sbenno	}
203184921Sbenno
203290643Sbenno	/*
203390643Sbenno	 * No luck, now go through the hard part of looking at the PTEs
203490643Sbenno	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
203590643Sbenno	 * the PTEs.
203690643Sbenno	 */
203790643Sbenno	SYNC();
203890643Sbenno	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
203990643Sbenno		PMAP_PVO_CHECK(pvo);	/* sanity check */
204090643Sbenno
204190643Sbenno		/*
204290643Sbenno		 * See if this pvo has a valid PTE.  if so, fetch the
204390643Sbenno		 * REF/CHG bits from the valid PTE.  If the appropriate
204490643Sbenno		 * ptebit is set, cache it and return success.
204590643Sbenno		 */
204690643Sbenno		pt = pmap_pvo_to_pte(pvo, -1);
204790643Sbenno		if (pt != NULL) {
204890643Sbenno			pmap_pte_synch(pt, &pvo->pvo_pte);
204990643Sbenno			if (pvo->pvo_pte.pte_lo & ptebit) {
205090643Sbenno				pmap_attr_save(m, ptebit);
205190643Sbenno				PMAP_PVO_CHECK(pvo);	/* sanity check */
205290643Sbenno				return (TRUE);
205390643Sbenno			}
205490643Sbenno		}
205584921Sbenno	}
205684921Sbenno
205790643Sbenno	return (FALSE);
205884921Sbenno}
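
/*
 * pmap_query_bit() is designed to back the MI REF/CHG queries.  A
 * minimal sketch of such a wrapper, named after the MI pmap interface
 * (illustrative only):
 *
 *	boolean_t
 *	pmap_is_modified(vm_page_t m)
 *	{
 *		return (pmap_query_bit(m, PTE_CHG));
 *	}
 */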
205990643Sbenno
206090643Sbennostatic boolean_t
206190643Sbennopmap_clear_bit(vm_page_t m, int ptebit)
206290643Sbenno{
206390643Sbenno	struct	pvo_entry *pvo;
206490643Sbenno	struct	pte *pt;
206590643Sbenno	int	rv;
206690643Sbenno
206790643Sbenno	/*
206890643Sbenno	 * Clear the cached value.
206990643Sbenno	 */
207090643Sbenno	rv = pmap_attr_fetch(m);
207190643Sbenno	pmap_attr_clear(m, ptebit);
207290643Sbenno
207390643Sbenno	/*
207490643Sbenno	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
207590643Sbenno	 * we can reset the right ones).  note that since the pvo entries and
207690643Sbenno	 * list heads are accessed via BAT0 and are never placed in the page
207790643Sbenno	 * table, we don't have to worry about further accesses setting the
207890643Sbenno	 * REF/CHG bits.
207990643Sbenno	 */
208090643Sbenno	SYNC();
208190643Sbenno
208290643Sbenno	/*
208390643Sbenno	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
208490643Sbenno	 * valid pte clear the ptebit from the valid pte.
208590643Sbenno	 */
208690643Sbenno	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
208790643Sbenno		PMAP_PVO_CHECK(pvo);	/* sanity check */
208890643Sbenno		pt = pmap_pvo_to_pte(pvo, -1);
208990643Sbenno		if (pt != NULL) {
209090643Sbenno			pmap_pte_synch(pt, &pvo->pvo_pte);
209190643Sbenno			if (pvo->pvo_pte.pte_lo & ptebit)
209290643Sbenno				pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
209390643Sbenno		}
209490643Sbenno		rv |= pvo->pvo_pte.pte_lo;
209590643Sbenno		pvo->pvo_pte.pte_lo &= ~ptebit;
209690643Sbenno		PMAP_PVO_CHECK(pvo);	/* sanity check */
209790643Sbenno	}
209890643Sbenno
209990643Sbenno	return ((rv & ptebit) != 0);
210090643Sbenno}
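
/*
 * pmap_clear_bit() clears the attribute everywhere and reports whether
 * it was set anywhere, so the one routine can back both clear-style
 * and test-and-clear-style MI calls.  Illustrative sketches, named
 * after the MI pmap interface (note the real pmap_ts_referenced()
 * returns a count rather than a boolean):
 *
 *	void
 *	pmap_clear_modify(vm_page_t m)
 *	{
 *		pmap_clear_bit(m, PTE_CHG);
 *	}
 *
 *	boolean_t
 *	pmap_ts_referenced(vm_page_t m)
 *	{
 *		return (pmap_clear_bit(m, PTE_REF));
 *	}
 */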
2101