/*-
 * Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 */

 /*
  * VM layout notes:
  *
  * Kernel and user threads run within one common virtual address space
  * defined by AS=0.
  *
  * Virtual address space layout:
  * -----------------------------
  * 0x0000_0000 - 0xafff_ffff	: user process
  * 0xb000_0000 - 0xbfff_ffff	: pmap_mapdev()-ed area (PCI/PCIE etc.)
  * 0xc000_0000 - 0xc0ff_ffff	: kernel reserved
  *   0xc000_0000 - kernelend	: kernel code+data, env, metadata etc.
  * 0xc100_0000 - 0xfeef_ffff	: KVA
  *   0xc100_0000 - 0xc100_3fff : reserved for page zero/copy
  *   0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs
  *   0xc200_4000 - 0xc200_8fff : guard page + kstack0
  *   0xc200_9000 - 0xfeef_ffff	: actual free KVA space
  * 0xfef0_0000 - 0xffff_ffff	: I/O devices region
  */

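 /*
  * Note: the kernel reserved region above is covered by static TLB1
  * translations set up at boot (see mmu_booke_bootstrap() and
  * tlb1_mapin_region()), while the KVA range is backed by the kernel
  * page tables and TLB0.
  */
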
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/booke/pmap.c 187151 2009-01-13 16:15:49Z raj $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/msgbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/powerpc.h>

#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/vmparam.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#include "mmu_if.h"

#define DEBUG
#undef DEBUG

#ifdef  DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

#define TODO			panic("%s: not implemented", __func__);
#define memmove(d, s, l)	bcopy(s, d, l)

#include "opt_sched.h"
#ifndef SCHED_4BSD
#error "e500 only works with SCHED_4BSD which uses a global scheduler lock."
#endif
extern struct mtx sched_lock;

/* Kernel physical load address. */
extern uint32_t kernload;

struct mem_region availmem_regions[MEM_REGIONS];
int availmem_regions_sz;

/* Reserved KVA space and mutex for mmu_booke_zero_page. */
static vm_offset_t zero_page_va;
static struct mtx zero_page_mutex;

static struct mtx tlbivax_mutex;

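/* Note: bcopy() takes (src, dst, len) whereas memmove() takes (dst, src, len). */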
/*
 * Reserved KVA space for mmu_booke_zero_page_idle. This is used
 * by the idle thread only; no lock is required.
 */
static vm_offset_t zero_page_idle_va;

/* Reserved KVA space and mutex for mmu_booke_copy_page. */
static vm_offset_t copy_page_src_va;
static vm_offset_t copy_page_dst_va;
static struct mtx copy_page_mutex;

/**************************************************************************/
/* PMAP */
/**************************************************************************/

static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, boolean_t);

unsigned int kptbl_min;		/* Index of the first kernel ptbl. */
unsigned int kernel_ptbls;	/* Number of KVA ptbls. */

static int pagedaemon_waken;

/*
 * If user pmap is processed with mmu_booke_remove and the resident count
 * drops to 0, there are no more pages to remove, so we need not continue.
 */
#define PMAP_REMOVE_DONE(pmap) \
	((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)

extern void tlb_lock(uint32_t *);
extern void tlb_unlock(uint32_t *);
extern void tid_flush(tlbtid_t);

/**************************************************************************/
/* TLB and TID handling */
/**************************************************************************/

/* Translation ID busy table */
static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];

/*
 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
 * core revisions and should be read from h/w registers during early config.
 */
uint32_t tlb0_entries;
uint32_t tlb0_ways;
uint32_t tlb0_entries_per_way;

#define TLB0_ENTRIES		(tlb0_entries)
#define TLB0_WAYS		(tlb0_ways)
#define TLB0_ENTRIES_PER_WAY	(tlb0_entries_per_way)

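/*
 * tidbusy[cpu][tid] records the pmap that currently owns a given TID on
 * each CPU; slot 0 is reserved for kernel_pmap (see mmu_booke_bootstrap()).
 */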
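/* TLB1 on e500 cores has 16 fully-associative, variable-size entries. */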
#define TLB1_ENTRIES 16

/* In-ram copy of the TLB1 */
static tlb_entry_t tlb1[TLB1_ENTRIES];

/* Next free entry in the TLB1 */
static unsigned int tlb1_idx;

static tlbtid_t tid_alloc(struct pmap *);

static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);

static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t);
static void tlb1_write_entry(unsigned int);
static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
static vm_size_t tlb1_mapin_region(vm_offset_t, vm_offset_t, vm_size_t);

static vm_size_t tsize2size(unsigned int);
static unsigned int size2tsize(vm_size_t);
static unsigned int ilog2(unsigned int);

static void set_mas4_defaults(void);

static inline void tlb0_flush_entry(vm_offset_t);
static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);

/**************************************************************************/
/* Page table management */
/**************************************************************************/

/* Data for the pv entry allocation mechanism */
static uma_zone_t pvzone;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;

#define PV_ENTRY_ZONE_MIN	2048	/* min pv entries in uma zone */

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC	200
#endif

static void ptbl_init(void);
static struct ptbl_buf *ptbl_buf_alloc(void);
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);

static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int);
static void ptbl_free(mmu_t, pmap_t, unsigned int);
static void ptbl_hold(mmu_t, pmap_t, unsigned int);
static int ptbl_unhold(mmu_t, pmap_t, unsigned int);

static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);

static pv_entry_t pv_alloc(void);
static void pv_free(pv_entry_t);
static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
static void pv_remove(pmap_t, vm_offset_t, vm_page_t);

/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
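/*
 * PMAP_SHPGPERPROC is used in mmu_booke_init() to size the pv entry zone:
 * pv_entry_max = shpgperproc * maxproc + cnt.v_page_count.
 */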
#define PTBL_BUFS		(128 * 16)

struct ptbl_buf {
	TAILQ_ENTRY(ptbl_buf) link;	/* list link */
	vm_offset_t kva;		/* va of mapping */
};

/* ptbl free list and a lock used for access synchronization. */
static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
static struct mtx ptbl_buf_freelist_lock;

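/*
 * The bootstrap code reserves PTBL_BUFS * PTBL_PAGES * PAGE_SIZE bytes of
 * KVA for ptbl bufs (see ptbl_buf_pool_vabase in mmu_booke_bootstrap()).
 */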
/* Base address of kva space allocated for ptbl bufs. */
static vm_offset_t ptbl_buf_pool_vabase;

/* Pointer to ptbl_buf structures. */
static struct ptbl_buf *ptbl_bufs;

/*
 * Kernel MMU interface
 */
static void		mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
static void		mmu_booke_clear_modify(mmu_t, vm_page_t);
static void		mmu_booke_clear_reference(mmu_t, vm_page_t);
static void		mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t, vm_size_t,
    vm_offset_t);
static void		mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
static void		mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, boolean_t);
static void		mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_page_t, vm_prot_t);
static void		mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t);
static vm_paddr_t	mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
static vm_page_t	mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
    vm_prot_t);
static void		mmu_booke_init(mmu_t);
static boolean_t	mmu_booke_is_modified(mmu_t, vm_page_t);
static boolean_t	mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
static boolean_t	mmu_booke_ts_referenced(mmu_t, vm_page_t);
static vm_offset_t	mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t,
    int);
static int		mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t);
static void		mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
    vm_object_t, vm_pindex_t, vm_size_t);
static boolean_t	mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
static void		mmu_booke_page_init(mmu_t, vm_page_t);
static int		mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
static void		mmu_booke_pinit(mmu_t, pmap_t);
static void		mmu_booke_pinit0(mmu_t, pmap_t);
static void		mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_prot_t);
static void		mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
static void		mmu_booke_qremove(mmu_t, vm_offset_t, int);
static void		mmu_booke_release(mmu_t, pmap_t);
static void		mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
static void		mmu_booke_remove_all(mmu_t, vm_page_t);
static void		mmu_booke_remove_write(mmu_t, vm_page_t);
static void		mmu_booke_zero_page(mmu_t, vm_page_t);
static void		mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
static void		mmu_booke_zero_page_idle(mmu_t, vm_page_t);
static void		mmu_booke_activate(mmu_t, struct thread *);
static void		mmu_booke_deactivate(mmu_t, struct thread *);
static void		mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
static void		*mmu_booke_mapdev(mmu_t, vm_offset_t, vm_size_t);
static void		mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
static vm_paddr_t	mmu_booke_kextract(mmu_t, vm_offset_t);
static void		mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t);
static void		mmu_booke_kremove(mmu_t, vm_offset_t);
static boolean_t	mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
static boolean_t	mmu_booke_page_executable(mmu_t, vm_page_t);

static mmu_method_t mmu_booke_methods[] = {
	/* pmap dispatcher interface */
	MMUMETHOD(mmu_change_wiring,	mmu_booke_change_wiring),
	MMUMETHOD(mmu_clear_modify,	mmu_booke_clear_modify),
	MMUMETHOD(mmu_clear_reference,	mmu_booke_clear_reference),
	MMUMETHOD(mmu_copy,		mmu_booke_copy),
	MMUMETHOD(mmu_copy_page,	mmu_booke_copy_page),
	MMUMETHOD(mmu_enter,		mmu_booke_enter),
	MMUMETHOD(mmu_enter_object,	mmu_booke_enter_object),
	MMUMETHOD(mmu_enter_quick,	mmu_booke_enter_quick),
	MMUMETHOD(mmu_extract,		mmu_booke_extract),
	MMUMETHOD(mmu_extract_and_hold,	mmu_booke_extract_and_hold),
	MMUMETHOD(mmu_init,		mmu_booke_init),
	MMUMETHOD(mmu_is_modified,	mmu_booke_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	mmu_booke_is_prefaultable),
	MMUMETHOD(mmu_ts_referenced,	mmu_booke_ts_referenced),
	MMUMETHOD(mmu_map,		mmu_booke_map),
	MMUMETHOD(mmu_mincore,		mmu_booke_mincore),
	MMUMETHOD(mmu_object_init_pt,	mmu_booke_object_init_pt),
	MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
	MMUMETHOD(mmu_page_init,	mmu_booke_page_init),
	MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		mmu_booke_pinit),
	MMUMETHOD(mmu_pinit0,		mmu_booke_pinit0),
	MMUMETHOD(mmu_protect,		mmu_booke_protect),
	MMUMETHOD(mmu_qenter,		mmu_booke_qenter),
	MMUMETHOD(mmu_qremove,		mmu_booke_qremove),
	MMUMETHOD(mmu_release,		mmu_booke_release),
	MMUMETHOD(mmu_remove,		mmu_booke_remove),
	MMUMETHOD(mmu_remove_all,	mmu_booke_remove_all),
	MMUMETHOD(mmu_remove_write,	mmu_booke_remove_write),
	MMUMETHOD(mmu_zero_page,	mmu_booke_zero_page),
	MMUMETHOD(mmu_zero_page_area,	mmu_booke_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	mmu_booke_zero_page_idle),
	MMUMETHOD(mmu_activate,		mmu_booke_activate),
	MMUMETHOD(mmu_deactivate,	mmu_booke_deactivate),

	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	mmu_booke_bootstrap),
	MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
	MMUMETHOD(mmu_mapdev,		mmu_booke_mapdev),
	MMUMETHOD(mmu_kenter,		mmu_booke_kenter),
	MMUMETHOD(mmu_kextract,		mmu_booke_kextract),
/*	MMUMETHOD(mmu_kremove,		mmu_booke_kremove),	*/
	MMUMETHOD(mmu_page_executable,	mmu_booke_page_executable),
	MMUMETHOD(mmu_unmapdev,		mmu_booke_unmapdev),

	{ 0, 0 }
};

static mmu_def_t booke_mmu = {
	MMU_TYPE_BOOKE,
	mmu_booke_methods,
	0
};
MMU_DEF(booke_mmu);

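/*
 * MMU_DEF() registers the method table above with the machine-independent
 * pmap dispatcher (see mmu_if.h); the mmu_booke_* routines are invoked
 * through these MMUMETHOD bindings.
 */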
/* Read TLB0 size and associativity from the h/w config register. */
static __inline void
tlb0_get_tlbconf(void)
{
	uint32_t tlb0_cfg;

	tlb0_cfg = mfspr(SPR_TLB0CFG);
	tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
	tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
	tlb0_entries_per_way = tlb0_entries / tlb0_ways;
}
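	/* Example (e500v2): 512 entries, 4 ways => 128 entries per way. */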

/* Initialize pool of kva ptbl buffers. */
static void
ptbl_init(void)
{
	int i;

	CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
	    (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
	CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
	    __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);

	mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
	TAILQ_INIT(&ptbl_buf_freelist);

	for (i = 0; i < PTBL_BUFS; i++) {
		ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
	}
}

/* Get a ptbl_buf from the freelist. */
static struct ptbl_buf *
ptbl_buf_alloc(void)
{
	struct ptbl_buf *buf;

	mtx_lock(&ptbl_buf_freelist_lock);
	buf = TAILQ_FIRST(&ptbl_buf_freelist);
	if (buf != NULL)
		TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	return (buf);
}

/* Return ptbl buf to the free pool. */
static void
ptbl_buf_free(struct ptbl_buf *buf)
{

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	mtx_lock(&ptbl_buf_freelist_lock);
	TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);
}

/*
 * Search the pmap's list of allocated ptbl bufs for the buf that maps the
 * given ptbl, and release it.
 */
static void
ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
{
	struct ptbl_buf *pbuf;

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
		if (pbuf->kva == (vm_offset_t)ptbl) {
			/* Remove from pmap ptbl buf list. */
			TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);

			/* Free corresponding ptbl buf. */
			ptbl_buf_free(pbuf);
			break;
		}
}

/* Allocate page table. */
static pte_t *
ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	vm_page_t mtbl[PTBL_PAGES];
	vm_page_t m;
	struct ptbl_buf *pbuf;
	unsigned int pidx;
	pte_t *ptbl;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_alloc: invalid pdir_idx"));
	KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
	    ("ptbl_alloc: valid ptbl entry exists!"));

	pbuf = ptbl_buf_alloc();
	if (pbuf == NULL)
		panic("ptbl_alloc: couldn't alloc kernel virtual memory");

	ptbl = (pte_t *)pbuf->kva;

	CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);

	/* Allocate ptbl pages, this will sleep! */
	for (i = 0; i < PTBL_PAGES; i++) {
		pidx = (PTBL_PAGES * pdir_idx) + i;
		while ((m = vm_page_alloc(NULL, pidx,
		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {

			PMAP_UNLOCK(pmap);
			vm_page_unlock_queues();
			VM_WAIT;
			vm_page_lock_queues();
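			/*
			 * Drop the locks while sleeping in VM_WAIT so the
			 * pagedaemon can make progress, then retake them
			 * and retry the allocation.
			 */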
			PMAP_LOCK(pmap);
		}
		mtbl[i] = m;
	}

	/* Map allocated pages into kernel_pmap. */
	mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);

	/* Zero whole ptbl. */
	bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);

	/* Add pbuf to the pmap ptbl bufs list. */
	TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);

	return (ptbl);
}

/* Free ptbl pages and invalidate pdir entry. */
static void
ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_offset_t va;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_free: invalid pdir_idx"));

	ptbl = pmap->pm_pdir[pdir_idx];

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));

	/*
	 * Invalidate the pdir entry as soon as possible, so that other CPUs
	 * don't attempt to look up the page tables we are releasing.
	 */
	mtx_lock_spin(&tlbivax_mutex);

	pmap->pm_pdir[pdir_idx] = NULL;

	mtx_unlock_spin(&tlbivax_mutex);

	for (i = 0; i < PTBL_PAGES; i++) {
		va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
		pa = pte_vatopa(mmu, kernel_pmap, va);
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_free_zero(m);
		atomic_subtract_int(&cnt.v_wire_count, 1);
		mmu_booke_kremove(mmu, va);
	}

	ptbl_free_pmap_ptbl(pmap, ptbl);
}

/*
 * Decrement ptbl pages hold count and attempt to free ptbl pages.
 * Called when removing pte entry from ptbl.
 *
 * Return 1 if ptbl pages were freed.
 */
static int
ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_unhold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_unhold: unholding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	//debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
	KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
	    ("ptbl_unhold: non kva ptbl"));

	/* decrement hold count */
	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(mmu, kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->wire_count--;
	}

	/*
	 * Free ptbl pages if there are no pte entries in this ptbl.
	 * wire_count has the same value for all ptbl pages, so check the
	 * last page.
	 */
	if (m->wire_count == 0) {
		ptbl_free(mmu, pmap, pdir_idx);

		//debugf("ptbl_unhold: e (freed ptbl)\n");
		return (1);
	}

	return (0);
}

/*
 * Increment hold count for ptbl pages. This routine is used when a new pte
 * entry is being inserted into the ptbl.
 */
static void
ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	vm_paddr_t pa;
	pte_t *ptbl;
	vm_page_t m;
	int i;

	CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
	    pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_hold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_hold: holding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));

	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(mmu, kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->wire_count++;
	}
}

/* Allocate pv_entry structure. */
pv_entry_t
pv_alloc(void)
{
	pv_entry_t pv;

	pv_entry_count++;
	if ((pv_entry_count > pv_entry_high_water) &&
	    (pagedaemon_waken == 0)) {
		pagedaemon_waken = 1;
		wakeup(&vm_pages_needed);
	}
	pv = uma_zalloc(pvzone, M_NOWAIT);

	return (pv);
}

/* Free pv_entry structure. */
static __inline void
pv_free(pv_entry_t pve)
{

	pv_entry_count--;
	uma_zfree(pvzone, pve);
}


/* Allocate and initialize pv_entry structure. */
static void
pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pve;

	//int su = (pmap == kernel_pmap);
	//debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
	//	(u_int32_t)pmap, va, (u_int32_t)m);

	pve = pv_alloc();
	if (pve == NULL)
		panic("pv_insert: no pv entries!");

	pve->pv_pmap = pmap;
	pve->pv_va = va;

	/* add to pv_list */
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);

	//debugf("pv_insert: e\n");
}

/* Destroy pv entry. */
static void
pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pve;

	//int su = (pmap == kernel_pmap);
	//debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	/* find pv entry */
	TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
		if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
			/* remove from pv_list */
			TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
			if (TAILQ_EMPTY(&m->md.pv_list))
				vm_page_flag_clear(m, PG_WRITEABLE);

			/* free pv entry struct */
			pv_free(pve);
			break;
		}
	}

	//debugf("pv_remove: e\n");
}

/*
 * Clean pte entry, try to free page table page if requested.
 *
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	vm_page_t m;
	pte_t *ptbl;
	pte_t *pte;

	//int su = (pmap == kernel_pmap);
	//debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
	//		su, (u_int32_t)pmap, va, flags);

	ptbl = pmap->pm_pdir[pdir_idx];
	KASSERT(ptbl, ("pte_remove: null ptbl"));

	pte = &ptbl[ptbl_idx];

	if (pte == NULL || !PTE_ISVALID(pte))
		return (0);

	/* Get vm_page_t for mapped pte. */
	m = PHYS_TO_VM_PAGE(PTE_PA(pte));

	if (PTE_ISWIRED(pte))
		pmap->pm_stats.wired_count--;

	if (!PTE_ISFAKE(pte)) {
		/* Handle managed entry. */
		if (PTE_ISMANAGED(pte)) {

			/* Handle modified pages. */
			if (PTE_ISMODIFIED(pte))
				vm_page_dirty(m);

			/* Referenced pages. */
			if (PTE_ISREFERENCED(pte))
				vm_page_flag_set(m, PG_REFERENCED);

			/* Remove pv_entry from pv_list. */
			pv_remove(pmap, va, m);
		}
	}

	mtx_lock_spin(&tlbivax_mutex);

	tlb0_flush_entry(va);
	pte->flags = 0;
	pte->rpn = 0;

	mtx_unlock_spin(&tlbivax_mutex);

	pmap->pm_stats.resident_count--;

	if (flags & PTBL_UNHOLD) {
		//debugf("pte_remove: e (unhold)\n");
		return (ptbl_unhold(mmu, pmap, pdir_idx));
	}

	//debugf("pte_remove: e\n");
	return (0);
}

/*
 * Insert PTE for a given page and virtual address.
 */
static void
pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *ptbl, *pte;

	CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
	    pmap == kernel_pmap, pmap, va);

	/* Get the page table pointer. */
	ptbl = pmap->pm_pdir[pdir_idx];

	if (ptbl == NULL) {
		/* Allocate page table pages. */
		ptbl = ptbl_alloc(mmu, pmap, pdir_idx);
	} else {
		/*
		 * Check if there is valid mapping for requested
		 * va, if there is, remove it.
		 */
		pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
		if (PTE_ISVALID(pte)) {
			pte_remove(mmu, pmap, va, PTBL_HOLD);
		} else {
			/*
			 * pte is not used, increment hold count
			 * for ptbl pages.
			 */
			if (pmap != kernel_pmap)
				ptbl_hold(mmu, pmap, pdir_idx);
		}
	}

	/*
	 * Insert pv_entry into pv_list for mapped page if part of managed
	 * memory.
	 */
	if ((m->flags & PG_FICTITIOUS) == 0) {
		if ((m->flags & PG_UNMANAGED) == 0) {
			flags |= PTE_MANAGED;

			/* Create and insert pv entry. */
			pv_insert(pmap, va, m);
		}
	} else {
		flags |= PTE_FAKE;
	}

	pmap->pm_stats.resident_count++;

	mtx_lock_spin(&tlbivax_mutex);

	tlb0_flush_entry(va);
	if (pmap->pm_pdir[pdir_idx] == NULL) {
		/*
		 * If we just allocated a new page table, hook it in
		 * the pdir.
		 */
		pmap->pm_pdir[pdir_idx] = ptbl;
	}
	pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
	pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK;
	pte->flags |= (PTE_VALID | flags);

	mtx_unlock_spin(&tlbivax_mutex);
}

/* Return the pa for the given pmap/va. */
static vm_paddr_t
pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa = 0;
	pte_t *pte;

	pte = pte_find(mmu, pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte))
		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
	return (pa);
}

/* Get a pointer to a PTE in a page table. */
static pte_t *
pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	if (pmap->pm_pdir[pdir_idx])
		return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));

	return (NULL);
}

/**************************************************************************/
/* PMAP related */
/**************************************************************************/

/*
 * This is called during e500_init, before the system is really initialized.
 */
static void
mmu_booke_bootstrap(mmu_t mmu, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	vm_offset_t phys_kernelend;
	struct mem_region *mp, *mp1;
	int cnt, i, j;
	u_int s, e, sz;
	u_int phys_avail_count;
	vm_size_t physsz, hwphyssz, kstack0_sz;
	vm_offset_t kernel_pdir, kstack0;
	vm_paddr_t kstack0_phys;

	debugf("mmu_booke_bootstrap: entered\n");

	/* Initialize invalidation mutex */
	mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);

	/* Read TLB0 size and associativity. */
	tlb0_get_tlbconf();

	/* Align kernel start and end address (kernel image). */
	kernelstart = trunc_page(kernelstart);
	kernelend = round_page(kernelend);

	/* Allocate space for the message buffer. */
	msgbufp = (struct msgbuf *)kernelend;
	kernelend += MSGBUF_SIZE;
	debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp,
	    kernelend);

	kernelend = round_page(kernelend);

	/* Allocate space for ptbl_bufs. */
	ptbl_bufs = (struct ptbl_buf *)kernelend;
	kernelend += sizeof(struct ptbl_buf) * PTBL_BUFS;
	debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs,
	    kernelend);

	kernelend = round_page(kernelend);

	/* Allocate PTE tables for kernel KVA. */
	kernel_pdir = kernelend;
	kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS +
	    PDIR_SIZE - 1) / PDIR_SIZE;
	kernelend += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
	debugf(" kernel ptbls: %d\n", kernel_ptbls);
	debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, kernelend);

	debugf(" kernelend: 0x%08x\n", kernelend);
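	/*
	 * If the kernel image plus the data allocated above does not fit in
	 * the initial 16 MB (0x1000000) TLB1 mapping, round kernelend up to
	 * a 4 MB boundary and cover the excess with additional TLB1 entries;
	 * otherwise just round up to the 16 MB already mapped.
	 */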
	if (kernelend - kernelstart > 0x1000000) {
		kernelend = (kernelend + 0x3fffff) & ~0x3fffff;
		tlb1_mapin_region(kernelstart + 0x1000000,
		    kernload + 0x1000000, kernelend - kernelstart - 0x1000000);
	} else
		kernelend = (kernelend + 0xffffff) & ~0xffffff;

	debugf(" updated kernelend: 0x%08x\n", kernelend);

	/*
	 * Clear the structures - note we can only do it safely after the
	 * possible additional TLB1 translations are in place (above) so that
	 * all range up to the currently calculated 'kernelend' is covered.
	 */
	memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
	memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);

	/*******************************************************/
	/* Set the start and end of kva. */
	/*******************************************************/
	virtual_avail = kernelend;
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/* Allocate KVA space for page zero/copy operations. */
	zero_page_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	zero_page_idle_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	copy_page_src_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	copy_page_dst_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	debugf("zero_page_va = 0x%08x\n", zero_page_va);
	debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va);
	debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
	debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);

	/* Initialize page zero/copy mutexes. */
	mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
	mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);

	/* Allocate KVA space for ptbl bufs. */
	ptbl_buf_pool_vabase = virtual_avail;
	virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
	debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n",
	    ptbl_buf_pool_vabase, virtual_avail);

	/* Calculate corresponding physical addresses for the kernel region. */
	phys_kernelend = kernload + (kernelend - kernelstart);
	debugf("kernel image and allocated data:\n");
	debugf(" kernload    = 0x%08x\n", kernload);
	debugf(" kernelstart = 0x%08x\n", kernelstart);
	debugf(" kernelend   = 0x%08x\n", kernelend);
	debugf(" kernel size = 0x%08x\n", kernelend - kernelstart);

	if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
		panic("mmu_booke_bootstrap: phys_avail too small");

	/*
	 * Remove kernel physical address range from avail regions list. Page
	 * align all regions.  Non-page aligned memory isn't very interesting
	 * to us.  Also, sort the entries for ascending addresses.
	 */
	sz = 0;
	cnt = availmem_regions_sz;
	debugf("processing avail regions:\n");
	for (mp = availmem_regions; mp->mr_size; mp++) {
		s = mp->mr_start;
		e = mp->mr_start + mp->mr_size;
		debugf(" %08x-%08x -> ", s, e);
		/* Check whether this region holds all of the kernel. */
		if (s < kernload && e > phys_kernelend) {
			availmem_regions[cnt].mr_start = phys_kernelend;
			availmem_regions[cnt++].mr_size = e - phys_kernelend;
			e = kernload;
		}
		/* Look whether this region starts within the kernel. */
		if (s >= kernload && s < phys_kernelend) {
			if (e <= phys_kernelend)
				goto empty;
			s = phys_kernelend;
		}
		/* Now look whether this region ends within the kernel. */
		if (e > kernload && e <= phys_kernelend) {
			if (s >= kernload)
				goto empty;
			e = kernload;
		}
		/* Now page align the start and size of the region. */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s)
			e = s;
		sz = e - s;
		debugf("%08x-%08x = %x\n", s, e, sz);

		/* Check whether some memory is left here. */
		if (sz == 0) {
		empty:
			memmove(mp, mp + 1,
			    (cnt - (mp - availmem_regions)) * sizeof(*mp));
			cnt--;
			mp--;
			continue;
		}

		/* Do an insertion sort. */
		for (mp1 = availmem_regions; mp1 < mp; mp1++)
			if (s < mp1->mr_start)
				break;
		if (mp1 < mp) {
			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
			mp1->mr_start = s;
			mp1->mr_size = sz;
		} else {
			mp->mr_start = s;
			mp->mr_size = sz;
		}
	}
	availmem_regions_sz = cnt;

	/*******************************************************/
	/* Steal physical memory for kernel stack from the end */
	/* of the first avail region                           */
	/*******************************************************/
	kstack0_sz = KSTACK_PAGES * PAGE_SIZE;
	kstack0_phys = availmem_regions[0].mr_start +
	    availmem_regions[0].mr_size;
	kstack0_phys -= kstack0_sz;
	availmem_regions[0].mr_size -= kstack0_sz;

	/*******************************************************/
	/* Fill in phys_avail table, based on availmem_regions */
	/*******************************************************/
	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);

	debugf("fill in phys_avail:\n");
	for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {

		debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
		    availmem_regions[i].mr_start,
		    availmem_regions[i].mr_start +
		        availmem_regions[i].mr_size,
		    availmem_regions[i].mr_size);

		if (hwphyssz != 0 &&
		    (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
			debugf(" hw.physmem adjust\n");
			if (physsz < hwphyssz) {
				phys_avail[j] = availmem_regions[i].mr_start;
				phys_avail[j + 1] =
				    availmem_regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}

		phys_avail[j] = availmem_regions[i].mr_start;
		phys_avail[j + 1] = availmem_regions[i].mr_start +
		    availmem_regions[i].mr_size;
		phys_avail_count++;
		physsz += availmem_regions[i].mr_size;
	}
	physmem = btoc(physsz);

	/* Calculate the last available physical address. */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	debugf("Maxmem = 0x%08lx\n", Maxmem);
	debugf("phys_avail_count = %d\n", phys_avail_count);
	debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem,
	    physmem);

	/*******************************************************/
	/* Initialize (statically allocated) kernel pmap. */
	/*******************************************************/
	PMAP_LOCK_INIT(kernel_pmap);
	kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;

	debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap);
	debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls);
	debugf("kernel pdir range: 0x%08x - 0x%08x\n",
	    kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1);

	/* Initialize kernel pdir */
	for (i = 0; i < kernel_ptbls; i++)
		kernel_pmap->pm_pdir[kptbl_min + i] =
		    (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES));

	for (i = 0; i < MAXCPU; i++) {
		kernel_pmap->pm_tid[i] = TID_KERNEL;

		/* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
		tidbusy[i][0] = kernel_pmap;
	}
	/* Mark kernel_pmap active on all CPUs */
	kernel_pmap->pm_active = ~0;

	/*******************************************************/
	/* Final setup */
	/*******************************************************/

	/* Enter kstack0 into kernel map, provide guard page */
	kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	thread0.td_kstack = kstack0;
	thread0.td_kstack_pages = KSTACK_PAGES;

	debugf("kstack_sz = 0x%08x\n", kstack0_sz);
	debugf("kstack0_phys at 0x%08x - 0x%08x\n",
	    kstack0_phys, kstack0_phys + kstack0_sz);
	debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz);

	virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
	for (i = 0; i < KSTACK_PAGES; i++) {
		mmu_booke_kenter(mmu, kstack0, kstack0_phys);
		kstack0 += PAGE_SIZE;
		kstack0_phys += PAGE_SIZE;
	}

	debugf("virtual_avail = %08x\n", virtual_avail);
	debugf("virtual_end   = %08x\n", virtual_end);

	debugf("mmu_booke_bootstrap: exit\n");
}

/*
 * Get the physical page address for the given pmap/virtual address.
 */
static vm_paddr_t
mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa;

	PMAP_LOCK(pmap);
	pa = pte_vatopa(mmu, pmap, va);
	PMAP_UNLOCK(pmap);

	return (pa);
}

/*
 * Extract the physical page address associated with the given
 * kernel virtual address.
 */
static vm_paddr_t
mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
{

	return (pte_vatopa(mmu, kernel_pmap, va));
}

/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */
static void
mmu_booke_init(mmu_t mmu)
{
	int shpgperproc = PMAP_SHPGPERPROC;

	/*
	 * Initialize the address space (zone) for the pv entries.  Set a
	 * high water mark so that the system can recover from excessive
	 * numbers of pv entries.
	 */
	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);

	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;

	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
	pv_entry_high_water = 9 * (pv_entry_max / 10);

	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);

	/* Pre-fill pvzone with initial number of pv entries. */
	uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);

	/* Initialize ptbl allocation. */
	ptbl_init();
}

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
static void
mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by mmu_booke_qenter.
 */
static void
mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		mmu_booke_kremove(mmu, va);
		va += PAGE_SIZE;
	}
}

/*
 * Map a wired page into kernel virtual address space.
 */
static void
mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	uint32_t flags;
	pte_t *pte;

	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
	    (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));

#if 0
	/* assume IO mapping, set I, G bits */
	flags = (PTE_G | PTE_I | PTE_FAKE);

	/* if mapping is within system memory, do not set I, G bits */
	for (i = 0; i < totalmem_regions_sz; i++) {
		if ((pa >= totalmem_regions[i].mr_start) &&
				(pa < (totalmem_regions[i].mr_start +
				       totalmem_regions[i].mr_size))) {
			flags &= ~(PTE_I | PTE_G | PTE_FAKE);
			break;
		}
	}
#else
	flags = 0;
#endif

	flags |= (PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID);
	flags |= PTE_M;

	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);

	mtx_lock_spin(&tlbivax_mutex);

	if (PTE_ISVALID(pte)) {

		CTR1(KTR_PMAP, "%s: replacing entry!", __func__);

		/* Flush entry from TLB0 */
		tlb0_flush_entry(va);
	}

	pte->rpn = pa & ~PTE_PA_MASK;
	pte->flags = flags;

	//debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
	//		"pa=0x%08x rpn=0x%08x flags=0x%08x\n",
	//		pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);

	/* Flush the real memory from the instruction cache. */
	if ((flags & (PTE_I | PTE_G)) == 0) {
		__syncicache((void *)va, PAGE_SIZE);
	}

	mtx_unlock_spin(&tlbivax_mutex);
}

/*
 * Remove a page from kernel page table.
 */
static void
mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *pte;

//	CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va));

	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
	    (va <= VM_MAX_KERNEL_ADDRESS)),
	    ("mmu_booke_kremove: invalid va"));

	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);

	if (!PTE_ISVALID(pte)) {

		CTR1(KTR_PMAP, "%s: invalid pte", __func__);

		return;
	}

	mtx_lock_spin(&tlbivax_mutex);

	/* Invalidate entry in TLB0, update PTE. */
	tlb0_flush_entry(va);
	pte->flags = 0;
	pte->rpn = 0;

	mtx_unlock_spin(&tlbivax_mutex);
}

/*
 * Initialize pmap associated with process 0.
 */
static void
mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
{

	mmu_booke_pinit(mmu, pmap);
	PCPU_SET(curpmap, pmap);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
static void
mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
{
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);

	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));

	PMAP_LOCK_INIT(pmap);
	for (i = 0; i < MAXCPU; i++)
		pmap->pm_tid[i] = TID_NONE;
	pmap->pm_active = 0;
	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
	bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
	TAILQ_INIT(&pmap->pm_ptbl_list);
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by mmu_booke_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
static void
mmu_booke_release(mmu_t mmu, pmap_t pmap)
{

	printf("mmu_booke_release: s\n");

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));

	PMAP_LOCK_DESTROY(pmap);
}

#if 0
/* Not needed, kernel page tables are statically allocated. */
void
mmu_booke_growkernel(vm_offset_t maxkvaddr)
{
}
#endif

/*
 * Insert the given physical page at the specified virtual address in the
 * target physical map with the protection requested. If specified the page
 * will be wired down.
 */
static void
mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

static void
mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{
	pte_t *pte;
	vm_paddr_t pa;
	uint32_t flags;
	int su, sync;

	pa = VM_PAGE_TO_PHYS(m);
	su = (pmap == kernel_pmap);
	sync = 0;

	//debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
	//		"pa=0x%08x prot=0x%08x wired=%d)\n",
	//		(u_int32_t)pmap, su, pmap->pm_tid,
	//		(u_int32_t)m, va, pa, prot, wired);

	if (su) {
		KASSERT(((va >= virtual_avail) &&
		    (va <= VM_MAX_KERNEL_ADDRESS)),
		    ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
	} else {
		KASSERT((va <= VM_MAXUSER_ADDRESS),
		    ("mmu_booke_enter_locked: user pmap, non user va"));
	}

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/*
	 * If there is an existing mapping, and the physical address has not
	 * changed, this must be a protection or wiring change.
	 */
	if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
	    (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {

		/*
		 * Before actually updating pte->flags we calculate and
		 * prepare its new value in a helper var.
		 */
		flags = pte->flags;
		flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);

		/* Wiring change, just update stats. */
		if (wired) {
			if (!PTE_ISWIRED(pte)) {
				flags |= PTE_WIRED;
				pmap->pm_stats.wired_count++;
			}
		} else {
			if (PTE_ISWIRED(pte)) {
				flags &= ~PTE_WIRED;
				pmap->pm_stats.wired_count--;
			}
		}

		if (prot & VM_PROT_WRITE) {
			/* Add write permissions. */
			flags |= PTE_SW;
			if (!su)
				flags |= PTE_UW;
		} else {
			/* Handle modified pages, sense modify status. */

			/*
			 * The PTE_MODIFIED flag could be set by underlying
			 * TLB misses since we last read it (above), possibly
			 * other CPUs could update it so we check in the PTE
			 * directly rather than rely on that saved local flags
			 * copy.
			 */
			if (PTE_ISMODIFIED(pte))
				vm_page_dirty(m);
		}

		if (prot & VM_PROT_EXECUTE) {
			flags |= PTE_SX;
			if (!su)
				flags |= PTE_UX;

			/*
			 * Check existing flags for execute permissions: if we
			 * are turning execute permissions on, icache should
			 * be flushed.
			 */
			if ((flags & (PTE_UX | PTE_SX)) == 0)
				sync++;
		}

		flags &= ~PTE_REFERENCED;

		/*
		 * The new flags value is all calculated -- only now actually
		 * update the PTE.
		 */
		mtx_lock_spin(&tlbivax_mutex);

		tlb0_flush_entry(va);
		pte->flags = flags;

		mtx_unlock_spin(&tlbivax_mutex);

	} else {
		/*
		 * If there is an existing mapping, but it's for a different
		 * physical address, pte_enter() will delete the old mapping.
		 */
		//if ((pte != NULL) && PTE_ISVALID(pte))
		//	debugf("mmu_booke_enter_locked: replace\n");
		//else
		//	debugf("mmu_booke_enter_locked: new\n");

		/* Now set up the flags and install the new mapping. */
		flags = (PTE_SR | PTE_VALID);
		flags |= PTE_M;

		if (!su)
			flags |= PTE_UR;

		if (prot & VM_PROT_WRITE) {
			flags |= PTE_SW;
			if (!su)
				flags |= PTE_UW;
		}

		if (prot & VM_PROT_EXECUTE) {
			flags |= PTE_SX;
			if (!su)
				flags |= PTE_UX;
		}

		/* If it's wired, update stats. */
		if (wired) {
			pmap->pm_stats.wired_count++;
			flags |= PTE_WIRED;
		}

		pte_enter(mmu, pmap, m, va, flags);

		/* Flush the real memory from the instruction cache. */
		if (prot & VM_PROT_EXECUTE)
			sync++;
	}

	if (sync && (su || pmap == PCPU_GET(curpmap))) {
		__syncicache((void *)va, PAGE_SIZE);
		sync = 0;
	}

	if (sync) {
		/* Create a temporary mapping. */
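		/*
		 * The target pmap is not the current one, so its VA cannot
		 * be referenced directly for __syncicache(); map the page
		 * at a scratch VA in the current pmap, sync, then remove
		 * the temporary mapping again.
		 */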
		pmap = PCPU_GET(curpmap);

		va = 0;
		pte = pte_find(mmu, pmap, va);
		KASSERT(pte == NULL, ("%s:%d", __func__, __LINE__));

		flags = PTE_SR | PTE_VALID | PTE_UR | PTE_M;

		pte_enter(mmu, pmap, m, va, flags);
		__syncicache((void *)va, PAGE_SIZE);
		pte_remove(mmu, pmap, va, PTBL_UNHOLD);
	}
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
static void
mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
    vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	psize = atop(end - start);
	m = m_start;
	PMAP_LOCK(pmap);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
		    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
		m = TAILQ_NEXT(m, listq);
	}
	PMAP_UNLOCK(pmap);
}

static void
mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{

	PMAP_LOCK(pmap);
	mmu_booke_enter_locked(mmu, pmap, va, m,
	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
	PMAP_UNLOCK(pmap);
}
1639
1640/*
1641 * Remove the given range of addresses from the specified map.
1642 *
1643 * It is assumed that the start and end are properly rounded to the page size.
1644 */
1645static void
1646mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
1647{
1648	pte_t *pte;
1649	uint8_t hold_flag;
1650
1651	int su = (pmap == kernel_pmap);
1652
1656	if (su) {
1657		KASSERT(((va >= virtual_avail) &&
1658		    (va <= VM_MAX_KERNEL_ADDRESS)),
1659		    ("mmu_booke_remove: kernel pmap, non kernel va"));
1660	} else {
1661		KASSERT((va <= VM_MAXUSER_ADDRESS),
1662		    ("mmu_booke_remove: user pmap, non user va"));
1663	}
1664
1665	if (PMAP_REMOVE_DONE(pmap)) {
1667		return;
1668	}
1669
1670	hold_flag = PTBL_HOLD_FLAG(pmap);
1672
1673	vm_page_lock_queues();
1674	PMAP_LOCK(pmap);
1675	for (; va < endva; va += PAGE_SIZE) {
1676		pte = pte_find(mmu, pmap, va);
1677		if ((pte != NULL) && PTE_ISVALID(pte))
1678			pte_remove(mmu, pmap, va, hold_flag);
1679	}
1680	PMAP_UNLOCK(pmap);
1681	vm_page_unlock_queues();
1684}
1685
1686/*
1687 * Remove physical page from all pmaps in which it resides.
1688 */
1689static void
1690mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
1691{
1692	pv_entry_t pv, pvn;
1693	uint8_t hold_flag;
1694
1695	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1696
1697	for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
1698		pvn = TAILQ_NEXT(pv, pv_link);
1699
1700		PMAP_LOCK(pv->pv_pmap);
1701		hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
1702		pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
1703		PMAP_UNLOCK(pv->pv_pmap);
1704	}
1705	vm_page_flag_clear(m, PG_WRITEABLE);
1706}
1707
1708/*
1709 * Map a range of physical addresses into kernel virtual address space.
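 *
 * On return, *virt has been advanced past the mapped range; the original
 * *virt (the start of the new mappings) is returned.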
1710 */
1711static vm_offset_t
1712mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
1713    vm_offset_t pa_end, int prot)
1714{
1715	vm_offset_t sva = *virt;
1716	vm_offset_t va = sva;
1717
1721	while (pa_start < pa_end) {
1722		mmu_booke_kenter(mmu, va, pa_start);
1723		va += PAGE_SIZE;
1724		pa_start += PAGE_SIZE;
1725	}
1726	*virt = va;
1727
1729	return (sva);
1730}
1731
1732/*
 * The pmap must be activated before its address space can be accessed in any
1734 * way.
1735 */
1736static void
1737mmu_booke_activate(mmu_t mmu, struct thread *td)
1738{
1739	pmap_t pmap;
1740
1741	pmap = &td->td_proc->p_vmspace->vm_pmap;
1742
1743	CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)",
1744	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
1745
1746	KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));
1747
1748	mtx_lock_spin(&sched_lock);
1749
1750	atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
1751	PCPU_SET(curpmap, pmap);
1752
1753	if (pmap->pm_tid[PCPU_GET(cpuid)] == TID_NONE)
1754		tid_alloc(pmap);
1755
1756	/* Load PID0 register with pmap tid value. */
1757	mtspr(SPR_PID0, pmap->pm_tid[PCPU_GET(cpuid)]);
1758	__asm __volatile("isync");
1759
1760	mtx_unlock_spin(&sched_lock);
1761
1762	CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__,
1763	    pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm);
1764}
1765
1766/*
1767 * Deactivate the specified process's address space.
1768 */
1769static void
1770mmu_booke_deactivate(mmu_t mmu, struct thread *td)
1771{
1772	pmap_t pmap;
1773
1774	pmap = &td->td_proc->p_vmspace->vm_pmap;
1775
1776	CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x",
1777	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
1778
1779	atomic_clear_int(&pmap->pm_active, PCPU_GET(cpumask));
1780	PCPU_SET(curpmap, NULL);
1781}
1782
1783/*
1784 * Copy the range specified by src_addr/len
1785 * from the source map to the range dst_addr/len
1786 * in the destination map.
1787 *
1788 * This routine is only advisory and need not do anything.
1789 */
1790static void
mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
    vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
1793{
1794
1795}
1796
1797/*
1798 * Set the physical protection on the specified range of this map as requested.
1799 */
1800static void
1801mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
1802    vm_prot_t prot)
1803{
1804	vm_offset_t va;
1805	vm_page_t m;
1806	pte_t *pte;
1807
1808	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1809		mmu_booke_remove(mmu, pmap, sva, eva);
1810		return;
1811	}
1812
1813	if (prot & VM_PROT_WRITE)
1814		return;
1815
1816	vm_page_lock_queues();
1817	PMAP_LOCK(pmap);
1818	for (va = sva; va < eva; va += PAGE_SIZE) {
1819		if ((pte = pte_find(mmu, pmap, va)) != NULL) {
1820			if (PTE_ISVALID(pte)) {
1821				m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1822
1823				mtx_lock_spin(&tlbivax_mutex);
1824
1825				/* Handle modified pages. */
1826				if (PTE_ISMODIFIED(pte))
1827					vm_page_dirty(m);
1828
1829				/* Referenced pages. */
1830				if (PTE_ISREFERENCED(pte))
1831					vm_page_flag_set(m, PG_REFERENCED);
1832
1833				tlb0_flush_entry(va);
1834				pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED |
1835				    PTE_REFERENCED);
1836
1837				mtx_unlock_spin(&tlbivax_mutex);
1838			}
1839		}
1840	}
1841	PMAP_UNLOCK(pmap);
1842	vm_page_unlock_queues();
1843}
1844
1845/*
1846 * Clear the write and modified bits in each of the given page's mappings.
1847 */
1848static void
1849mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
1850{
1851	pv_entry_t pv;
1852	pte_t *pte;
1853
1854	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1855	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
1856	    (m->flags & PG_WRITEABLE) == 0)
1857		return;
1858
1859	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
1860		PMAP_LOCK(pv->pv_pmap);
1861		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
1862			if (PTE_ISVALID(pte)) {
1863				m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1864
1865				mtx_lock_spin(&tlbivax_mutex);
1866
1867				/* Handle modified pages. */
1868				if (PTE_ISMODIFIED(pte))
1869					vm_page_dirty(m);
1870
1871				/* Referenced pages. */
1872				if (PTE_ISREFERENCED(pte))
1873					vm_page_flag_set(m, PG_REFERENCED);
1874
				/* Flush mapping from TLB0. */
				tlb0_flush_entry(pv->pv_va);
				pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED |
				    PTE_REFERENCED);
1878
1879				mtx_unlock_spin(&tlbivax_mutex);
1880			}
1881		}
1882		PMAP_UNLOCK(pv->pv_pmap);
1883	}
1884	vm_page_flag_clear(m, PG_WRITEABLE);
1885}
1886
1887static boolean_t
1888mmu_booke_page_executable(mmu_t mmu, vm_page_t m)
1889{
1890	pv_entry_t pv;
1891	pte_t *pte;
1892	boolean_t executable;
1893
1894	executable = FALSE;
1895	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
1896		PMAP_LOCK(pv->pv_pmap);
1897		pte = pte_find(mmu, pv->pv_pmap, pv->pv_va);
1898		if (pte != NULL && PTE_ISVALID(pte) && (pte->flags & PTE_UX))
1899			executable = TRUE;
1900		PMAP_UNLOCK(pv->pv_pmap);
1901		if (executable)
1902			break;
1903	}
1904
1905	return (executable);
1906}
1907
1908/*
1909 * Atomically extract and hold the physical page with the given
1910 * pmap and virtual address pair if that mapping permits the given
1911 * protection.
1912 */
1913static vm_page_t
1914mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
1915    vm_prot_t prot)
1916{
1917	pte_t *pte;
1918	vm_page_t m;
1919	uint32_t pte_wbit;
1920
1921	m = NULL;
1922	vm_page_lock_queues();
1923	PMAP_LOCK(pmap);
1924
1925	pte = pte_find(mmu, pmap, va);
1926	if ((pte != NULL) && PTE_ISVALID(pte)) {
1927		if (pmap == kernel_pmap)
1928			pte_wbit = PTE_SW;
1929		else
1930			pte_wbit = PTE_UW;
1931
1932		if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
1933			m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1934			vm_page_hold(m);
1935		}
1936	}
1937
1938	vm_page_unlock_queues();
1939	PMAP_UNLOCK(pmap);
1940	return (m);
1941}
1942
1943/*
1944 * Initialize a vm_page's machine-dependent fields.
1945 */
1946static void
1947mmu_booke_page_init(mmu_t mmu, vm_page_t m)
1948{
1949
1950	TAILQ_INIT(&m->md.pv_list);
1951}
1952
1953/*
1954 * mmu_booke_zero_page_area zeros the specified hardware page by
1955 * mapping it into virtual memory and using bzero to clear
1956 * its contents.
1957 *
1958 * off and size must reside within a single page.
1959 */
1960static void
1961mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
1962{
1963	vm_offset_t va;
1964
1965	/* XXX KASSERT off and size are within a single page? */
1966
1967	mtx_lock(&zero_page_mutex);
1968	va = zero_page_va;
1969
1970	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
1971	bzero((caddr_t)va + off, size);
1972	mmu_booke_kremove(mmu, va);
1973
1974	mtx_unlock(&zero_page_mutex);
1975}
1976
1977/*
1978 * mmu_booke_zero_page zeros the specified hardware page.
1979 */
1980static void
1981mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
1982{
1983
1984	mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE);
1985}
1986
1987/*
1988 * mmu_booke_copy_page copies the specified (machine independent) page by
 * mapping the page into virtual memory and using memcpy to copy the page,
1990 * one machine dependent page at a time.
1991 */
1992static void
1993mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
1994{
1995	vm_offset_t sva, dva;
1996
1997	sva = copy_page_src_va;
1998	dva = copy_page_dst_va;
1999
2000	mtx_lock(&copy_page_mutex);
2001	mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
2002	mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
2003	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
2004	mmu_booke_kremove(mmu, dva);
2005	mmu_booke_kremove(mmu, sva);
2006	mtx_unlock(&copy_page_mutex);
2007}
2008
2009#if 0
2010/*
 * Remove all pages from the specified address space; this speeds up
 * process exit. It is much faster than mmu_booke_remove in the case of
 * running down an entire address space. Only works for the current pmap.
2014 */
2015void
2016mmu_booke_remove_pages(pmap_t pmap)
2017{
2018}
2019#endif
2020
2021/*
2022 * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
2023 * into virtual memory and using bzero to clear its contents. This is intended
2024 * to be called from the vm_pagezero process only and outside of Giant. No
2025 * lock is required.
2026 */
2027static void
2028mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
2029{
2030	vm_offset_t va;
2031
2032	va = zero_page_idle_va;
2033	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2034	bzero((caddr_t)va, PAGE_SIZE);
2035	mmu_booke_kremove(mmu, va);
2036}
2037
2038/*
2039 * Return whether or not the specified physical page was modified
2040 * in any of physical maps.
2041 */
2042static boolean_t
2043mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
2044{
2045	pte_t *pte;
2046	pv_entry_t pv;
2047
2048	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2049	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2050		return (FALSE);
2051
2052	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2053		PMAP_LOCK(pv->pv_pmap);
2054		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2055			if (!PTE_ISVALID(pte))
2056				goto make_sure_to_unlock;
2057
2058			if (PTE_ISMODIFIED(pte)) {
2059				PMAP_UNLOCK(pv->pv_pmap);
2060				return (TRUE);
2061			}
2062		}
2063make_sure_to_unlock:
2064		PMAP_UNLOCK(pv->pv_pmap);
2065	}
2066	return (FALSE);
2067}
2068
2069/*
2070 * Return whether or not the specified virtual address is eligible
2071 * for prefault.
2072 */
2073static boolean_t
2074mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
2075{
2076
2077	return (FALSE);
2078}
2079
2080/*
2081 * Clear the modify bits on the specified physical page.
2082 */
2083static void
2084mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
2085{
2086	pte_t *pte;
2087	pv_entry_t pv;
2088
2089	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2090	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2091		return;
2092
2093	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2094		PMAP_LOCK(pv->pv_pmap);
2095		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2096			if (!PTE_ISVALID(pte))
2097				goto make_sure_to_unlock;
2098
2099			mtx_lock_spin(&tlbivax_mutex);
2100
2101			if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
2102				tlb0_flush_entry(pv->pv_va);
2103				pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
2104				    PTE_REFERENCED);
2105			}
2106
2107			mtx_unlock_spin(&tlbivax_mutex);
2108		}
2109make_sure_to_unlock:
2110		PMAP_UNLOCK(pv->pv_pmap);
2111	}
2112}
2113
2114/*
2115 * Return a count of reference bits for a page, clearing those bits.
2116 * It is not necessary for every reference bit to be cleared, but it
2117 * is necessary that 0 only be returned when there are truly no
2118 * reference bits set.
2119 *
2120 * XXX: The exact number of bits to check and clear is a matter that
2121 * should be tested and standardized at some point in the future for
2122 * optimal aging of shared pages.
2123 */
2124static int
2125mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
2126{
2127	pte_t *pte;
2128	pv_entry_t pv;
2129	int count;
2130
2131	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2132	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2133		return (0);
2134
2135	count = 0;
2136	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2137		PMAP_LOCK(pv->pv_pmap);
2138		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2139			if (!PTE_ISVALID(pte))
2140				goto make_sure_to_unlock;
2141
2142			if (PTE_ISREFERENCED(pte)) {
2143				mtx_lock_spin(&tlbivax_mutex);
2144
2145				tlb0_flush_entry(pv->pv_va);
2146				pte->flags &= ~PTE_REFERENCED;
2147
2148				mtx_unlock_spin(&tlbivax_mutex);
2149
2150				if (++count > 4) {
2151					PMAP_UNLOCK(pv->pv_pmap);
2152					break;
2153				}
2154			}
2155		}
2156make_sure_to_unlock:
2157		PMAP_UNLOCK(pv->pv_pmap);
2158	}
2159	return (count);
2160}
2161
2162/*
2163 * Clear the reference bit on the specified physical page.
2164 */
2165static void
2166mmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
2167{
2168	pte_t *pte;
2169	pv_entry_t pv;
2170
2171	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2172	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2173		return;
2174
2175	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2176		PMAP_LOCK(pv->pv_pmap);
2177		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2178			if (!PTE_ISVALID(pte))
2179				goto make_sure_to_unlock;
2180
2181			if (PTE_ISREFERENCED(pte)) {
2182				mtx_lock_spin(&tlbivax_mutex);
2183
2184				tlb0_flush_entry(pv->pv_va);
2185				pte->flags &= ~PTE_REFERENCED;
2186
2187				mtx_unlock_spin(&tlbivax_mutex);
2188			}
2189		}
2190make_sure_to_unlock:
2191		PMAP_UNLOCK(pv->pv_pmap);
2192	}
2193}
2194
2195/*
2196 * Change wiring attribute for a map/virtual-address pair.
2197 */
2198static void
2199mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired)
2200{
	pte_t *pte;
2202
2203	PMAP_LOCK(pmap);
2204	if ((pte = pte_find(mmu, pmap, va)) != NULL) {
2205		if (wired) {
2206			if (!PTE_ISWIRED(pte)) {
2207				pte->flags |= PTE_WIRED;
2208				pmap->pm_stats.wired_count++;
2209			}
2210		} else {
2211			if (PTE_ISWIRED(pte)) {
2212				pte->flags &= ~PTE_WIRED;
2213				pmap->pm_stats.wired_count--;
2214			}
2215		}
2216	}
2217	PMAP_UNLOCK(pmap);
2218}
2219
2220/*
2221 * Return true if the pmap's pv is one of the first 16 pvs linked to from this
2222 * page.  This count may be changed upwards or downwards in the future; it is
2223 * only necessary that true be returned for a small subset of pmaps for proper
2224 * page aging.
2225 */
2226static boolean_t
2227mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
2228{
2229	pv_entry_t pv;
2230	int loops;
2231
2232	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2233	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2234		return (FALSE);
2235
2236	loops = 0;
2237	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2238		if (pv->pv_pmap == pmap)
2239			return (TRUE);
2240
2241		if (++loops >= 16)
2242			break;
2243	}
2244	return (FALSE);
2245}
2246
2247/*
2248 * Return the number of managed mappings to the given physical page that are
2249 * wired.
2250 */
2251static int
2252mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
2253{
2254	pv_entry_t pv;
2255	pte_t *pte;
2256	int count = 0;
2257
2258	if ((m->flags & PG_FICTITIOUS) != 0)
2259		return (count);
2260	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2261
2262	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2263		PMAP_LOCK(pv->pv_pmap);
2264		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
2265			if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
2266				count++;
2267		PMAP_UNLOCK(pv->pv_pmap);
2268	}
2269
2270	return (count);
2271}
2272
2273static int
2274mmu_booke_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2275{
2276	int i;
2277	vm_offset_t va;
2278
	/*
	 * This currently does not work for ranges that span more than one
	 * TLB1 entry.
	 */
	for (i = 0; i < tlb1_idx; i++) {
2284		if (tlb1_iomapped(i, pa, size, &va) == 0)
2285			return (0);
2286	}
2287
2288	return (EFAULT);
2289}
2290
2291/*
2292 * Map a set of physical memory pages into the kernel virtual address space.
2293 * Return a pointer to where it is mapped. This routine is intended to be used
2294 * for mapping device memory, NOT real memory.
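 *
 * The range is wired using power-of-4 sized TLB1 entries: ilog2(size) & ~1
 * rounds the log2 of the remaining size down to an even value, so e.g. a
 * 5 MB request is covered by a 4 MB entry followed by a 1 MB entry.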
2295 */
2296static void *
2297mmu_booke_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2298{
2299	void *res;
2300	uintptr_t va;
2301	vm_size_t sz;
2302
2303	va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa);
2304	res = (void *)va;
2305
2306	do {
2307		sz = 1 << (ilog2(size) & ~1);
2308		if (bootverbose)
2309			printf("Wiring VA=%x to PA=%x (size=%x), "
2310			    "using TLB1[%d]\n", va, pa, sz, tlb1_idx);
2311		tlb1_set_entry(va, pa, sz, _TLB_ENTRY_IO);
2312		size -= sz;
2313		pa += sz;
2314		va += sz;
2315	} while (size > 0);
2316
2317	return (res);
2318}
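
/*
 * Example usage (illustrative): drivers normally reach the routine above
 * through the MI pmap_mapdev()/pmap_unmapdev() interface, e.g.:
 *
 *	void *regs;
 *
 *	regs = pmap_mapdev(0xfef08000, PAGE_SIZE);
 *	...
 *	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
 *
 * The physical address here is made up for the example.
 */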
2319
2320/*
2321 * 'Unmap' a range mapped by mmu_booke_mapdev().
2322 */
2323static void
2324mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
2325{
2326	vm_offset_t base, offset;
2327
2328	/*
2329	 * Unmap only if this is inside kernel virtual space.
2330	 */
2331	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
2332		base = trunc_page(va);
2333		offset = va & PAGE_MASK;
2334		size = roundup(offset + size, PAGE_SIZE);
2335		kmem_free(kernel_map, base, size);
2336	}
2337}
2338
2339/*
2340 * mmu_booke_object_init_pt preloads the ptes for a given object into the
2341 * specified pmap. This eliminates the blast of soft faults on process startup
2342 * and immediately after an mmap.
2343 */
2344static void
2345mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
2346    vm_object_t object, vm_pindex_t pindex, vm_size_t size)
2347{
2348
2349	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2350	KASSERT(object->type == OBJT_DEVICE,
2351	    ("mmu_booke_object_init_pt: non-device object"));
2352}
2353
2354/*
2355 * Perform the pmap work for mincore.
2356 */
2357static int
2358mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
2359{
2360
2361	TODO;
2362	return (0);
2363}
2364
2365/**************************************************************************/
2366/* TID handling */
2367/**************************************************************************/
2368
2369/*
2370 * Allocate a TID. If necessary, steal one from someone else.
2371 * The new TID is flushed from the TLB before returning.
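 *
 * TIDs are handed out round-robin from the per-CPU tid_next counter,
 * wrapping from TID_MAX back to TID_MIN; if the chosen TID is still in
 * use, it is stolen from its current owner (tidbusy[]) and all TLB0
 * entries tagged with it are flushed.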
2372 */
2373static tlbtid_t
2374tid_alloc(pmap_t pmap)
2375{
2376	tlbtid_t tid;
2377	int thiscpu;
2378
2379	KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));
2380
2381	CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap);
2382
2383	thiscpu = PCPU_GET(cpuid);
2384
2385	tid = PCPU_GET(tid_next);
2386	if (tid > TID_MAX)
2387		tid = TID_MIN;
2388	PCPU_SET(tid_next, tid + 1);
2389
	/* If we are stealing a TID, clear the owning pmap's field. */
2391	if (tidbusy[thiscpu][tid] != NULL) {
2392
2393		CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid);
2394
2395		tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;
2396
2397		/* Flush all entries from TLB0 matching this TID. */
2398		tid_flush(tid);
2399	}
2400
2401	tidbusy[thiscpu][tid] = pmap;
2402	pmap->pm_tid[thiscpu] = tid;
2403	__asm __volatile("msync; isync");
2404
2405	CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid,
2406	    PCPU_GET(tid_next));
2407
2408	return (tid);
2409}
2410
2411/**************************************************************************/
2412/* TLB0 handling */
2413/**************************************************************************/
2414
2415static void
2416tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
2417    uint32_t mas7)
2418{
2419	int as;
2420	char desc[3];
2421	tlbtid_t tid;
2422	vm_size_t size;
2423	unsigned int tsize;
2424
2425	desc[2] = '\0';
2426	if (mas1 & MAS1_VALID)
2427		desc[0] = 'V';
2428	else
2429		desc[0] = ' ';
2430
2431	if (mas1 & MAS1_IPROT)
2432		desc[1] = 'P';
2433	else
2434		desc[1] = ' ';
2435
2436	as = (mas1 & MAS1_TS_MASK) ? 1 : 0;
2437	tid = MAS1_GETTID(mas1);
2438
2439	tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
2440	size = 0;
2441	if (tsize)
2442		size = tsize2size(tsize);
2443
2444	debugf("%3d: (%s) [AS=%d] "
2445	    "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x "
2446	    "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n",
2447	    i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7);
2448}
2449
2450/* Convert TLB0 va and way number to tlb0[] table index. */
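/*
 * Illustrative example (assuming a 2-way TLB0 with 128 entries per way,
 * as on e500v1): way 1 and the set selected by va's page-index bits,
 * say set 5, yield idx = 1 * 128 + 5 = 133.
 */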
2451static inline unsigned int
2452tlb0_tableidx(vm_offset_t va, unsigned int way)
2453{
2454	unsigned int idx;
2455
2456	idx = (way * TLB0_ENTRIES_PER_WAY);
2457	idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
2458	return (idx);
2459}
2460
2461/*
2462 * Invalidate TLB0 entry.
2463 */
2464static inline void
2465tlb0_flush_entry(vm_offset_t va)
2466{
2467
2468	CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);
2469
2470	mtx_assert(&tlbivax_mutex, MA_OWNED);
2471
2472	__asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
2473	__asm __volatile("isync; msync");
2474	__asm __volatile("tlbsync; msync");
2475
2476	CTR1(KTR_PMAP, "%s: e", __func__);
2477}
2478
2479/* Print out contents of the MAS registers for each TLB0 entry */
2480void
2481tlb0_print_tlbentries(void)
2482{
2483	uint32_t mas0, mas1, mas2, mas3, mas7;
2484	int entryidx, way, idx;
2485
2486	debugf("TLB0 entries:\n");
	for (way = 0; way < TLB0_WAYS; way++)
2488		for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {
2489
2490			mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
2491			mtspr(SPR_MAS0, mas0);
2492			__asm __volatile("isync");
2493
2494			mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
2495			mtspr(SPR_MAS2, mas2);
2496
2497			__asm __volatile("isync; tlbre");
2498
2499			mas1 = mfspr(SPR_MAS1);
2500			mas2 = mfspr(SPR_MAS2);
2501			mas3 = mfspr(SPR_MAS3);
2502			mas7 = mfspr(SPR_MAS7);
2503
2504			idx = tlb0_tableidx(mas2, way);
2505			tlb_print_entry(idx, mas1, mas2, mas3, mas7);
2506		}
2507}
2508
2509/**************************************************************************/
2510/* TLB1 handling */
2511/**************************************************************************/
2512
2513/*
2514 * TLB1 mapping notes:
2515 *
2516 * TLB1[0]	CCSRBAR
2517 * TLB1[1]	Kernel text and data.
2518 * TLB1[2-15]	Additional kernel text and data mappings (if required), PCI
 *		windows, other device mappings.
2520 */
2521
2522/*
2523 * Write given entry to TLB1 hardware.
2524 * Use 32 bit pa, clear 4 high-order bits of RPN (mas7).
2525 */
2526static void
2527tlb1_write_entry(unsigned int idx)
2528{
2529	uint32_t mas0, mas7;
2530
2533	/* Clear high order RPN bits */
2534	mas7 = 0;
2535
2536	/* Select entry */
2537	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx);
2539
2540	mtspr(SPR_MAS0, mas0);
2541	__asm __volatile("isync");
2542	mtspr(SPR_MAS1, tlb1[idx].mas1);
2543	__asm __volatile("isync");
2544	mtspr(SPR_MAS2, tlb1[idx].mas2);
2545	__asm __volatile("isync");
2546	mtspr(SPR_MAS3, tlb1[idx].mas3);
2547	__asm __volatile("isync");
2548	mtspr(SPR_MAS7, mas7);
2549	__asm __volatile("isync; tlbwe; isync; msync");
2552}
2553
2554/*
2555 * Return the largest uint value log such that 2^log <= num.
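 * E.g. ilog2(0x500000) = 22, since 2^22 <= 0x500000 < 2^23. Note that num
 * must be non-zero: cntlzw(0) is 32, which would yield -1 here.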
2556 */
2557static unsigned int
2558ilog2(unsigned int num)
2559{
2560	int lz;
2561
2562	__asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
2563	return (31 - lz);
2564}
2565
2566/*
2567 * Convert TLB TSIZE value to mapped region size.
2568 */
2569static vm_size_t
2570tsize2size(unsigned int tsize)
2571{
2572
2573	/*
2574	 * size = 4^tsize KB
	 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
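	 *
	 * E.g. tsize = 1 -> 4 KB, tsize = 5 -> 1 MB, tsize = 7 -> 16 MB.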
2576	 */
2577
2578	return ((1 << (2 * tsize)) * 1024);
2579}
2580
2581/*
2582 * Convert region size (must be power of 4) to TLB TSIZE value.
2583 */
2584static unsigned int
2585size2tsize(vm_size_t size)
2586{
2587
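	/* Inverse of tsize2size: e.g. 1 MB = 2^20 -> 20 / 2 - 5 = 5. */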
2588	return (ilog2(size) / 2 - 5);
2589}
2590
2591/*
2592 * Register permanent kernel mapping in TLB1.
2593 *
2594 * Entries are created starting from index 0 (current free entry is
2595 * kept in tlb1_idx) and are not supposed to be invalidated.
2596 */
2597static int
2598tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size,
2599    uint32_t flags)
2600{
2601	uint32_t ts, tid;
2602	int tsize;
2603
2604	if (tlb1_idx >= TLB1_ENTRIES) {
2605		printf("tlb1_set_entry: TLB1 full!\n");
2606		return (-1);
2607	}
2608
2609	/* Convert size to TSIZE */
2610	tsize = size2tsize(size);
2611
2612	tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK;
	/* XXX TS is hardcoded to 0 for now as we only use a single address space. */
2614	ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;
2615
2616	/* XXX LOCK tlb1[] */
2617
2618	tlb1[tlb1_idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
2619	tlb1[tlb1_idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
2620	tlb1[tlb1_idx].mas2 = (va & MAS2_EPN_MASK) | flags;
2621
2622	/* Set supervisor RWX permission bits */
2623	tlb1[tlb1_idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
2624
2625	tlb1_write_entry(tlb1_idx++);
2626
2627	/* XXX UNLOCK tlb1[] */
2628
2629	/*
	 * XXX In general, TLB1 updates should be propagated between CPUs,
	 * since the current design assumes the same TLB1 set-up on all
	 * cores.
2633	 */
2634	return (0);
2635}
2636
2637static int
2638tlb1_entry_size_cmp(const void *a, const void *b)
2639{
2640	const vm_size_t *sza;
2641	const vm_size_t *szb;
2642
2643	sza = a;
2644	szb = b;
2645	if (*sza > *szb)
2646		return (-1);
2647	else if (*sza < *szb)
2648		return (1);
2649	else
2650		return (0);
2651}
2652
2653/*
2654 * Map in contiguous RAM region into the TLB1 using maximum of
2655 * KERNEL_REGION_MAX_TLB_ENTRIES entries.
2656 *
 * If necessary, round up the last entry size and return the total size
 * used by all allocated entries.
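 *
 * Worked example (illustrative): for a 20 MB region the first pass picks
 * esz = 4^(ilog2(20M) / 2) = 16 MB, leaving 4 MB for a second entry; the
 * sizes are then sorted in descending order so that each entry's base
 * address stays naturally aligned to its size.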
2659 */
2660vm_size_t
2661tlb1_mapin_region(vm_offset_t va, vm_offset_t pa, vm_size_t size)
2662{
2663	vm_size_t entry_size[KERNEL_REGION_MAX_TLB_ENTRIES];
2664	vm_size_t mapped_size, sz, esz;
2665	unsigned int log;
2666	int i;
2667
2668	CTR4(KTR_PMAP, "%s: region size = 0x%08x va = 0x%08x pa = 0x%08x",
2669	    __func__, size, va, pa);
2670
2671	mapped_size = 0;
2672	sz = size;
2673	memset(entry_size, 0, sizeof(entry_size));
2674
2675	/* Calculate entry sizes. */
2676	for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES && sz > 0; i++) {
2677
		/* Largest region that is a power of 4 and fits within sz. */
2679		log = ilog2(sz) / 2;
2680		esz = 1 << (2 * log);
2681
		/* If this is the last entry, cover the remaining size. */
		if (i == KERNEL_REGION_MAX_TLB_ENTRIES - 1) {
2684			while (esz < sz)
2685				esz = esz << 2;
2686		}
2687
2688		entry_size[i] = esz;
2689		mapped_size += esz;
2690		if (esz < sz)
2691			sz -= esz;
2692		else
2693			sz = 0;
2694	}
2695
2696	/* Sort entry sizes, required to get proper entry address alignment. */
2697	qsort(entry_size, KERNEL_REGION_MAX_TLB_ENTRIES,
2698	    sizeof(vm_size_t), tlb1_entry_size_cmp);
2699
2700	/* Load TLB1 entries. */
2701	for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES; i++) {
2702		esz = entry_size[i];
2703		if (!esz)
2704			break;
2705
2706		CTR5(KTR_PMAP, "%s: entry %d: sz  = 0x%08x (va = 0x%08x "
2707		    "pa = 0x%08x)", __func__, tlb1_idx, esz, va, pa);
2708
2709		tlb1_set_entry(va, pa, esz, _TLB_ENTRY_MEM);
2710
2711		va += esz;
2712		pa += esz;
2713	}
2714
2715	CTR3(KTR_PMAP, "%s: mapped size 0x%08x (wasted space 0x%08x)",
2716	    __func__, mapped_size, mapped_size - size);
2717
2718	return (mapped_size);
2719}
2720
2721/*
2722 * TLB1 initialization routine, to be called after the very first
2723 * assembler level setup done in locore.S.
2724 */
2725void
2726tlb1_init(vm_offset_t ccsrbar)
2727{
2728	uint32_t mas0;
2729
2730	/* TLB1[1] is used to map the kernel. Save that entry. */
2731	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(1);
2732	mtspr(SPR_MAS0, mas0);
2733	__asm __volatile("isync; tlbre");
2734
2735	tlb1[1].mas1 = mfspr(SPR_MAS1);
2736	tlb1[1].mas2 = mfspr(SPR_MAS2);
2737	tlb1[1].mas3 = mfspr(SPR_MAS3);
2738
2739	/* Map in CCSRBAR in TLB1[0] */
2740	tlb1_idx = 0;
2741	tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO);
2742	/*
	 * Set the next available TLB1 entry index. Note that TLB1[1] is
	 * reserved for the initial mapping of kernel text+data, which was
	 * set up early in locore; we need to skip this [busy] entry.
2746	 */
2747	tlb1_idx = 2;
2748
2749	/* Setup TLB miss defaults */
2750	set_mas4_defaults();
2751}
2752
2753/*
2754 * Setup MAS4 defaults.
2755 * These values are loaded to MAS0-2 on a TLB miss.
2756 */
2757static void
2758set_mas4_defaults(void)
2759{
2760	uint32_t mas4;
2761
2762	/* Defaults: TLB0, PID0, TSIZED=4K */
2763	mas4 = MAS4_TLBSELD0;
2764	mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
2765
2766	mtspr(SPR_MAS4, mas4);
2767	__asm __volatile("isync");
2768}
2769
2770/*
2771 * Print out contents of the MAS registers for each TLB1 entry
2772 */
2773void
2774tlb1_print_tlbentries(void)
2775{
2776	uint32_t mas0, mas1, mas2, mas3, mas7;
2777	int i;
2778
2779	debugf("TLB1 entries:\n");
2780	for (i = 0; i < TLB1_ENTRIES; i++) {
2781
2782		mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
2783		mtspr(SPR_MAS0, mas0);
2784
2785		__asm __volatile("isync; tlbre");
2786
2787		mas1 = mfspr(SPR_MAS1);
2788		mas2 = mfspr(SPR_MAS2);
2789		mas3 = mfspr(SPR_MAS3);
2790		mas7 = mfspr(SPR_MAS7);
2791
2792		tlb_print_entry(i, mas1, mas2, mas3, mas7);
2793	}
2794}
2795
2796/*
2797 * Print out contents of the in-ram tlb1 table.
2798 */
2799void
2800tlb1_print_entries(void)
2801{
2802	int i;
2803
2804	debugf("tlb1[] table entries:\n");
2805	for (i = 0; i < TLB1_ENTRIES; i++)
2806		tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0);
2807}
2808
2809/*
 * Return 0 if the physical IO range is encompassed by one of the TLB1
 * entries, otherwise return the related error code.
2812 */
2813static int
2814tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
2815{
2816	uint32_t prot;
2817	vm_paddr_t pa_start;
2818	vm_paddr_t pa_end;
2819	unsigned int entry_tsize;
2820	vm_size_t entry_size;
2821
2822	*va = (vm_offset_t)NULL;
2823
2824	/* Skip invalid entries */
2825	if (!(tlb1[i].mas1 & MAS1_VALID))
2826		return (EINVAL);
2827
2828	/*
2829	 * The entry must be cache-inhibited, guarded, and r/w
2830	 * so it can function as an i/o page
2831	 */
2832	prot = tlb1[i].mas2 & (MAS2_I | MAS2_G);
2833	if (prot != (MAS2_I | MAS2_G))
2834		return (EPERM);
2835
2836	prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW);
2837	if (prot != (MAS3_SR | MAS3_SW))
2838		return (EPERM);
2839
2840	/* The address should be within the entry range. */
2841	entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
2842	KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));
2843
2844	entry_size = tsize2size(entry_tsize);
2845	pa_start = tlb1[i].mas3 & MAS3_RPN;
2846	pa_end = pa_start + entry_size - 1;
2847
	if ((pa < pa_start) || ((pa + size - 1) > pa_end))
2849		return (ERANGE);
2850
2851	/* Return virtual address of this mapping. */
2852	*va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start);
2853	return (0);
2854}
2855