/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 */

 /*
  * VM layout notes:
  *
  * Kernel and user threads run within one common virtual address space
  * defined by AS=0.
  *
  * Virtual address space layout:
  * -----------------------------
  * 0x0000_0000 - 0xafff_ffff	: user process
  * 0xb000_0000 - 0xbfff_ffff	: pmap_mapdev()-ed area (PCI/PCIE etc.)
  * 0xc000_0000 - 0xc0ff_ffff	: kernel reserved
  *   0xc000_0000 - data_end	: kernel code+data, env, metadata etc.
  * 0xc100_0000 - 0xfeef_ffff	: KVA
  *   0xc100_0000 - 0xc100_3fff	: reserved for page zero/copy
  *   0xc100_4000 - 0xc200_3fff	: reserved for ptbl bufs
  *   0xc200_4000 - 0xc200_8fff	: guard page + kstack0
  *   0xc200_9000 - 0xfeef_ffff	: actual free KVA space
  * 0xfef0_0000 - 0xffff_ffff	: I/O devices region
  */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/powerpc/booke/pmap.c 266000 2014-05-14 01:53:20Z ian $");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/platform.h>

#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#include "mmu_if.h"

#ifdef  DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif
#define TODO			panic("%s: not implemented", __func__);

extern struct mtx sched_lock;

extern int dumpsys_minidump;

extern unsigned char _etext[];
extern unsigned char _end[];

extern uint32_t *bootinfo;

#ifdef SMP
extern uint32_t bp_ntlb1s;
#endif

vm_paddr_t kernload;
vm_offset_t kernstart;
vm_size_t kernsize;

/* Message buffer and tables. */
static vm_offset_t data_start;
static vm_size_t data_end;

/* Phys/avail memory regions. */
static struct mem_region *availmem_regions;
static int availmem_regions_sz;
static struct mem_region *physmem_regions;
static int physmem_regions_sz;

/* Reserved KVA space and mutex for mmu_booke_zero_page. */
static vm_offset_t zero_page_va;
static struct mtx zero_page_mutex;

static struct mtx tlbivax_mutex;

/*
 * Reserved KVA space for mmu_booke_zero_page_idle. This is used
 * by the idle thread only; no lock is required.
 */
static vm_offset_t zero_page_idle_va;

/* Reserved KVA space and mutex for mmu_booke_copy_page. */
static vm_offset_t copy_page_src_va;
static vm_offset_t copy_page_dst_va;
static struct mtx copy_page_mutex;

/**************************************************************************/
/* PMAP */
/**************************************************************************/

static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, boolean_t);

unsigned int kptbl_min;		/* Index of the first kernel ptbl. */
unsigned int kernel_ptbls;	/* Number of KVA ptbls. */

/*
 * If a user pmap is processed with mmu_booke_remove and its resident
 * count drops to 0, there are no more pages to remove, so we need
 * not continue.
 */
#define PMAP_REMOVE_DONE(pmap) \
	((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)
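
/*
 * Usage sketch (illustrative only, not a quote of the removal code in
 * this file): a per-page removal loop can bail out early once a user
 * pmap has no resident pages left, e.g.:
 *
 *	for (va = sva; va < eva; va += PAGE_SIZE) {
 *		pte_remove(mmu, pmap, va, PTBL_UNHOLD);
 *		if (PMAP_REMOVE_DONE(pmap))
 *			break;
 *	}
 */
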
extern void tid_flush(tlbtid_t);

/**************************************************************************/
/* TLB and TID handling */
/**************************************************************************/

/* Translation ID busy table */
static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];

/*
 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
 * core revisions and should be read from h/w registers during early config.
 */
uint32_t tlb0_entries;
uint32_t tlb0_ways;
uint32_t tlb0_entries_per_way;

#define TLB0_ENTRIES		(tlb0_entries)
#define TLB0_WAYS		(tlb0_ways)
#define TLB0_ENTRIES_PER_WAY	(tlb0_entries_per_way)

#define TLB1_ENTRIES 16

/* In-ram copy of the TLB1 */
static tlb_entry_t tlb1[TLB1_ENTRIES];

/* Next free entry in the TLB1 */
static unsigned int tlb1_idx;
static vm_offset_t tlb1_map_base = VM_MAX_KERNEL_ADDRESS;

static tlbtid_t tid_alloc(struct pmap *);

static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);

static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t);
static void tlb1_write_entry(unsigned int);
static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t);

static vm_size_t tsize2size(unsigned int);
static unsigned int size2tsize(vm_size_t);
static unsigned int ilog2(unsigned int);

static void set_mas4_defaults(void);

static inline void tlb0_flush_entry(vm_offset_t);
static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);

/**************************************************************************/
/* Page table management */
/**************************************************************************/

static struct rwlock_padalign pvh_global_lock;

/* Data for the pv entry allocation mechanism */
static uma_zone_t pvzone;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;

#define PV_ENTRY_ZONE_MIN	2048	/* min pv entries in uma zone */

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC	200
#endif

static void ptbl_init(void);
static struct ptbl_buf *ptbl_buf_alloc(void);
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);

static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int);
static void ptbl_free(mmu_t, pmap_t, unsigned int);
static void ptbl_hold(mmu_t, pmap_t, unsigned int);
static int ptbl_unhold(mmu_t, pmap_t, unsigned int);

static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);

static pv_entry_t pv_alloc(void);
static void pv_free(pv_entry_t);
static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
static void pv_remove(pmap_t, vm_offset_t, vm_page_t);

/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
#define PTBL_BUFS		(128 * 16)

struct ptbl_buf {
	TAILQ_ENTRY(ptbl_buf) link;	/* list link */
	vm_offset_t kva;		/* va of mapping */
};

/* ptbl free list and a lock used for access synchronization. */
static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
static struct mtx ptbl_buf_freelist_lock;

/* Base address of kva space allocated for ptbl bufs. */
static vm_offset_t ptbl_buf_pool_vabase;

/* Pointer to ptbl_buf structures. */
static struct ptbl_buf *ptbl_bufs;
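
/*
 * Sizing note (arithmetic only, derived from the constants above): with
 * PTBL_BUFS = 128 * 16 = 2048 buffers, each covering PTBL_PAGES pages,
 * the buffer pool consumes PTBL_BUFS * PTBL_PAGES * PAGE_SIZE of KVA.
 * Assuming the usual PTBL_PAGES = 2 and 4K pages, that is 16 MB, which
 * matches the 0xc100_4000 - 0xc200_3fff "ptbl bufs" window in the VM
 * layout notes at the top of this file.
 */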

void pmap_bootstrap_ap(volatile uint32_t *);

/*
 * Kernel MMU interface
 */
static void		mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
static void		mmu_booke_clear_modify(mmu_t, vm_page_t);
static void		mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
    vm_size_t, vm_offset_t);
static void		mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
static void		mmu_booke_copy_pages(mmu_t, vm_page_t *,
    vm_offset_t, vm_page_t *, vm_offset_t, int);
static void		mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, boolean_t);
static void		mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_page_t, vm_prot_t);
static void		mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t);
static vm_paddr_t	mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
static vm_page_t	mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
    vm_prot_t);
static void		mmu_booke_init(mmu_t);
static boolean_t	mmu_booke_is_modified(mmu_t, vm_page_t);
static boolean_t	mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
static boolean_t	mmu_booke_is_referenced(mmu_t, vm_page_t);
static int		mmu_booke_ts_referenced(mmu_t, vm_page_t);
static vm_offset_t	mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t,
    int);
static int		mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
    vm_paddr_t *);
static void		mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
    vm_object_t, vm_pindex_t, vm_size_t);
static boolean_t	mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
static void		mmu_booke_page_init(mmu_t, vm_page_t);
static int		mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
static void		mmu_booke_pinit(mmu_t, pmap_t);
static void		mmu_booke_pinit0(mmu_t, pmap_t);
static void		mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_prot_t);
static void		mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
static void		mmu_booke_qremove(mmu_t, vm_offset_t, int);
static void		mmu_booke_release(mmu_t, pmap_t);
static void		mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
static void		mmu_booke_remove_all(mmu_t, vm_page_t);
static void		mmu_booke_remove_write(mmu_t, vm_page_t);
static void		mmu_booke_zero_page(mmu_t, vm_page_t);
static void		mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
static void		mmu_booke_zero_page_idle(mmu_t, vm_page_t);
static void		mmu_booke_activate(mmu_t, struct thread *);
static void		mmu_booke_deactivate(mmu_t, struct thread *);
static void		mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
static void		*mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t);
static void		*mmu_booke_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
static void		mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
static vm_paddr_t	mmu_booke_kextract(mmu_t, vm_offset_t);
static void		mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t);
static void		mmu_booke_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t);
static void		mmu_booke_kremove(mmu_t, vm_offset_t);
static boolean_t	mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void		mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
    vm_size_t);
static vm_offset_t	mmu_booke_dumpsys_map(mmu_t, struct pmap_md *,
    vm_size_t, vm_size_t *);
static void		mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *,
    vm_size_t, vm_offset_t);
static struct pmap_md	*mmu_booke_scan_md(mmu_t, struct pmap_md *);

static mmu_method_t mmu_booke_methods[] = {
	/* pmap dispatcher interface */
	MMUMETHOD(mmu_change_wiring,	mmu_booke_change_wiring),
	MMUMETHOD(mmu_clear_modify,	mmu_booke_clear_modify),
	MMUMETHOD(mmu_copy,		mmu_booke_copy),
	MMUMETHOD(mmu_copy_page,	mmu_booke_copy_page),
	MMUMETHOD(mmu_copy_pages,	mmu_booke_copy_pages),
	MMUMETHOD(mmu_enter,		mmu_booke_enter),
	MMUMETHOD(mmu_enter_object,	mmu_booke_enter_object),
	MMUMETHOD(mmu_enter_quick,	mmu_booke_enter_quick),
	MMUMETHOD(mmu_extract,		mmu_booke_extract),
	MMUMETHOD(mmu_extract_and_hold,	mmu_booke_extract_and_hold),
	MMUMETHOD(mmu_init,		mmu_booke_init),
	MMUMETHOD(mmu_is_modified,	mmu_booke_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	mmu_booke_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	mmu_booke_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	mmu_booke_ts_referenced),
	MMUMETHOD(mmu_map,		mmu_booke_map),
	MMUMETHOD(mmu_mincore,		mmu_booke_mincore),
	MMUMETHOD(mmu_object_init_pt,	mmu_booke_object_init_pt),
	MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
	MMUMETHOD(mmu_page_init,	mmu_booke_page_init),
	MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		mmu_booke_pinit),
	MMUMETHOD(mmu_pinit0,		mmu_booke_pinit0),
	MMUMETHOD(mmu_protect,		mmu_booke_protect),
	MMUMETHOD(mmu_qenter,		mmu_booke_qenter),
	MMUMETHOD(mmu_qremove,		mmu_booke_qremove),
	MMUMETHOD(mmu_release,		mmu_booke_release),
	MMUMETHOD(mmu_remove,		mmu_booke_remove),
	MMUMETHOD(mmu_remove_all,	mmu_booke_remove_all),
	MMUMETHOD(mmu_remove_write,	mmu_booke_remove_write),
	MMUMETHOD(mmu_sync_icache,	mmu_booke_sync_icache),
	MMUMETHOD(mmu_zero_page,	mmu_booke_zero_page),
	MMUMETHOD(mmu_zero_page_area,	mmu_booke_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	mmu_booke_zero_page_idle),
	MMUMETHOD(mmu_activate,		mmu_booke_activate),
	MMUMETHOD(mmu_deactivate,	mmu_booke_deactivate),

	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	mmu_booke_bootstrap),
	MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
	MMUMETHOD(mmu_mapdev,		mmu_booke_mapdev),
	MMUMETHOD(mmu_mapdev_attr,	mmu_booke_mapdev_attr),
	MMUMETHOD(mmu_kenter,		mmu_booke_kenter),
	MMUMETHOD(mmu_kenter_attr,	mmu_booke_kenter_attr),
	MMUMETHOD(mmu_kextract,		mmu_booke_kextract),
/*	MMUMETHOD(mmu_kremove,		mmu_booke_kremove),	*/
	MMUMETHOD(mmu_unmapdev,		mmu_booke_unmapdev),

	/* dumpsys() support */
	MMUMETHOD(mmu_dumpsys_map,	mmu_booke_dumpsys_map),
	MMUMETHOD(mmu_dumpsys_unmap,	mmu_booke_dumpsys_unmap),
	MMUMETHOD(mmu_scan_md,		mmu_booke_scan_md),

	{ 0, 0 }
};

MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0);

static __inline uint32_t
tlb_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
{
	uint32_t attrib;
	int i;

	if (ma != VM_MEMATTR_DEFAULT) {
		switch (ma) {
		case VM_MEMATTR_UNCACHEABLE:
			return (PTE_I | PTE_G);
		case VM_MEMATTR_WRITE_COMBINING:
		case VM_MEMATTR_WRITE_BACK:
		case VM_MEMATTR_PREFETCHABLE:
			return (PTE_I);
		case VM_MEMATTR_WRITE_THROUGH:
			return (PTE_W | PTE_M);
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	attrib = _TLB_ENTRY_IO;
	for (i = 0; i < physmem_regions_sz; i++) {
		if ((pa >= physmem_regions[i].mr_start) &&
		    (pa < (physmem_regions[i].mr_start +
		     physmem_regions[i].mr_size))) {
			attrib = _TLB_ENTRY_MEM;
			break;
		}
	}

	return (attrib);
}
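
/*
 * Behavior summary with an example (illustrative): a physical address
 * that falls outside every physmem_regions[] entry (device registers,
 * typically) gets _TLB_ENTRY_IO, i.e. cache-inhibited and guarded,
 * while an address inside one of the regions gets _TLB_ENTRY_MEM. An
 * explicit request such as ma == VM_MEMATTR_UNCACHEABLE short-circuits
 * the scan and returns PTE_I | PTE_G directly.
 */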

static inline void
tlb_miss_lock(void)
{
#ifdef SMP
	struct pcpu *pc;

	if (!smp_started)
		return;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup) {

			CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
			    "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke_tlb_lock);

			KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
			    ("tlb_miss_lock: tried to lock self"));

			tlb_lock(pc->pc_booke_tlb_lock);

			CTR1(KTR_PMAP, "%s: locked", __func__);
		}
	}
#endif
}

static inline void
tlb_miss_unlock(void)
{
#ifdef SMP
	struct pcpu *pc;

	if (!smp_started)
		return;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup) {
			CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
			    __func__, pc->pc_cpuid);

			tlb_unlock(pc->pc_booke_tlb_lock);

			CTR1(KTR_PMAP, "%s: unlocked", __func__);
		}
	}
#endif
}
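
/*
 * Usage pattern (as seen in pte_enter()/pte_remove() below): TLB updates
 * are bracketed by the tlbivax mutex plus the per-CPU TLB miss locks, so
 * no other CPU can service a TLB miss while a translation is changing:
 *
 *	mtx_lock_spin(&tlbivax_mutex);
 *	tlb_miss_lock();
 *	... invalidate/update the PTE and flush TLB0 ...
 *	tlb_miss_unlock();
 *	mtx_unlock_spin(&tlbivax_mutex);
 */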

/* Read TLB0 entry count and associativity from the TLB0CFG SPR. */
static __inline void
tlb0_get_tlbconf(void)
{
	uint32_t tlb0_cfg;

	tlb0_cfg = mfspr(SPR_TLB0CFG);
	tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
	tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
	tlb0_entries_per_way = tlb0_entries / tlb0_ways;
}
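
/*
 * Typical values (illustrative; the whole point of reading TLB0CFG is
 * that these differ between core revisions): an e500v2 reports a
 * 512-entry, 4-way set associative TLB0, which yields
 * tlb0_entries_per_way = 128, while earlier e500v1 class cores report
 * 256 entries and 2 ways. The results are cached in the variables above
 * so the hot TLB paths need not re-read the SPR.
 */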

/* Initialize pool of kva ptbl buffers. */
static void
ptbl_init(void)
{
	int i;

	CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
	    (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
	CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
	    __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);

	mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
	TAILQ_INIT(&ptbl_buf_freelist);

	for (i = 0; i < PTBL_BUFS; i++) {
		ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
	}
}

/* Get a ptbl_buf from the freelist. */
static struct ptbl_buf *
ptbl_buf_alloc(void)
{
	struct ptbl_buf *buf;

	mtx_lock(&ptbl_buf_freelist_lock);
	buf = TAILQ_FIRST(&ptbl_buf_freelist);
	if (buf != NULL)
		TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	return (buf);
}

/* Return ptbl buf to the free pool. */
static void
ptbl_buf_free(struct ptbl_buf *buf)
{

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	mtx_lock(&ptbl_buf_freelist_lock);
	TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);
}

/*
 * Search the pmap's list of allocated ptbl bufs for the one backing the
 * given ptbl and return it to the free pool.
 */
static void
ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
{
	struct ptbl_buf *pbuf;

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
		if (pbuf->kva == (vm_offset_t)ptbl) {
			/* Remove from pmap ptbl buf list. */
			TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);

			/* Free corresponding ptbl buf. */
			ptbl_buf_free(pbuf);
			break;
		}
}

/* Allocate page table. */
static pte_t *
ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	vm_page_t mtbl[PTBL_PAGES];
	vm_page_t m;
	struct ptbl_buf *pbuf;
	unsigned int pidx;
	pte_t *ptbl;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_alloc: invalid pdir_idx"));
	KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
	    ("pte_alloc: valid ptbl entry exists!"));

	pbuf = ptbl_buf_alloc();
	if (pbuf == NULL)
		panic("pte_alloc: couldn't alloc kernel virtual memory");

	ptbl = (pte_t *)pbuf->kva;

	CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);

	/* Allocate ptbl pages, this will sleep! */
	for (i = 0; i < PTBL_PAGES; i++) {
		pidx = (PTBL_PAGES * pdir_idx) + i;
		while ((m = vm_page_alloc(NULL, pidx,
		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {

			PMAP_UNLOCK(pmap);
			rw_wunlock(&pvh_global_lock);
			VM_WAIT;
			rw_wlock(&pvh_global_lock);
			PMAP_LOCK(pmap);
		}
		mtbl[i] = m;
	}

	/* Map allocated pages into kernel_pmap. */
	mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);

	/* Zero whole ptbl. */
	bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);

	/* Add pbuf to the pmap ptbl bufs list. */
	TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);

	return (ptbl);
}

/* Free ptbl pages and invalidate pdir entry. */
static void
ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_offset_t va;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_free: invalid pdir_idx"));

	ptbl = pmap->pm_pdir[pdir_idx];

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));

	/*
	 * Invalidate the pdir entry as soon as possible, so that other CPUs
	 * don't attempt to look up the page tables we are releasing.
	 */
	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	pmap->pm_pdir[pdir_idx] = NULL;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	for (i = 0; i < PTBL_PAGES; i++) {
		va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
		pa = pte_vatopa(mmu, kernel_pmap, va);
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_free_zero(m);
		atomic_subtract_int(&cnt.v_wire_count, 1);
		mmu_booke_kremove(mmu, va);
	}

	ptbl_free_pmap_ptbl(pmap, ptbl);
}

/*
 * Decrement ptbl pages hold count and attempt to free ptbl pages.
 * Called when removing pte entry from ptbl.
 *
 * Return 1 if ptbl pages were freed.
 */
static int
ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_unhold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_unhold: unholding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	//debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
	KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
	    ("ptbl_unhold: non kva ptbl"));

	/* decrement hold count */
	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(mmu, kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->wire_count--;
	}

	/*
	 * Free ptbl pages if there are no pte entries in this ptbl.
	 * wire_count has the same value for all ptbl pages, so check the last
	 * page.
	 */
	if (m->wire_count == 0) {
		ptbl_free(mmu, pmap, pdir_idx);

		//debugf("ptbl_unhold: e (freed ptbl)\n");
		return (1);
	}

	return (0);
}

/*
 * Increment hold count for ptbl pages. This routine is used when a new pte
 * entry is being inserted into the ptbl.
 */
static void
ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	vm_paddr_t pa;
	pte_t *ptbl;
	vm_page_t m;
	int i;

	CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
	    pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_hold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_hold: holding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));

	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(mmu, kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->wire_count++;
	}
}
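
/*
 * Hold count invariant (informal summary of ptbl_hold()/ptbl_unhold()):
 * every valid pte in a ptbl contributes one wire_count reference to each
 * of the ptbl's PTBL_PAGES backing pages, so all pages of a given ptbl
 * carry the same count, and a count of 0 on any of them means the ptbl
 * is empty and may be released via ptbl_free().
 */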

/* Allocate pv_entry structure. */
static pv_entry_t
pv_alloc(void)
{
	pv_entry_t pv;

	pv_entry_count++;
	if (pv_entry_count > pv_entry_high_water)
		pagedaemon_wakeup();
	pv = uma_zalloc(pvzone, M_NOWAIT);

	return (pv);
}

/* Free pv_entry structure. */
static __inline void
pv_free(pv_entry_t pve)
{

	pv_entry_count--;
	uma_zfree(pvzone, pve);
}

/* Allocate and initialize pv_entry structure. */
static void
pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pve;

	//int su = (pmap == kernel_pmap);
	//debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
	//	(u_int32_t)pmap, va, (u_int32_t)m);

	pve = pv_alloc();
	if (pve == NULL)
		panic("pv_insert: no pv entries!");

	pve->pv_pmap = pmap;
	pve->pv_va = va;

	/* add to pv_list */
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	rw_assert(&pvh_global_lock, RA_WLOCKED);

	TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);

	//debugf("pv_insert: e\n");
}

/* Destroy pv entry. */
static void
pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pve;

	//int su = (pmap == kernel_pmap);
	//debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	rw_assert(&pvh_global_lock, RA_WLOCKED);

	/* find pv entry */
	TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
		if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
			/* remove from pv_list */
			TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
			if (TAILQ_EMPTY(&m->md.pv_list))
				vm_page_aflag_clear(m, PGA_WRITEABLE);

			/* free pv entry struct */
			pv_free(pve);
			break;
		}
	}

	//debugf("pv_remove: e\n");
}

/*
 * Clean pte entry, try to free page table page if requested.
 *
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	vm_page_t m;
	pte_t *ptbl;
	pte_t *pte;

	//int su = (pmap == kernel_pmap);
	//debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
	//		su, (u_int32_t)pmap, va, flags);

	ptbl = pmap->pm_pdir[pdir_idx];
	KASSERT(ptbl, ("pte_remove: null ptbl"));

	pte = &ptbl[ptbl_idx];

	if (pte == NULL || !PTE_ISVALID(pte))
		return (0);

	if (PTE_ISWIRED(pte))
		pmap->pm_stats.wired_count--;

	/* Handle managed entry. */
	if (PTE_ISMANAGED(pte)) {
		/* Get vm_page_t for mapped pte. */
		m = PHYS_TO_VM_PAGE(PTE_PA(pte));

		if (PTE_ISMODIFIED(pte))
			vm_page_dirty(m);

		if (PTE_ISREFERENCED(pte))
			vm_page_aflag_set(m, PGA_REFERENCED);

		pv_remove(pmap, va, m);
	}

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	pte->flags = 0;
	pte->rpn = 0;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	pmap->pm_stats.resident_count--;

	if (flags & PTBL_UNHOLD) {
		//debugf("pte_remove: e (unhold)\n");
		return (ptbl_unhold(mmu, pmap, pdir_idx));
	}

	//debugf("pte_remove: e\n");
	return (0);
}

/*
 * Insert PTE for a given page and virtual address.
 */
static void
pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *ptbl, *pte;

	CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
	    pmap == kernel_pmap, pmap, va);

	/* Get the page table pointer. */
	ptbl = pmap->pm_pdir[pdir_idx];

	if (ptbl == NULL) {
		/* Allocate page table pages. */
		ptbl = ptbl_alloc(mmu, pmap, pdir_idx);
	} else {
		/*
		 * Check if there is a valid mapping for the requested va
		 * and, if there is, remove it.
		 */
		pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
		if (PTE_ISVALID(pte)) {
			pte_remove(mmu, pmap, va, PTBL_HOLD);
		} else {
			/*
			 * The pte is not used; increment the hold count
			 * for the ptbl pages.
			 */
			if (pmap != kernel_pmap)
				ptbl_hold(mmu, pmap, pdir_idx);
		}
	}

	/*
	 * Insert pv_entry into pv_list for mapped page if part of managed
	 * memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		flags |= PTE_MANAGED;

		/* Create and insert pv entry. */
		pv_insert(pmap, va, m);
	}

	pmap->pm_stats.resident_count++;

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	if (pmap->pm_pdir[pdir_idx] == NULL) {
		/*
		 * If we just allocated a new page table, hook it in
		 * the pdir.
		 */
		pmap->pm_pdir[pdir_idx] = ptbl;
	}
	pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
	pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK;
	pte->flags |= (PTE_VALID | flags);

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);
}

/* Return the pa for the given pmap/va. */
static vm_paddr_t
pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa = 0;
	pte_t *pte;

	pte = pte_find(mmu, pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte))
		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
	return (pa);
}

/* Get a pointer to a PTE in a page table. */
static pte_t *
pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	if (pmap->pm_pdir[pdir_idx])
		return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));

	return (NULL);
}
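
/*
 * Worked example of the two-level lookup (a sketch, assuming the usual
 * booke layout of 4K pages and a 4 MB span per pdir entry, i.e.
 * PDIR_IDX(va) == va >> 22 and PTBL_IDX(va) == (va >> 12) & 0x3ff):
 * for va = 0xc123_4000 this gives PDIR_IDX = 0x304 and PTBL_IDX = 0x234,
 * so the pte, if the ptbl is present, is pmap->pm_pdir[0x304][0x234].
 */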

/**************************************************************************/
/* PMAP related */
/**************************************************************************/

/*
 * This is called during booke_init, before the system is really initialized.
 */
static void
mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
{
	vm_offset_t phys_kernelend;
	struct mem_region *mp, *mp1;
	int cnt, i, j;
	u_int s, e, sz;
	u_int phys_avail_count;
	vm_size_t physsz, hwphyssz, kstack0_sz;
	vm_offset_t kernel_pdir, kstack0, va;
	vm_paddr_t kstack0_phys;
	void *dpcpu;
	pte_t *pte;

	debugf("mmu_booke_bootstrap: entered\n");

	/* Initialize invalidation mutex */
	mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);

	/* Read TLB0 size and associativity. */
	tlb0_get_tlbconf();

	/*
	 * Align kernel start and end address (kernel image).
	 * Note that kernel end does not necessarily relate to kernsize.
	 * kernsize is the size of the kernel that is actually mapped.
	 * Also note that "start - 1" is deliberate. With SMP, the
	 * entry point is exactly a page from the actual load address.
	 * As such, trunc_page() has no effect and we're off by a page.
	 * Since we always have the ELF header between the load address
	 * and the entry point, we can safely subtract 1 to compensate.
	 */
	kernstart = trunc_page(start - 1);
	data_start = round_page(kernelend);
	data_end = data_start;

	/*
	 * Addresses of preloaded modules (like file systems) use
	 * physical addresses. Make sure we relocate those into
	 * virtual addresses.
	 */
	preload_addr_relocate = kernstart - kernload;

	/* Allocate the dynamic per-cpu area. */
	dpcpu = (void *)data_end;
	data_end += DPCPU_SIZE;

	/* Allocate space for the message buffer. */
	msgbufp = (struct msgbuf *)data_end;
	data_end += msgbufsize;
	debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp,
	    data_end);

	data_end = round_page(data_end);

	/* Allocate space for ptbl_bufs. */
	ptbl_bufs = (struct ptbl_buf *)data_end;
	data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
	debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs,
	    data_end);

	data_end = round_page(data_end);

	/* Allocate PTE tables for kernel KVA. */
	kernel_pdir = data_end;
	kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS +
	    PDIR_SIZE - 1) / PDIR_SIZE;
	data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
	debugf(" kernel ptbls: %d\n", kernel_ptbls);
	debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end);

	debugf(" data_end: 0x%08x\n", data_end);
	if (data_end - kernstart > kernsize) {
		kernsize += tlb1_mapin_region(kernstart + kernsize,
		    kernload + kernsize, (data_end - kernstart) - kernsize);
	}
	data_end = kernstart + kernsize;
	debugf(" updated data_end: 0x%08x\n", data_end);

	/*
	 * Clear the structures - note we can only do it safely after the
	 * possible additional TLB1 translations are in place (above) so that
	 * the whole range up to the currently calculated 'data_end' is
	 * covered.
	 */
	dpcpu_init(dpcpu, 0);
	memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
	memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);

	/*******************************************************/
	/* Set the start and end of kva. */
	/*******************************************************/
	virtual_avail = round_page(data_end);
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/* Allocate KVA space for page zero/copy operations. */
	zero_page_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	zero_page_idle_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	copy_page_src_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	copy_page_dst_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	debugf("zero_page_va = 0x%08x\n", zero_page_va);
	debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va);
	debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
	debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);

	/* Initialize page zero/copy mutexes. */
	mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
	mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);

	/* Allocate KVA space for ptbl bufs. */
	ptbl_buf_pool_vabase = virtual_avail;
	virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
	debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n",
	    ptbl_buf_pool_vabase, virtual_avail);

	/* Calculate corresponding physical addresses for the kernel region. */
	phys_kernelend = kernload + kernsize;
	debugf("kernel image and allocated data:\n");
	debugf(" kernload    = 0x%08x\n", kernload);
	debugf(" kernstart   = 0x%08x\n", kernstart);
	debugf(" kernsize    = 0x%08x\n", kernsize);

	if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
		panic("mmu_booke_bootstrap: phys_avail too small");

	/*
	 * Remove kernel physical address range from avail regions list. Page
	 * align all regions.  Non-page aligned memory isn't very interesting
	 * to us.  Also, sort the entries for ascending addresses.
	 */

	/* Retrieve phys/avail mem regions */
	mem_regions(&physmem_regions, &physmem_regions_sz,
	    &availmem_regions, &availmem_regions_sz);
	sz = 0;
	cnt = availmem_regions_sz;
	debugf("processing avail regions:\n");
	for (mp = availmem_regions; mp->mr_size; mp++) {
		s = mp->mr_start;
		e = mp->mr_start + mp->mr_size;
		debugf(" %08x-%08x -> ", s, e);
		/* Check whether this region holds all of the kernel. */
		if (s < kernload && e > phys_kernelend) {
			availmem_regions[cnt].mr_start = phys_kernelend;
			availmem_regions[cnt++].mr_size = e - phys_kernelend;
			e = kernload;
		}
		/* Check whether this region starts within the kernel. */
		if (s >= kernload && s < phys_kernelend) {
			if (e <= phys_kernelend)
				goto empty;
			s = phys_kernelend;
		}
		/* Now look whether this region ends within the kernel. */
		if (e > kernload && e <= phys_kernelend) {
			if (s >= kernload)
				goto empty;
			e = kernload;
		}
		/* Now page align the start and size of the region. */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s)
			e = s;
		sz = e - s;
		debugf("%08x-%08x = %x\n", s, e, sz);

		/* Check whether some memory is left here. */
		if (sz == 0) {
		empty:
			memmove(mp, mp + 1,
			    (cnt - (mp - availmem_regions)) * sizeof(*mp));
			cnt--;
			mp--;
			continue;
		}

		/* Do an insertion sort. */
		for (mp1 = availmem_regions; mp1 < mp; mp1++)
			if (s < mp1->mr_start)
				break;
		if (mp1 < mp) {
			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
			mp1->mr_start = s;
			mp1->mr_size = sz;
		} else {
			mp->mr_start = s;
			mp->mr_size = sz;
		}
	}
	availmem_regions_sz = cnt;
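
	/*
	 * Example of the carving above (hypothetical numbers): an avail
	 * region 0x0000_0000 - 0x1fff_ffff with the kernel loaded at
	 * kernload = 0x0100_0000 and phys_kernelend = 0x0140_0000 is split
	 * into 0x0000_0000 - 0x00ff_ffff and 0x0140_0000 - 0x1fff_ffff;
	 * the second piece is appended at the end of the list and the
	 * insertion sort then restores ascending address order.
	 */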

	/*******************************************************/
	/* Steal physical memory for kernel stack from the end */
	/* of the first avail region                           */
	/*******************************************************/
	kstack0_sz = KSTACK_PAGES * PAGE_SIZE;
	kstack0_phys = availmem_regions[0].mr_start +
	    availmem_regions[0].mr_size;
	kstack0_phys -= kstack0_sz;
	availmem_regions[0].mr_size -= kstack0_sz;

	/*******************************************************/
	/* Fill in phys_avail table, based on availmem_regions */
	/*******************************************************/
	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);

	debugf("fill in phys_avail:\n");
	for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {

		debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
		    availmem_regions[i].mr_start,
		    availmem_regions[i].mr_start +
		        availmem_regions[i].mr_size,
		    availmem_regions[i].mr_size);

		if (hwphyssz != 0 &&
		    (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
			debugf(" hw.physmem adjust\n");
			if (physsz < hwphyssz) {
				phys_avail[j] = availmem_regions[i].mr_start;
				phys_avail[j + 1] =
				    availmem_regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}

		phys_avail[j] = availmem_regions[i].mr_start;
		phys_avail[j + 1] = availmem_regions[i].mr_start +
		    availmem_regions[i].mr_size;
		phys_avail_count++;
		physsz += availmem_regions[i].mr_size;
	}
	physmem = btoc(physsz);

	/* Calculate the last available physical address. */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	debugf("Maxmem = 0x%08lx\n", Maxmem);
	debugf("phys_avail_count = %d\n", phys_avail_count);
	debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem,
	    physmem);

	/*******************************************************/
	/* Initialize (statically allocated) kernel pmap. */
	/*******************************************************/
	PMAP_LOCK_INIT(kernel_pmap);
	kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;

	debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap);
	debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls);
	debugf("kernel pdir range: 0x%08x - 0x%08x\n",
	    kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1);

	/* Initialize kernel pdir */
	for (i = 0; i < kernel_ptbls; i++)
		kernel_pmap->pm_pdir[kptbl_min + i] =
		    (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES));

	for (i = 0; i < MAXCPU; i++) {
		kernel_pmap->pm_tid[i] = TID_KERNEL;

		/* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
		tidbusy[i][0] = kernel_pmap;
	}

	/*
	 * Fill in PTEs covering kernel code and data. They are not required
	 * for address translation, as this area is covered by static TLB1
	 * entries; they exist so that pte_vatopa() works correctly with
	 * kernel area addresses.
	 */
	for (va = kernstart; va < data_end; va += PAGE_SIZE) {
		pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
		pte->rpn = kernload + (va - kernstart);
		pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
		    PTE_VALID;
	}
	/* Mark kernel_pmap active on all CPUs */
	CPU_FILL(&kernel_pmap->pm_active);

	/*
	 * Initialize the global pv list lock.
	 */
	rw_init(&pvh_global_lock, "pmap pv global");

	/*******************************************************/
	/* Final setup */
	/*******************************************************/

	/* Enter kstack0 into kernel map, provide guard page */
	kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	thread0.td_kstack = kstack0;
	thread0.td_kstack_pages = KSTACK_PAGES;

	debugf("kstack_sz = 0x%08x\n", kstack0_sz);
	debugf("kstack0_phys at 0x%08x - 0x%08x\n",
	    kstack0_phys, kstack0_phys + kstack0_sz);
	debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz);

	virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
	for (i = 0; i < KSTACK_PAGES; i++) {
		mmu_booke_kenter(mmu, kstack0, kstack0_phys);
		kstack0 += PAGE_SIZE;
		kstack0_phys += PAGE_SIZE;
	}

	debugf("virtual_avail = %08x\n", virtual_avail);
	debugf("virtual_end   = %08x\n", virtual_end);

	debugf("mmu_booke_bootstrap: exit\n");
}

void
pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
{
	int i;

	/*
	 * Finish TLB1 configuration: the BSP already set up its TLB1 and we
	 * have the snapshot of its contents in the s/w tlb1[] table, so use
	 * these values directly to (re)program AP's TLB1 hardware.
	 */
	for (i = bp_ntlb1s; i < tlb1_idx; i++) {
		/* Skip invalid entries */
		if (!(tlb1[i].mas1 & MAS1_VALID))
			continue;

		tlb1_write_entry(i);
	}

	set_mas4_defaults();
}

1336176771Sraj/*
1337176771Sraj * Get the physical page address for the given pmap/virtual address.
1338176771Sraj */
1339176771Srajstatic vm_paddr_t
1340176771Srajmmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1341176771Sraj{
1342176771Sraj	vm_paddr_t pa;
1343176771Sraj
1344176771Sraj	PMAP_LOCK(pmap);
1345176771Sraj	pa = pte_vatopa(mmu, pmap, va);
1346176771Sraj	PMAP_UNLOCK(pmap);
1347176771Sraj
1348176771Sraj	return (pa);
1349176771Sraj}
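
/*
 * A minimal usage sketch (hypothetical caller; assumes a valid mapping
 * exists for va):
 *
 *	vm_paddr_t pa = mmu_booke_extract(mmu, kernel_pmap, va);
 *
 * pte_vatopa() is expected to yield 0 when no valid PTE is found, so a
 * zero return here means "no mapping".
 */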
1350176771Sraj
1351176771Sraj/*
1352176771Sraj * Extract the physical page address associated with the given
1353176771Sraj * kernel virtual address.
1354176771Sraj */
1355176771Srajstatic vm_paddr_t
1356176771Srajmmu_booke_kextract(mmu_t mmu, vm_offset_t va)
1357176771Sraj{
1358265996Sian	int i;
1359176771Sraj
1360265996Sian	/* Check TLB1 mappings */
1361265996Sian	for (i = 0; i < tlb1_idx; i++) {
1362265996Sian		if (!(tlb1[i].mas1 & MAS1_VALID))
1363265996Sian			continue;
1364265996Sian		if (va >= tlb1[i].virt && va < tlb1[i].virt + tlb1[i].size)
1365265996Sian			return (tlb1[i].phys + (va - tlb1[i].virt));
1366265996Sian	}
1367265996Sian
1368176771Sraj	return (pte_vatopa(mmu, kernel_pmap, va));
1369176771Sraj}
1370176771Sraj
1371176771Sraj/*
1372176771Sraj * Initialize the pmap module.
1373176771Sraj * Called by vm_init, to initialize any structures that the pmap
1374176771Sraj * system needs to map virtual memory.
1375176771Sraj */
1376176771Srajstatic void
1377176771Srajmmu_booke_init(mmu_t mmu)
1378176771Sraj{
1379176771Sraj	int shpgperproc = PMAP_SHPGPERPROC;
1380176771Sraj
1381176771Sraj	/*
1382176771Sraj	 * Initialize the address space (zone) for the pv entries.  Set a
1383176771Sraj	 * high water mark so that the system can recover from excessive
1384176771Sraj	 * numbers of pv entries.
1385176771Sraj	 */
1386176771Sraj	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
1387176771Sraj	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
1388176771Sraj
1389176771Sraj	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
1390176771Sraj	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
1391176771Sraj
1392176771Sraj	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
1393176771Sraj	pv_entry_high_water = 9 * (pv_entry_max / 10);
1394176771Sraj
1395247360Sattilio	uma_zone_reserve_kva(pvzone, pv_entry_max);
1396176771Sraj
1397176771Sraj	/* Pre-fill pvzone with initial number of pv entries. */
1398176771Sraj	uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);
1399176771Sraj
1400176771Sraj	/* Initialize ptbl allocation. */
1401176771Sraj	ptbl_init();
1402176771Sraj}
1403176771Sraj
1404176771Sraj/*
1405176771Sraj * Map a list of wired pages into kernel virtual address space.  This is
1406176771Sraj * intended for temporary mappings which do not need page modification or
1407176771Sraj * references recorded.  Existing mappings in the region are overwritten.
1408176771Sraj */
1409176771Srajstatic void
1410176771Srajmmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
1411176771Sraj{
1412176771Sraj	vm_offset_t va;
1413176771Sraj
1414176771Sraj	va = sva;
1415176771Sraj	while (count-- > 0) {
1416176771Sraj		mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
1417176771Sraj		va += PAGE_SIZE;
1418176771Sraj		m++;
1419176771Sraj	}
1420176771Sraj}
1421176771Sraj
1422176771Sraj/*
1423176771Sraj * Remove page mappings from kernel virtual address space.  Intended for
1424176771Sraj * temporary mappings entered by mmu_booke_qenter.
1425176771Sraj */
1426176771Srajstatic void
1427176771Srajmmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
1428176771Sraj{
1429176771Sraj	vm_offset_t va;
1430176771Sraj
1431176771Sraj	va = sva;
1432176771Sraj	while (count-- > 0) {
1433176771Sraj		mmu_booke_kremove(mmu, va);
1434176771Sraj		va += PAGE_SIZE;
1435176771Sraj	}
1436176771Sraj}
1437176771Sraj
1438176771Sraj/*
1439176771Sraj * Map a wired page into kernel virtual address space.
1440176771Sraj */
1441176771Srajstatic void
1442235936Srajmmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
1443176771Sraj{
1444265996Sian
1445265996Sian	mmu_booke_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
1446265996Sian}
1447265996Sian
1448265996Sianstatic void
1449265996Sianmmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
1450265996Sian{
1451176771Sraj	unsigned int pdir_idx = PDIR_IDX(va);
1452176771Sraj	unsigned int ptbl_idx = PTBL_IDX(va);
1453187151Sraj	uint32_t flags;
1454176771Sraj	pte_t *pte;
1455176771Sraj
1456187151Sraj	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
1457187151Sraj	    (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));
1458176771Sraj
1459265996Sian	flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
1460265996Sian	flags |= tlb_calc_wimg(pa, ma);
1461176771Sraj
1462176771Sraj	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);
1463176771Sraj
1464187149Sraj	mtx_lock_spin(&tlbivax_mutex);
1465192532Sraj	tlb_miss_lock();
1466187149Sraj
1467176771Sraj	if (PTE_ISVALID(pte)) {
1468187149Sraj
1469187149Sraj		CTR1(KTR_PMAP, "%s: replacing entry!", __func__);
1470176771Sraj
1471176771Sraj		/* Flush entry from TLB0 */
1472187149Sraj		tlb0_flush_entry(va);
1473176771Sraj	}
1474176771Sraj
1475176771Sraj	pte->rpn = pa & ~PTE_PA_MASK;
1476176771Sraj	pte->flags = flags;
1477176771Sraj
1478176771Sraj	//debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
1479176771Sraj	//		"pa=0x%08x rpn=0x%08x flags=0x%08x\n",
1480176771Sraj	//		pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);
1481176771Sraj
1482176771Sraj	/* Flush the real memory from the instruction cache. */
1483176771Sraj	if ((flags & (PTE_I | PTE_G)) == 0) {
1484176771Sraj		__syncicache((void *)va, PAGE_SIZE);
1485176771Sraj	}
1486176771Sraj
1487192532Sraj	tlb_miss_unlock();
1488187149Sraj	mtx_unlock_spin(&tlbivax_mutex);
1489176771Sraj}
1490176771Sraj
1491176771Sraj/*
1492176771Sraj * Remove a page from the kernel page table.
1493176771Sraj */
1494176771Srajstatic void
1495176771Srajmmu_booke_kremove(mmu_t mmu, vm_offset_t va)
1496176771Sraj{
1497176771Sraj	unsigned int pdir_idx = PDIR_IDX(va);
1498176771Sraj	unsigned int ptbl_idx = PTBL_IDX(va);
1499176771Sraj	pte_t *pte;
1500176771Sraj
1501187149Sraj//	CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va));
1502176771Sraj
1503187149Sraj	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
1504187149Sraj	    (va <= VM_MAX_KERNEL_ADDRESS)),
1505176771Sraj	    ("mmu_booke_kremove: invalid va"));
1506176771Sraj
1507176771Sraj	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);
1508176771Sraj
1509176771Sraj	if (!PTE_ISVALID(pte)) {
1510187149Sraj
1511187149Sraj		CTR1(KTR_PMAP, "%s: invalid pte", __func__);
1512187149Sraj
1513176771Sraj		return;
1514176771Sraj	}
1515176771Sraj
1516187149Sraj	mtx_lock_spin(&tlbivax_mutex);
1517192532Sraj	tlb_miss_lock();
1518176771Sraj
1519187149Sraj	/* Invalidate entry in TLB0, update PTE. */
1520187149Sraj	tlb0_flush_entry(va);
1521176771Sraj	pte->flags = 0;
1522176771Sraj	pte->rpn = 0;
1523176771Sraj
1524192532Sraj	tlb_miss_unlock();
1525187149Sraj	mtx_unlock_spin(&tlbivax_mutex);
1526176771Sraj}
1527176771Sraj
1528176771Sraj/*
1529176771Sraj * Initialize the pmap associated with process 0.
1530176771Sraj */
1531176771Srajstatic void
1532176771Srajmmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
1533176771Sraj{
1534187151Sraj
1535254667Skib	PMAP_LOCK_INIT(pmap);
1536176771Sraj	mmu_booke_pinit(mmu, pmap);
1537176771Sraj	PCPU_SET(curpmap, pmap);
1538176771Sraj}
1539176771Sraj
1540176771Sraj/*
1541176771Sraj * Initialize a preallocated and zeroed pmap structure,
1542176771Sraj * such as one in a vmspace structure.
1543176771Sraj */
1544176771Srajstatic void
1545176771Srajmmu_booke_pinit(mmu_t mmu, pmap_t pmap)
1546176771Sraj{
1547187149Sraj	int i;
1548176771Sraj
1549187149Sraj	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
1550187149Sraj	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);
1551176771Sraj
1552187149Sraj	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));
1553176771Sraj
1554187149Sraj	for (i = 0; i < MAXCPU; i++)
1555187149Sraj		pmap->pm_tid[i] = TID_NONE;
1556222813Sattilio	CPU_ZERO(&pmap->pm_active);
1557176771Sraj	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1558176771Sraj	bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
1559187149Sraj	TAILQ_INIT(&pmap->pm_ptbl_list);
1560176771Sraj}
1561176771Sraj
1562176771Sraj/*
1563176771Sraj * Release any resources held by the given physical map.
1564176771Sraj * Called when a pmap initialized by mmu_booke_pinit is being released.
1565176771Sraj * Should only be called if the map contains no valid mappings.
1566176771Sraj */
1567176771Srajstatic void
1568176771Srajmmu_booke_release(mmu_t mmu, pmap_t pmap)
1569176771Sraj{
1570176771Sraj
1571187151Sraj	KASSERT(pmap->pm_stats.resident_count == 0,
1572187151Sraj	    ("pmap_release: pmap resident count %ld != 0",
1573187151Sraj	    pmap->pm_stats.resident_count));
1574176771Sraj}
1575176771Sraj
1576176771Sraj/*
1577176771Sraj * Insert the given physical page at the specified virtual address in the
1578176771Sraj * target physical map with the protection requested. If specified, the page
1579176771Sraj * will be wired down.
1580176771Sraj */
1581176771Srajstatic void
1582176771Srajmmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1583176771Sraj    vm_prot_t prot, boolean_t wired)
1584176771Sraj{
1585187151Sraj
1586242535Salc	rw_wlock(&pvh_global_lock);
1587176771Sraj	PMAP_LOCK(pmap);
1588176771Sraj	mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired);
1589242535Salc	rw_wunlock(&pvh_global_lock);
1590176771Sraj	PMAP_UNLOCK(pmap);
1591176771Sraj}
1592176771Sraj
1593176771Srajstatic void
1594176771Srajmmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1595176771Sraj    vm_prot_t prot, boolean_t wired)
1596176771Sraj{
1597176771Sraj	pte_t *pte;
1598176771Sraj	vm_paddr_t pa;
1599187151Sraj	uint32_t flags;
1600176771Sraj	int su, sync;
1601176771Sraj
1602176771Sraj	pa = VM_PAGE_TO_PHYS(m);
1603176771Sraj	su = (pmap == kernel_pmap);
1604176771Sraj	sync = 0;
1605176771Sraj
1606176771Sraj	//debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
1607176771Sraj	//		"pa=0x%08x prot=0x%08x wired=%d)\n",
1608176771Sraj	//		(u_int32_t)pmap, su, pmap->pm_tid,
1609176771Sraj	//		(u_int32_t)m, va, pa, prot, wired);
1610176771Sraj
1611176771Sraj	if (su) {
1612187151Sraj		KASSERT(((va >= virtual_avail) &&
1613187151Sraj		    (va <= VM_MAX_KERNEL_ADDRESS)),
1614187151Sraj		    ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
1615176771Sraj	} else {
1616176771Sraj		KASSERT((va <= VM_MAXUSER_ADDRESS),
1617187151Sraj		    ("mmu_booke_enter_locked: user pmap, non user va"));
1618176771Sraj	}
1619254138Sattilio	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
1620250747Salc		VM_OBJECT_ASSERT_LOCKED(m->object);
1621176771Sraj
1622176771Sraj	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1623176771Sraj
1624176771Sraj	/*
1625176771Sraj	 * If there is an existing mapping, and the physical address has not
1626176771Sraj	 * changed, it must be a protection or wiring change.
1627176771Sraj	 */
1628176771Sraj	if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
1629176771Sraj	    (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {
1630187149Sraj
1631187149Sraj		/*
1632187149Sraj		 * Before actually updating pte->flags we calculate and
1633187149Sraj		 * prepare its new value in a helper var.
1634187149Sraj		 */
1635187149Sraj		flags = pte->flags;
1636187149Sraj		flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);
1637176771Sraj
1638176771Sraj		/* Wiring change, just update stats. */
1639176771Sraj		if (wired) {
1640176771Sraj			if (!PTE_ISWIRED(pte)) {
1641187149Sraj				flags |= PTE_WIRED;
1642176771Sraj				pmap->pm_stats.wired_count++;
1643176771Sraj			}
1644176771Sraj		} else {
1645176771Sraj			if (PTE_ISWIRED(pte)) {
1646187149Sraj				flags &= ~PTE_WIRED;
1647176771Sraj				pmap->pm_stats.wired_count--;
1648176771Sraj			}
1649176771Sraj		}
1650176771Sraj
1651176771Sraj		if (prot & VM_PROT_WRITE) {
1652176771Sraj			/* Add write permissions. */
1653187149Sraj			flags |= PTE_SW;
1654176771Sraj			if (!su)
1655187149Sraj				flags |= PTE_UW;
1656192795Sraj
1657208846Salc			if ((flags & PTE_MANAGED) != 0)
1658225418Skib				vm_page_aflag_set(m, PGA_WRITEABLE);
1659176771Sraj		} else {
1660176771Sraj			/* Handle modified pages, sense modify status. */
1661187149Sraj
1662187149Sraj			/*
1663187149Sraj			 * The PTE_MODIFIED flag could have been set by the
1664187149Sraj			 * TLB miss handler since we last read it (above), and
1665187149Sraj			 * other CPUs could have updated it as well, so check
1666187149Sraj			 * the PTE directly rather than relying on the saved
1667187149Sraj			 * local flags copy.
1668187149Sraj			 */
1669178626Smarcel			if (PTE_ISMODIFIED(pte))
1670178626Smarcel				vm_page_dirty(m);
1671176771Sraj		}
1672176771Sraj
1673176771Sraj		if (prot & VM_PROT_EXECUTE) {
1674187149Sraj			flags |= PTE_SX;
1675176771Sraj			if (!su)
1676187149Sraj				flags |= PTE_UX;
1677176771Sraj
1678187149Sraj			/*
1679187149Sraj			 * Check existing flags for execute permissions: if we
1680187149Sraj			 * are turning execute permissions on, icache should
1681187149Sraj			 * be flushed.
1682187149Sraj			 */
1683208720Salc			if ((pte->flags & (PTE_UX | PTE_SX)) == 0)
1684176771Sraj				sync++;
1685176771Sraj		}
1686176771Sraj
1687187149Sraj		flags &= ~PTE_REFERENCED;
1688187149Sraj
1689187149Sraj		/*
1690187149Sraj		 * The new flags value is all calculated -- only now actually
1691187149Sraj		 * update the PTE.
1692187149Sraj		 */
1693187149Sraj		mtx_lock_spin(&tlbivax_mutex);
1694192532Sraj		tlb_miss_lock();
1695187149Sraj
1696187149Sraj		tlb0_flush_entry(va);
1697187149Sraj		pte->flags = flags;
1698187149Sraj
1699192532Sraj		tlb_miss_unlock();
1700187149Sraj		mtx_unlock_spin(&tlbivax_mutex);
1701187149Sraj
1702176771Sraj	} else {
1703176771Sraj		/*
1704187149Sraj		 * If there is an existing mapping, but it's for a different
1705176771Sraj		 * physical address, pte_enter() will delete the old mapping.
1706176771Sraj		 */
1707176771Sraj		//if ((pte != NULL) && PTE_ISVALID(pte))
1708176771Sraj		//	debugf("mmu_booke_enter_locked: replace\n");
1709176771Sraj		//else
1710176771Sraj		//	debugf("mmu_booke_enter_locked: new\n");
1711176771Sraj
1712176771Sraj		/* Now set up the flags and install the new mapping. */
1713176771Sraj		flags = (PTE_SR | PTE_VALID);
1714187149Sraj		flags |= PTE_M;
1715176771Sraj
1716176771Sraj		if (!su)
1717176771Sraj			flags |= PTE_UR;
1718176771Sraj
1719176771Sraj		if (prot & VM_PROT_WRITE) {
1720176771Sraj			flags |= PTE_SW;
1721176771Sraj			if (!su)
1722176771Sraj				flags |= PTE_UW;
1723192795Sraj
1724224746Skib			if ((m->oflags & VPO_UNMANAGED) == 0)
1725225418Skib				vm_page_aflag_set(m, PGA_WRITEABLE);
1726176771Sraj		}
1727176771Sraj
1728176771Sraj		if (prot & VM_PROT_EXECUTE) {
1729176771Sraj			flags |= PTE_SX;
1730176771Sraj			if (!su)
1731176771Sraj				flags |= PTE_UX;
1732176771Sraj		}
1733176771Sraj
1734176771Sraj		/* If it's wired, update stats. */
1735176771Sraj		if (wired) {
1736176771Sraj			pmap->pm_stats.wired_count++;
1737176771Sraj			flags |= PTE_WIRED;
1738176771Sraj		}
1739176771Sraj
1740176771Sraj		pte_enter(mmu, pmap, m, va, flags);
1741176771Sraj
1742176771Sraj		/* Flush the real memory from the instruction cache. */
1743176771Sraj		if (prot & VM_PROT_EXECUTE)
1744176771Sraj			sync++;
1745176771Sraj	}
1746176771Sraj
1747176771Sraj	if (sync && (su || pmap == PCPU_GET(curpmap))) {
1748176771Sraj		__syncicache((void *)va, PAGE_SIZE);
1749176771Sraj		sync = 0;
1750176771Sraj	}
1751176771Sraj}
1752176771Sraj
1753176771Sraj/*
1754176771Sraj * Maps a sequence of resident pages belonging to the same object.
1755176771Sraj * The sequence begins with the given page m_start.  This page is
1756176771Sraj * mapped at the given virtual address start.  Each subsequent page is
1757176771Sraj * mapped at a virtual address that is offset from start by the same
1758176771Sraj * amount as the page is offset from m_start within the object.  The
1759176771Sraj * last page in the sequence is the page with the largest offset from
1760176771Sraj * m_start that can be mapped at a virtual address less than the given
1761176771Sraj * virtual address end.  Not every virtual page between start and end
1762176771Sraj * is mapped; only those for which a resident page exists with the
1763176771Sraj * corresponding offset from m_start are mapped.
1764176771Sraj */
1765176771Srajstatic void
1766176771Srajmmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
1767176771Sraj    vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
1768176771Sraj{
1769176771Sraj	vm_page_t m;
1770176771Sraj	vm_pindex_t diff, psize;
1771176771Sraj
1772250884Sattilio	VM_OBJECT_ASSERT_LOCKED(m_start->object);
1773250884Sattilio
1774176771Sraj	psize = atop(end - start);
1775176771Sraj	m = m_start;
1776242535Salc	rw_wlock(&pvh_global_lock);
1777176771Sraj	PMAP_LOCK(pmap);
1778176771Sraj	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1779187151Sraj		mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
1780187151Sraj		    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1781176771Sraj		m = TAILQ_NEXT(m, listq);
1782176771Sraj	}
1783242535Salc	rw_wunlock(&pvh_global_lock);
1784176771Sraj	PMAP_UNLOCK(pmap);
1785176771Sraj}
1786176771Sraj
1787176771Srajstatic void
1788176771Srajmmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1789176771Sraj    vm_prot_t prot)
1790176771Sraj{
1791176771Sraj
1792242535Salc	rw_wlock(&pvh_global_lock);
1793176771Sraj	PMAP_LOCK(pmap);
1794176771Sraj	mmu_booke_enter_locked(mmu, pmap, va, m,
1795176771Sraj	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1796242535Salc	rw_wunlock(&pvh_global_lock);
1797176771Sraj	PMAP_UNLOCK(pmap);
1798176771Sraj}
1799176771Sraj
1800176771Sraj/*
1801176771Sraj * Remove the given range of addresses from the specified map.
1802176771Sraj *
1803176771Sraj * It is assumed that the start and end are properly rounded to the page size.
1804176771Sraj */
1805176771Srajstatic void
1806176771Srajmmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
1807176771Sraj{
1808176771Sraj	pte_t *pte;
1809187151Sraj	uint8_t hold_flag;
1810176771Sraj
1811176771Sraj	int su = (pmap == kernel_pmap);
1812176771Sraj
1813176771Sraj	//debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
1814176771Sraj	//		su, (u_int32_t)pmap, pmap->pm_tid, va, endva);
1815176771Sraj
1816176771Sraj	if (su) {
1817187151Sraj		KASSERT(((va >= virtual_avail) &&
1818187151Sraj		    (va <= VM_MAX_KERNEL_ADDRESS)),
1819187151Sraj		    ("mmu_booke_remove: kernel pmap, non kernel va"));
1820176771Sraj	} else {
1821176771Sraj		KASSERT((va <= VM_MAXUSER_ADDRESS),
1822187151Sraj		    ("mmu_booke_remove: user pmap, non user va"));
1823176771Sraj	}
1824176771Sraj
1825176771Sraj	if (PMAP_REMOVE_DONE(pmap)) {
1826176771Sraj		//debugf("mmu_booke_remove: e (empty)\n");
1827176771Sraj		return;
1828176771Sraj	}
1829176771Sraj
1830176771Sraj	hold_flag = PTBL_HOLD_FLAG(pmap);
1831176771Sraj	//debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);
1832176771Sraj
1833242535Salc	rw_wlock(&pvh_global_lock);
1834176771Sraj	PMAP_LOCK(pmap);
1835176771Sraj	for (; va < endva; va += PAGE_SIZE) {
1836176771Sraj		pte = pte_find(mmu, pmap, va);
1837187149Sraj		if ((pte != NULL) && PTE_ISVALID(pte))
1838176771Sraj			pte_remove(mmu, pmap, va, hold_flag);
1839176771Sraj	}
1840176771Sraj	PMAP_UNLOCK(pmap);
1841242535Salc	rw_wunlock(&pvh_global_lock);
1842176771Sraj
1843176771Sraj	//debugf("mmu_booke_remove: e\n");
1844176771Sraj}
1845176771Sraj
1846176771Sraj/*
1847176771Sraj * Remove physical page from all pmaps in which it resides.
1848176771Sraj */
1849176771Srajstatic void
1850176771Srajmmu_booke_remove_all(mmu_t mmu, vm_page_t m)
1851176771Sraj{
1852176771Sraj	pv_entry_t pv, pvn;
1853187151Sraj	uint8_t hold_flag;
1854176771Sraj
1855242535Salc	rw_wlock(&pvh_global_lock);
1856176771Sraj	for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
1857176771Sraj		pvn = TAILQ_NEXT(pv, pv_link);
1858176771Sraj
1859176771Sraj		PMAP_LOCK(pv->pv_pmap);
1860176771Sraj		hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
1861176771Sraj		pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
1862176771Sraj		PMAP_UNLOCK(pv->pv_pmap);
1863176771Sraj	}
1864225418Skib	vm_page_aflag_clear(m, PGA_WRITEABLE);
1865242535Salc	rw_wunlock(&pvh_global_lock);
1866176771Sraj}
1867176771Sraj
1868176771Sraj/*
1869176771Sraj * Map a range of physical addresses into kernel virtual address space.
1870176771Sraj */
1871176771Srajstatic vm_offset_t
1872235936Srajmmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
1873235936Sraj    vm_paddr_t pa_end, int prot)
1874176771Sraj{
1875176771Sraj	vm_offset_t sva = *virt;
1876176771Sraj	vm_offset_t va = sva;
1877176771Sraj
1878176771Sraj	//debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n",
1879176771Sraj	//		sva, pa_start, pa_end);
1880176771Sraj
1881176771Sraj	while (pa_start < pa_end) {
1882176771Sraj		mmu_booke_kenter(mmu, va, pa_start);
1883176771Sraj		va += PAGE_SIZE;
1884176771Sraj		pa_start += PAGE_SIZE;
1885176771Sraj	}
1886176771Sraj	*virt = va;
1887176771Sraj
1888176771Sraj	//debugf("mmu_booke_map: e (va = 0x%08x)\n", va);
1889176771Sraj	return (sva);
1890176771Sraj}
1891176771Sraj
1892176771Sraj/*
1893176771Sraj * The pmap must be activated before its address space can be accessed in any
1894176771Sraj * way.
1895176771Sraj */
1896176771Srajstatic void
1897176771Srajmmu_booke_activate(mmu_t mmu, struct thread *td)
1898176771Sraj{
1899176771Sraj	pmap_t pmap;
1900223758Sattilio	u_int cpuid;
1901176771Sraj
1902176771Sraj	pmap = &td->td_proc->p_vmspace->vm_pmap;
1903176771Sraj
1904187149Sraj	CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)",
1905187149Sraj	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
1906176771Sraj
1907176771Sraj	KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));
1908176771Sraj
1909176771Sraj	mtx_lock_spin(&sched_lock);
1910176771Sraj
1911223758Sattilio	cpuid = PCPU_GET(cpuid);
1912223758Sattilio	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
1913176771Sraj	PCPU_SET(curpmap, pmap);
1914187149Sraj
1915223758Sattilio	if (pmap->pm_tid[cpuid] == TID_NONE)
1916176771Sraj		tid_alloc(pmap);
1917176771Sraj
1918176771Sraj	/* Load PID0 register with pmap tid value. */
1919223758Sattilio	mtspr(SPR_PID0, pmap->pm_tid[cpuid]);
1920187149Sraj	__asm __volatile("isync");
1921176771Sraj
1922176771Sraj	mtx_unlock_spin(&sched_lock);
1923176771Sraj
1924187149Sraj	CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__,
1925187149Sraj	    pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm);
1926176771Sraj}
1927176771Sraj
1928176771Sraj/*
1929176771Sraj * Deactivate the specified process's address space.
1930176771Sraj */
1931176771Srajstatic void
1932176771Srajmmu_booke_deactivate(mmu_t mmu, struct thread *td)
1933176771Sraj{
1934176771Sraj	pmap_t pmap;
1935176771Sraj
1936176771Sraj	pmap = &td->td_proc->p_vmspace->vm_pmap;
1937187149Sraj
1938187149Sraj	CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x",
1939187149Sraj	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
1940187149Sraj
1941223758Sattilio	CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active);
1942176771Sraj	PCPU_SET(curpmap, NULL);
1943176771Sraj}
1944176771Sraj
1945176771Sraj/*
1946176771Sraj * Copy the range specified by src_addr/len
1947176771Sraj * from the source map to the range dst_addr/len
1948176771Sraj * in the destination map.
1949176771Sraj *
1950176771Sraj * This routine is only advisory and need not do anything.
1951176771Sraj */
1952176771Srajstatic void
1953194101Srajmmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
1954194101Sraj    vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
1955176771Sraj{
1956176771Sraj
1957176771Sraj}
1958176771Sraj
1959176771Sraj/*
1960176771Sraj * Set the physical protection on the specified range of this map as requested.
1961176771Sraj */
1962176771Srajstatic void
1963176771Srajmmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
1964176771Sraj    vm_prot_t prot)
1965176771Sraj{
1966176771Sraj	vm_offset_t va;
1967176771Sraj	vm_page_t m;
1968176771Sraj	pte_t *pte;
1969176771Sraj
1970176771Sraj	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1971176771Sraj		mmu_booke_remove(mmu, pmap, sva, eva);
1972176771Sraj		return;
1973176771Sraj	}
1974176771Sraj
1975176771Sraj	if (prot & VM_PROT_WRITE)
1976176771Sraj		return;
1977176771Sraj
1978176771Sraj	PMAP_LOCK(pmap);
1979176771Sraj	for (va = sva; va < eva; va += PAGE_SIZE) {
1980176771Sraj		if ((pte = pte_find(mmu, pmap, va)) != NULL) {
1981176771Sraj			if (PTE_ISVALID(pte)) {
1982176771Sraj				m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1983176771Sraj
1984187149Sraj				mtx_lock_spin(&tlbivax_mutex);
1985192532Sraj				tlb_miss_lock();
1986187149Sraj
1987176771Sraj				/* Handle modified pages. */
1988207437Salc				if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte))
1989178626Smarcel					vm_page_dirty(m);
1990176771Sraj
1991187149Sraj				tlb0_flush_entry(va);
1992207437Salc				pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
1993187149Sraj
1994192532Sraj				tlb_miss_unlock();
1995187149Sraj				mtx_unlock_spin(&tlbivax_mutex);
1996176771Sraj			}
1997176771Sraj		}
1998176771Sraj	}
1999176771Sraj	PMAP_UNLOCK(pmap);
2000176771Sraj}
2001176771Sraj
2002176771Sraj/*
2003176771Sraj * Clear the write and modified bits in each of the given page's mappings.
2004176771Sraj */
2005176771Srajstatic void
2006176771Srajmmu_booke_remove_write(mmu_t mmu, vm_page_t m)
2007176771Sraj{
2008176771Sraj	pv_entry_t pv;
2009176771Sraj	pte_t *pte;
2010176771Sraj
2011224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2012208175Salc	    ("mmu_booke_remove_write: page %p is not managed", m));
2013208175Salc
2014208175Salc	/*
2015254138Sattilio	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
2016254138Sattilio	 * set by another thread while the object is locked.  Thus,
2017254138Sattilio	 * if PGA_WRITEABLE is clear, no page table entries need updating.
2018208175Salc	 */
2019248084Sattilio	VM_OBJECT_ASSERT_WLOCKED(m->object);
2020254138Sattilio	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
2021176771Sraj		return;
2022242535Salc	rw_wlock(&pvh_global_lock);
2023176771Sraj	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2024176771Sraj		PMAP_LOCK(pv->pv_pmap);
2025176771Sraj		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2026176771Sraj			if (PTE_ISVALID(pte)) {
2027176771Sraj				m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2028176771Sraj
2029187149Sraj				mtx_lock_spin(&tlbivax_mutex);
2030192532Sraj				tlb_miss_lock();
2031187149Sraj
2032176771Sraj				/* Handle modified pages. */
2033178626Smarcel				if (PTE_ISMODIFIED(pte))
2034178626Smarcel					vm_page_dirty(m);
2035176771Sraj
2036176771Sraj				/* Flush mapping from TLB0. */
2037207437Salc				pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
2038187149Sraj
2039192532Sraj				tlb_miss_unlock();
2040187149Sraj				mtx_unlock_spin(&tlbivax_mutex);
2041176771Sraj			}
2042176771Sraj		}
2043176771Sraj		PMAP_UNLOCK(pv->pv_pmap);
2044176771Sraj	}
2045225418Skib	vm_page_aflag_clear(m, PGA_WRITEABLE);
2046242535Salc	rw_wunlock(&pvh_global_lock);
2047176771Sraj}
2048176771Sraj
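/*
 * Sync the instruction cache for the given address range in the given
 * pmap.  If that pmap is not the active one, each valid page is first
 * entered temporarily into the current pmap so __syncicache() can
 * reach it.
 */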
2049198341Smarcelstatic void
2050198341Smarcelmmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
2051176771Sraj{
2052176771Sraj	pte_t *pte;
2053198341Smarcel	pmap_t pmap;
2054198341Smarcel	vm_page_t m;
2055198341Smarcel	vm_offset_t addr;
2056198341Smarcel	vm_paddr_t pa;
2057198341Smarcel	int active, valid;
2058198341Smarcel
2059198341Smarcel	va = trunc_page(va);
2060198341Smarcel	sz = round_page(sz);
2061176771Sraj
2062242535Salc	rw_wlock(&pvh_global_lock);
2063198341Smarcel	pmap = PCPU_GET(curpmap);
2064198341Smarcel	active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
2065198341Smarcel	while (sz > 0) {
2066198341Smarcel		PMAP_LOCK(pm);
2067198341Smarcel		pte = pte_find(mmu, pm, va);
2068198341Smarcel		valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
2069198341Smarcel		if (valid)
2070198341Smarcel			pa = PTE_PA(pte);
2071198341Smarcel		PMAP_UNLOCK(pm);
2072198341Smarcel		if (valid) {
2073198341Smarcel			if (!active) {
2074198341Smarcel				/* Create a mapping in the active pmap. */
2075198341Smarcel				addr = 0;
2076198341Smarcel				m = PHYS_TO_VM_PAGE(pa);
2077198341Smarcel				PMAP_LOCK(pmap);
2078198341Smarcel				pte_enter(mmu, pmap, m, addr,
2079198341Smarcel				    PTE_SR | PTE_VALID | PTE_UR);
2080198341Smarcel				__syncicache((void *)addr, PAGE_SIZE);
2081198341Smarcel				pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
2082198341Smarcel				PMAP_UNLOCK(pmap);
2083198341Smarcel			} else
2084198341Smarcel				__syncicache((void *)va, PAGE_SIZE);
2085198341Smarcel		}
2086198341Smarcel		va += PAGE_SIZE;
2087198341Smarcel		sz -= PAGE_SIZE;
2088176771Sraj	}
2089242535Salc	rw_wunlock(&pvh_global_lock);
2090176771Sraj}
2091176771Sraj
2092176771Sraj/*
2093176771Sraj * Atomically extract and hold the physical page with the given
2094176771Sraj * pmap and virtual address pair if that mapping permits the given
2095176771Sraj * protection.
2096176771Sraj */
2097176771Srajstatic vm_page_t
2098176771Srajmmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
2099176771Sraj    vm_prot_t prot)
2100176771Sraj{
2101176771Sraj	pte_t *pte;
2102176771Sraj	vm_page_t m;
2103187151Sraj	uint32_t pte_wbit;
2104207410Skmacy	vm_paddr_t pa;
2105207410Skmacy
2106176771Sraj	m = NULL;
2107207410Skmacy	pa = 0;
2108176771Sraj	PMAP_LOCK(pmap);
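	/*
	 * vm_page_pa_tryrelock() may drop and reacquire the pmap lock while
	 * acquiring the page lock for the PA; when it reports that, the PTE
	 * lookup must be redone.
	 */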
2109207410Skmacyretry:
2110176771Sraj	pte = pte_find(mmu, pmap, va);
2111176771Sraj	if ((pte != NULL) && PTE_ISVALID(pte)) {
2112176771Sraj		if (pmap == kernel_pmap)
2113176771Sraj			pte_wbit = PTE_SW;
2114176771Sraj		else
2115176771Sraj			pte_wbit = PTE_UW;
2116176771Sraj
2117176771Sraj		if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
2118207410Skmacy			if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
2119207410Skmacy				goto retry;
2120176771Sraj			m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2121176771Sraj			vm_page_hold(m);
2122176771Sraj		}
2123176771Sraj	}
2124176771Sraj
2125207410Skmacy	PA_UNLOCK_COND(pa);
2126176771Sraj	PMAP_UNLOCK(pmap);
2127176771Sraj	return (m);
2128176771Sraj}
2129176771Sraj
2130176771Sraj/*
2131176771Sraj * Initialize a vm_page's machine-dependent fields.
2132176771Sraj */
2133176771Srajstatic void
2134176771Srajmmu_booke_page_init(mmu_t mmu, vm_page_t m)
2135176771Sraj{
2136176771Sraj
2137176771Sraj	TAILQ_INIT(&m->md.pv_list);
2138176771Sraj}
2139176771Sraj
2140176771Sraj/*
2141176771Sraj * mmu_booke_zero_page_area zeros the specified hardware page by
2142176771Sraj * mapping it into virtual memory and using bzero to clear
2143176771Sraj * its contents.
2144176771Sraj *
2145176771Sraj * off and size must reside within a single page.
2146176771Sraj */
2147176771Srajstatic void
2148176771Srajmmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
2149176771Sraj{
2150176771Sraj	vm_offset_t va;
2151176771Sraj
2152187151Sraj	/* XXX KASSERT off and size are within a single page? */
2153176771Sraj
2154176771Sraj	mtx_lock(&zero_page_mutex);
2155176771Sraj	va = zero_page_va;
2156176771Sraj
2157176771Sraj	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2158176771Sraj	bzero((caddr_t)va + off, size);
2159176771Sraj	mmu_booke_kremove(mmu, va);
2160176771Sraj
2161176771Sraj	mtx_unlock(&zero_page_mutex);
2162176771Sraj}
2163176771Sraj
2164176771Sraj/*
2165176771Sraj * mmu_booke_zero_page zeros the specified hardware page.
2166176771Sraj */
2167176771Srajstatic void
2168176771Srajmmu_booke_zero_page(mmu_t mmu, vm_page_t m)
2169176771Sraj{
2170176771Sraj
2171176771Sraj	mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE);
2172176771Sraj}
2173176771Sraj
2174176771Sraj/*
2175176771Sraj * mmu_booke_copy_page copies the specified (machine independent) page by
2176176771Sraj * mapping the page into virtual memory and using memcpy to copy the page,
2177176771Sraj * one machine dependent page at a time.
2178176771Sraj */
2179176771Srajstatic void
2180176771Srajmmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
2181176771Sraj{
2182176771Sraj	vm_offset_t sva, dva;
2183176771Sraj
2184176771Sraj	sva = copy_page_src_va;
2185176771Sraj	dva = copy_page_dst_va;
2186176771Sraj
2187187149Sraj	mtx_lock(&copy_page_mutex);
2188176771Sraj	mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
2189176771Sraj	mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
2190176771Sraj	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
2191176771Sraj	mmu_booke_kremove(mmu, dva);
2192176771Sraj	mmu_booke_kremove(mmu, sva);
2193176771Sraj	mtx_unlock(&copy_page_mutex);
2194176771Sraj}
2195176771Sraj
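/*
 * Copy xfersize bytes between two arrays of pages at the given byte
 * offsets, one page-bounded chunk at a time, through the copy page
 * KVA windows.
 */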
2196248280Skibstatic inline void
2197248280Skibmmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
2198248280Skib    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
2199248280Skib{
2200248280Skib	void *a_cp, *b_cp;
2201248280Skib	vm_offset_t a_pg_offset, b_pg_offset;
2202248280Skib	int cnt;
2203248280Skib
2204248280Skib	mtx_lock(&copy_page_mutex);
2205248280Skib	while (xfersize > 0) {
2206248280Skib		a_pg_offset = a_offset & PAGE_MASK;
2207248280Skib		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
2208248280Skib		mmu_booke_kenter(mmu, copy_page_src_va,
2209248280Skib		    VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
2210248280Skib		a_cp = (char *)copy_page_src_va + a_pg_offset;
2211248280Skib		b_pg_offset = b_offset & PAGE_MASK;
2212248280Skib		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
2213248280Skib		mmu_booke_kenter(mmu, copy_page_dst_va,
2214248280Skib		    VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
2215248280Skib		b_cp = (char *)copy_page_dst_va + b_pg_offset;
2216248280Skib		bcopy(a_cp, b_cp, cnt);
2217248280Skib		mmu_booke_kremove(mmu, copy_page_dst_va);
2218248280Skib		mmu_booke_kremove(mmu, copy_page_src_va);
2219248280Skib		a_offset += cnt;
2220248280Skib		b_offset += cnt;
2221248280Skib		xfersize -= cnt;
2222248280Skib	}
2223248280Skib	mtx_unlock(&copy_page_mutex);
2224248280Skib}
2225248280Skib
2226176771Sraj/*
2227176771Sraj * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
2228176771Sraj * into virtual memory and using bzero to clear its contents. This is intended
2229176771Sraj * to be called from the vm_pagezero process only and outside of Giant. No
2230176771Sraj * lock is required.
2231176771Sraj */
2232176771Srajstatic void
2233176771Srajmmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
2234176771Sraj{
2235176771Sraj	vm_offset_t va;
2236176771Sraj
2237176771Sraj	va = zero_page_idle_va;
2238176771Sraj	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2239176771Sraj	bzero((caddr_t)va, PAGE_SIZE);
2240176771Sraj	mmu_booke_kremove(mmu, va);
2241176771Sraj}
2242176771Sraj
2243176771Sraj/*
2244176771Sraj * Return whether or not the specified physical page was modified
2245176771Sraj * in any of the physical maps.
2246176771Sraj */
2247176771Srajstatic boolean_t
2248176771Srajmmu_booke_is_modified(mmu_t mmu, vm_page_t m)
2249176771Sraj{
2250176771Sraj	pte_t *pte;
2251176771Sraj	pv_entry_t pv;
2252208504Salc	boolean_t rv;
2253176771Sraj
2254224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2255208504Salc	    ("mmu_booke_is_modified: page %p is not managed", m));
2256208504Salc	rv = FALSE;
2257176771Sraj
2258208504Salc	/*
2259254138Sattilio	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
2260225418Skib	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
2261208504Salc	 * is clear, no PTEs can be modified.
2262208504Salc	 */
2263248084Sattilio	VM_OBJECT_ASSERT_WLOCKED(m->object);
2264254138Sattilio	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
2265208504Salc		return (rv);
2266242535Salc	rw_wlock(&pvh_global_lock);
2267176771Sraj	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2268176771Sraj		PMAP_LOCK(pv->pv_pmap);
2269208504Salc		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
2270208504Salc		    PTE_ISVALID(pte)) {
2271208504Salc			if (PTE_ISMODIFIED(pte))
2272208504Salc				rv = TRUE;
2273176771Sraj		}
2274176771Sraj		PMAP_UNLOCK(pv->pv_pmap);
2275208504Salc		if (rv)
2276208504Salc			break;
2277176771Sraj	}
2278242535Salc	rw_wunlock(&pvh_global_lock);
2279208504Salc	return (rv);
2280176771Sraj}
2281176771Sraj
2282176771Sraj/*
2283187151Sraj * Return whether or not the specified virtual address is eligible
2284176771Sraj * for prefault.
2285176771Sraj */
2286176771Srajstatic boolean_t
2287176771Srajmmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
2288176771Sraj{
2289176771Sraj
2290176771Sraj	return (FALSE);
2291176771Sraj}
2292176771Sraj
2293176771Sraj/*
2294207155Salc * Return whether or not the specified physical page was referenced
2295207155Salc * in any physical map.
2296207155Salc */
2297207155Salcstatic boolean_t
2298207155Salcmmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
2299207155Salc{
2300207155Salc	pte_t *pte;
2301207155Salc	pv_entry_t pv;
2302207155Salc	boolean_t rv;
2303207155Salc
2304224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2305208574Salc	    ("mmu_booke_is_referenced: page %p is not managed", m));
2306207155Salc	rv = FALSE;
2307242535Salc	rw_wlock(&pvh_global_lock);
2308207155Salc	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2309207155Salc		PMAP_LOCK(pv->pv_pmap);
2310207155Salc		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
2311208574Salc		    PTE_ISVALID(pte)) {
2312208574Salc			if (PTE_ISREFERENCED(pte))
2313208574Salc				rv = TRUE;
2314208574Salc		}
2315207155Salc		PMAP_UNLOCK(pv->pv_pmap);
2316207155Salc		if (rv)
2317207155Salc			break;
2318207155Salc	}
2319242535Salc	rw_wunlock(&pvh_global_lock);
2320207155Salc	return (rv);
2321207155Salc}
2322207155Salc
2323207155Salc/*
2324176771Sraj * Clear the modify bits on the specified physical page.
2325176771Sraj */
2326176771Srajstatic void
2327176771Srajmmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
2328176771Sraj{
2329176771Sraj	pte_t *pte;
2330176771Sraj	pv_entry_t pv;
2331176771Sraj
2332224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2333208504Salc	    ("mmu_booke_clear_modify: page %p is not managed", m));
2334248084Sattilio	VM_OBJECT_ASSERT_WLOCKED(m->object);
2335254138Sattilio	KASSERT(!vm_page_xbusied(m),
2336254138Sattilio	    ("mmu_booke_clear_modify: page %p is exclusive busied", m));
2337208504Salc
2338208504Salc	/*
2339225418Skib	 * If the page is not PGA_WRITEABLE, then no PTEs can be modified.
2340208504Salc	 * If the object containing the page is locked and the page is not
2341254138Sattilio	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
2342208504Salc	 */
2343225418Skib	if ((m->aflags & PGA_WRITEABLE) == 0)
2344176771Sraj		return;
2345242535Salc	rw_wlock(&pvh_global_lock);
2346176771Sraj	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2347176771Sraj		PMAP_LOCK(pv->pv_pmap);
2348208504Salc		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
2349208504Salc		    PTE_ISVALID(pte)) {
2350187149Sraj			mtx_lock_spin(&tlbivax_mutex);
2351192532Sraj			tlb_miss_lock();
2352187149Sraj
2353176771Sraj			if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
2354187149Sraj				tlb0_flush_entry(pv->pv_va);
2355176771Sraj				pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
2356176771Sraj				    PTE_REFERENCED);
2357176771Sraj			}
2358187149Sraj
2359192532Sraj			tlb_miss_unlock();
2360187149Sraj			mtx_unlock_spin(&tlbivax_mutex);
2361176771Sraj		}
2362176771Sraj		PMAP_UNLOCK(pv->pv_pmap);
2363176771Sraj	}
2364242535Salc	rw_wunlock(&pvh_global_lock);
2365176771Sraj}
2366176771Sraj
2367176771Sraj/*
2368176771Sraj * Return a count of reference bits for a page, clearing those bits.
2369176771Sraj * It is not necessary for every reference bit to be cleared, but it
2370176771Sraj * is necessary that 0 only be returned when there are truly no
2371176771Sraj * reference bits set.
2372176771Sraj *
2373176771Sraj * XXX: The exact number of bits to check and clear is a matter that
2374176771Sraj * should be tested and standardized at some point in the future for
2375176771Sraj * optimal aging of shared pages.
2376176771Sraj */
2377176771Srajstatic int
2378176771Srajmmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
2379176771Sraj{
2380176771Sraj	pte_t *pte;
2381176771Sraj	pv_entry_t pv;
2382176771Sraj	int count;
2383176771Sraj
2384224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2385208990Salc	    ("mmu_booke_ts_referenced: page %p is not managed", m));
2386176771Sraj	count = 0;
2387242535Salc	rw_wlock(&pvh_global_lock);
2388176771Sraj	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2389176771Sraj		PMAP_LOCK(pv->pv_pmap);
2390208990Salc		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
2391208990Salc		    PTE_ISVALID(pte)) {
2392176771Sraj			if (PTE_ISREFERENCED(pte)) {
2393187149Sraj				mtx_lock_spin(&tlbivax_mutex);
2394192532Sraj				tlb_miss_lock();
2395187149Sraj
2396187149Sraj				tlb0_flush_entry(pv->pv_va);
2397176771Sraj				pte->flags &= ~PTE_REFERENCED;
2398176771Sraj
2399192532Sraj				tlb_miss_unlock();
2400187149Sraj				mtx_unlock_spin(&tlbivax_mutex);
2401187149Sraj
2402176771Sraj				if (++count > 4) {
2403176771Sraj					PMAP_UNLOCK(pv->pv_pmap);
2404176771Sraj					break;
2405176771Sraj				}
2406176771Sraj			}
2407176771Sraj		}
2408176771Sraj		PMAP_UNLOCK(pv->pv_pmap);
2409176771Sraj	}
2410242535Salc	rw_wunlock(&pvh_global_lock);
2411176771Sraj	return (count);
2412176771Sraj}
2413176771Sraj
2414176771Sraj/*
2415176771Sraj * Change wiring attribute for a map/virtual-address pair.
2416176771Sraj */
2417176771Srajstatic void
2418176771Srajmmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired)
2419176771Sraj{
2420201758Smbr	pte_t *pte;
2421176771Sraj
2422176771Sraj	PMAP_LOCK(pmap);
2423176771Sraj	if ((pte = pte_find(mmu, pmap, va)) != NULL) {
2424176771Sraj		if (wired) {
2425176771Sraj			if (!PTE_ISWIRED(pte)) {
2426176771Sraj				pte->flags |= PTE_WIRED;
2427176771Sraj				pmap->pm_stats.wired_count++;
2428176771Sraj			}
2429176771Sraj		} else {
2430176771Sraj			if (PTE_ISWIRED(pte)) {
2431176771Sraj				pte->flags &= ~PTE_WIRED;
2432176771Sraj				pmap->pm_stats.wired_count--;
2433176771Sraj			}
2434176771Sraj		}
2435176771Sraj	}
2436176771Sraj	PMAP_UNLOCK(pmap);
2437176771Sraj}
2438176771Sraj
2439176771Sraj/*
2440176771Sraj * Return true if the pmap's pv is one of the first 16 pvs linked to from this
2441176771Sraj * page.  This count may be changed upwards or downwards in the future; it is
2442176771Sraj * only necessary that true be returned for a small subset of pmaps for proper
2443176771Sraj * page aging.
2444176771Sraj */
2445176771Srajstatic boolean_t
2446176771Srajmmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
2447176771Sraj{
2448176771Sraj	pv_entry_t pv;
2449176771Sraj	int loops;
2450208990Salc	boolean_t rv;
2451176771Sraj
2452224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2453208990Salc	    ("mmu_booke_page_exists_quick: page %p is not managed", m));
2454176771Sraj	loops = 0;
2455208990Salc	rv = FALSE;
2456242535Salc	rw_wlock(&pvh_global_lock);
2457176771Sraj	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2458208990Salc		if (pv->pv_pmap == pmap) {
2459208990Salc			rv = TRUE;
2460208990Salc			break;
2461208990Salc		}
2462176771Sraj		if (++loops >= 16)
2463176771Sraj			break;
2464176771Sraj	}
2465242535Salc	rw_wunlock(&pvh_global_lock);
2466208990Salc	return (rv);
2467176771Sraj}
2468176771Sraj
2469176771Sraj/*
2470176771Sraj * Return the number of managed mappings to the given physical page that are
2471176771Sraj * wired.
2472176771Sraj */
2473176771Srajstatic int
2474176771Srajmmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
2475176771Sraj{
2476176771Sraj	pv_entry_t pv;
2477176771Sraj	pte_t *pte;
2478176771Sraj	int count = 0;
2479176771Sraj
2480224746Skib	if ((m->oflags & VPO_UNMANAGED) != 0)
2481176771Sraj		return (count);
2482242535Salc	rw_wlock(&pvh_global_lock);
2483176771Sraj	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2484176771Sraj		PMAP_LOCK(pv->pv_pmap);
2485176771Sraj		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
2486176771Sraj			if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
2487176771Sraj				count++;
2488176771Sraj		PMAP_UNLOCK(pv->pv_pmap);
2489176771Sraj	}
2490242535Salc	rw_wunlock(&pvh_global_lock);
2491176771Sraj	return (count);
2492176771Sraj}
2493176771Sraj
2494176771Srajstatic int
2495235936Srajmmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
2496176771Sraj{
2497176771Sraj	int i;
2498176771Sraj	vm_offset_t va;
2499176771Sraj
2500176771Sraj	/*
2501176771Sraj	 * This currently does not work for entries that
2502176771Sraj	 * overlap TLB1 entries.
2503176771Sraj	 */
2504176771Sraj	for (i = 0; i < tlb1_idx; i++) {
2505176771Sraj		if (tlb1_iomapped(i, pa, size, &va) == 0)
2506176771Sraj			return (0);
2507176771Sraj	}
2508176771Sraj
2509176771Sraj	return (EFAULT);
2510176771Sraj}
2511176771Sraj
2512190701Smarcelvm_offset_t
2513190701Smarcelmmu_booke_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
2514190701Smarcel    vm_size_t *sz)
2515190701Smarcel{
2516190701Smarcel	vm_paddr_t pa, ppa;
2517190701Smarcel	vm_offset_t va;
2518190701Smarcel	vm_size_t gran;
2519190701Smarcel
2520190701Smarcel	/* Raw physical memory dumps don't have a virtual address. */
2521190701Smarcel	if (md->md_vaddr == ~0UL) {
2522190701Smarcel		/* We always map a 256MB page at 256M. */
2523190701Smarcel		gran = 256 * 1024 * 1024;
2524190701Smarcel		pa = md->md_paddr + ofs;
2525190701Smarcel		ppa = pa & ~(gran - 1);
2526190701Smarcel		ofs = pa - ppa;
2527190701Smarcel		va = gran;
2528190701Smarcel		tlb1_set_entry(va, ppa, gran, _TLB_ENTRY_IO);
2529190701Smarcel		if (*sz > (gran - ofs))
2530190701Smarcel			*sz = gran - ofs;
2531190701Smarcel		return (va + ofs);
2532190701Smarcel	}
2533190701Smarcel
2534190701Smarcel	/* Minidumps are based on virtual memory addresses. */
2535190701Smarcel	va = md->md_vaddr + ofs;
2536190701Smarcel	if (va >= kernstart + kernsize) {
2537190701Smarcel		gran = PAGE_SIZE - (va & PAGE_MASK);
2538190701Smarcel		if (*sz > gran)
2539190701Smarcel			*sz = gran;
2540190701Smarcel	}
2541190701Smarcel	return (va);
2542190701Smarcel}
2543190701Smarcel
2544190701Smarcelvoid
2545190701Smarcelmmu_booke_dumpsys_unmap(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
2546190701Smarcel    vm_offset_t va)
2547190701Smarcel{
2548190701Smarcel
2549190701Smarcel	/* Raw physical memory dumps don't have a virtual address. */
2550190701Smarcel	if (md->md_vaddr == ~0UL) {
2551190701Smarcel		tlb1_idx--;
2552190701Smarcel		tlb1[tlb1_idx].mas1 = 0;
2553190701Smarcel		tlb1[tlb1_idx].mas2 = 0;
2554190701Smarcel		tlb1[tlb1_idx].mas3 = 0;
2555190701Smarcel		tlb1_write_entry(tlb1_idx);
2556190701Smarcel		return;
2557190701Smarcel	}
2558190701Smarcel
2559190701Smarcel	/* Minidumps are based on virtual memory addresses. */
2560190701Smarcel	/* Nothing to do... */
2561190701Smarcel}
2562190701Smarcel
2563190701Smarcelstruct pmap_md *
2564190701Smarcelmmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev)
2565190701Smarcel{
2566190701Smarcel	static struct pmap_md md;
2567190701Smarcel	pte_t *pte;
2568190701Smarcel	vm_offset_t va;
2569190701Smarcel
2570190701Smarcel	if (dumpsys_minidump) {
2571190701Smarcel		md.md_paddr = ~0UL;	/* Minidumps use virtual addresses. */
2572190701Smarcel		if (prev == NULL) {
2573190701Smarcel			/* 1st: kernel .data and .bss. */
2574190701Smarcel			md.md_index = 1;
2575190701Smarcel			md.md_vaddr = trunc_page((uintptr_t)_etext);
2576190701Smarcel			md.md_size = round_page((uintptr_t)_end) - md.md_vaddr;
2577190701Smarcel			return (&md);
2578190701Smarcel		}
2579190701Smarcel		switch (prev->md_index) {
2580190701Smarcel		case 1:
2581190701Smarcel			/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
2582190701Smarcel			md.md_index = 2;
2583190701Smarcel			md.md_vaddr = data_start;
2584190701Smarcel			md.md_size = data_end - data_start;
2585190701Smarcel			break;
2586190701Smarcel		case 2:
2587190701Smarcel			/* 3rd: kernel VM. */
2588190701Smarcel			va = prev->md_vaddr + prev->md_size;
2589190701Smarcel			/* Find start of next chunk (from va). */
2590190701Smarcel			while (va < virtual_end) {
2591190701Smarcel				/* Don't dump the buffer cache. */
2592190701Smarcel				if (va >= kmi.buffer_sva &&
2593190701Smarcel				    va < kmi.buffer_eva) {
2594190701Smarcel					va = kmi.buffer_eva;
2595190701Smarcel					continue;
2596190701Smarcel				}
2597190701Smarcel				pte = pte_find(mmu, kernel_pmap, va);
2598190701Smarcel				if (pte != NULL && PTE_ISVALID(pte))
2599190701Smarcel					break;
2600190701Smarcel				va += PAGE_SIZE;
2601190701Smarcel			}
2602190701Smarcel			if (va < virtual_end) {
2603190701Smarcel				md.md_vaddr = va;
2604190701Smarcel				va += PAGE_SIZE;
2605190701Smarcel				/* Find last page in chunk. */
2606190701Smarcel				while (va < virtual_end) {
2607190701Smarcel					/* Don't run into the buffer cache. */
2608190701Smarcel					if (va == kmi.buffer_sva)
2609190701Smarcel						break;
2610190701Smarcel					pte = pte_find(mmu, kernel_pmap, va);
2611190701Smarcel					if (pte == NULL || !PTE_ISVALID(pte))
2612190701Smarcel						break;
2613190701Smarcel					va += PAGE_SIZE;
2614190701Smarcel				}
2615190701Smarcel				md.md_size = va - md.md_vaddr;
2616190701Smarcel				break;
2617190701Smarcel			}
2618190701Smarcel			md.md_index = 3;
2619190701Smarcel			/* FALLTHROUGH */
2620190701Smarcel		default:
2621190701Smarcel			return (NULL);
2622190701Smarcel		}
2623190701Smarcel	} else { /* full physical dumps */
2624209908Sraj		mem_regions(&physmem_regions, &physmem_regions_sz,
2625209908Sraj		    &availmem_regions, &availmem_regions_sz);
2626209908Sraj
2627190701Smarcel		if (prev == NULL) {
2628190701Smarcel			/* first physical chunk. */
2629209908Sraj			md.md_paddr = physmem_regions[0].mr_start;
2630209908Sraj			md.md_size = physmem_regions[0].mr_size;
2631190701Smarcel			md.md_vaddr = ~0UL;
2632190701Smarcel			md.md_index = 1;
2633209908Sraj		} else if (md.md_index < physmem_regions_sz) {
2634209908Sraj			md.md_paddr = physmem_regions[md.md_index].mr_start;
2635209908Sraj			md.md_size = physmem_regions[md.md_index].mr_size;
2636190701Smarcel			md.md_vaddr = ~0UL;
2637190701Smarcel			md.md_index++;
2638190701Smarcel		} else {
2639190701Smarcel			/* There's no next physical chunk. */
2640190701Smarcel			return (NULL);
2641190701Smarcel		}
2642190701Smarcel	}
2643190701Smarcel
2644190701Smarcel	return (&md);
2645190701Smarcel}
2646190701Smarcel
2647176771Sraj/*
2648176771Sraj * Map a set of physical memory pages into the kernel virtual address space.
2649176771Sraj * Return a pointer to where it is mapped. This routine is intended to be used
2650176771Sraj * for mapping device memory, NOT real memory.
2651176771Sraj */
2652176771Srajstatic void *
2653235936Srajmmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
2654176771Sraj{
2655265996Sian
2656265996Sian	return (mmu_booke_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
2657265996Sian}
2658265996Sian
2659265996Sianstatic void *
2660265996Sianmmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
2661265996Sian{
2662184244Smarcel	void *res;
2663176771Sraj	uintptr_t va;
2664184244Smarcel	vm_size_t sz;
2665265996Sian	int i;
2666176771Sraj
2667242526Smarcel	/*
2668265996Sian	 * Check if this is premapped in TLB1. Note: this should probably also
2669265996Sian	 * check whether a sequence of TLB1 entries exists that matches the
2670265996Sian	 * requirement; for now only the easy case is checked.
2671242526Smarcel	 */
2672265996Sian	if (ma == VM_MEMATTR_DEFAULT) {
2673265996Sian		for (i = 0; i < tlb1_idx; i++) {
2674265996Sian			if (!(tlb1[i].mas1 & MAS1_VALID))
2675265996Sian				continue;
2676265996Sian			if (pa >= tlb1[i].phys &&
2677265996Sian			    (pa + size) <= (tlb1[i].phys + tlb1[i].size))
2678265996Sian				return (void *)(tlb1[i].virt +
2679265996Sian				    (pa - tlb1[i].phys));
2680265996Sian		}
2681242526Smarcel	}
2682242526Smarcel
2683265996Sian	size = roundup(size, PAGE_SIZE);
2684265996Sian
2685265998Sian	/*
2686265998Sian	 * We leave a hole for device direct mapping between the maximum user
2687265998Sian	 * address (0x80000000) and the minimum KVA address (0xc0000000). If
2688265998Sian	 * devices are in there, just map them 1:1. If not, map them to the
2689265998Sian	 * device mapping area above VM_MAX_KERNEL_ADDRESS. These mapped
2690265998Sian	 * addresses should be pulled from an allocator, but since we do not
2691265998Sian	 * ever free TLB1 entries, it is safe just to increment a counter.
2692265998Sian	 * Note that there isn't a lot of address space here (128 MB) and it
2693265998Sian	 * is not at all difficult to imagine running out, since that is a 4:1
2694265998Sian	 * compression from the 0xc0000000 - 0xf0000000 address space that gets
2695265998Sian	 * mapped there.
2696265998Sian	 */
2697265996Sian	if (pa >= (VM_MAXUSER_ADDRESS + PAGE_SIZE) &&
2698265996Sian	    (pa + size - 1) < VM_MIN_KERNEL_ADDRESS)
2699265996Sian		va = pa;
2700265996Sian	else
2701265998Sian		va = atomic_fetchadd_int(&tlb1_map_base, size);
2702184244Smarcel	res = (void *)va;
2703184244Smarcel
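	/*
	 * TLB1 entries come in power-of-4 sizes (4K, 16K, 64K, ...), so
	 * "ilog2(size) & ~1" rounds the log2 down to an even value and each
	 * iteration wires the largest such chunk that still fits.
	 */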
2704184244Smarcel	do {
2705184244Smarcel		sz = 1 << (ilog2(size) & ~1);
2706184244Smarcel		if (bootverbose)
2707184244Smarcel			printf("Wiring VA=%x to PA=%x (size=%x), "
2708184244Smarcel			    "using TLB1[%d]\n", va, pa, sz, tlb1_idx);
2709265996Sian		tlb1_set_entry(va, pa, sz, tlb_calc_wimg(pa, ma));
2710184244Smarcel		size -= sz;
2711184244Smarcel		pa += sz;
2712184244Smarcel		va += sz;
2713184244Smarcel	} while (size > 0);
2714184244Smarcel
2715184244Smarcel	return (res);
2716176771Sraj}
2717176771Sraj
2718176771Sraj/*
2719176771Sraj * 'Unmap' a range mapped by mmu_booke_mapdev().
2720176771Sraj */
2721176771Srajstatic void
2722176771Srajmmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
2723176771Sraj{
2724265996Sian#ifdef SUPPORTS_SHRINKING_TLB1
2725176771Sraj	vm_offset_t base, offset;
2726176771Sraj
2727176771Sraj	/*
2728176771Sraj	 * Unmap only if this is inside kernel virtual space.
2729176771Sraj	 */
2730176771Sraj	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
2731176771Sraj		base = trunc_page(va);
2732176771Sraj		offset = va & PAGE_MASK;
2733176771Sraj		size = roundup(offset + size, PAGE_SIZE);
2734254025Sjeff		kva_free(base, size);
2735176771Sraj	}
2736265996Sian#endif
2737176771Sraj}
2738176771Sraj
2739176771Sraj/*
2740187151Sraj * mmu_booke_object_init_pt preloads the ptes for a given object into the
2741187151Sraj * specified pmap. This eliminates the blast of soft faults on process startup
2742187151Sraj * and immediately after an mmap.
2743176771Sraj */
2744176771Srajstatic void
2745176771Srajmmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
2746176771Sraj    vm_object_t object, vm_pindex_t pindex, vm_size_t size)
2747176771Sraj{
2748187151Sraj
2749248084Sattilio	VM_OBJECT_ASSERT_WLOCKED(object);
2750195840Sjhb	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
2751176771Sraj	    ("mmu_booke_object_init_pt: non-device object"));
2752176771Sraj}
2753176771Sraj
2754176771Sraj/*
2755176771Sraj * Perform the pmap work for mincore.
2756176771Sraj */
2757176771Srajstatic int
2758208504Salcmmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
2759208504Salc    vm_paddr_t *locked_pa)
2760176771Sraj{
2761176771Sraj
2762266000Sian	/* XXX: this should be implemented at some point */
2763176771Sraj	return (0);
2764176771Sraj}
2765176771Sraj
2766176771Sraj/**************************************************************************/
2767176771Sraj/* TID handling */
2768176771Sraj/**************************************************************************/
2769176771Sraj
2770176771Sraj/*
2771176771Sraj * Allocate a TID. If necessary, steal one from someone else.
2772176771Sraj * When a TID is stolen, its TLB0 entries are flushed before it is reused.
2773176771Sraj */
2774176771Srajstatic tlbtid_t
2775176771Srajtid_alloc(pmap_t pmap)
2776176771Sraj{
2777176771Sraj	tlbtid_t tid;
2778187149Sraj	int thiscpu;
2779176771Sraj
2780187149Sraj	KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));
2781176771Sraj
2782187149Sraj	CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap);
2783176771Sraj
2784187149Sraj	thiscpu = PCPU_GET(cpuid);
2785176771Sraj
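	/*
	 * TIDs are handed out round-robin per CPU, wrapping from TID_MAX
	 * back to TID_MIN; TID 0 stays reserved for the kernel pmap (see
	 * the tidbusy[][0] setup in mmu_booke_bootstrap()).
	 */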
2786187149Sraj	tid = PCPU_GET(tid_next);
2787187149Sraj	if (tid > TID_MAX)
2788187149Sraj		tid = TID_MIN;
2789187149Sraj	PCPU_SET(tid_next, tid + 1);
2790176771Sraj
2791187149Sraj	/* If we are stealing TID then clear the relevant pmap's field */
2792187149Sraj	if (tidbusy[thiscpu][tid] != NULL) {
2793176771Sraj
2794187149Sraj		CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid);
2795187149Sraj
2796187149Sraj		tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;
2797176771Sraj
2798187149Sraj		/* Flush all entries from TLB0 matching this TID. */
2799187149Sraj		tid_flush(tid);
2800176771Sraj	}
2801176771Sraj
2802187149Sraj	tidbusy[thiscpu][tid] = pmap;
2803187149Sraj	pmap->pm_tid[thiscpu] = tid;
2804187149Sraj	__asm __volatile("msync; isync");
2805176771Sraj
2806187149Sraj	CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid,
2807187149Sraj	    PCPU_GET(tid_next));
2808176771Sraj
2809176771Sraj	return (tid);
2810176771Sraj}
2811176771Sraj
2812176771Sraj/**************************************************************************/
2813176771Sraj/* TLB0 handling */
2814176771Sraj/**************************************************************************/
2815176771Sraj
2816176771Srajstatic void
2817187149Srajtlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
2818187149Sraj    uint32_t mas7)
2819176771Sraj{
2820176771Sraj	int as;
2821176771Sraj	char desc[3];
2822176771Sraj	tlbtid_t tid;
2823176771Sraj	vm_size_t size;
2824176771Sraj	unsigned int tsize;
2825176771Sraj
2826176771Sraj	desc[2] = '\0';
2827176771Sraj	if (mas1 & MAS1_VALID)
2828176771Sraj		desc[0] = 'V';
2829176771Sraj	else
2830176771Sraj		desc[0] = ' ';
2831176771Sraj
2832176771Sraj	if (mas1 & MAS1_IPROT)
2833176771Sraj		desc[1] = 'P';
2834176771Sraj	else
2835176771Sraj		desc[1] = ' ';
2836176771Sraj
2837187149Sraj	as = (mas1 & MAS1_TS_MASK) ? 1 : 0;
2838176771Sraj	tid = MAS1_GETTID(mas1);
2839176771Sraj
2840176771Sraj	tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
2841176771Sraj	size = 0;
2842176771Sraj	if (tsize)
2843176771Sraj		size = tsize2size(tsize);
2844176771Sraj
2845176771Sraj	debugf("%3d: (%s) [AS=%d] "
2846176771Sraj	    "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x "
2847176771Sraj	    "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n",
2848176771Sraj	    i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7);
2849176771Sraj}
2850176771Sraj
2851176771Sraj/* Convert TLB0 va and way number to tlb0[] table index. */
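/*
 * The tlb0[] shadow table is laid out way-major: each way occupies
 * TLB0_ENTRIES_PER_WAY consecutive slots and the set within a way is
 * selected by low-order EPN bits of the VA, mirroring the hardware's
 * set-associative lookup.
 */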
2852176771Srajstatic inline unsigned int
2853176771Srajtlb0_tableidx(vm_offset_t va, unsigned int way)
2854176771Sraj{
2855176771Sraj	unsigned int idx;
2856176771Sraj
2857176771Sraj	idx = (way * TLB0_ENTRIES_PER_WAY);
2858176771Sraj	idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
2859176771Sraj	return (idx);
2860176771Sraj}
2861176771Sraj
2862176771Sraj/*
2863187149Sraj * Invalidate TLB0 entry.
2864176771Sraj */
2865187149Srajstatic inline void
2866187149Srajtlb0_flush_entry(vm_offset_t va)
2867176771Sraj{
2868176771Sraj
2869187149Sraj	CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);
2870176771Sraj
2871187149Sraj	mtx_assert(&tlbivax_mutex, MA_OWNED);
2872176771Sraj
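	/*
	 * tlbivax broadcasts the invalidation to all cores; the
	 * tlbsync/msync pair waits for it to complete everywhere, which
	 * is why this sequence is serialized by tlbivax_mutex.
	 */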
2873187149Sraj	__asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
2874187149Sraj	__asm __volatile("isync; msync");
2875187149Sraj	__asm __volatile("tlbsync; msync");
2876176771Sraj
2877187149Sraj	CTR1(KTR_PMAP, "%s: e", __func__);
2878176771Sraj}
2879176771Sraj
2880176771Sraj/* Print out contents of the MAS registers for each TLB0 entry */
2881187149Srajvoid
2882176771Srajtlb0_print_tlbentries(void)
2883176771Sraj{
2884187149Sraj	uint32_t mas0, mas1, mas2, mas3, mas7;
2885176771Sraj	int entryidx, way, idx;
2886176771Sraj
2887176771Sraj	debugf("TLB0 entries:\n");
2888187149Sraj	for (way = 0; way < TLB0_WAYS; way++)
2889176771Sraj		for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {
2890176771Sraj
2891176771Sraj			mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
2892176771Sraj			mtspr(SPR_MAS0, mas0);
2893187149Sraj			__asm __volatile("isync");
2894176771Sraj
2895176771Sraj			mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
2896176771Sraj			mtspr(SPR_MAS2, mas2);
2897176771Sraj
2898187149Sraj			__asm __volatile("isync; tlbre");
2899176771Sraj
2900176771Sraj			mas1 = mfspr(SPR_MAS1);
2901176771Sraj			mas2 = mfspr(SPR_MAS2);
2902176771Sraj			mas3 = mfspr(SPR_MAS3);
2903176771Sraj			mas7 = mfspr(SPR_MAS7);
2904176771Sraj
2905176771Sraj			idx = tlb0_tableidx(mas2, way);
2906176771Sraj			tlb_print_entry(idx, mas1, mas2, mas3, mas7);
2907176771Sraj		}
2908176771Sraj}
2909176771Sraj
2910176771Sraj/**************************************************************************/
2911176771Sraj/* TLB1 handling */
2912176771Sraj/**************************************************************************/
2913187149Sraj
2914176771Sraj/*
2915187149Sraj * TLB1 mapping notes:
2916187149Sraj *
2917265996Sian * TLB1[0]	Kernel text and data.
2918265996Sian * TLB1[1-15]	Additional kernel text and data mappings (if required), PCI
2919187149Sraj *		windows, and other device mappings.
2920187149Sraj */
2921187149Sraj
2922187149Sraj/*
2923176771Sraj * Write the given entry to the TLB1 hardware.
2924176771Sraj * A 32-bit PA is used; the 4 high-order RPN bits (MAS7) are cleared.
2925176771Sraj */
2926176771Srajstatic void
2927176771Srajtlb1_write_entry(unsigned int idx)
2928176771Sraj{
2929187151Sraj	uint32_t mas0, mas7;
2930176771Sraj
2931176771Sraj	//debugf("tlb1_write_entry: s\n");
2932176771Sraj
2933176771Sraj	/* Clear high order RPN bits */
2934176771Sraj	mas7 = 0;
2935176771Sraj
2936176771Sraj	/* Select entry */
2937176771Sraj	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx);
2938176771Sraj	//debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0);
2939176771Sraj
2940176771Sraj	mtspr(SPR_MAS0, mas0);
2941187151Sraj	__asm __volatile("isync");
2942176771Sraj	mtspr(SPR_MAS1, tlb1[idx].mas1);
2943187151Sraj	__asm __volatile("isync");
2944176771Sraj	mtspr(SPR_MAS2, tlb1[idx].mas2);
2945187151Sraj	__asm __volatile("isync");
2946176771Sraj	mtspr(SPR_MAS3, tlb1[idx].mas3);
2947187151Sraj	__asm __volatile("isync");
2948176771Sraj	mtspr(SPR_MAS7, mas7);
2949187151Sraj	__asm __volatile("isync; tlbwe; isync; msync");
2950176771Sraj
2951201758Smbr	//debugf("tlb1_write_entry: e\n");
2952176771Sraj}
2953176771Sraj
2954176771Sraj/*
2955176771Sraj * Return the largest uint value log such that 2^log <= num.
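 * E.g. ilog2(0x60000) == 18, since 2^18 <= 0x60000 < 2^19 (cntlzw
 * counts leading zeros, so 31 - lz is the index of the highest set
 * bit).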
2956176771Sraj */
2957176771Srajstatic unsigned int
2958176771Srajilog2(unsigned int num)
2959176771Sraj{
2960176771Sraj	int lz;
2961176771Sraj
2962176771Sraj	__asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
2963176771Sraj	return (31 - lz);
2964176771Sraj}
2965176771Sraj
2966176771Sraj/*
2967176771Sraj * Convert TLB TSIZE value to mapped region size.
2968176771Sraj */
2969176771Srajstatic vm_size_t
2970176771Srajtsize2size(unsigned int tsize)
2971176771Sraj{
2972176771Sraj
2973176771Sraj	/*
2974176771Sraj	 * size = 4^tsize KB
2975176771Sraj	 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
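	 * e.g. tsize == 7 gives 4^7 KB == 2^24 bytes == 16MB.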
2976176771Sraj	 */
2977176771Sraj
2978176771Sraj	return ((1 << (2 * tsize)) * 1024);
2979176771Sraj}
2980176771Sraj
2981176771Sraj/*
2982176771Sraj * Convert region size (must be a power of 4) to TLB TSIZE value.
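 * This is the inverse of tsize2size(): e.g. a 16MB (2^24 byte) region
 * yields 24 / 2 - 5 == 7.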
2983176771Sraj */
2984176771Srajstatic unsigned int
2985176771Srajsize2tsize(vm_size_t size)
2986176771Sraj{
2987176771Sraj
2988176771Sraj	return (ilog2(size) / 2 - 5);
2989176771Sraj}
2990176771Sraj
2991176771Sraj/*
2992176771Sraj * Register a permanent kernel mapping in TLB1.
2993176771Sraj *
2994187149Sraj * Entries are created starting from index 0 (the current free entry
2995187149Sraj * index is kept in tlb1_idx) and are not supposed to be invalidated.
2996176771Sraj */
2997187149Srajstatic int
2998187149Srajtlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size,
2999187149Sraj    uint32_t flags)
3000176771Sraj{
3001187149Sraj	uint32_t ts, tid;
3002265996Sian	int tsize, index;
3003265996Sian
3004265996Sian	index = atomic_fetchadd_int(&tlb1_idx, 1);
3005265996Sian	if (index >= TLB1_ENTRIES) {
3006187149Sraj		printf("tlb1_set_entry: TLB1 full!\n");
3007187149Sraj		return (-1);
3008187149Sraj	}
3009176771Sraj
3010176771Sraj	/* Convert size to TSIZE */
3011176771Sraj	tsize = size2tsize(size);
3012176771Sraj
3013187149Sraj	tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK;
3014187149Sraj	/* XXX TS is hard coded to 0 for now as we only use a single address space */
3015187149Sraj	ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;
3016176771Sraj
3017265996Sian	/*
3018265996Sian	 * Atomicity is preserved by the atomic increment above since nothing
3019265996Sian	 * is ever removed from tlb1.
3020265996Sian	 */
3021176771Sraj
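	/*
	 * MAS1 carries the valid bit, IPROT, TID and TSIZE; MAS2 holds
	 * the EPN and the WIMGE attributes; MAS3 holds the RPN and the
	 * permission bits.
	 */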
3022265996Sian	tlb1[index].phys = pa;
3023265996Sian	tlb1[index].virt = va;
3024265996Sian	tlb1[index].size = size;
3025265996Sian	tlb1[index].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
3026265996Sian	tlb1[index].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
3027265996Sian	tlb1[index].mas2 = (va & MAS2_EPN_MASK) | flags;
3028176771Sraj
3029187149Sraj	/* Set supervisor RWX permission bits */
3030265996Sian	tlb1[index].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
3031176771Sraj
3032265996Sian	tlb1_write_entry(index);
3033176771Sraj
3034187149Sraj	/*
3035187149Sraj	 * XXX In general, TLB1 updates should be propagated between CPUs,
3036187149Sraj	 * since the current design assumes the same TLB1 set-up on all
3037187149Sraj	 * cores.
3038187149Sraj	 */
3039176771Sraj	return (0);
3040176771Sraj}
3041176771Sraj
3042176771Sraj/*
3043187151Sraj * Map in a contiguous RAM region into the TLB1 using a maximum of
3044176771Sraj * KERNEL_REGION_MAX_TLB_ENTRIES entries.
3045176771Sraj *
3046187151Sraj * If necessary, round up the last entry size and return the total size
3047176771Sraj * used by all allocated entries.
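 *
 * E.g. a 112MB region is covered as 64MB + 16MB + 16MB + 16MB.  When
 * the entry limit would be exceeded, equal-sized trailing entries are
 * merged into a single entry of the next (4x) larger size,
 * over-mapping the region if necessary.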
3048176771Sraj */
3049176771Srajvm_size_t
3050224611Smarceltlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
3051176771Sraj{
3052224611Smarcel	vm_size_t pgs[KERNEL_REGION_MAX_TLB_ENTRIES];
3053224611Smarcel	vm_size_t mapped, pgsz, base, mask;
3054224611Smarcel	int idx, nents;
3055176771Sraj
3056224611Smarcel	/* Round up to the next 1M */
3057224611Smarcel	size = (size + (1 << 20) - 1) & ~((1 << 20) - 1);
3058176771Sraj
3059224611Smarcel	mapped = 0;
3060224611Smarcel	idx = 0;
3061224611Smarcel	base = va;
3062224611Smarcel	pgsz = 64*1024*1024;
3063224611Smarcel	while (mapped < size) {
3064224611Smarcel		while (mapped < size && idx < KERNEL_REGION_MAX_TLB_ENTRIES) {
3065224611Smarcel			while (pgsz > (size - mapped))
3066224611Smarcel				pgsz >>= 2;
3067224611Smarcel			pgs[idx++] = pgsz;
3068224611Smarcel			mapped += pgsz;
3069224611Smarcel		}
3070176771Sraj
3071224611Smarcel		/* We under-map. Correct for this. */
3072224611Smarcel		if (mapped < size) {
3073224611Smarcel			while (pgs[idx - 1] == pgsz) {
3074224611Smarcel				idx--;
3075224611Smarcel				mapped -= pgsz;
3076224611Smarcel			}
3077224611Smarcel			/* XXX We may increase beyond our starting point. */
3078224611Smarcel			pgsz <<= 2;
3079224611Smarcel			pgs[idx++] = pgsz;
3080224611Smarcel			mapped += pgsz;
3081176771Sraj		}
3082224611Smarcel	}
3083176771Sraj
3084224611Smarcel	nents = idx;
3085224611Smarcel	mask = pgs[0] - 1;
3086224611Smarcel	/* Align address to the boundary */
3087224611Smarcel	if (va & mask) {
3088224611Smarcel		va = (va + mask) & ~mask;
3089224611Smarcel		pa = (pa + mask) & ~mask;
3090176771Sraj	}
3091176771Sraj
3092224611Smarcel	for (idx = 0; idx < nents; idx++) {
3093224611Smarcel		pgsz = pgs[idx];
3094224611Smarcel		debugf("%u: %x -> %x, size=%x\n", idx, pa, va, pgsz);
3095224611Smarcel		tlb1_set_entry(va, pa, pgsz, _TLB_ENTRY_MEM);
3096224611Smarcel		pa += pgsz;
3097224611Smarcel		va += pgsz;
3098176771Sraj	}
3099176771Sraj
3100224611Smarcel	mapped = (va - base);
3101265998Sian	printf("mapped size 0x%08x (wasted space 0x%08x)\n",
3102224611Smarcel	    mapped, mapped - size);
3103224611Smarcel	return (mapped);
3104176771Sraj}
3105176771Sraj
3106176771Sraj/*
3107176771Sraj * TLB1 initialization routine, to be called after the very first
3108176771Sraj * assembler level setup done in locore.S.
3109176771Sraj */
3110176771Srajvoid
3111265996Siantlb1_init(void)
3112176771Sraj{
3113265996Sian	uint32_t mas0, mas1, mas2, mas3;
3114224611Smarcel	uint32_t tsz;
3115224611Smarcel	u_int i;
3116176771Sraj
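	/*
	 * Ask the loader how many TLB1 entries it wired (this assumes
	 * bootinfo[0] is a version word and that version 1 blocks lack
	 * the entry count at word offset 8); with no bootinfo, only the
	 * locore.S kernel entry at index 0 is in use.
	 */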
3117224611Smarcel	if (bootinfo != NULL && bootinfo[0] != 1) {
3118224611Smarcel		tlb1_idx = *((uint16_t *)(bootinfo + 8));
3119224611Smarcel	} else
3120224611Smarcel		tlb1_idx = 1;
3121176771Sraj
3122224611Smarcel	/* The first entry/entries are used to map the kernel. */
3123224611Smarcel	for (i = 0; i < tlb1_idx; i++) {
3124224611Smarcel		mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
3125224611Smarcel		mtspr(SPR_MAS0, mas0);
3126224611Smarcel		__asm __volatile("isync; tlbre");
3127176771Sraj
3128224611Smarcel		mas1 = mfspr(SPR_MAS1);
3129224611Smarcel		if ((mas1 & MAS1_VALID) == 0)
3130224611Smarcel			continue;
3131224611Smarcel
3132265996Sian		mas2 = mfspr(SPR_MAS2);
3133224611Smarcel		mas3 = mfspr(SPR_MAS3);
3134224611Smarcel
3135224611Smarcel		tlb1[i].mas1 = mas1;
3136224611Smarcel		tlb1[i].mas2 = mas2;
3137224611Smarcel		tlb1[i].mas3 = mas3;
3138265996Sian		tlb1[i].virt = mas2 & MAS2_EPN_MASK;
3139265996Sian		tlb1[i].phys = mas3 & MAS3_RPN;
3140224611Smarcel
3141224611Smarcel		if (i == 0)
3142224611Smarcel			kernload = mas3 & MAS3_RPN;
3143224611Smarcel
3144224611Smarcel		tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
3145265996Sian		tlb1[i].size = (tsz > 0) ? tsize2size(tsz) : 0;
3146265996Sian		kernsize += tlb1[i].size;
3147224611Smarcel	}
3148224611Smarcel
3149242526Smarcel#ifdef SMP
3150242526Smarcel	bp_ntlb1s = tlb1_idx;
3151242526Smarcel#endif
3152242526Smarcel
3153238031Smarcel	/* Purge the remaining entries */
3154238031Smarcel	for (i = tlb1_idx; i < TLB1_ENTRIES; i++)
3155238031Smarcel		tlb1_write_entry(i);
3156238031Smarcel
3157176771Sraj	/* Setup TLB miss defaults */
3158176771Sraj	set_mas4_defaults();
3159176771Sraj}
3160176771Sraj
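/*
 * Early (pre-VM) I/O mapping helper: if an existing TLB1 entry already
 * covers the requested physical range, return the VA inside it;
 * otherwise wire new entries at tlb1_map_base, carving the range into
 * power-of-4 chunks as in mmu_booke_mapdev() above.
 */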
3161265996Sianvm_offset_t
3162265996Sianpmap_early_io_map(vm_paddr_t pa, vm_size_t size)
3163265996Sian{
3164265996Sian	vm_paddr_t pa_base;
3165265996Sian	vm_offset_t va, sz;
3166265996Sian	int i;
3167265996Sian
3168265996Sian	KASSERT(!pmap_bootstrapped, ("Do not use after PMAP is up!"));
3169265996Sian
3170265996Sian	for (i = 0; i < tlb1_idx; i++) {
3171265996Sian		if (!(tlb1[i].mas1 & MAS1_VALID))
3172265996Sian			continue;
3173265996Sian		if (pa >= tlb1[i].phys && (pa + size) <=
3174265996Sian		    (tlb1[i].phys + tlb1[i].size))
3175265996Sian			return (tlb1[i].virt + (pa - tlb1[i].phys));
3176265996Sian	}
3177265996Sian
3178265996Sian	pa_base = trunc_page(pa);
3179265996Sian	size = roundup(size + (pa - pa_base), PAGE_SIZE);
3180265998Sian	va = tlb1_map_base + (pa - pa_base);
3181265996Sian
3182265996Sian	do {
3183265996Sian		sz = 1 << (ilog2(size) & ~1);
3184265998Sian		tlb1_set_entry(tlb1_map_base, pa_base, sz, _TLB_ENTRY_IO);
3185265996Sian		size -= sz;
3186265996Sian		pa_base += sz;
3187265998Sian		tlb1_map_base += sz;
3188265996Sian	} while (size > 0);
3189265996Sian
3190265996Sian#ifdef SMP
3191265996Sian	bp_ntlb1s = tlb1_idx;
3192265996Sian#endif
3193265996Sian
3194265996Sian	return (va);
3195265996Sian}
3196265996Sian
3197176771Sraj/*
3198176771Sraj * Setup MAS4 defaults.
3199176771Sraj * These values are loaded to MAS0-2 on a TLB miss.
3200176771Sraj */
3201176771Srajstatic void
3202176771Srajset_mas4_defaults(void)
3203176771Sraj{
3204187151Sraj	uint32_t mas4;
3205176771Sraj
3206176771Sraj	/* Defaults: TLB0, PID0, TSIZED=4K */
3207176771Sraj	mas4 = MAS4_TLBSELD0;
3208176771Sraj	mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
3209192532Sraj#ifdef SMP
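	/* Default to memory coherence required (M) on misses under SMP. */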
3210192532Sraj	mas4 |= MAS4_MD;
3211192532Sraj#endif
3212176771Sraj	mtspr(SPR_MAS4, mas4);
3213187151Sraj	__asm __volatile("isync");
3214176771Sraj}
3215176771Sraj
3216176771Sraj/*
3217176771Sraj * Print out contents of the MAS registers for each TLB1 entry
3218176771Sraj */
3219176771Srajvoid
3220176771Srajtlb1_print_tlbentries(void)
3221176771Sraj{
3222187149Sraj	uint32_t mas0, mas1, mas2, mas3, mas7;
3223176771Sraj	int i;
3224176771Sraj
3225176771Sraj	debugf("TLB1 entries:\n");
3226187149Sraj	for (i = 0; i < TLB1_ENTRIES; i++) {
3227176771Sraj
3228176771Sraj		mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
3229176771Sraj		mtspr(SPR_MAS0, mas0);
3230176771Sraj
3231187149Sraj		__asm __volatile("isync; tlbre");
3232176771Sraj
3233176771Sraj		mas1 = mfspr(SPR_MAS1);
3234176771Sraj		mas2 = mfspr(SPR_MAS2);
3235176771Sraj		mas3 = mfspr(SPR_MAS3);
3236176771Sraj		mas7 = mfspr(SPR_MAS7);
3237176771Sraj
3238176771Sraj		tlb_print_entry(i, mas1, mas2, mas3, mas7);
3239176771Sraj	}
3240176771Sraj}
3241176771Sraj
3242176771Sraj/*
3243176771Sraj * Print out contents of the in-ram tlb1 table.
3244176771Sraj */
3245176771Srajvoid
3246176771Srajtlb1_print_entries(void)
3247176771Sraj{
3248176771Sraj	int i;
3249176771Sraj
3250176771Sraj	debugf("tlb1[] table entries:\n");
3251187149Sraj	for (i = 0; i < TLB1_ENTRIES; i++)
3252176771Sraj		tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0);
3253176771Sraj}
3254176771Sraj
3255176771Sraj/*
3256176771Sraj * Return 0 if the physical I/O range is encompassed by one of the
3257176771Sraj * TLB1 entries; otherwise return the related error code.
3258176771Sraj */
3259176771Srajstatic int
3260176771Srajtlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
3261176771Sraj{
3262187151Sraj	uint32_t prot;
3263176771Sraj	vm_paddr_t pa_start;
3264176771Sraj	vm_paddr_t pa_end;
3265176771Sraj	unsigned int entry_tsize;
3266176771Sraj	vm_size_t entry_size;
3267176771Sraj
3268176771Sraj	*va = (vm_offset_t)NULL;
3269176771Sraj
3270176771Sraj	/* Skip invalid entries */
3271176771Sraj	if (!(tlb1[i].mas1 & MAS1_VALID))
3272176771Sraj		return (EINVAL);
3273176771Sraj
3274176771Sraj	/*
3275176771Sraj	 * The entry must be cache-inhibited, guarded, and r/w
3276176771Sraj	 * so it can function as an i/o page
3277176771Sraj	 */
3278176771Sraj	prot = tlb1[i].mas2 & (MAS2_I | MAS2_G);
3279176771Sraj	if (prot != (MAS2_I | MAS2_G))
3280176771Sraj		return (EPERM);
3281176771Sraj
3282176771Sraj	prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW);
3283176771Sraj	if (prot != (MAS3_SR | MAS3_SW))
3284176771Sraj		return (EPERM);
3285176771Sraj
3286176771Sraj	/* The address should be within the entry range. */
3287176771Sraj	entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
3288176771Sraj	KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));
3289176771Sraj
3290176771Sraj	entry_size = tsize2size(entry_tsize);
3291176771Sraj	pa_start = tlb1[i].mas3 & MAS3_RPN;
3292176771Sraj	pa_end = pa_start + entry_size - 1;
3293176771Sraj
3294176771Sraj	if ((pa < pa_start) || ((pa + size - 1) > pa_end))
3295176771Sraj		return (ERANGE);
3296176771Sraj
3297176771Sraj	/* Return virtual address of this mapping. */
3298187149Sraj	*va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start);
3299176771Sraj	return (0);
3300176771Sraj}
3301