/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 */

/*
 * VM layout notes:
 *
 * Kernel and user threads run within one common virtual address space
 * defined by AS=0.
 *
 * Virtual address space layout:
 * -----------------------------
 * 0x0000_0000 - 0xafff_ffff	: user process
 * 0xb000_0000 - 0xbfff_ffff	: pmap_mapdev()-ed area (PCI/PCIE etc.)
 * 0xc000_0000 - 0xc0ff_ffff	: kernel reserved
 *   0xc000_0000 - data_end	: kernel code+data, env, metadata etc.
 * 0xc100_0000 - 0xfeef_ffff	: KVA
 *   0xc100_0000 - 0xc100_3fff	: reserved for page zero/copy
 *   0xc100_4000 - 0xc200_3fff	: reserved for ptbl bufs
 *   0xc200_4000 - 0xc200_8fff	: guard page + kstack0
 *   0xc200_9000 - 0xfeef_ffff	: actual free KVA space
 * 0xfef0_0000 - 0xffff_ffff	: I/O devices region
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/booke/pmap.c 209048 2010-06-11 15:49:39Z alc $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/msgbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/bootinfo.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/platform.h>

#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/vmparam.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#include "mmu_if.h"

#define DEBUG
#undef DEBUG

#ifdef  DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

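/*
 * Illustrative usage: with DEBUG defined above, a call like
 *
 *	debugf("%s: va = 0x%08x\n", __func__, va);
 *
 * goes to the console via printf(); with DEBUG undefined it expands to
 * nothing and costs nothing at run time.
 */
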
#define TODO			panic("%s: not implemented", __func__);

#include "opt_sched.h"
#ifndef SCHED_4BSD
#error "e500 only works with SCHED_4BSD which uses a global scheduler lock."
#endif
extern struct mtx sched_lock;

extern int dumpsys_minidump;

extern unsigned char _etext[];
extern unsigned char _end[];

/* Kernel physical load address. */
extern uint32_t kernload;
vm_offset_t kernstart;
vm_size_t kernsize;

/* Message buffer and tables. */
static vm_offset_t data_start;
static vm_size_t data_end;

/* Phys/avail memory regions. */
static struct mem_region *availmem_regions;
static int availmem_regions_sz;
static struct mem_region *physmem_regions;
static int physmem_regions_sz;

/* Reserved KVA space and mutex for mmu_booke_zero_page. */
static vm_offset_t zero_page_va;
static struct mtx zero_page_mutex;

static struct mtx tlbivax_mutex;

/*
 * Reserved KVA space for mmu_booke_zero_page_idle. This is used
 * by the idle thread only; no lock is required.
 */
static vm_offset_t zero_page_idle_va;

/* Reserved KVA space and mutex for mmu_booke_copy_page. */
static vm_offset_t copy_page_src_va;
static vm_offset_t copy_page_dst_va;
static struct mtx copy_page_mutex;

/**************************************************************************/
/* PMAP */
/**************************************************************************/

static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, boolean_t);

unsigned int kptbl_min;		/* Index of the first kernel ptbl. */
unsigned int kernel_ptbls;	/* Number of KVA ptbls. */

/*
 * If a user pmap is processed with mmu_booke_remove and the resident count
 * drops to 0, there are no more pages to remove, so we need not continue.
 */
#define PMAP_REMOVE_DONE(pmap) \
	((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)
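
/*
 * PMAP_REMOVE_DONE is checked by mmu_booke_remove() below so that an
 * already-empty user pmap is not walked page by page.
 */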

extern void tlb_lock(uint32_t *);
extern void tlb_unlock(uint32_t *);
extern void tid_flush(tlbtid_t);

/**************************************************************************/
/* TLB and TID handling */
/**************************************************************************/

/* Translation ID busy table */
static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];

/*
 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
 * core revisions and should be read from h/w registers during early config.
 */
uint32_t tlb0_entries;
uint32_t tlb0_ways;
uint32_t tlb0_entries_per_way;

#define TLB0_ENTRIES		(tlb0_entries)
#define TLB0_WAYS		(tlb0_ways)
#define TLB0_ENTRIES_PER_WAY	(tlb0_entries_per_way)

#define TLB1_ENTRIES 16

/* In-ram copy of the TLB1 */
static tlb_entry_t tlb1[TLB1_ENTRIES];

/* Next free entry in the TLB1 */
static unsigned int tlb1_idx;

static tlbtid_t tid_alloc(struct pmap *);

static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);

static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t);
static void tlb1_write_entry(unsigned int);
static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
static vm_size_t tlb1_mapin_region(vm_offset_t, vm_offset_t, vm_size_t);

static vm_size_t tsize2size(unsigned int);
static unsigned int size2tsize(vm_size_t);
static unsigned int ilog2(unsigned int);

static void set_mas4_defaults(void);

static inline void tlb0_flush_entry(vm_offset_t);
static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);

/**************************************************************************/
/* Page table management */
/**************************************************************************/

/* Data for the pv entry allocation mechanism */
static uma_zone_t pvzone;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;

#define PV_ENTRY_ZONE_MIN	2048	/* min pv entries in uma zone */

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC	200
#endif

static void ptbl_init(void);
static struct ptbl_buf *ptbl_buf_alloc(void);
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);

static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int);
static void ptbl_free(mmu_t, pmap_t, unsigned int);
static void ptbl_hold(mmu_t, pmap_t, unsigned int);
static int ptbl_unhold(mmu_t, pmap_t, unsigned int);

static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);

static pv_entry_t pv_alloc(void);
static void pv_free(pv_entry_t);
static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
static void pv_remove(pmap_t, vm_offset_t, vm_page_t);

/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
#define PTBL_BUFS		(128 * 16)

struct ptbl_buf {
	TAILQ_ENTRY(ptbl_buf) link;	/* list link */
	vm_offset_t kva;		/* va of mapping */
};

/* ptbl free list and a lock used for access synchronization. */
static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
static struct mtx ptbl_buf_freelist_lock;

/* Base address of kva space allocated for ptbl bufs. */
static vm_offset_t ptbl_buf_pool_vabase;

/* Pointer to ptbl_buf structures. */
static struct ptbl_buf *ptbl_bufs;

void pmap_bootstrap_ap(volatile uint32_t *);

/*
 * Kernel MMU interface
 */
static void		mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
static void		mmu_booke_clear_modify(mmu_t, vm_page_t);
static void		mmu_booke_clear_reference(mmu_t, vm_page_t);
static void		mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
    vm_size_t, vm_offset_t);
static void		mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
static void		mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, boolean_t);
static void		mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_page_t, vm_prot_t);
static void		mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t);
static vm_paddr_t	mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
static vm_page_t	mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
    vm_prot_t);
static void		mmu_booke_init(mmu_t);
static boolean_t	mmu_booke_is_modified(mmu_t, vm_page_t);
static boolean_t	mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
static boolean_t	mmu_booke_is_referenced(mmu_t, vm_page_t);
static boolean_t	mmu_booke_ts_referenced(mmu_t, vm_page_t);
static vm_offset_t	mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t,
    int);
static int		mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
    vm_paddr_t *);
static void		mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
    vm_object_t, vm_pindex_t, vm_size_t);
static boolean_t	mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
static void		mmu_booke_page_init(mmu_t, vm_page_t);
static int		mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
static void		mmu_booke_pinit(mmu_t, pmap_t);
static void		mmu_booke_pinit0(mmu_t, pmap_t);
static void		mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_prot_t);
static void		mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
static void		mmu_booke_qremove(mmu_t, vm_offset_t, int);
static void		mmu_booke_release(mmu_t, pmap_t);
static void		mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
static void		mmu_booke_remove_all(mmu_t, vm_page_t);
static void		mmu_booke_remove_write(mmu_t, vm_page_t);
static void		mmu_booke_zero_page(mmu_t, vm_page_t);
static void		mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
static void		mmu_booke_zero_page_idle(mmu_t, vm_page_t);
static void		mmu_booke_activate(mmu_t, struct thread *);
static void		mmu_booke_deactivate(mmu_t, struct thread *);
static void		mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
static void		*mmu_booke_mapdev(mmu_t, vm_offset_t, vm_size_t);
static void		mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
static vm_offset_t	mmu_booke_kextract(mmu_t, vm_offset_t);
static void		mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t);
static void		mmu_booke_kremove(mmu_t, vm_offset_t);
static boolean_t	mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
static void		mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
    vm_size_t);
static vm_offset_t	mmu_booke_dumpsys_map(mmu_t, struct pmap_md *,
    vm_size_t, vm_size_t *);
static void		mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *,
    vm_size_t, vm_offset_t);
static struct pmap_md	*mmu_booke_scan_md(mmu_t, struct pmap_md *);

static mmu_method_t mmu_booke_methods[] = {
	/* pmap dispatcher interface */
	MMUMETHOD(mmu_change_wiring,	mmu_booke_change_wiring),
	MMUMETHOD(mmu_clear_modify,	mmu_booke_clear_modify),
	MMUMETHOD(mmu_clear_reference,	mmu_booke_clear_reference),
	MMUMETHOD(mmu_copy,		mmu_booke_copy),
	MMUMETHOD(mmu_copy_page,	mmu_booke_copy_page),
	MMUMETHOD(mmu_enter,		mmu_booke_enter),
	MMUMETHOD(mmu_enter_object,	mmu_booke_enter_object),
	MMUMETHOD(mmu_enter_quick,	mmu_booke_enter_quick),
	MMUMETHOD(mmu_extract,		mmu_booke_extract),
	MMUMETHOD(mmu_extract_and_hold,	mmu_booke_extract_and_hold),
	MMUMETHOD(mmu_init,		mmu_booke_init),
	MMUMETHOD(mmu_is_modified,	mmu_booke_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	mmu_booke_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	mmu_booke_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	mmu_booke_ts_referenced),
	MMUMETHOD(mmu_map,		mmu_booke_map),
	MMUMETHOD(mmu_mincore,		mmu_booke_mincore),
	MMUMETHOD(mmu_object_init_pt,	mmu_booke_object_init_pt),
	MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
	MMUMETHOD(mmu_page_init,	mmu_booke_page_init),
	MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		mmu_booke_pinit),
	MMUMETHOD(mmu_pinit0,		mmu_booke_pinit0),
	MMUMETHOD(mmu_protect,		mmu_booke_protect),
	MMUMETHOD(mmu_qenter,		mmu_booke_qenter),
	MMUMETHOD(mmu_qremove,		mmu_booke_qremove),
	MMUMETHOD(mmu_release,		mmu_booke_release),
	MMUMETHOD(mmu_remove,		mmu_booke_remove),
	MMUMETHOD(mmu_remove_all,	mmu_booke_remove_all),
	MMUMETHOD(mmu_remove_write,	mmu_booke_remove_write),
	MMUMETHOD(mmu_sync_icache,	mmu_booke_sync_icache),
	MMUMETHOD(mmu_zero_page,	mmu_booke_zero_page),
	MMUMETHOD(mmu_zero_page_area,	mmu_booke_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	mmu_booke_zero_page_idle),
	MMUMETHOD(mmu_activate,		mmu_booke_activate),
	MMUMETHOD(mmu_deactivate,	mmu_booke_deactivate),

	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	mmu_booke_bootstrap),
	MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
	MMUMETHOD(mmu_mapdev,		mmu_booke_mapdev),
	MMUMETHOD(mmu_kenter,		mmu_booke_kenter),
	MMUMETHOD(mmu_kextract,		mmu_booke_kextract),
/*	MMUMETHOD(mmu_kremove,		mmu_booke_kremove),	*/
	MMUMETHOD(mmu_unmapdev,		mmu_booke_unmapdev),

	/* dumpsys() support */
	MMUMETHOD(mmu_dumpsys_map,	mmu_booke_dumpsys_map),
	MMUMETHOD(mmu_dumpsys_unmap,	mmu_booke_dumpsys_unmap),
	MMUMETHOD(mmu_scan_md,		mmu_booke_scan_md),

	{ 0, 0 }
};

static mmu_def_t booke_mmu = {
	MMU_TYPE_BOOKE,
	mmu_booke_methods,
	0
};
MMU_DEF(booke_mmu);

static inline void
tlb_miss_lock(void)
{
#ifdef SMP
	struct pcpu *pc;

	if (!smp_started)
		return;

	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup) {

			CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
			    "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke_tlb_lock);

			KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
			    ("tlb_miss_lock: tried to lock self"));

			tlb_lock(pc->pc_booke_tlb_lock);

			CTR1(KTR_PMAP, "%s: locked", __func__);
		}
	}
#endif
}

static inline void
tlb_miss_unlock(void)
{
#ifdef SMP
	struct pcpu *pc;

	if (!smp_started)
		return;

	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup) {
			CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
			    __func__, pc->pc_cpuid);

			tlb_unlock(pc->pc_booke_tlb_lock);

			CTR1(KTR_PMAP, "%s: unlocked", __func__);
		}
	}
#endif
}

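/*
 * Callers that modify PTEs shared with the TLB miss handlers use the
 * following sequence throughout this file:
 *
 *	mtx_lock_spin(&tlbivax_mutex);
 *	tlb_miss_lock();
 *	... update the pte, flush the TLB0 entry ...
 *	tlb_miss_unlock();
 *	mtx_unlock_spin(&tlbivax_mutex);
 */
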
/* Read TLB0 size and associativity, derive the number of entries per way. */
static __inline void
tlb0_get_tlbconf(void)
{
	uint32_t tlb0_cfg;

	tlb0_cfg = mfspr(SPR_TLB0CFG);
	tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
	tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
	tlb0_entries_per_way = tlb0_entries / tlb0_ways;
}
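
/*
 * For example (illustrative; the actual values always come from TLB0CFG):
 * an e500v2-class 512-entry, 4-way set associative TLB0 gives
 * tlb0_entries_per_way = 512 / 4 = 128.
 */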

/* Initialize pool of kva ptbl buffers. */
static void
ptbl_init(void)
{
	int i;

	CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
	    (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
	CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
	    __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);

	mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
	TAILQ_INIT(&ptbl_buf_freelist);

	for (i = 0; i < PTBL_BUFS; i++) {
		ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
	}
}

/* Get a ptbl_buf from the freelist. */
static struct ptbl_buf *
ptbl_buf_alloc(void)
{
	struct ptbl_buf *buf;

	mtx_lock(&ptbl_buf_freelist_lock);
	buf = TAILQ_FIRST(&ptbl_buf_freelist);
	if (buf != NULL)
		TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	return (buf);
}

/* Return ptbl buf to the free pool. */
static void
ptbl_buf_free(struct ptbl_buf *buf)
{

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	mtx_lock(&ptbl_buf_freelist_lock);
	TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);
}

/*
 * Search the pmap's list of allocated ptbl bufs for the one backing the
 * given ptbl, and release it.
 */
static void
ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
{
	struct ptbl_buf *pbuf;

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
		if (pbuf->kva == (vm_offset_t)ptbl) {
			/* Remove from pmap ptbl buf list. */
			TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);

			/* Free corresponding ptbl buf. */
			ptbl_buf_free(pbuf);
			break;
		}
}

/* Allocate page table. */
static pte_t *
ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	vm_page_t mtbl[PTBL_PAGES];
	vm_page_t m;
	struct ptbl_buf *pbuf;
	unsigned int pidx;
	pte_t *ptbl;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_alloc: invalid pdir_idx"));
	KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
	    ("pte_alloc: valid ptbl entry exists!"));

	pbuf = ptbl_buf_alloc();
	if (pbuf == NULL)
		panic("pte_alloc: couldn't alloc kernel virtual memory");

	ptbl = (pte_t *)pbuf->kva;

	CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);

	/* Allocate ptbl pages, this will sleep! */
	for (i = 0; i < PTBL_PAGES; i++) {
		pidx = (PTBL_PAGES * pdir_idx) + i;
		while ((m = vm_page_alloc(NULL, pidx,
		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {

			PMAP_UNLOCK(pmap);
			vm_page_unlock_queues();
			VM_WAIT;
			vm_page_lock_queues();
			PMAP_LOCK(pmap);
		}
		mtbl[i] = m;
	}

	/* Map allocated pages into kernel_pmap. */
	mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);

	/* Zero whole ptbl. */
	bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);

	/* Add pbuf to the pmap ptbl bufs list. */
	TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);

	return (ptbl);
}

/* Free ptbl pages and invalidate pdir entry. */
static void
ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_offset_t va;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_free: invalid pdir_idx"));

	ptbl = pmap->pm_pdir[pdir_idx];

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));

	/*
	 * Invalidate the pdir entry as soon as possible, so that other CPUs
	 * don't attempt to look up the page tables we are releasing.
	 */
	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	pmap->pm_pdir[pdir_idx] = NULL;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	for (i = 0; i < PTBL_PAGES; i++) {
		va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
		pa = pte_vatopa(mmu, kernel_pmap, va);
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_free_zero(m);
		atomic_subtract_int(&cnt.v_wire_count, 1);
		mmu_booke_kremove(mmu, va);
	}

	ptbl_free_pmap_ptbl(pmap, ptbl);
}

/*
 * Decrement ptbl pages hold count and attempt to free ptbl pages.
 * Called when removing pte entry from ptbl.
 *
 * Return 1 if ptbl pages were freed.
 */
static int
ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_unhold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_unhold: unholding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	//debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
	KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
	    ("ptbl_unhold: non kva ptbl"));

	/* decrement hold count */
	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(mmu, kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->wire_count--;
	}

	/*
	 * Free ptbl pages if there are no pte entries in this ptbl.
	 * wire_count has the same value for all ptbl pages, so check the last
	 * page.
	 */
	if (m->wire_count == 0) {
		ptbl_free(mmu, pmap, pdir_idx);

		//debugf("ptbl_unhold: e (freed ptbl)\n");
		return (1);
	}

	return (0);
}

/*
 * Increment hold count for ptbl pages. This routine is used when a new pte
 * entry is being inserted into the ptbl.
 */
static void
ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	vm_paddr_t pa;
	pte_t *ptbl;
	vm_page_t m;
	int i;

	CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
	    pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_hold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_hold: holding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));

	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(mmu, kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->wire_count++;
	}
}

709
710/* Allocate pv_entry structure. */
711pv_entry_t
712pv_alloc(void)
713{
714	pv_entry_t pv;
715
716	pv_entry_count++;
717	if (pv_entry_count > pv_entry_high_water)
718		pagedaemon_wakeup();
719	pv = uma_zalloc(pvzone, M_NOWAIT);
720
721	return (pv);
722}
723
724/* Free pv_entry structure. */
725static __inline void
726pv_free(pv_entry_t pve)
727{
728
729	pv_entry_count--;
730	uma_zfree(pvzone, pve);
731}
732
733
734/* Allocate and initialize pv_entry structure. */
735static void
736pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
737{
738	pv_entry_t pve;
739
740	//int su = (pmap == kernel_pmap);
741	//debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
742	//	(u_int32_t)pmap, va, (u_int32_t)m);
743
744	pve = pv_alloc();
745	if (pve == NULL)
746		panic("pv_insert: no pv entries!");
747
748	pve->pv_pmap = pmap;
749	pve->pv_va = va;
750
751	/* add to pv_list */
752	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
753	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
754
755	TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);
756
757	//debugf("pv_insert: e\n");
758}
759
760/* Destroy pv entry. */
761static void
762pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
763{
764	pv_entry_t pve;
765
766	//int su = (pmap == kernel_pmap);
767	//debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);
768
769	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
770	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
771
772	/* find pv entry */
773	TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
774		if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
775			/* remove from pv_list */
776			TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
777			if (TAILQ_EMPTY(&m->md.pv_list))
778				vm_page_flag_clear(m, PG_WRITEABLE);
779
780			/* free pv entry struct */
781			pv_free(pve);
782			break;
783		}
784	}
785
786	//debugf("pv_remove: e\n");
787}
788
789/*
790 * Clean pte entry, try to free page table page if requested.
791 *
792 * Return 1 if ptbl pages were freed, otherwise return 0.
793 */
794static int
795pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
796{
797	unsigned int pdir_idx = PDIR_IDX(va);
798	unsigned int ptbl_idx = PTBL_IDX(va);
799	vm_page_t m;
800	pte_t *ptbl;
801	pte_t *pte;
802
803	//int su = (pmap == kernel_pmap);
804	//debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
805	//		su, (u_int32_t)pmap, va, flags);
806
807	ptbl = pmap->pm_pdir[pdir_idx];
808	KASSERT(ptbl, ("pte_remove: null ptbl"));
809
810	pte = &ptbl[ptbl_idx];
811
812	if (pte == NULL || !PTE_ISVALID(pte))
813		return (0);
814
815	if (PTE_ISWIRED(pte))
816		pmap->pm_stats.wired_count--;
817
818	/* Handle managed entry. */
819	if (PTE_ISMANAGED(pte)) {
820		/* Get vm_page_t for mapped pte. */
821		m = PHYS_TO_VM_PAGE(PTE_PA(pte));
822
823		if (PTE_ISMODIFIED(pte))
824			vm_page_dirty(m);
825
826		if (PTE_ISREFERENCED(pte))
827			vm_page_flag_set(m, PG_REFERENCED);
828
829		pv_remove(pmap, va, m);
830	}
831
832	mtx_lock_spin(&tlbivax_mutex);
833	tlb_miss_lock();
834
835	tlb0_flush_entry(va);
836	pte->flags = 0;
837	pte->rpn = 0;
838
839	tlb_miss_unlock();
840	mtx_unlock_spin(&tlbivax_mutex);
841
842	pmap->pm_stats.resident_count--;
843
844	if (flags & PTBL_UNHOLD) {
845		//debugf("pte_remove: e (unhold)\n");
846		return (ptbl_unhold(mmu, pmap, pdir_idx));
847	}
848
849	//debugf("pte_remove: e\n");
850	return (0);
851}
852
853/*
854 * Insert PTE for a given page and virtual address.
855 */
856static void
857pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags)
858{
859	unsigned int pdir_idx = PDIR_IDX(va);
860	unsigned int ptbl_idx = PTBL_IDX(va);
861	pte_t *ptbl, *pte;
862
863	CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
864	    pmap == kernel_pmap, pmap, va);
865
866	/* Get the page table pointer. */
867	ptbl = pmap->pm_pdir[pdir_idx];
868
869	if (ptbl == NULL) {
870		/* Allocate page table pages. */
871		ptbl = ptbl_alloc(mmu, pmap, pdir_idx);
872	} else {
		/*
		 * Check if there is a valid mapping for the requested va;
		 * if there is, remove it.
		 */
		pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
		if (PTE_ISVALID(pte)) {
			pte_remove(mmu, pmap, va, PTBL_HOLD);
		} else {
			/*
			 * pte is not used, increment hold count
			 * for ptbl pages.
			 */
			if (pmap != kernel_pmap)
				ptbl_hold(mmu, pmap, pdir_idx);
		}
	}

	/*
	 * Insert pv_entry into pv_list for mapped page if part of managed
	 * memory.
	 */
	if ((m->flags & PG_FICTITIOUS) == 0) {
		if ((m->flags & PG_UNMANAGED) == 0) {
			flags |= PTE_MANAGED;

			/* Create and insert pv entry. */
			pv_insert(pmap, va, m);
		}
	}

	pmap->pm_stats.resident_count++;

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	if (pmap->pm_pdir[pdir_idx] == NULL) {
		/*
		 * If we just allocated a new page table, hook it in
		 * the pdir.
		 */
		pmap->pm_pdir[pdir_idx] = ptbl;
	}
	pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
	pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK;
	pte->flags |= (PTE_VALID | flags);

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);
}

/* Return the pa for the given pmap/va. */
static vm_paddr_t
pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa = 0;
	pte_t *pte;

	pte = pte_find(mmu, pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte))
		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
	return (pa);
}

/* Get a pointer to a PTE in a page table. */
static pte_t *
pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	if (pmap->pm_pdir[pdir_idx])
		return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));

	return (NULL);
}
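
/*
 * Address resolution is thus two-level: PDIR_IDX(va) selects a page table
 * in pm_pdir[], PTBL_IDX(va) selects the pte within it, and the low-order
 * bits (va & PTE_PA_MASK) give the offset within the page, as used by
 * pte_vatopa() above.
 */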

/**************************************************************************/
/* PMAP related */
/**************************************************************************/

/*
 * This is called during e500_init, before the system is really initialized.
 */
static void
mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
{
	vm_offset_t phys_kernelend;
	struct mem_region *mp, *mp1;
	int cnt, i, j;
	u_int s, e, sz;
	u_int phys_avail_count;
	vm_size_t physsz, hwphyssz, kstack0_sz;
	vm_offset_t kernel_pdir, kstack0, va;
	vm_paddr_t kstack0_phys;
	void *dpcpu;
	pte_t *pte;

	debugf("mmu_booke_bootstrap: entered\n");

	/* Initialize invalidation mutex */
	mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);

	/* Read TLB0 size and associativity. */
	tlb0_get_tlbconf();

	/* Align kernel start and end address (kernel image). */
	kernstart = trunc_page(start);
	data_start = round_page(kernelend);
	kernsize = data_start - kernstart;

	data_end = data_start;

	/* Allocate space for the message buffer. */
	msgbufp = (struct msgbuf *)data_end;
	data_end += MSGBUF_SIZE;
	debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp,
	    data_end);

	data_end = round_page(data_end);

	/* Allocate the dynamic per-cpu area. */
	dpcpu = (void *)data_end;
	data_end += DPCPU_SIZE;
	dpcpu_init(dpcpu, 0);

	/* Allocate space for ptbl_bufs. */
	ptbl_bufs = (struct ptbl_buf *)data_end;
	data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
	debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs,
	    data_end);

	data_end = round_page(data_end);

	/* Allocate PTE tables for kernel KVA. */
	kernel_pdir = data_end;
	kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS +
	    PDIR_SIZE - 1) / PDIR_SIZE;
	data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
	debugf(" kernel ptbls: %d\n", kernel_ptbls);
	debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end);

	debugf(" data_end: 0x%08x\n", data_end);
	if (data_end - kernstart > 0x1000000) {
		data_end = (data_end + 0x3fffff) & ~0x3fffff;
		tlb1_mapin_region(kernstart + 0x1000000,
		    kernload + 0x1000000, data_end - kernstart - 0x1000000);
	} else
		data_end = (data_end + 0xffffff) & ~0xffffff;
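	/*
	 * Note: the rounding above keeps the region aligned to sizes the
	 * hardware can cover with whole entries (BookE TLB1 entries map
	 * power-of-4 sizes: 4K, 16K, ..., 256M), matching what
	 * tlb1_mapin_region() can install.
	 */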

	debugf(" updated data_end: 0x%08x\n", data_end);

	kernsize += data_end - data_start;

	/*
	 * Clear the structures - note we can only do it safely after the
	 * possible additional TLB1 translations are in place (above) so that
	 * all range up to the currently calculated 'data_end' is covered.
	 */
	memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
	memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);

	/*******************************************************/
	/* Set the start and end of kva. */
	/*******************************************************/
	virtual_avail = round_page(data_end);
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/* Allocate KVA space for page zero/copy operations. */
	zero_page_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	zero_page_idle_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	copy_page_src_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	copy_page_dst_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	debugf("zero_page_va = 0x%08x\n", zero_page_va);
	debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va);
	debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
	debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);

	/* Initialize page zero/copy mutexes. */
	mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
	mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);

	/* Allocate KVA space for ptbl bufs. */
	ptbl_buf_pool_vabase = virtual_avail;
	virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
	debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n",
	    ptbl_buf_pool_vabase, virtual_avail);

	/* Calculate corresponding physical addresses for the kernel region. */
	phys_kernelend = kernload + kernsize;
	debugf("kernel image and allocated data:\n");
	debugf(" kernload    = 0x%08x\n", kernload);
	debugf(" kernstart   = 0x%08x\n", kernstart);
	debugf(" kernsize    = 0x%08x\n", kernsize);

	if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
		panic("mmu_booke_bootstrap: phys_avail too small");

	/*
	 * Remove kernel physical address range from avail regions list. Page
	 * align all regions.  Non-page aligned memory isn't very interesting
	 * to us.  Also, sort the entries for ascending addresses.
	 */

	/* Retrieve phys/avail mem regions */
	mem_regions(&physmem_regions, &physmem_regions_sz,
	    &availmem_regions, &availmem_regions_sz);
	sz = 0;
	cnt = availmem_regions_sz;
	debugf("processing avail regions:\n");
	for (mp = availmem_regions; mp->mr_size; mp++) {
		s = mp->mr_start;
		e = mp->mr_start + mp->mr_size;
		debugf(" %08x-%08x -> ", s, e);
		/* Check whether this region holds all of the kernel. */
		if (s < kernload && e > phys_kernelend) {
			availmem_regions[cnt].mr_start = phys_kernelend;
			availmem_regions[cnt++].mr_size = e - phys_kernelend;
			e = kernload;
		}
		/* Look whether this region starts within the kernel. */
		if (s >= kernload && s < phys_kernelend) {
			if (e <= phys_kernelend)
				goto empty;
			s = phys_kernelend;
		}
		/* Now look whether this region ends within the kernel. */
		if (e > kernload && e <= phys_kernelend) {
			if (s >= kernload)
				goto empty;
			e = kernload;
		}
		/* Now page align the start and size of the region. */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s)
			e = s;
		sz = e - s;
		debugf("%08x-%08x = %x\n", s, e, sz);

		/* Check whether some memory is left here. */
		if (sz == 0) {
		empty:
			memmove(mp, mp + 1,
			    (cnt - (mp - availmem_regions)) * sizeof(*mp));
			cnt--;
			mp--;
			continue;
		}

		/* Do an insertion sort. */
		for (mp1 = availmem_regions; mp1 < mp; mp1++)
			if (s < mp1->mr_start)
				break;
		if (mp1 < mp) {
			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
			mp1->mr_start = s;
			mp1->mr_size = sz;
		} else {
			mp->mr_start = s;
			mp->mr_size = sz;
		}
	}
	availmem_regions_sz = cnt;

	/*******************************************************/
	/* Steal physical memory for kernel stack from the end */
	/* of the first avail region                           */
	/*******************************************************/
	kstack0_sz = KSTACK_PAGES * PAGE_SIZE;
	kstack0_phys = availmem_regions[0].mr_start +
	    availmem_regions[0].mr_size;
	kstack0_phys -= kstack0_sz;
	availmem_regions[0].mr_size -= kstack0_sz;

	/*******************************************************/
	/* Fill in phys_avail table, based on availmem_regions */
	/*******************************************************/
	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);

	debugf("fill in phys_avail:\n");
	for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {

		debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
		    availmem_regions[i].mr_start,
		    availmem_regions[i].mr_start +
		        availmem_regions[i].mr_size,
		    availmem_regions[i].mr_size);

		if (hwphyssz != 0 &&
		    (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
			debugf(" hw.physmem adjust\n");
			if (physsz < hwphyssz) {
				phys_avail[j] = availmem_regions[i].mr_start;
				phys_avail[j + 1] =
				    availmem_regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}

		phys_avail[j] = availmem_regions[i].mr_start;
		phys_avail[j + 1] = availmem_regions[i].mr_start +
		    availmem_regions[i].mr_size;
		phys_avail_count++;
		physsz += availmem_regions[i].mr_size;
	}
	physmem = btoc(physsz);

	/* Calculate the last available physical address. */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	debugf("Maxmem = 0x%08lx\n", Maxmem);
	debugf("phys_avail_count = %d\n", phys_avail_count);
	debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem,
	    physmem);

	/*******************************************************/
	/* Initialize (statically allocated) kernel pmap. */
	/*******************************************************/
	PMAP_LOCK_INIT(kernel_pmap);
	kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;

	debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap);
	debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls);
	debugf("kernel pdir range: 0x%08x - 0x%08x\n",
	    kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1);

	/* Initialize kernel pdir */
	for (i = 0; i < kernel_ptbls; i++)
		kernel_pmap->pm_pdir[kptbl_min + i] =
		    (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES));

	for (i = 0; i < MAXCPU; i++) {
		kernel_pmap->pm_tid[i] = TID_KERNEL;

		/* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
		tidbusy[i][0] = kernel_pmap;
	}

	/*
	 * Fill in PTEs covering kernel code and data. They are not required
	 * for address translation, as this area is covered by static TLB1
	 * entries, but are needed for pte_vatopa() to work correctly with
	 * kernel area addresses.
	 */
	for (va = KERNBASE; va < data_end; va += PAGE_SIZE) {
		pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
		pte->rpn = kernload + (va - KERNBASE);
		pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
		    PTE_VALID;
	}
	/* Mark kernel_pmap active on all CPUs */
	kernel_pmap->pm_active = ~0;

	/*******************************************************/
	/* Final setup */
	/*******************************************************/

	/* Enter kstack0 into kernel map, provide guard page */
	kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	thread0.td_kstack = kstack0;
	thread0.td_kstack_pages = KSTACK_PAGES;

	debugf("kstack_sz = 0x%08x\n", kstack0_sz);
	debugf("kstack0_phys at 0x%08x - 0x%08x\n",
	    kstack0_phys, kstack0_phys + kstack0_sz);
	debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz);

	virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
	for (i = 0; i < KSTACK_PAGES; i++) {
		mmu_booke_kenter(mmu, kstack0, kstack0_phys);
		kstack0 += PAGE_SIZE;
		kstack0_phys += PAGE_SIZE;
	}

	debugf("virtual_avail = %08x\n", virtual_avail);
	debugf("virtual_end   = %08x\n", virtual_end);

	debugf("mmu_booke_bootstrap: exit\n");
}

void
pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
{
	int i;

	/*
	 * Finish TLB1 configuration: the BSP already set up its TLB1 and we
	 * have the snapshot of its contents in the s/w tlb1[] table, so use
	 * these values directly to (re)program AP's TLB1 hardware.
	 */
	for (i = 0; i < tlb1_idx; i++) {
		/* Skip invalid entries */
		if (!(tlb1[i].mas1 & MAS1_VALID))
			continue;

		tlb1_write_entry(i);
	}

	set_mas4_defaults();
}

/*
 * Get the physical page address for the given pmap/virtual address.
 */
static vm_paddr_t
mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa;

	PMAP_LOCK(pmap);
	pa = pte_vatopa(mmu, pmap, va);
	PMAP_UNLOCK(pmap);

	return (pa);
}

/*
 * Extract the physical page address associated with the given
 * kernel virtual address.
 */
static vm_paddr_t
mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
{

	return (pte_vatopa(mmu, kernel_pmap, va));
}

/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */
static void
mmu_booke_init(mmu_t mmu)
{
	int shpgperproc = PMAP_SHPGPERPROC;

	/*
	 * Initialize the address space (zone) for the pv entries.  Set a
	 * high water mark so that the system can recover from excessive
	 * numbers of pv entries.
	 */
	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);

	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;

	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
	pv_entry_high_water = 9 * (pv_entry_max / 10);

	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);

	/* Pre-fill pvzone with initial number of pv entries. */
	uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);

	/* Initialize ptbl allocation. */
	ptbl_init();
}
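
/*
 * Illustrative sizing for mmu_booke_init() above: with the default
 * PMAP_SHPGPERPROC of 200 and, say, maxproc = 1000 and 65536 managed
 * pages, pv_entry_max = 200 * 1000 + 65536 = 265536 and the high water
 * mark is 9 * (265536 / 10) = 238977 entries.
 */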

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
static void
mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by mmu_booke_qenter.
 */
static void
mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		mmu_booke_kremove(mmu, va);
		va += PAGE_SIZE;
	}
}

/*
 * Map a wired page into kernel virtual address space.
 */
static void
mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	uint32_t flags;
	pte_t *pte;

	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
	    (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));

	flags = 0;
	flags |= (PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID);
	flags |= PTE_M;

	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	if (PTE_ISVALID(pte)) {

		CTR1(KTR_PMAP, "%s: replacing entry!", __func__);

		/* Flush entry from TLB0 */
		tlb0_flush_entry(va);
	}

	pte->rpn = pa & ~PTE_PA_MASK;
	pte->flags = flags;

	//debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
	//		"pa=0x%08x rpn=0x%08x flags=0x%08x\n",
	//		pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);

	/* Flush the real memory from the instruction cache. */
	if ((flags & (PTE_I | PTE_G)) == 0) {
		__syncicache((void *)va, PAGE_SIZE);
	}

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);
}

/*
 * Remove a page from kernel page table.
 */
static void
mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *pte;

//	CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va));

	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
	    (va <= VM_MAX_KERNEL_ADDRESS)),
	    ("mmu_booke_kremove: invalid va"));

	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);

	if (!PTE_ISVALID(pte)) {

		CTR1(KTR_PMAP, "%s: invalid pte", __func__);

		return;
	}

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	/* Invalidate entry in TLB0, update PTE. */
	tlb0_flush_entry(va);
	pte->flags = 0;
	pte->rpn = 0;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);
}

/*
 * Initialize pmap associated with process 0.
 */
static void
mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
{

	mmu_booke_pinit(mmu, pmap);
	PCPU_SET(curpmap, pmap);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
static void
mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
{
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);

	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));

	PMAP_LOCK_INIT(pmap);
	for (i = 0; i < MAXCPU; i++)
		pmap->pm_tid[i] = TID_NONE;
	pmap->pm_active = 0;
	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
	bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
	TAILQ_INIT(&pmap->pm_ptbl_list);
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by mmu_booke_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
static void
mmu_booke_release(mmu_t mmu, pmap_t pmap)
{

	printf("mmu_booke_release: s\n");

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));

	PMAP_LOCK_DESTROY(pmap);
}

/*
 * Insert the given physical page at the specified virtual address in the
 * target physical map with the protection requested. If specified the page
 * will be wired down.
 */
static void
mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

static void
mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{
	pte_t *pte;
	vm_paddr_t pa;
	uint32_t flags;
	int su, sync;

	pa = VM_PAGE_TO_PHYS(m);
	su = (pmap == kernel_pmap);
	sync = 0;

	//debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
	//		"pa=0x%08x prot=0x%08x wired=%d)\n",
	//		(u_int32_t)pmap, su, pmap->pm_tid,
	//		(u_int32_t)m, va, pa, prot, wired);

	if (su) {
		KASSERT(((va >= virtual_avail) &&
		    (va <= VM_MAX_KERNEL_ADDRESS)),
		    ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
	} else {
		KASSERT((va <= VM_MAXUSER_ADDRESS),
		    ("mmu_booke_enter_locked: user pmap, non user va"));
	}
	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
	    (m->oflags & VPO_BUSY) != 0 || VM_OBJECT_LOCKED(m->object),
	    ("mmu_booke_enter_locked: page %p is not busy", m));

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/*
	 * If there is an existing mapping, and the physical address has not
	 * changed, this must be a protection or wiring change.
	 */
	if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
	    (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {

		/*
		 * Before actually updating pte->flags we calculate and
		 * prepare its new value in a helper var.
		 */
		flags = pte->flags;
		flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);

		/* Wiring change, just update stats. */
		if (wired) {
			if (!PTE_ISWIRED(pte)) {
				flags |= PTE_WIRED;
				pmap->pm_stats.wired_count++;
			}
		} else {
			if (PTE_ISWIRED(pte)) {
				flags &= ~PTE_WIRED;
				pmap->pm_stats.wired_count--;
			}
		}

		if (prot & VM_PROT_WRITE) {
			/* Add write permissions. */
			flags |= PTE_SW;
			if (!su)
				flags |= PTE_UW;

			if ((flags & PTE_MANAGED) != 0)
				vm_page_flag_set(m, PG_WRITEABLE);
		} else {
			/* Handle modified pages, sense modify status. */

			/*
			 * The PTE_MODIFIED flag could be set by underlying
			 * TLB misses since we last read it (above), possibly
			 * other CPUs could update it so we check in the PTE
			 * directly rather than rely on that saved local flags
			 * copy.
			 */
			if (PTE_ISMODIFIED(pte))
				vm_page_dirty(m);
		}

		if (prot & VM_PROT_EXECUTE) {
			flags |= PTE_SX;
			if (!su)
				flags |= PTE_UX;

			/*
			 * Check existing flags for execute permissions: if we
			 * are turning execute permissions on, icache should
			 * be flushed.
			 */
			if ((pte->flags & (PTE_UX | PTE_SX)) == 0)
				sync++;
		}

		flags &= ~PTE_REFERENCED;

		/*
		 * The new flags value is all calculated -- only now actually
		 * update the PTE.
		 */
		mtx_lock_spin(&tlbivax_mutex);
		tlb_miss_lock();

		tlb0_flush_entry(va);
		pte->flags = flags;

		tlb_miss_unlock();
		mtx_unlock_spin(&tlbivax_mutex);

	} else {
		/*
		 * If there is an existing mapping, but it's for a different
		 * physical address, pte_enter() will delete the old mapping.
		 */
		//if ((pte != NULL) && PTE_ISVALID(pte))
		//	debugf("mmu_booke_enter_locked: replace\n");
		//else
		//	debugf("mmu_booke_enter_locked: new\n");

		/* Now set up the flags and install the new mapping. */
		flags = (PTE_SR | PTE_VALID);
		flags |= PTE_M;

		if (!su)
			flags |= PTE_UR;

		if (prot & VM_PROT_WRITE) {
			flags |= PTE_SW;
			if (!su)
				flags |= PTE_UW;

			if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0)
				vm_page_flag_set(m, PG_WRITEABLE);
		}

		if (prot & VM_PROT_EXECUTE) {
			flags |= PTE_SX;
			if (!su)
				flags |= PTE_UX;
		}

		/* If it's wired, update stats. */
		if (wired) {
			pmap->pm_stats.wired_count++;
			flags |= PTE_WIRED;
		}

		pte_enter(mmu, pmap, m, va, flags);

		/* Flush the real memory from the instruction cache. */
		if (prot & VM_PROT_EXECUTE)
			sync++;
	}

	if (sync && (su || pmap == PCPU_GET(curpmap))) {
		__syncicache((void *)va, PAGE_SIZE);
		sync = 0;
	}
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
static void
mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
    vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	psize = atop(end - start);
	m = m_start;
	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
		    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
		m = TAILQ_NEXT(m, listq);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

static void
mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	mmu_booke_enter_locked(mmu, pmap, va, m,
	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

/*
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly rounded to the page size.
 */
static void
mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
{
	pte_t *pte;
	uint8_t hold_flag;

	int su = (pmap == kernel_pmap);

	//debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
	//		su, (u_int32_t)pmap, pmap->pm_tid, va, endva);

	if (su) {
		KASSERT(((va >= virtual_avail) &&
		    (va <= VM_MAX_KERNEL_ADDRESS)),
		    ("mmu_booke_remove: kernel pmap, non kernel va"));
	} else {
		KASSERT((va <= VM_MAXUSER_ADDRESS),
		    ("mmu_booke_remove: user pmap, non user va"));
	}

	if (PMAP_REMOVE_DONE(pmap)) {
		//debugf("mmu_booke_remove: e (empty)\n");
		return;
	}

	hold_flag = PTBL_HOLD_FLAG(pmap);
	//debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	for (; va < endva; va += PAGE_SIZE) {
		pte = pte_find(mmu, pmap, va);
		if ((pte != NULL) && PTE_ISVALID(pte))
			pte_remove(mmu, pmap, va, hold_flag);
	}
	PMAP_UNLOCK(pmap);
	vm_page_unlock_queues();

	//debugf("mmu_booke_remove: e\n");
}

/*
 * Remove physical page from all pmaps in which it resides.
 */
static void
mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
{
	pv_entry_t pv, pvn;
	uint8_t hold_flag;

	vm_page_lock_queues();
	for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
		pvn = TAILQ_NEXT(pv, pv_link);

		PMAP_LOCK(pv->pv_pmap);
		hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
		pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_flag_clear(m, PG_WRITEABLE);
	vm_page_unlock_queues();
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 */
static vm_offset_t
mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
    vm_offset_t pa_end, int prot)
{
	vm_offset_t sva = *virt;
	vm_offset_t va = sva;

	//debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n",
	//		sva, pa_start, pa_end);

	while (pa_start < pa_end) {
		mmu_booke_kenter(mmu, va, pa_start);
		va += PAGE_SIZE;
		pa_start += PAGE_SIZE;
	}
	*virt = va;

	//debugf("mmu_booke_map: e (va = 0x%08x)\n", va);
	return (sva);
}
1832
1833/*
1834 * The pmap must be activated before its address space can be accessed in
1835 * any way.
1836 */
1837static void
1838mmu_booke_activate(mmu_t mmu, struct thread *td)
1839{
1840	pmap_t pmap;
1841
1842	pmap = &td->td_proc->p_vmspace->vm_pmap;
1843
1844	CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)",
1845	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
1846
1847	KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));
1848
1849	mtx_lock_spin(&sched_lock);
1850
1851	atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
1852	PCPU_SET(curpmap, pmap);
1853
1854	if (pmap->pm_tid[PCPU_GET(cpuid)] == TID_NONE)
1855		tid_alloc(pmap);
1856
1857	/* Load PID0 register with pmap tid value. */
1858	mtspr(SPR_PID0, pmap->pm_tid[PCPU_GET(cpuid)]);
1859	__asm __volatile("isync");
1860
1861	mtx_unlock_spin(&sched_lock);
1862
1863	CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__,
1864	    pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm);
1865}
1866
1867/*
1868 * Deactivate the specified process's address space.
1869 */
1870static void
1871mmu_booke_deactivate(mmu_t mmu, struct thread *td)
1872{
1873	pmap_t pmap;
1874
1875	pmap = &td->td_proc->p_vmspace->vm_pmap;
1876
1877	CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x",
1878	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
1879
1880	atomic_clear_int(&pmap->pm_active, PCPU_GET(cpumask));
1881	PCPU_SET(curpmap, NULL);
1882}
1883
1884/*
1885 * Copy the range specified by src_addr/len
1886 * from the source map to the range dst_addr/len
1887 * in the destination map.
1888 *
1889 * This routine is only advisory and need not do anything.
1890 */
1891static void
1892mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
1893    vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
1894{
1895
1896}
1897
1898/*
1899 * Set the physical protection on the specified range of this map as requested.
1900 */
1901static void
1902mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
1903    vm_prot_t prot)
1904{
1905	vm_offset_t va;
1906	vm_page_t m;
1907	pte_t *pte;
1908
1909	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1910		mmu_booke_remove(mmu, pmap, sva, eva);
1911		return;
1912	}
1913
1914	if (prot & VM_PROT_WRITE)
1915		return;
1916
1917	vm_page_lock_queues();
1918	PMAP_LOCK(pmap);
1919	for (va = sva; va < eva; va += PAGE_SIZE) {
1920		if ((pte = pte_find(mmu, pmap, va)) != NULL) {
1921			if (PTE_ISVALID(pte)) {
1922				m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1923
1924				mtx_lock_spin(&tlbivax_mutex);
1925				tlb_miss_lock();
1926
1927				/* Handle modified pages. */
1928				if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte))
1929					vm_page_dirty(m);
1930
1931				tlb0_flush_entry(va);
1932				pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
1933
1934				tlb_miss_unlock();
1935				mtx_unlock_spin(&tlbivax_mutex);
1936			}
1937		}
1938	}
1939	PMAP_UNLOCK(pmap);
1940	vm_page_unlock_queues();
1941}
1942
1943/*
1944 * Clear the write and modified bits in each of the given page's mappings.
1945 */
1946static void
1947mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
1948{
1949	pv_entry_t pv;
1950	pte_t *pte;
1951
1952	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
1953	    ("mmu_booke_remove_write: page %p is not managed", m));
1954
1955	/*
1956	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
1957	 * another thread while the object is locked.  Thus, if PG_WRITEABLE
1958	 * is clear, no page table entries need updating.
1959	 */
1960	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1961	if ((m->oflags & VPO_BUSY) == 0 &&
1962	    (m->flags & PG_WRITEABLE) == 0)
1963		return;
1964	vm_page_lock_queues();
1965	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
1966		PMAP_LOCK(pv->pv_pmap);
1967		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
1968			if (PTE_ISVALID(pte)) {
1969				m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1970
1971				mtx_lock_spin(&tlbivax_mutex);
1972				tlb_miss_lock();
1973
1974				/* Handle modified pages. */
1975				if (PTE_ISMODIFIED(pte))
1976					vm_page_dirty(m);
1977
1978				/* Flush mapping from TLB0. */
1979				pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
1980
1981				tlb_miss_unlock();
1982				mtx_unlock_spin(&tlbivax_mutex);
1983			}
1984		}
1985		PMAP_UNLOCK(pv->pv_pmap);
1986	}
1987	vm_page_flag_clear(m, PG_WRITEABLE);
1988	vm_page_unlock_queues();
1989}
1990
1991static void
1992mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
1993{
1994	pte_t *pte;
1995	pmap_t pmap;
1996	vm_page_t m;
1997	vm_offset_t addr;
1998	vm_paddr_t pa;
1999	int active, valid;
2000
2001	va = trunc_page(va);
2002	sz = round_page(sz);
2003
2004	vm_page_lock_queues();
2005	pmap = PCPU_GET(curpmap);
2006	active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
2007	while (sz > 0) {
2008		PMAP_LOCK(pm);
2009		pte = pte_find(mmu, pm, va);
2010		valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
2011		if (valid)
2012			pa = PTE_PA(pte);
2013		PMAP_UNLOCK(pm);
2014		if (valid) {
2015			if (!active) {
2016				/* Create a mapping in the active pmap. */
2017				addr = 0;
2018				m = PHYS_TO_VM_PAGE(pa);
2019				PMAP_LOCK(pmap);
2020				pte_enter(mmu, pmap, m, addr,
2021				    PTE_SR | PTE_VALID | PTE_UR);
2022				__syncicache((void *)addr, PAGE_SIZE);
2023				pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
2024				PMAP_UNLOCK(pmap);
2025			} else
2026				__syncicache((void *)va, PAGE_SIZE);
2027		}
2028		va += PAGE_SIZE;
2029		sz -= PAGE_SIZE;
2030	}
2031	vm_page_unlock_queues();
2032}
2033
2034/*
2035 * Atomically extract and hold the physical page with the given
2036 * pmap and virtual address pair if that mapping permits the given
2037 * protection.
2038 */
2039static vm_page_t
2040mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
2041    vm_prot_t prot)
2042{
2043	pte_t *pte;
2044	vm_page_t m;
2045	uint32_t pte_wbit;
2046	vm_paddr_t pa;
2047
2048	m = NULL;
2049	pa = 0;
2050	PMAP_LOCK(pmap);
2051retry:
2052	pte = pte_find(mmu, pmap, va);
2053	if ((pte != NULL) && PTE_ISVALID(pte)) {
2054		if (pmap == kernel_pmap)
2055			pte_wbit = PTE_SW;
2056		else
2057			pte_wbit = PTE_UW;
2058
2059		if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
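			/*
			 * Editorial note: vm_page_pa_tryrelock() acquires the
			 * page lock for the given physical address and returns
			 * non-zero if it had to drop the pmap lock to do so;
			 * in that case the PTE may be stale and the lookup is
			 * retried.
			 */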
2060			if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
2061				goto retry;
2062			m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2063			vm_page_hold(m);
2064		}
2065	}
2066
2067	PA_UNLOCK_COND(pa);
2068	PMAP_UNLOCK(pmap);
2069	return (m);
2070}
2071
2072/*
2073 * Initialize a vm_page's machine-dependent fields.
2074 */
2075static void
2076mmu_booke_page_init(mmu_t mmu, vm_page_t m)
2077{
2078
2079	TAILQ_INIT(&m->md.pv_list);
2080}
2081
2082/*
2083 * mmu_booke_zero_page_area zeros the specified hardware page by
2084 * mapping it into virtual memory and using bzero to clear
2085 * its contents.
2086 *
2087 * off and size must reside within a single page.
2088 */
2089static void
2090mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
2091{
2092	vm_offset_t va;
2093
2094	/* XXX KASSERT off and size are within a single page? */
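	/*
	 * Editorial sketch of what such an assertion could look like:
	 *
	 *	KASSERT(off >= 0 && off + size <= PAGE_SIZE,
	 *	    ("mmu_booke_zero_page_area: bad off/size"));
	 */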
2095
2096	mtx_lock(&zero_page_mutex);
2097	va = zero_page_va;
2098
2099	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2100	bzero((caddr_t)va + off, size);
2101	mmu_booke_kremove(mmu, va);
2102
2103	mtx_unlock(&zero_page_mutex);
2104}
2105
2106/*
2107 * mmu_booke_zero_page zeros the specified hardware page.
2108 */
2109static void
2110mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
2111{
2112
2113	mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE);
2114}
2115
2116/*
2117 * mmu_booke_copy_page copies the specified (machine independent) page by
2118 * mapping the page into virtual memory and using memcpy to copy the page,
2119 * one machine dependent page at a time.
2120 */
2121static void
2122mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
2123{
2124	vm_offset_t sva, dva;
2125
2126	sva = copy_page_src_va;
2127	dva = copy_page_dst_va;
2128
2129	mtx_lock(&copy_page_mutex);
2130	mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
2131	mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
2132	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
2133	mmu_booke_kremove(mmu, dva);
2134	mmu_booke_kremove(mmu, sva);
2135	mtx_unlock(&copy_page_mutex);
2136}
2137
2138/*
2139 * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
2140 * into virtual memory and using bzero to clear its contents. This is intended
2141 * to be called from the vm_pagezero process only and outside of Giant. No
2142 * lock is required.
2143 */
2144static void
2145mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
2146{
2147	vm_offset_t va;
2148
2149	va = zero_page_idle_va;
2150	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2151	bzero((caddr_t)va, PAGE_SIZE);
2152	mmu_booke_kremove(mmu, va);
2153}
2154
2155/*
2156 * Return whether or not the specified physical page was modified
2157 * in any physical map.
2158 */
2159static boolean_t
2160mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
2161{
2162	pte_t *pte;
2163	pv_entry_t pv;
2164	boolean_t rv;
2165
2166	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
2167	    ("mmu_booke_is_modified: page %p is not managed", m));
2168	rv = FALSE;
2169
2170	/*
2171	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
2172	 * concurrently set while the object is locked.  Thus, if PG_WRITEABLE
2173	 * is clear, no PTEs can be modified.
2174	 */
2175	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2176	if ((m->oflags & VPO_BUSY) == 0 &&
2177	    (m->flags & PG_WRITEABLE) == 0)
2178		return (rv);
2179	vm_page_lock_queues();
2180	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2181		PMAP_LOCK(pv->pv_pmap);
2182		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
2183		    PTE_ISVALID(pte)) {
2184			if (PTE_ISMODIFIED(pte))
2185				rv = TRUE;
2186		}
2187		PMAP_UNLOCK(pv->pv_pmap);
2188		if (rv)
2189			break;
2190	}
2191	vm_page_unlock_queues();
2192	return (rv);
2193}
2194
2195/*
2196 * Return whether or not the specified virtual address is eligible
2197 * for prefault.
2198 */
2199static boolean_t
2200mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
2201{
2202
2203	return (FALSE);
2204}
2205
2206/*
2207 * Return whether or not the specified physical page was referenced
2208 * in any physical map.
2209 */
2210static boolean_t
2211mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
2212{
2213	pte_t *pte;
2214	pv_entry_t pv;
2215	boolean_t rv;
2216
2217	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
2218	    ("mmu_booke_is_referenced: page %p is not managed", m));
2219	rv = FALSE;
2220	vm_page_lock_queues();
2221	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2222		PMAP_LOCK(pv->pv_pmap);
2223		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
2224		    PTE_ISVALID(pte)) {
2225			if (PTE_ISREFERENCED(pte))
2226				rv = TRUE;
2227		}
2228		PMAP_UNLOCK(pv->pv_pmap);
2229		if (rv)
2230			break;
2231	}
2232	vm_page_unlock_queues();
2233	return (rv);
2234}
2235
2236/*
2237 * Clear the modify bits on the specified physical page.
2238 */
2239static void
2240mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
2241{
2242	pte_t *pte;
2243	pv_entry_t pv;
2244
2245	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
2246	    ("mmu_booke_clear_modify: page %p is not managed", m));
2247	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2248	KASSERT((m->oflags & VPO_BUSY) == 0,
2249	    ("mmu_booke_clear_modify: page %p is busy", m));
2250
2251	/*
2252	 * If the page is not PG_WRITEABLE, then no PTEs can be modified.
2253	 * If the object containing the page is locked and the page is not
2254	 * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
2255	 */
2256	if ((m->flags & PG_WRITEABLE) == 0)
2257		return;
2258	vm_page_lock_queues();
2259	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2260		PMAP_LOCK(pv->pv_pmap);
2261		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
2262		    PTE_ISVALID(pte)) {
2263			mtx_lock_spin(&tlbivax_mutex);
2264			tlb_miss_lock();
2265
2266			if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
2267				tlb0_flush_entry(pv->pv_va);
2268				pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
2269				    PTE_REFERENCED);
2270			}
2271
2272			tlb_miss_unlock();
2273			mtx_unlock_spin(&tlbivax_mutex);
2274		}
2275		PMAP_UNLOCK(pv->pv_pmap);
2276	}
2277	vm_page_unlock_queues();
2278}
2279
2280/*
2281 * Return a count of reference bits for a page, clearing those bits.
2282 * It is not necessary for every reference bit to be cleared, but it
2283 * is necessary that 0 only be returned when there are truly no
2284 * reference bits set.
2285 *
2286 * XXX: The exact number of bits to check and clear is a matter that
2287 * should be tested and standardized at some point in the future for
2288 * optimal aging of shared pages.
2289 */
2290static int
2291mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
2292{
2293	pte_t *pte;
2294	pv_entry_t pv;
2295	int count;
2296
2297	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
2298	    ("mmu_booke_ts_referenced: page %p is not managed", m));
2299	count = 0;
2300	vm_page_lock_queues();
2301	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2302		PMAP_LOCK(pv->pv_pmap);
2303		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
2304		    PTE_ISVALID(pte)) {
2305			if (PTE_ISREFERENCED(pte)) {
2306				mtx_lock_spin(&tlbivax_mutex);
2307				tlb_miss_lock();
2308
2309				tlb0_flush_entry(pv->pv_va);
2310				pte->flags &= ~PTE_REFERENCED;
2311
2312				tlb_miss_unlock();
2313				mtx_unlock_spin(&tlbivax_mutex);
2314
2315				if (++count > 4) {
2316					PMAP_UNLOCK(pv->pv_pmap);
2317					break;
2318				}
2319			}
2320		}
2321		PMAP_UNLOCK(pv->pv_pmap);
2322	}
2323	vm_page_unlock_queues();
2324	return (count);
2325}
2326
2327/*
2328 * Clear the reference bit on the specified physical page.
2329 */
2330static void
2331mmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
2332{
2333	pte_t *pte;
2334	pv_entry_t pv;
2335
2336	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
2337	    ("mmu_booke_clear_reference: page %p is not managed", m));
2338	vm_page_lock_queues();
2339	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2340		PMAP_LOCK(pv->pv_pmap);
2341		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
2342		    PTE_ISVALID(pte)) {
2343			if (PTE_ISREFERENCED(pte)) {
2344				mtx_lock_spin(&tlbivax_mutex);
2345				tlb_miss_lock();
2346
2347				tlb0_flush_entry(pv->pv_va);
2348				pte->flags &= ~PTE_REFERENCED;
2349
2350				tlb_miss_unlock();
2351				mtx_unlock_spin(&tlbivax_mutex);
2352			}
2353		}
2354		PMAP_UNLOCK(pv->pv_pmap);
2355	}
2356	vm_page_unlock_queues();
2357}
2358
2359/*
2360 * Change wiring attribute for a map/virtual-address pair.
2361 */
2362static void
2363mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired)
2364{
2365	pte_t *pte;
2366
2367	PMAP_LOCK(pmap);
2368	if ((pte = pte_find(mmu, pmap, va)) != NULL) {
2369		if (wired) {
2370			if (!PTE_ISWIRED(pte)) {
2371				pte->flags |= PTE_WIRED;
2372				pmap->pm_stats.wired_count++;
2373			}
2374		} else {
2375			if (PTE_ISWIRED(pte)) {
2376				pte->flags &= ~PTE_WIRED;
2377				pmap->pm_stats.wired_count--;
2378			}
2379		}
2380	}
2381	PMAP_UNLOCK(pmap);
2382}
2383
2384/*
2385 * Return true if the pmap's pv is one of the first 16 pvs linked to from this
2386 * page.  This count may be changed upwards or downwards in the future; it is
2387 * only necessary that true be returned for a small subset of pmaps for proper
2388 * page aging.
2389 */
2390static boolean_t
2391mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
2392{
2393	pv_entry_t pv;
2394	int loops;
2395	boolean_t rv;
2396
2397	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
2398	    ("mmu_booke_page_exists_quick: page %p is not managed", m));
2399	loops = 0;
2400	rv = FALSE;
2401	vm_page_lock_queues();
2402	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2403		if (pv->pv_pmap == pmap) {
2404			rv = TRUE;
2405			break;
2406		}
2407		if (++loops >= 16)
2408			break;
2409	}
2410	vm_page_unlock_queues();
2411	return (rv);
2412}
2413
2414/*
2415 * Return the number of managed mappings to the given physical page that are
2416 * wired.
2417 */
2418static int
2419mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
2420{
2421	pv_entry_t pv;
2422	pte_t *pte;
2423	int count = 0;
2424
2425	if ((m->flags & PG_FICTITIOUS) != 0)
2426		return (count);
2427	vm_page_lock_queues();
2428	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2429		PMAP_LOCK(pv->pv_pmap);
2430		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
2431			if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
2432				count++;
2433		PMAP_UNLOCK(pv->pv_pmap);
2434	}
2435	vm_page_unlock_queues();
2436	return (count);
2437}
2438
2439static int
2440mmu_booke_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2441{
2442	int i;
2443	vm_offset_t va;
2444
2445	/*
2446	 * This currently does not work for ranges that are not
2447	 * fully contained within a single TLB1 entry.
2448	 */
2449	for (i = 0; i < tlb1_idx; i ++) {
2450		if (tlb1_iomapped(i, pa, size, &va) == 0)
2451			return (0);
2452	}
2453
2454	return (EFAULT);
2455}
2456
2457vm_offset_t
2458mmu_booke_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
2459    vm_size_t *sz)
2460{
2461	vm_paddr_t pa, ppa;
2462	vm_offset_t va;
2463	vm_size_t gran;
2464
2465	/* Raw physical memory dumps don't have a virtual address. */
2466	if (md->md_vaddr == ~0UL) {
2467		/* We always map a 256MB page at 256M. */
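		/*
		 * Editorial example: for md->md_paddr + ofs == 0x22345678 this
		 * computes ppa = 0x20000000 and ofs = 0x02345678, wires a
		 * 256 MB window at VA 0x10000000 onto ppa, and returns
		 * 0x10000000 + 0x02345678 = 0x12345678.
		 */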
2468		gran = 256 * 1024 * 1024;
2469		pa = md->md_paddr + ofs;
2470		ppa = pa & ~(gran - 1);
2471		ofs = pa - ppa;
2472		va = gran;
2473		tlb1_set_entry(va, ppa, gran, _TLB_ENTRY_IO);
2474		if (*sz > (gran - ofs))
2475			*sz = gran - ofs;
2476		return (va + ofs);
2477	}
2478
2479	/* Minidumps are based on virtual memory addresses. */
2480	va = md->md_vaddr + ofs;
2481	if (va >= kernstart + kernsize) {
2482		gran = PAGE_SIZE - (va & PAGE_MASK);
2483		if (*sz > gran)
2484			*sz = gran;
2485	}
2486	return (va);
2487}
2488
2489void
2490mmu_booke_dumpsys_unmap(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
2491    vm_offset_t va)
2492{
2493
2494	/* Raw physical memory dumps don't have a virtual address. */
2495	if (md->md_vaddr == ~0UL) {
2496		tlb1_idx--;
2497		tlb1[tlb1_idx].mas1 = 0;
2498		tlb1[tlb1_idx].mas2 = 0;
2499		tlb1[tlb1_idx].mas3 = 0;
2500		tlb1_write_entry(tlb1_idx);
2501		return;
2502	}
2503
2504	/* Minidumps are based on virtual memory addresses. */
2505	/* Nothing to do... */
2506}
2507
2508struct pmap_md *
2509mmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev)
2510{
2511	static struct pmap_md md;
2512	struct bi_mem_region *mr;
2513	pte_t *pte;
2514	vm_offset_t va;
2515
2516	if (dumpsys_minidump) {
2517		md.md_paddr = ~0UL;	/* Minidumps use virtual addresses. */
2518		if (prev == NULL) {
2519			/* 1st: kernel .data and .bss. */
2520			md.md_index = 1;
2521			md.md_vaddr = trunc_page((uintptr_t)_etext);
2522			md.md_size = round_page((uintptr_t)_end) - md.md_vaddr;
2523			return (&md);
2524		}
2525		switch (prev->md_index) {
2526		case 1:
2527			/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
2528			md.md_index = 2;
2529			md.md_vaddr = data_start;
2530			md.md_size = data_end - data_start;
2531			break;
2532		case 2:
2533			/* 3rd: kernel VM. */
2534			va = prev->md_vaddr + prev->md_size;
2535			/* Find start of next chunk (from va). */
2536			while (va < virtual_end) {
2537				/* Don't dump the buffer cache. */
2538				if (va >= kmi.buffer_sva &&
2539				    va < kmi.buffer_eva) {
2540					va = kmi.buffer_eva;
2541					continue;
2542				}
2543				pte = pte_find(mmu, kernel_pmap, va);
2544				if (pte != NULL && PTE_ISVALID(pte))
2545					break;
2546				va += PAGE_SIZE;
2547			}
2548			if (va < virtual_end) {
2549				md.md_vaddr = va;
2550				va += PAGE_SIZE;
2551				/* Find last page in chunk. */
2552				while (va < virtual_end) {
2553					/* Don't run into the buffer cache. */
2554					if (va == kmi.buffer_sva)
2555						break;
2556					pte = pte_find(mmu, kernel_pmap, va);
2557					if (pte == NULL || !PTE_ISVALID(pte))
2558						break;
2559					va += PAGE_SIZE;
2560				}
2561				md.md_size = va - md.md_vaddr;
2562				break;
2563			}
2564			md.md_index = 3;
2565			/* FALLTHROUGH */
2566		default:
2567			return (NULL);
2568		}
2569	} else { /* Full dumps are based on physical memory regions. */
2570		mr = bootinfo_mr();
2571		if (prev == NULL) {
2572			/* first physical chunk. */
2573			md.md_paddr = mr->mem_base;
2574			md.md_size = mr->mem_size;
2575			md.md_vaddr = ~0UL;
2576			md.md_index = 1;
2577		} else if (md.md_index < bootinfo->bi_mem_reg_no) {
2578			md.md_paddr = mr[md.md_index].mem_base;
2579			md.md_size = mr[md.md_index].mem_size;
2580			md.md_vaddr = ~0UL;
2581			md.md_index++;
2582		} else {
2583			/* There's no next physical chunk. */
2584			return (NULL);
2585		}
2586	}
2587
2588	return (&md);
2589}
2590
2591/*
2592 * Map a set of physical memory pages into the kernel virtual address space.
2593 * Return a pointer to where it is mapped. This routine is intended to be used
2594 * for mapping device memory, NOT real memory.
2595 */
2596static void *
2597mmu_booke_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2598{
2599	void *res;
2600	uintptr_t va;
2601	vm_size_t sz;
2602
2603	va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa);
2604	res = (void *)va;
2605
2606	do {
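		/*
		 * Editorial note: rounding ilog2(size) down to an even value
		 * yields the largest power-of-4 chunk that does not exceed
		 * the remaining size, matching the power-of-4 region sizes a
		 * TLB1 entry can map.
		 */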
2607		sz = 1 << (ilog2(size) & ~1);
2608		if (bootverbose)
2609			printf("Wiring VA=%x to PA=%x (size=%x), "
2610			    "using TLB1[%d]\n", va, pa, sz, tlb1_idx);
2611		tlb1_set_entry(va, pa, sz, _TLB_ENTRY_IO);
2612		size -= sz;
2613		pa += sz;
2614		va += sz;
2615	} while (size > 0);
2616
2617	return (res);
2618}
2619
2620/*
2621 * 'Unmap' a range mapped by mmu_booke_mapdev().
2622 */
2623static void
2624mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
2625{
2626	vm_offset_t base, offset;
2627
2628	/*
2629	 * Unmap only if this is inside kernel virtual space.
2630	 */
2631	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
2632		base = trunc_page(va);
2633		offset = va & PAGE_MASK;
2634		size = roundup(offset + size, PAGE_SIZE);
2635		kmem_free(kernel_map, base, size);
2636	}
2637}
2638
2639/*
2640 * mmu_booke_object_init_pt preloads the ptes for a given object into the
2641 * specified pmap. This eliminates the blast of soft faults on process startup
2642 * and immediately after an mmap.
2643 */
2644static void
2645mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
2646    vm_object_t object, vm_pindex_t pindex, vm_size_t size)
2647{
2648
2649	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2650	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
2651	    ("mmu_booke_object_init_pt: non-device object"));
2652}
2653
2654/*
2655 * Perform the pmap work for mincore.
2656 */
2657static int
2658mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
2659    vm_paddr_t *locked_pa)
2660{
2661
2662	TODO;
2663	return (0);
2664}
2665
2666/**************************************************************************/
2667/* TID handling */
2668/**************************************************************************/
2669
2670/*
2671 * Allocate a TID. If necessary, steal one from someone else.
2672 * The new TID is flushed from the TLB before returning.
2673 */
2674static tlbtid_t
2675tid_alloc(pmap_t pmap)
2676{
2677	tlbtid_t tid;
2678	int thiscpu;
2679
2680	KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));
2681
2682	CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap);
2683
2684	thiscpu = PCPU_GET(cpuid);
2685
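	/*
	 * Editorial note: TIDs are handed out round-robin per CPU; once
	 * tid_next wraps past TID_MAX back to TID_MIN, previously assigned
	 * TIDs start getting stolen below.
	 */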
2686	tid = PCPU_GET(tid_next);
2687	if (tid > TID_MAX)
2688		tid = TID_MIN;
2689	PCPU_SET(tid_next, tid + 1);
2690
2691	/* If we are stealing a TID, clear the relevant pmap's field. */
2692	if (tidbusy[thiscpu][tid] != NULL) {
2693
2694		CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid);
2695
2696		tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;
2697
2698		/* Flush all entries from TLB0 matching this TID. */
2699		tid_flush(tid);
2700	}
2701
2702	tidbusy[thiscpu][tid] = pmap;
2703	pmap->pm_tid[thiscpu] = tid;
2704	__asm __volatile("msync; isync");
2705
2706	CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid,
2707	    PCPU_GET(tid_next));
2708
2709	return (tid);
2710}
2711
2712/**************************************************************************/
2713/* TLB0 handling */
2714/**************************************************************************/
2715
2716static void
2717tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
2718    uint32_t mas7)
2719{
2720	int as;
2721	char desc[3];
2722	tlbtid_t tid;
2723	vm_size_t size;
2724	unsigned int tsize;
2725
2726	desc[2] = '\0';
2727	if (mas1 & MAS1_VALID)
2728		desc[0] = 'V';
2729	else
2730		desc[0] = ' ';
2731
2732	if (mas1 & MAS1_IPROT)
2733		desc[1] = 'P';
2734	else
2735		desc[1] = ' ';
2736
2737	as = (mas1 & MAS1_TS_MASK) ? 1 : 0;
2738	tid = MAS1_GETTID(mas1);
2739
2740	tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
2741	size = 0;
2742	if (tsize)
2743		size = tsize2size(tsize);
2744
2745	debugf("%3d: (%s) [AS=%d] "
2746	    "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x "
2747	    "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n",
2748	    i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7);
2749}
2750
2751/* Convert TLB0 va and way number to tlb0[] table index. */
2752static inline unsigned int
2753tlb0_tableidx(vm_offset_t va, unsigned int way)
2754{
2755	unsigned int idx;
2756
2757	idx = (way * TLB0_ENTRIES_PER_WAY);
2758	idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
2759	return (idx);
2760}
2761
2762/*
2763 * Invalidate TLB0 entry.
2764 */
2765static inline void
2766tlb0_flush_entry(vm_offset_t va)
2767{
2768
2769	CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);
2770
2771	mtx_assert(&tlbivax_mutex, MA_OWNED);
2772
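	/*
	 * Editorial note: tlbivax invalidates the TLB0 entry matching this
	 * effective page number; the tlbsync/msync pair then makes the
	 * invalidation visible to other processors before returning.
	 */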
2773	__asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
2774	__asm __volatile("isync; msync");
2775	__asm __volatile("tlbsync; msync");
2776
2777	CTR1(KTR_PMAP, "%s: e", __func__);
2778}
2779
2780/* Print out contents of the MAS registers for each TLB0 entry */
2781void
2782tlb0_print_tlbentries(void)
2783{
2784	uint32_t mas0, mas1, mas2, mas3, mas7;
2785	int entryidx, way, idx;
2786
2787	debugf("TLB0 entries:\n");
2788	for (way = 0; way < TLB0_WAYS; way ++)
2789		for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {
2790
2791			mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
2792			mtspr(SPR_MAS0, mas0);
2793			__asm __volatile("isync");
2794
2795			mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
2796			mtspr(SPR_MAS2, mas2);
2797
2798			__asm __volatile("isync; tlbre");
2799
2800			mas1 = mfspr(SPR_MAS1);
2801			mas2 = mfspr(SPR_MAS2);
2802			mas3 = mfspr(SPR_MAS3);
2803			mas7 = mfspr(SPR_MAS7);
2804
2805			idx = tlb0_tableidx(mas2, way);
2806			tlb_print_entry(idx, mas1, mas2, mas3, mas7);
2807		}
2808}
2809
2810/**************************************************************************/
2811/* TLB1 handling */
2812/**************************************************************************/
2813
2814/*
2815 * TLB1 mapping notes:
2816 *
2817 * TLB1[0]	CCSRBAR
2818 * TLB1[1]	Kernel text and data.
2819 * TLB1[2-15]	Additional kernel text and data mappings (if required), PCI
2820 *		windows, other devices mappings.
2821 */
2822
2823/*
2824 * Write given entry to TLB1 hardware.
2825 * Use 32 bit pa, clear 4 high-order bits of RPN (mas7).
2826 */
2827static void
2828tlb1_write_entry(unsigned int idx)
2829{
2830	uint32_t mas0, mas7;
2831
2832	//debugf("tlb1_write_entry: s\n");
2833
2834	/* Clear high order RPN bits */
2835	mas7 = 0;
2836
2837	/* Select entry */
2838	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx);
2839	//debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0);
2840
2841	mtspr(SPR_MAS0, mas0);
2842	__asm __volatile("isync");
2843	mtspr(SPR_MAS1, tlb1[idx].mas1);
2844	__asm __volatile("isync");
2845	mtspr(SPR_MAS2, tlb1[idx].mas2);
2846	__asm __volatile("isync");
2847	mtspr(SPR_MAS3, tlb1[idx].mas3);
2848	__asm __volatile("isync");
2849	mtspr(SPR_MAS7, mas7);
2850	__asm __volatile("isync; tlbwe; isync; msync");
2851
2852	//debugf("tlb1_write_entry: e\n");
2853}
2854
2855/*
2856 * Return the largest uint value log such that 2^log <= num.
2857 */
2858static unsigned int
2859ilog2(unsigned int num)
2860{
2861	int lz;
2862
2863	__asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
2864	return (31 - lz);
2865}
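
/*
 * Editorial example: cntlzw counts the leading zero bits of a 32-bit word,
 * so for num = 0x00100000 (1 MB) lz is 11 and ilog2() returns 31 - 11 = 20,
 * i.e. floor(log2(num)).
 */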
2866
2867/*
2868 * Convert TLB TSIZE value to mapped region size.
2869 */
2870static vm_size_t
2871tsize2size(unsigned int tsize)
2872{
2873
2874	/*
2875	 * size = 4^tsize KB
2876	 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
2877	 */
2878
2879	return ((1 << (2 * tsize)) * 1024);
2880}
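
/*
 * Editorial example: tsize2size(7) = 4^7 KB = 16384 KB = 16 MB, which matches
 * (1 << (2 * 7)) * 1024 = 2^24 bytes.
 */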
2881
2882/*
2883 * Convert region size (must be power of 4) to TLB TSIZE value.
2884 */
2885static unsigned int
2886size2tsize(vm_size_t size)
2887{
2888
2889	return (ilog2(size) / 2 - 5);
2890}
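
/*
 * Editorial example: for size = 16 MB, ilog2(size) = 24 and size2tsize()
 * returns 24 / 2 - 5 = 7, the inverse of the tsize2size(7) example above.
 */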
2891
2892/*
2893 * Register permanent kernel mapping in TLB1.
2894 *
2895 * Entries are created starting from index 0 (current free entry is
2896 * kept in tlb1_idx) and are not supposed to be invalidated.
2897 */
2898static int
2899tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size,
2900    uint32_t flags)
2901{
2902	uint32_t ts, tid;
2903	int tsize;
2904
2905	if (tlb1_idx >= TLB1_ENTRIES) {
2906		printf("tlb1_set_entry: TLB1 full!\n");
2907		return (-1);
2908	}
2909
2910	/* Convert size to TSIZE */
2911	tsize = size2tsize(size);
2912
2913	tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK;
2914	/* XXX TS is hard-coded to 0 for now as we only use a single address space. */
2915	ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;
2916
2917	/* XXX LOCK tlb1[] */
2918
2919	tlb1[tlb1_idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
2920	tlb1[tlb1_idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
2921	tlb1[tlb1_idx].mas2 = (va & MAS2_EPN_MASK) | flags;
2922
2923	/* Set supervisor RWX permission bits */
2924	tlb1[tlb1_idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
2925
2926	tlb1_write_entry(tlb1_idx++);
2927
2928	/* XXX UNLOCK tlb1[] */
2929
2930	/*
2931	 * XXX in general TLB1 updates should be propagated between CPUs,
2932	 * since the current design assumes the same TLB1 set-up on all
2933	 * cores.
2934	 */
2935	return (0);
2936}
2937
2938static int
2939tlb1_entry_size_cmp(const void *a, const void *b)
2940{
2941	const vm_size_t *sza;
2942	const vm_size_t *szb;
2943
2944	sza = a;
2945	szb = b;
2946	if (*sza > *szb)
2947		return (-1);
2948	else if (*sza < *szb)
2949		return (1);
2950	else
2951		return (0);
2952}
2953
2954/*
2955 * Map a contiguous RAM region into TLB1 using at most
2956 * KERNEL_REGION_MAX_TLB_ENTRIES entries.
2957 *
2958 * If necessary, round up the size of the last entry, and return the total
2959 * size covered by all allocated entries.
2960 */
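/*
 * Editorial example: for size = 96 MB the loop below picks power-of-4 chunks
 * of 64 MB, 16 MB and 16 MB (assuming KERNEL_REGION_MAX_TLB_ENTRIES >= 3);
 * the chunks are then sorted largest-first, which the code relies on for
 * proper entry address alignment.
 */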
2961vm_size_t
2962tlb1_mapin_region(vm_offset_t va, vm_offset_t pa, vm_size_t size)
2963{
2964	vm_size_t entry_size[KERNEL_REGION_MAX_TLB_ENTRIES];
2965	vm_size_t mapped_size, sz, esz;
2966	unsigned int log;
2967	int i;
2968
2969	CTR4(KTR_PMAP, "%s: region size = 0x%08x va = 0x%08x pa = 0x%08x",
2970	    __func__, size, va, pa);
2971
2972	mapped_size = 0;
2973	sz = size;
2974	memset(entry_size, 0, sizeof(entry_size));
2975
2976	/* Calculate entry sizes. */
2977	for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES && sz > 0; i++) {
2978
2979		/* Largest region that is power of 4 and fits within size */
2980		log = ilog2(sz) / 2;
2981		esz = 1 << (2 * log);
2982
2983		/* If this is last entry cover remaining size. */
2984		if (i ==  KERNEL_REGION_MAX_TLB_ENTRIES - 1) {
2985			while (esz < sz)
2986				esz = esz << 2;
2987		}
2988
2989		entry_size[i] = esz;
2990		mapped_size += esz;
2991		if (esz < sz)
2992			sz -= esz;
2993		else
2994			sz = 0;
2995	}
2996
2997	/* Sort entry sizes, required to get proper entry address alignment. */
2998	qsort(entry_size, KERNEL_REGION_MAX_TLB_ENTRIES,
2999	    sizeof(vm_size_t), tlb1_entry_size_cmp);
3000
3001	/* Load TLB1 entries. */
3002	for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES; i++) {
3003		esz = entry_size[i];
3004		if (!esz)
3005			break;
3006
3007		CTR5(KTR_PMAP, "%s: entry %d: sz  = 0x%08x (va = 0x%08x "
3008		    "pa = 0x%08x)", __func__, tlb1_idx, esz, va, pa);
3009
3010		tlb1_set_entry(va, pa, esz, _TLB_ENTRY_MEM);
3011
3012		va += esz;
3013		pa += esz;
3014	}
3015
3016	CTR3(KTR_PMAP, "%s: mapped size 0x%08x (wasted space 0x%08x)",
3017	    __func__, mapped_size, mapped_size - size);
3018
3019	return (mapped_size);
3020}
3021
3022/*
3023 * TLB1 initialization routine, to be called after the very first
3024 * assembler level setup done in locore.S.
3025 */
3026void
3027tlb1_init(vm_offset_t ccsrbar)
3028{
3029	uint32_t mas0;
3030
3031	/* TLB1[1] is used to map the kernel. Save that entry. */
3032	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(1);
3033	mtspr(SPR_MAS0, mas0);
3034	__asm __volatile("isync; tlbre");
3035
3036	tlb1[1].mas1 = mfspr(SPR_MAS1);
3037	tlb1[1].mas2 = mfspr(SPR_MAS2);
3038	tlb1[1].mas3 = mfspr(SPR_MAS3);
3039
3040	/* Map in CCSRBAR in TLB1[0] */
3041	tlb1_idx = 0;
3042	tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO);
3043	/*
3044	 * Set the next available TLB1 entry index. Note TLB1[1] is reserved
3045	 * for the initial mapping of kernel text+data, which was set up early
3046	 * in locore, so we need to skip this [busy] entry.
3047	 */
3048	tlb1_idx = 2;
3049
3050	/* Setup TLB miss defaults */
3051	set_mas4_defaults();
3052}
3053
3054/*
3055 * Setup MAS4 defaults.
3056 * These values are loaded to MAS0-2 on a TLB miss.
3057 */
3058static void
3059set_mas4_defaults(void)
3060{
3061	uint32_t mas4;
3062
3063	/* Defaults: TLB0, PID0, TSIZED=4K */
3064	mas4 = MAS4_TLBSELD0;
3065	mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
3066#ifdef SMP
3067	mas4 |= MAS4_MD;
3068#endif
3069	mtspr(SPR_MAS4, mas4);
3070	__asm __volatile("isync");
3071}
3072
3073/*
3074 * Print out contents of the MAS registers for each TLB1 entry
3075 */
3076void
3077tlb1_print_tlbentries(void)
3078{
3079	uint32_t mas0, mas1, mas2, mas3, mas7;
3080	int i;
3081
3082	debugf("TLB1 entries:\n");
3083	for (i = 0; i < TLB1_ENTRIES; i++) {
3084
3085		mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
3086		mtspr(SPR_MAS0, mas0);
3087
3088		__asm __volatile("isync; tlbre");
3089
3090		mas1 = mfspr(SPR_MAS1);
3091		mas2 = mfspr(SPR_MAS2);
3092		mas3 = mfspr(SPR_MAS3);
3093		mas7 = mfspr(SPR_MAS7);
3094
3095		tlb_print_entry(i, mas1, mas2, mas3, mas7);
3096	}
3097}
3098
3099/*
3100 * Print out contents of the in-ram tlb1 table.
3101 */
3102void
3103tlb1_print_entries(void)
3104{
3105	int i;
3106
3107	debugf("tlb1[] table entries:\n");
3108	for (i = 0; i < TLB1_ENTRIES; i++)
3109		tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0);
3110}
3111
3112/*
3113 * Return 0 if the physical I/O range is encompassed by one of the
3114 * TLB1 entries, otherwise return the related error code.
3115 */
3116static int
3117tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
3118{
3119	uint32_t prot;
3120	vm_paddr_t pa_start;
3121	vm_paddr_t pa_end;
3122	unsigned int entry_tsize;
3123	vm_size_t entry_size;
3124
3125	*va = (vm_offset_t)NULL;
3126
3127	/* Skip invalid entries */
3128	if (!(tlb1[i].mas1 & MAS1_VALID))
3129		return (EINVAL);
3130
3131	/*
3132	 * The entry must be cache-inhibited, guarded, and r/w
3133	 * so it can function as an i/o page
3134	 */
3135	prot = tlb1[i].mas2 & (MAS2_I | MAS2_G);
3136	if (prot != (MAS2_I | MAS2_G))
3137		return (EPERM);
3138
3139	prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW);
3140	if (prot != (MAS3_SR | MAS3_SW))
3141		return (EPERM);
3142
3143	/* The address should be within the entry range. */
3144	entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
3145	KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));
3146
3147	entry_size = tsize2size(entry_tsize);
3148	pa_start = tlb1[i].mas3 & MAS3_RPN;
3149	pa_end = pa_start + entry_size - 1;
3150
3151	if ((pa < pa_start) || ((pa + size) > pa_end))
3152		return (ERANGE);
3153
3154	/* Return virtual address of this mapping. */
3155	*va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start);
3156	return (0);
3157}
3158