1/*-
2 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
3 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
18 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
19 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
20 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
22 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
23 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 * Some hw specific parts of this pmap were derived from or influenced
27 * by NetBSD's ibm4xx pmap module. More generic code is shared with
28 * a few other pmap modules from the FreeBSD tree.
29 */
30
31 /*
32  * VM layout notes:
33  *
34  * Kernel and user threads run within one common virtual address space
35  * defined by AS=0.
36  *
37  * Virtual address space layout:
38  * -----------------------------
39  * 0x0000_0000 - 0xafff_ffff	: user process
40  * 0xb000_0000 - 0xbfff_ffff	: pmap_mapdev()-ed area (PCI/PCIE etc.)
41  * 0xc000_0000 - 0xc0ff_ffff	: kernel reserved
42  *   0xc000_0000 - data_end	: kernel code+data, env, metadata etc.
43  * 0xc100_0000 - 0xfeef_ffff	: KVA
44  *   0xc100_0000 - 0xc100_3fff : reserved for page zero/copy
45  *   0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs
46  *   0xc200_4000 - 0xc200_8fff : guard page + kstack0
47  *   0xc200_9000 - 0xfeef_ffff	: actual free KVA space
48  * 0xfef0_0000 - 0xffff_ffff	: I/O devices region
49  */
50
51#include <sys/cdefs.h>
52__FBSDID("$FreeBSD: head/sys/powerpc/booke/pmap.c 207437 2010-04-30 15:22:52Z alc $");
53
54#include <sys/types.h>
55#include <sys/param.h>
56#include <sys/malloc.h>
57#include <sys/ktr.h>
58#include <sys/proc.h>
59#include <sys/user.h>
60#include <sys/queue.h>
61#include <sys/systm.h>
62#include <sys/kernel.h>
63#include <sys/msgbuf.h>
64#include <sys/lock.h>
65#include <sys/mutex.h>
66#include <sys/smp.h>
67#include <sys/vmmeter.h>
68
69#include <vm/vm.h>
70#include <vm/vm_page.h>
71#include <vm/vm_kern.h>
72#include <vm/vm_pageout.h>
73#include <vm/vm_extern.h>
74#include <vm/vm_object.h>
75#include <vm/vm_param.h>
76#include <vm/vm_map.h>
77#include <vm/vm_pager.h>
78#include <vm/uma.h>
79
80#include <machine/bootinfo.h>
81#include <machine/cpu.h>
82#include <machine/pcb.h>
83#include <machine/platform.h>
84
85#include <machine/tlb.h>
86#include <machine/spr.h>
87#include <machine/vmparam.h>
88#include <machine/md_var.h>
89#include <machine/mmuvar.h>
90#include <machine/pmap.h>
91#include <machine/pte.h>
92
93#include "mmu_if.h"
94
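/* Define DEBUG (i.e. remove the #undef below) to enable debugf() trace output. */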
95#define DEBUG
96#undef DEBUG
97
98#ifdef  DEBUG
99#define debugf(fmt, args...) printf(fmt, ##args)
100#else
101#define debugf(fmt, args...)
102#endif
103
104#define TODO			panic("%s: not implemented", __func__);
105
106#include "opt_sched.h"
107#ifndef SCHED_4BSD
108#error "e500 only works with SCHED_4BSD which uses a global scheduler lock."
109#endif
110extern struct mtx sched_lock;
111
112extern int dumpsys_minidump;
113
114extern unsigned char _etext[];
115extern unsigned char _end[];
116
117/* Kernel physical load address. */
118extern uint32_t kernload;
119vm_offset_t kernstart;
120vm_size_t kernsize;
121
122/* Message buffer and tables. */
123static vm_offset_t data_start;
124static vm_size_t data_end;
125
126/* Phys/avail memory regions. */
127static struct mem_region *availmem_regions;
128static int availmem_regions_sz;
129static struct mem_region *physmem_regions;
130static int physmem_regions_sz;
131
132/* Reserved KVA space and mutex for mmu_booke_zero_page. */
133static vm_offset_t zero_page_va;
134static struct mtx zero_page_mutex;
135
136static struct mtx tlbivax_mutex;
137
138/*
139 * Reserved KVA space for mmu_booke_zero_page_idle. This is used
140 * by the idle thread only, so no lock is required.
141 */
142static vm_offset_t zero_page_idle_va;
143
144/* Reserved KVA space and mutex for mmu_booke_copy_page. */
145static vm_offset_t copy_page_src_va;
146static vm_offset_t copy_page_dst_va;
147static struct mtx copy_page_mutex;
148
149/**************************************************************************/
150/* PMAP */
151/**************************************************************************/
152
153static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
154    vm_prot_t, boolean_t);
155
156unsigned int kptbl_min;		/* Index of the first kernel ptbl. */
157unsigned int kernel_ptbls;	/* Number of KVA ptbls. */
158
159/*
160 * If a user pmap is processed with mmu_booke_remove and the resident count
161 * drops to 0, there are no more pages to remove, so we need not continue.
162 */
163#define PMAP_REMOVE_DONE(pmap) \
164	((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)
165
166extern void tlb_lock(uint32_t *);
167extern void tlb_unlock(uint32_t *);
168extern void tid_flush(tlbtid_t);
169
170/**************************************************************************/
171/* TLB and TID handling */
172/**************************************************************************/
173
174/* Translation ID busy table */
175static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];
176
177/*
178 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
179 * core revisions and should be read from h/w registers during early config.
180 */
181uint32_t tlb0_entries;
182uint32_t tlb0_ways;
183uint32_t tlb0_entries_per_way;
184
185#define TLB0_ENTRIES		(tlb0_entries)
186#define TLB0_WAYS		(tlb0_ways)
187#define TLB0_ENTRIES_PER_WAY	(tlb0_entries_per_way)
188
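/* e500 cores provide 16 variable-size TLB1 entries. */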
189#define TLB1_ENTRIES 16
190
191/* In-ram copy of the TLB1 */
192static tlb_entry_t tlb1[TLB1_ENTRIES];
193
194/* Next free entry in the TLB1 */
195static unsigned int tlb1_idx;
196
197static tlbtid_t tid_alloc(struct pmap *);
198
199static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);
200
201static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t);
202static void tlb1_write_entry(unsigned int);
203static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
204static vm_size_t tlb1_mapin_region(vm_offset_t, vm_offset_t, vm_size_t);
205
206static vm_size_t tsize2size(unsigned int);
207static unsigned int size2tsize(vm_size_t);
208static unsigned int ilog2(unsigned int);
209
210static void set_mas4_defaults(void);
211
212static inline void tlb0_flush_entry(vm_offset_t);
213static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);
214
215/**************************************************************************/
216/* Page table management */
217/**************************************************************************/
218
219/* Data for the pv entry allocation mechanism */
220static uma_zone_t pvzone;
221static struct vm_object pvzone_obj;
222static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
223
224#define PV_ENTRY_ZONE_MIN	2048	/* min pv entries in uma zone */
225
226#ifndef PMAP_SHPGPERPROC
227#define PMAP_SHPGPERPROC	200
228#endif
229
230static void ptbl_init(void);
231static struct ptbl_buf *ptbl_buf_alloc(void);
232static void ptbl_buf_free(struct ptbl_buf *);
233static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);
234
235static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int);
236static void ptbl_free(mmu_t, pmap_t, unsigned int);
237static void ptbl_hold(mmu_t, pmap_t, unsigned int);
238static int ptbl_unhold(mmu_t, pmap_t, unsigned int);
239
240static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
241static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
242static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t);
243static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
244
245static pv_entry_t pv_alloc(void);
246static void pv_free(pv_entry_t);
247static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
248static void pv_remove(pmap_t, vm_offset_t, vm_page_t);
249
250/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
251#define PTBL_BUFS		(128 * 16)
252
253struct ptbl_buf {
254	TAILQ_ENTRY(ptbl_buf) link;	/* list link */
255	vm_offset_t kva;		/* va of mapping */
256};
257
258/* ptbl free list and a lock used for access synchronization. */
259static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
260static struct mtx ptbl_buf_freelist_lock;
261
262/* Base address of kva space allocated for ptbl bufs. */
263static vm_offset_t ptbl_buf_pool_vabase;
264
265/* Pointer to ptbl_buf structures. */
266static struct ptbl_buf *ptbl_bufs;
267
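/* Called on each AP during SMP startup to replicate the BSP's TLB1 setup. */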
268void pmap_bootstrap_ap(volatile uint32_t *);
269
270/*
271 * Kernel MMU interface
272 */
273static void		mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
274static void		mmu_booke_clear_modify(mmu_t, vm_page_t);
275static void		mmu_booke_clear_reference(mmu_t, vm_page_t);
276static void		mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
277    vm_size_t, vm_offset_t);
278static void		mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
279static void		mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
280    vm_prot_t, boolean_t);
281static void		mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
282    vm_page_t, vm_prot_t);
283static void		mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
284    vm_prot_t);
285static vm_paddr_t	mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
286static vm_page_t	mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
287    vm_prot_t);
288static void		mmu_booke_init(mmu_t);
289static boolean_t	mmu_booke_is_modified(mmu_t, vm_page_t);
290static boolean_t	mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
291static boolean_t	mmu_booke_is_referenced(mmu_t, vm_page_t);
292static boolean_t	mmu_booke_ts_referenced(mmu_t, vm_page_t);
293static vm_offset_t	mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t,
294    int);
295static int		mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t);
296static void		mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
297    vm_object_t, vm_pindex_t, vm_size_t);
298static boolean_t	mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
299static void		mmu_booke_page_init(mmu_t, vm_page_t);
300static int		mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
301static void		mmu_booke_pinit(mmu_t, pmap_t);
302static void		mmu_booke_pinit0(mmu_t, pmap_t);
303static void		mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
304    vm_prot_t);
305static void		mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
306static void		mmu_booke_qremove(mmu_t, vm_offset_t, int);
307static void		mmu_booke_release(mmu_t, pmap_t);
308static void		mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
309static void		mmu_booke_remove_all(mmu_t, vm_page_t);
310static void		mmu_booke_remove_write(mmu_t, vm_page_t);
311static void		mmu_booke_zero_page(mmu_t, vm_page_t);
312static void		mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
313static void		mmu_booke_zero_page_idle(mmu_t, vm_page_t);
314static void		mmu_booke_activate(mmu_t, struct thread *);
315static void		mmu_booke_deactivate(mmu_t, struct thread *);
316static void		mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
317static void		*mmu_booke_mapdev(mmu_t, vm_offset_t, vm_size_t);
318static void		mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
319static vm_offset_t	mmu_booke_kextract(mmu_t, vm_offset_t);
320static void		mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t);
321static void		mmu_booke_kremove(mmu_t, vm_offset_t);
322static boolean_t	mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
323static void		mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
324    vm_size_t);
325static vm_offset_t	mmu_booke_dumpsys_map(mmu_t, struct pmap_md *,
326    vm_size_t, vm_size_t *);
327static void		mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *,
328    vm_size_t, vm_offset_t);
329static struct pmap_md	*mmu_booke_scan_md(mmu_t, struct pmap_md *);
330
331static mmu_method_t mmu_booke_methods[] = {
332	/* pmap dispatcher interface */
333	MMUMETHOD(mmu_change_wiring,	mmu_booke_change_wiring),
334	MMUMETHOD(mmu_clear_modify,	mmu_booke_clear_modify),
335	MMUMETHOD(mmu_clear_reference,	mmu_booke_clear_reference),
336	MMUMETHOD(mmu_copy,		mmu_booke_copy),
337	MMUMETHOD(mmu_copy_page,	mmu_booke_copy_page),
338	MMUMETHOD(mmu_enter,		mmu_booke_enter),
339	MMUMETHOD(mmu_enter_object,	mmu_booke_enter_object),
340	MMUMETHOD(mmu_enter_quick,	mmu_booke_enter_quick),
341	MMUMETHOD(mmu_extract,		mmu_booke_extract),
342	MMUMETHOD(mmu_extract_and_hold,	mmu_booke_extract_and_hold),
343	MMUMETHOD(mmu_init,		mmu_booke_init),
344	MMUMETHOD(mmu_is_modified,	mmu_booke_is_modified),
345	MMUMETHOD(mmu_is_prefaultable,	mmu_booke_is_prefaultable),
346	MMUMETHOD(mmu_is_referenced,	mmu_booke_is_referenced),
347	MMUMETHOD(mmu_ts_referenced,	mmu_booke_ts_referenced),
348	MMUMETHOD(mmu_map,		mmu_booke_map),
349	MMUMETHOD(mmu_mincore,		mmu_booke_mincore),
350	MMUMETHOD(mmu_object_init_pt,	mmu_booke_object_init_pt),
351	MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
352	MMUMETHOD(mmu_page_init,	mmu_booke_page_init),
353	MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
354	MMUMETHOD(mmu_pinit,		mmu_booke_pinit),
355	MMUMETHOD(mmu_pinit0,		mmu_booke_pinit0),
356	MMUMETHOD(mmu_protect,		mmu_booke_protect),
357	MMUMETHOD(mmu_qenter,		mmu_booke_qenter),
358	MMUMETHOD(mmu_qremove,		mmu_booke_qremove),
359	MMUMETHOD(mmu_release,		mmu_booke_release),
360	MMUMETHOD(mmu_remove,		mmu_booke_remove),
361	MMUMETHOD(mmu_remove_all,	mmu_booke_remove_all),
362	MMUMETHOD(mmu_remove_write,	mmu_booke_remove_write),
363	MMUMETHOD(mmu_sync_icache,	mmu_booke_sync_icache),
364	MMUMETHOD(mmu_zero_page,	mmu_booke_zero_page),
365	MMUMETHOD(mmu_zero_page_area,	mmu_booke_zero_page_area),
366	MMUMETHOD(mmu_zero_page_idle,	mmu_booke_zero_page_idle),
367	MMUMETHOD(mmu_activate,		mmu_booke_activate),
368	MMUMETHOD(mmu_deactivate,	mmu_booke_deactivate),
369
370	/* Internal interfaces */
371	MMUMETHOD(mmu_bootstrap,	mmu_booke_bootstrap),
372	MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
373	MMUMETHOD(mmu_mapdev,		mmu_booke_mapdev),
374	MMUMETHOD(mmu_kenter,		mmu_booke_kenter),
375	MMUMETHOD(mmu_kextract,		mmu_booke_kextract),
376/*	MMUMETHOD(mmu_kremove,		mmu_booke_kremove),	*/
377	MMUMETHOD(mmu_unmapdev,		mmu_booke_unmapdev),
378
379	/* dumpsys() support */
380	MMUMETHOD(mmu_dumpsys_map,	mmu_booke_dumpsys_map),
381	MMUMETHOD(mmu_dumpsys_unmap,	mmu_booke_dumpsys_unmap),
382	MMUMETHOD(mmu_scan_md,		mmu_booke_scan_md),
383
384	{ 0, 0 }
385};
386
387static mmu_def_t booke_mmu = {
388	MMU_TYPE_BOOKE,
389	mmu_booke_methods,
390	0
391};
392MMU_DEF(booke_mmu);
393
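/*
 * Lock the TLB miss handler locks of all other CPUs so that their TLB miss
 * handlers cannot walk page tables while we are changing them (SMP only).
 */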
394static inline void
395tlb_miss_lock(void)
396{
397#ifdef SMP
398	struct pcpu *pc;
399
400	if (!smp_started)
401		return;
402
403	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
404		if (pc != pcpup) {
405
406			CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
407			    "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke_tlb_lock);
408
409			KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
410			    ("tlb_miss_lock: tried to lock self"));
411
412			tlb_lock(pc->pc_booke_tlb_lock);
413
414			CTR1(KTR_PMAP, "%s: locked", __func__);
415		}
416	}
417#endif
418}
419
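/* Release the other CPUs' TLB miss locks taken in tlb_miss_lock(). */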
420static inline void
421tlb_miss_unlock(void)
422{
423#ifdef SMP
424	struct pcpu *pc;
425
426	if (!smp_started)
427		return;
428
429	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
430		if (pc != pcpup) {
431			CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
432			    __func__, pc->pc_cpuid);
433
434			tlb_unlock(pc->pc_booke_tlb_lock);
435
436			CTR1(KTR_PMAP, "%s: unlocked", __func__);
437		}
438	}
439#endif
440}
441
442/* Read TLB0 entry count and associativity from the h/w config register. */
443static __inline void
444tlb0_get_tlbconf(void)
445{
446	uint32_t tlb0_cfg;
447
448	tlb0_cfg = mfspr(SPR_TLB0CFG);
449	tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
450	tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
451	tlb0_entries_per_way = tlb0_entries / tlb0_ways;
452}
453
454/* Initialize pool of kva ptbl buffers. */
455static void
456ptbl_init(void)
457{
458	int i;
459
460	CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
461	    (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
462	CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
463	    __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);
464
465	mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
466	TAILQ_INIT(&ptbl_buf_freelist);
467
468	for (i = 0; i < PTBL_BUFS; i++) {
469		ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
470		TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
471	}
472}
473
474/* Get a ptbl_buf from the freelist. */
475static struct ptbl_buf *
476ptbl_buf_alloc(void)
477{
478	struct ptbl_buf *buf;
479
480	mtx_lock(&ptbl_buf_freelist_lock);
481	buf = TAILQ_FIRST(&ptbl_buf_freelist);
482	if (buf != NULL)
483		TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
484	mtx_unlock(&ptbl_buf_freelist_lock);
485
486	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);
487
488	return (buf);
489}
490
491/* Return ptbl buff to free pool. */
492static void
493ptbl_buf_free(struct ptbl_buf *buf)
494{
495
496	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);
497
498	mtx_lock(&ptbl_buf_freelist_lock);
499	TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
500	mtx_unlock(&ptbl_buf_freelist_lock);
501}
502
503/*
504 * Search the pmap's list of ptbl bufs for the given ptbl and free the buf.
505 */
506static void
507ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
508{
509	struct ptbl_buf *pbuf;
510
511	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);
512
513	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
514
515	TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
516		if (pbuf->kva == (vm_offset_t)ptbl) {
517			/* Remove from pmap ptbl buf list. */
518			TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);
519
520			/* Free corresponding ptbl buf. */
521			ptbl_buf_free(pbuf);
522			break;
523		}
524}
525
526/* Allocate page table. */
527static pte_t *
528ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
529{
530	vm_page_t mtbl[PTBL_PAGES];
531	vm_page_t m;
532	struct ptbl_buf *pbuf;
533	unsigned int pidx;
534	pte_t *ptbl;
535	int i;
536
537	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
538	    (pmap == kernel_pmap), pdir_idx);
539
540	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
541	    ("ptbl_alloc: invalid pdir_idx"));
542	KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
543	    ("pte_alloc: valid ptbl entry exists!"));
544
545	pbuf = ptbl_buf_alloc();
546	if (pbuf == NULL)
547		panic("pte_alloc: couldn't alloc kernel virtual memory");
548
549	ptbl = (pte_t *)pbuf->kva;
550
551	CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);
552
553	/* Allocate ptbl pages; this may sleep. */
554	for (i = 0; i < PTBL_PAGES; i++) {
555		pidx = (PTBL_PAGES * pdir_idx) + i;
556		while ((m = vm_page_alloc(NULL, pidx,
557		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
558
559			PMAP_UNLOCK(pmap);
560			vm_page_unlock_queues();
561			VM_WAIT;
562			vm_page_lock_queues();
563			PMAP_LOCK(pmap);
564		}
565		mtbl[i] = m;
566	}
567
568	/* Map allocated pages into kernel_pmap. */
569	mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);
570
571	/* Zero whole ptbl. */
572	bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);
573
574	/* Add pbuf to the pmap ptbl bufs list. */
575	TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);
576
577	return (ptbl);
578}
579
580/* Free ptbl pages and invalidate pdir entry. */
581static void
582ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
583{
584	pte_t *ptbl;
585	vm_paddr_t pa;
586	vm_offset_t va;
587	vm_page_t m;
588	int i;
589
590	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
591	    (pmap == kernel_pmap), pdir_idx);
592
593	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
594	    ("ptbl_free: invalid pdir_idx"));
595
596	ptbl = pmap->pm_pdir[pdir_idx];
597
598	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);
599
600	KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));
601
602	/*
603	 * Invalidate the pdir entry as soon as possible, so that other CPUs
604	 * don't attempt to look up the page tables we are releasing.
605	 */
606	mtx_lock_spin(&tlbivax_mutex);
607	tlb_miss_lock();
608
609	pmap->pm_pdir[pdir_idx] = NULL;
610
611	tlb_miss_unlock();
612	mtx_unlock_spin(&tlbivax_mutex);
613
614	for (i = 0; i < PTBL_PAGES; i++) {
615		va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
616		pa = pte_vatopa(mmu, kernel_pmap, va);
617		m = PHYS_TO_VM_PAGE(pa);
618		vm_page_free_zero(m);
619		atomic_subtract_int(&cnt.v_wire_count, 1);
620		mmu_booke_kremove(mmu, va);
621	}
622
623	ptbl_free_pmap_ptbl(pmap, ptbl);
624}
625
626/*
627 * Decrement ptbl pages hold count and attempt to free ptbl pages.
628 * Called when removing pte entry from ptbl.
629 *
630 * Return 1 if ptbl pages were freed.
631 */
632static int
633ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
634{
635	pte_t *ptbl;
636	vm_paddr_t pa;
637	vm_page_t m;
638	int i;
639
640	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
641	    (pmap == kernel_pmap), pdir_idx);
642
643	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
644	    ("ptbl_unhold: invalid pdir_idx"));
645	KASSERT((pmap != kernel_pmap),
646	    ("ptbl_unhold: unholding kernel ptbl!"));
647
648	ptbl = pmap->pm_pdir[pdir_idx];
649
650	//debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
651	KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
652	    ("ptbl_unhold: non kva ptbl"));
653
654	/* decrement hold count */
655	for (i = 0; i < PTBL_PAGES; i++) {
656		pa = pte_vatopa(mmu, kernel_pmap,
657		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
658		m = PHYS_TO_VM_PAGE(pa);
659		m->wire_count--;
660	}
661
662	/*
663	 * Free ptbl pages if there are no pte entries in this ptbl.
664	 * wire_count has the same value for all ptbl pages, so check the last
665	 * page.
666	 */
667	if (m->wire_count == 0) {
668		ptbl_free(mmu, pmap, pdir_idx);
669
670		//debugf("ptbl_unhold: e (freed ptbl)\n");
671		return (1);
672	}
673
674	return (0);
675}
676
677/*
678 * Increment hold count for ptbl pages. This routine is used when a new pte
679 * entry is being inserted into the ptbl.
680 */
681static void
682ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
683{
684	vm_paddr_t pa;
685	pte_t *ptbl;
686	vm_page_t m;
687	int i;
688
689	CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
690	    pdir_idx);
691
692	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
693	    ("ptbl_hold: invalid pdir_idx"));
694	KASSERT((pmap != kernel_pmap),
695	    ("ptbl_hold: holding kernel ptbl!"));
696
697	ptbl = pmap->pm_pdir[pdir_idx];
698
699	KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
700
701	for (i = 0; i < PTBL_PAGES; i++) {
702		pa = pte_vatopa(mmu, kernel_pmap,
703		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
704		m = PHYS_TO_VM_PAGE(pa);
705		m->wire_count++;
706	}
707}
708
709/* Allocate pv_entry structure. */
710pv_entry_t
711pv_alloc(void)
712{
713	pv_entry_t pv;
714
715	pv_entry_count++;
716	if (pv_entry_count > pv_entry_high_water)
717		pagedaemon_wakeup();
718	pv = uma_zalloc(pvzone, M_NOWAIT);
719
720	return (pv);
721}
722
723/* Free pv_entry structure. */
724static __inline void
725pv_free(pv_entry_t pve)
726{
727
728	pv_entry_count--;
729	uma_zfree(pvzone, pve);
730}
731
732
733/* Allocate and initialize pv_entry structure. */
734static void
735pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
736{
737	pv_entry_t pve;
738
739	//int su = (pmap == kernel_pmap);
740	//debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
741	//	(u_int32_t)pmap, va, (u_int32_t)m);
742
743	pve = pv_alloc();
744	if (pve == NULL)
745		panic("pv_insert: no pv entries!");
746
747	pve->pv_pmap = pmap;
748	pve->pv_va = va;
749
750	/* add to pv_list */
751	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
752	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
753
754	TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);
755
756	//debugf("pv_insert: e\n");
757}
758
759/* Destroy pv entry. */
760static void
761pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
762{
763	pv_entry_t pve;
764
765	//int su = (pmap == kernel_pmap);
766	//debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);
767
768	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
769	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
770
771	/* find pv entry */
772	TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
773		if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
774			/* remove from pv_list */
775			TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
776			if (TAILQ_EMPTY(&m->md.pv_list))
777				vm_page_flag_clear(m, PG_WRITEABLE);
778
779			/* free pv entry struct */
780			pv_free(pve);
781			break;
782		}
783	}
784
785	//debugf("pv_remove: e\n");
786}
787
788/*
789 * Clear the pte entry and try to free the page table page if requested.
790 *
791 * Return 1 if ptbl pages were freed, otherwise return 0.
792 */
793static int
794pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
795{
796	unsigned int pdir_idx = PDIR_IDX(va);
797	unsigned int ptbl_idx = PTBL_IDX(va);
798	vm_page_t m;
799	pte_t *ptbl;
800	pte_t *pte;
801
802	//int su = (pmap == kernel_pmap);
803	//debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
804	//		su, (u_int32_t)pmap, va, flags);
805
806	ptbl = pmap->pm_pdir[pdir_idx];
807	KASSERT(ptbl, ("pte_remove: null ptbl"));
808
809	pte = &ptbl[ptbl_idx];
810
811	if (pte == NULL || !PTE_ISVALID(pte))
812		return (0);
813
814	if (PTE_ISWIRED(pte))
815		pmap->pm_stats.wired_count--;
816
817	/* Handle managed entry. */
818	if (PTE_ISMANAGED(pte)) {
819		/* Get vm_page_t for mapped pte. */
820		m = PHYS_TO_VM_PAGE(PTE_PA(pte));
821
822		if (PTE_ISMODIFIED(pte))
823			vm_page_dirty(m);
824
825		if (PTE_ISREFERENCED(pte))
826			vm_page_flag_set(m, PG_REFERENCED);
827
828		pv_remove(pmap, va, m);
829	}
830
831	mtx_lock_spin(&tlbivax_mutex);
832	tlb_miss_lock();
833
834	tlb0_flush_entry(va);
835	pte->flags = 0;
836	pte->rpn = 0;
837
838	tlb_miss_unlock();
839	mtx_unlock_spin(&tlbivax_mutex);
840
841	pmap->pm_stats.resident_count--;
842
843	if (flags & PTBL_UNHOLD) {
844		//debugf("pte_remove: e (unhold)\n");
845		return (ptbl_unhold(mmu, pmap, pdir_idx));
846	}
847
848	//debugf("pte_remove: e\n");
849	return (0);
850}
851
852/*
853 * Insert PTE for a given page and virtual address.
854 */
855static void
856pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags)
857{
858	unsigned int pdir_idx = PDIR_IDX(va);
859	unsigned int ptbl_idx = PTBL_IDX(va);
860	pte_t *ptbl, *pte;
861
862	CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
863	    pmap == kernel_pmap, pmap, va);
864
865	/* Get the page table pointer. */
866	ptbl = pmap->pm_pdir[pdir_idx];
867
868	if (ptbl == NULL) {
869		/* Allocate page table pages. */
870		ptbl = ptbl_alloc(mmu, pmap, pdir_idx);
871	} else {
872		/*
873		 * Check if there is a valid mapping for the requested
874		 * va; if so, remove it.
875		 */
876		pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
877		if (PTE_ISVALID(pte)) {
878			pte_remove(mmu, pmap, va, PTBL_HOLD);
879		} else {
880			/*
881			 * The pte is not in use; increment the hold
882			 * count for the ptbl pages.
883			 */
884			if (pmap != kernel_pmap)
885				ptbl_hold(mmu, pmap, pdir_idx);
886		}
887	}
888
889	/*
890	 * Insert pv_entry into pv_list for mapped page if part of managed
891	 * memory.
892	 */
893	if ((m->flags & PG_FICTITIOUS) == 0) {
894		if ((m->flags & PG_UNMANAGED) == 0) {
895			flags |= PTE_MANAGED;
896
897			/* Create and insert pv entry. */
898			pv_insert(pmap, va, m);
899		}
900	}
901
902	pmap->pm_stats.resident_count++;
903
904	mtx_lock_spin(&tlbivax_mutex);
905	tlb_miss_lock();
906
907	tlb0_flush_entry(va);
908	if (pmap->pm_pdir[pdir_idx] == NULL) {
909		/*
910		 * If we just allocated a new page table, hook it in
911		 * the pdir.
912		 */
913		pmap->pm_pdir[pdir_idx] = ptbl;
914	}
915	pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
916	pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK;
917	pte->flags |= (PTE_VALID | flags);
918
919	tlb_miss_unlock();
920	mtx_unlock_spin(&tlbivax_mutex);
921}
922
923/* Return the pa for the given pmap/va. */
924static vm_paddr_t
925pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
926{
927	vm_paddr_t pa = 0;
928	pte_t *pte;
929
930	pte = pte_find(mmu, pmap, va);
931	if ((pte != NULL) && PTE_ISVALID(pte))
932		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
933	return (pa);
934}
935
936/* Get a pointer to a PTE in a page table. */
937static pte_t *
938pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
939{
940	unsigned int pdir_idx = PDIR_IDX(va);
941	unsigned int ptbl_idx = PTBL_IDX(va);
942
943	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
944
945	if (pmap->pm_pdir[pdir_idx])
946		return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));
947
948	return (NULL);
949}
950
951/**************************************************************************/
952/* PMAP related */
953/**************************************************************************/
954
955/*
956 * This is called during e500_init, before the system is really initialized.
957 */
958static void
959mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
960{
961	vm_offset_t phys_kernelend;
962	struct mem_region *mp, *mp1;
963	int cnt, i, j;
964	u_int s, e, sz;
965	u_int phys_avail_count;
966	vm_size_t physsz, hwphyssz, kstack0_sz;
967	vm_offset_t kernel_pdir, kstack0, va;
968	vm_paddr_t kstack0_phys;
969	void *dpcpu;
970	pte_t *pte;
971
972	debugf("mmu_booke_bootstrap: entered\n");
973
974	/* Initialize invalidation mutex */
975	mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);
976
977	/* Read TLB0 size and associativity. */
978	tlb0_get_tlbconf();
979
980	/* Align kernel start and end address (kernel image). */
981	kernstart = trunc_page(start);
982	data_start = round_page(kernelend);
983	kernsize = data_start - kernstart;
984
985	data_end = data_start;
986
987	/* Allocate space for the message buffer. */
988	msgbufp = (struct msgbuf *)data_end;
989	data_end += MSGBUF_SIZE;
990	debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp,
991	    data_end);
992
993	data_end = round_page(data_end);
994
995	/* Allocate the dynamic per-cpu area. */
996	dpcpu = (void *)data_end;
997	data_end += DPCPU_SIZE;
998	dpcpu_init(dpcpu, 0);
999
1000	/* Allocate space for ptbl_bufs. */
1001	ptbl_bufs = (struct ptbl_buf *)data_end;
1002	data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
1003	debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs,
1004	    data_end);
1005
1006	data_end = round_page(data_end);
1007
1008	/* Allocate PTE tables for kernel KVA. */
1009	kernel_pdir = data_end;
1010	kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS +
1011	    PDIR_SIZE - 1) / PDIR_SIZE;
1012	data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
1013	debugf(" kernel ptbls: %d\n", kernel_ptbls);
1014	debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end);
1015
1016	debugf(" data_end: 0x%08x\n", data_end);
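	/*
	 * The boot-time TLB1 entry covers the first 16 MB of the kernel. If
	 * the image plus the allocations above spill past 16 MB, round
	 * data_end up to a 4 MB boundary and map the excess with additional
	 * TLB1 entries; otherwise just round data_end up to the 16 MB that
	 * is already mapped.
	 */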
1017	if (data_end - kernstart > 0x1000000) {
1018		data_end = (data_end + 0x3fffff) & ~0x3fffff;
1019		tlb1_mapin_region(kernstart + 0x1000000,
1020		    kernload + 0x1000000, data_end - kernstart - 0x1000000);
1021	} else
1022		data_end = (data_end + 0xffffff) & ~0xffffff;
1023
1024	debugf(" updated data_end: 0x%08x\n", data_end);
1025
1026	kernsize += data_end - data_start;
1027
1028	/*
1029	 * Clear the structures - note we can only do it safely after the
1030	 * possible additional TLB1 translations are in place (above) so that
1031	 * the whole range up to the currently calculated 'data_end' is covered.
1032	 */
1033	memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
1034	memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
1035
1036	/*******************************************************/
1037	/* Set the start and end of kva. */
1038	/*******************************************************/
1039	virtual_avail = round_page(data_end);
1040	virtual_end = VM_MAX_KERNEL_ADDRESS;
1041
1042	/* Allocate KVA space for page zero/copy operations. */
1043	zero_page_va = virtual_avail;
1044	virtual_avail += PAGE_SIZE;
1045	zero_page_idle_va = virtual_avail;
1046	virtual_avail += PAGE_SIZE;
1047	copy_page_src_va = virtual_avail;
1048	virtual_avail += PAGE_SIZE;
1049	copy_page_dst_va = virtual_avail;
1050	virtual_avail += PAGE_SIZE;
1051	debugf("zero_page_va = 0x%08x\n", zero_page_va);
1052	debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va);
1053	debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
1054	debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);
1055
1056	/* Initialize page zero/copy mutexes. */
1057	mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
1058	mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);
1059
1060	/* Allocate KVA space for ptbl bufs. */
1061	ptbl_buf_pool_vabase = virtual_avail;
1062	virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
1063	debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n",
1064	    ptbl_buf_pool_vabase, virtual_avail);
1065
1066	/* Calculate corresponding physical addresses for the kernel region. */
1067	phys_kernelend = kernload + kernsize;
1068	debugf("kernel image and allocated data:\n");
1069	debugf(" kernload    = 0x%08x\n", kernload);
1070	debugf(" kernstart   = 0x%08x\n", kernstart);
1071	debugf(" kernsize    = 0x%08x\n", kernsize);
1072
1073	if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
1074		panic("mmu_booke_bootstrap: phys_avail too small");
1075
1076	/*
1077	 * Remove kernel physical address range from avail regions list. Page
1078	 * align all regions.  Non-page aligned memory isn't very interesting
1079	 * to us.  Also, sort the entries for ascending addresses.
1080	 */
1081
1082	/* Retrieve phys/avail mem regions */
1083	mem_regions(&physmem_regions, &physmem_regions_sz,
1084	    &availmem_regions, &availmem_regions_sz);
1085	sz = 0;
1086	cnt = availmem_regions_sz;
1087	debugf("processing avail regions:\n");
1088	for (mp = availmem_regions; mp->mr_size; mp++) {
1089		s = mp->mr_start;
1090		e = mp->mr_start + mp->mr_size;
1091		debugf(" %08x-%08x -> ", s, e);
1092		/* Check whether this region holds all of the kernel. */
1093		if (s < kernload && e > phys_kernelend) {
1094			availmem_regions[cnt].mr_start = phys_kernelend;
1095			availmem_regions[cnt++].mr_size = e - phys_kernelend;
1096			e = kernload;
1097		}
1098		/* Look whether this region starts within the kernel. */
1099		if (s >= kernload && s < phys_kernelend) {
1100			if (e <= phys_kernelend)
1101				goto empty;
1102			s = phys_kernelend;
1103		}
1104		/* Now look whether this region ends within the kernel. */
1105		if (e > kernload && e <= phys_kernelend) {
1106			if (s >= kernload)
1107				goto empty;
1108			e = kernload;
1109		}
1110		/* Now page align the start and size of the region. */
1111		s = round_page(s);
1112		e = trunc_page(e);
1113		if (e < s)
1114			e = s;
1115		sz = e - s;
1116		debugf("%08x-%08x = %x\n", s, e, sz);
1117
1118		/* Check whether some memory is left here. */
1119		if (sz == 0) {
1120		empty:
1121			memmove(mp, mp + 1,
1122			    (cnt - (mp - availmem_regions)) * sizeof(*mp));
1123			cnt--;
1124			mp--;
1125			continue;
1126		}
1127
1128		/* Do an insertion sort. */
1129		for (mp1 = availmem_regions; mp1 < mp; mp1++)
1130			if (s < mp1->mr_start)
1131				break;
1132		if (mp1 < mp) {
1133			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
1134			mp1->mr_start = s;
1135			mp1->mr_size = sz;
1136		} else {
1137			mp->mr_start = s;
1138			mp->mr_size = sz;
1139		}
1140	}
1141	availmem_regions_sz = cnt;
1142
1143	/*******************************************************/
1144	/* Steal physical memory for kernel stack from the end */
1145	/* of the first avail region                           */
1146	/*******************************************************/
1147	kstack0_sz = KSTACK_PAGES * PAGE_SIZE;
1148	kstack0_phys = availmem_regions[0].mr_start +
1149	    availmem_regions[0].mr_size;
1150	kstack0_phys -= kstack0_sz;
1151	availmem_regions[0].mr_size -= kstack0_sz;
1152
1153	/*******************************************************/
1154	/* Fill in phys_avail table, based on availmem_regions */
1155	/*******************************************************/
1156	phys_avail_count = 0;
1157	physsz = 0;
1158	hwphyssz = 0;
1159	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
1160
1161	debugf("fill in phys_avail:\n");
1162	for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {
1163
1164		debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
1165		    availmem_regions[i].mr_start,
1166		    availmem_regions[i].mr_start +
1167		        availmem_regions[i].mr_size,
1168		    availmem_regions[i].mr_size);
1169
1170		if (hwphyssz != 0 &&
1171		    (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
1172			debugf(" hw.physmem adjust\n");
1173			if (physsz < hwphyssz) {
1174				phys_avail[j] = availmem_regions[i].mr_start;
1175				phys_avail[j + 1] =
1176				    availmem_regions[i].mr_start +
1177				    hwphyssz - physsz;
1178				physsz = hwphyssz;
1179				phys_avail_count++;
1180			}
1181			break;
1182		}
1183
1184		phys_avail[j] = availmem_regions[i].mr_start;
1185		phys_avail[j + 1] = availmem_regions[i].mr_start +
1186		    availmem_regions[i].mr_size;
1187		phys_avail_count++;
1188		physsz += availmem_regions[i].mr_size;
1189	}
1190	physmem = btoc(physsz);
1191
1192	/* Calculate the last available physical address. */
1193	for (i = 0; phys_avail[i + 2] != 0; i += 2)
1194		;
1195	Maxmem = powerpc_btop(phys_avail[i + 1]);
1196
1197	debugf("Maxmem = 0x%08lx\n", Maxmem);
1198	debugf("phys_avail_count = %d\n", phys_avail_count);
1199	debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem,
1200	    physmem);
1201
1202	/*******************************************************/
1203	/* Initialize (statically allocated) kernel pmap. */
1204	/*******************************************************/
1205	PMAP_LOCK_INIT(kernel_pmap);
1206	kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;
1207
1208	debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap);
1209	debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls);
1210	debugf("kernel pdir range: 0x%08x - 0x%08x\n",
1211	    kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1);
1212
1213	/* Initialize kernel pdir */
1214	for (i = 0; i < kernel_ptbls; i++)
1215		kernel_pmap->pm_pdir[kptbl_min + i] =
1216		    (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES));
1217
1218	for (i = 0; i < MAXCPU; i++) {
1219		kernel_pmap->pm_tid[i] = TID_KERNEL;
1220
1221		/* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
1222		tidbusy[i][0] = kernel_pmap;
1223	}
1224
1225	/*
1226	 * Fill in PTEs covering kernel code and data. They are not required
1227	 * for address translation, as this area is covered by static TLB1
1228	 * entries, but are needed for pte_vatopa() to work correctly with
1229	 * kernel area addresses.
1230	 */
1231	for (va = KERNBASE; va < data_end; va += PAGE_SIZE) {
1232		pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
1233		pte->rpn = kernload + (va - KERNBASE);
1234		pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
1235		    PTE_VALID;
1236	}
1237	/* Mark kernel_pmap active on all CPUs */
1238	kernel_pmap->pm_active = ~0;
1239
1240	/*******************************************************/
1241	/* Final setup */
1242	/*******************************************************/
1243
1244	/* Enter kstack0 into kernel map, provide guard page */
1245	kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
1246	thread0.td_kstack = kstack0;
1247	thread0.td_kstack_pages = KSTACK_PAGES;
1248
1249	debugf("kstack_sz = 0x%08x\n", kstack0_sz);
1250	debugf("kstack0_phys at 0x%08x - 0x%08x\n",
1251	    kstack0_phys, kstack0_phys + kstack0_sz);
1252	debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz);
1253
1254	virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
1255	for (i = 0; i < KSTACK_PAGES; i++) {
1256		mmu_booke_kenter(mmu, kstack0, kstack0_phys);
1257		kstack0 += PAGE_SIZE;
1258		kstack0_phys += PAGE_SIZE;
1259	}
1260
1261	debugf("virtual_avail = %08x\n", virtual_avail);
1262	debugf("virtual_end   = %08x\n", virtual_end);
1263
1264	debugf("mmu_booke_bootstrap: exit\n");
1265}
1266
1267void
1268pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
1269{
1270	int i;
1271
1272	/*
1273	 * Finish TLB1 configuration: the BSP already set up its TLB1 and we
1274	 * have the snapshot of its contents in the s/w tlb1[] table, so use
1275 * these values directly to (re)program the AP's TLB1 hardware.
1276	 */
1277	for (i = 0; i < tlb1_idx; i ++) {
1278		/* Skip invalid entries */
1279		if (!(tlb1[i].mas1 & MAS1_VALID))
1280			continue;
1281
1282		tlb1_write_entry(i);
1283	}
1284
1285	set_mas4_defaults();
1286}
1287
1288/*
1289 * Get the physical page address for the given pmap/virtual address.
1290 */
1291static vm_paddr_t
1292mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1293{
1294	vm_paddr_t pa;
1295
1296	PMAP_LOCK(pmap);
1297	pa = pte_vatopa(mmu, pmap, va);
1298	PMAP_UNLOCK(pmap);
1299
1300	return (pa);
1301}
1302
1303/*
1304 * Extract the physical page address associated with the given
1305 * kernel virtual address.
1306 */
1307static vm_paddr_t
1308mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
1309{
1310
1311	return (pte_vatopa(mmu, kernel_pmap, va));
1312}
1313
1314/*
1315 * Initialize the pmap module.
1316 * Called by vm_init, to initialize any structures that the pmap
1317 * system needs to map virtual memory.
1318 */
1319static void
1320mmu_booke_init(mmu_t mmu)
1321{
1322	int shpgperproc = PMAP_SHPGPERPROC;
1323
1324	/*
1325	 * Initialize the address space (zone) for the pv entries.  Set a
1326	 * high water mark so that the system can recover from excessive
1327	 * numbers of pv entries.
1328	 */
1329	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
1330	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
1331
1332	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
1333	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
1334
1335	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
1336	pv_entry_high_water = 9 * (pv_entry_max / 10);
1337
1338	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
1339
1340	/* Pre-fill pvzone with initial number of pv entries. */
1341	uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);
1342
1343	/* Initialize ptbl allocation. */
1344	ptbl_init();
1345}
1346
1347/*
1348 * Map a list of wired pages into kernel virtual address space.  This is
1349 * intended for temporary mappings which do not need page modification or
1350 * references recorded.  Existing mappings in the region are overwritten.
1351 */
1352static void
1353mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
1354{
1355	vm_offset_t va;
1356
1357	va = sva;
1358	while (count-- > 0) {
1359		mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
1360		va += PAGE_SIZE;
1361		m++;
1362	}
1363}
1364
1365/*
1366 * Remove page mappings from kernel virtual address space.  Intended for
1367 * temporary mappings entered by mmu_booke_qenter.
1368 */
1369static void
1370mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
1371{
1372	vm_offset_t va;
1373
1374	va = sva;
1375	while (count-- > 0) {
1376		mmu_booke_kremove(mmu, va);
1377		va += PAGE_SIZE;
1378	}
1379}
1380
1381/*
1382 * Map a wired page into kernel virtual address space.
1383 */
1384static void
1385mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
1386{
1387	unsigned int pdir_idx = PDIR_IDX(va);
1388	unsigned int ptbl_idx = PTBL_IDX(va);
1389	uint32_t flags;
1390	pte_t *pte;
1391
1392	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
1393	    (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));
1394
1395	flags = 0;
1396	flags |= (PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID);
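	/* Mark the mapping memory-coherent (M bit) so it is safe under SMP. */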
1397	flags |= PTE_M;
1398
1399	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);
1400
1401	mtx_lock_spin(&tlbivax_mutex);
1402	tlb_miss_lock();
1403
1404	if (PTE_ISVALID(pte)) {
1405
1406		CTR1(KTR_PMAP, "%s: replacing entry!", __func__);
1407
1408		/* Flush entry from TLB0 */
1409		tlb0_flush_entry(va);
1410	}
1411
1412	pte->rpn = pa & ~PTE_PA_MASK;
1413	pte->flags = flags;
1414
1415	//debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
1416	//		"pa=0x%08x rpn=0x%08x flags=0x%08x\n",
1417	//		pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);
1418
1419	/* Flush the real memory from the instruction cache. */
1420	if ((flags & (PTE_I | PTE_G)) == 0) {
1421		__syncicache((void *)va, PAGE_SIZE);
1422	}
1423
1424	tlb_miss_unlock();
1425	mtx_unlock_spin(&tlbivax_mutex);
1426}
1427
1428/*
1429 * Remove a page from kernel page table.
1430 */
1431static void
1432mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
1433{
1434	unsigned int pdir_idx = PDIR_IDX(va);
1435	unsigned int ptbl_idx = PTBL_IDX(va);
1436	pte_t *pte;
1437
1438//	CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va));
1439
1440	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
1441	    (va <= VM_MAX_KERNEL_ADDRESS)),
1442	    ("mmu_booke_kremove: invalid va"));
1443
1444	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);
1445
1446	if (!PTE_ISVALID(pte)) {
1447
1448		CTR1(KTR_PMAP, "%s: invalid pte", __func__);
1449
1450		return;
1451	}
1452
1453	mtx_lock_spin(&tlbivax_mutex);
1454	tlb_miss_lock();
1455
1456	/* Invalidate entry in TLB0, update PTE. */
1457	tlb0_flush_entry(va);
1458	pte->flags = 0;
1459	pte->rpn = 0;
1460
1461	tlb_miss_unlock();
1462	mtx_unlock_spin(&tlbivax_mutex);
1463}
1464
1465/*
1466 * Initialize pmap associated with process 0.
1467 */
1468static void
1469mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
1470{
1471
1472	mmu_booke_pinit(mmu, pmap);
1473	PCPU_SET(curpmap, pmap);
1474}
1475
1476/*
1477 * Initialize a preallocated and zeroed pmap structure,
1478 * such as one in a vmspace structure.
1479 */
1480static void
1481mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
1482{
1483	int i;
1484
1485	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
1486	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);
1487
1488	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));
1489
1490	PMAP_LOCK_INIT(pmap);
1491	for (i = 0; i < MAXCPU; i++)
1492		pmap->pm_tid[i] = TID_NONE;
1493	pmap->pm_active = 0;
1494	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1495	bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
1496	TAILQ_INIT(&pmap->pm_ptbl_list);
1497}
1498
1499/*
1500 * Release any resources held by the given physical map.
1501 * Called when a pmap initialized by mmu_booke_pinit is being released.
1502 * Should only be called if the map contains no valid mappings.
1503 */
1504static void
1505mmu_booke_release(mmu_t mmu, pmap_t pmap)
1506{
1507
1508	printf("mmu_booke_release: s\n");
1509
1510	KASSERT(pmap->pm_stats.resident_count == 0,
1511	    ("pmap_release: pmap resident count %ld != 0",
1512	    pmap->pm_stats.resident_count));
1513
1514	PMAP_LOCK_DESTROY(pmap);
1515}
1516
1517/*
1518 * Insert the given physical page at the specified virtual address in the
1519 * target physical map with the protection requested. If specified the page
1520 * will be wired down.
1521 */
1522static void
1523mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1524    vm_prot_t prot, boolean_t wired)
1525{
1526
1527	vm_page_lock_queues();
1528	PMAP_LOCK(pmap);
1529	mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired);
1530	vm_page_unlock_queues();
1531	PMAP_UNLOCK(pmap);
1532}
1533
1534static void
1535mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1536    vm_prot_t prot, boolean_t wired)
1537{
1538	pte_t *pte;
1539	vm_paddr_t pa;
1540	uint32_t flags;
1541	int su, sync;
1542
1543	pa = VM_PAGE_TO_PHYS(m);
1544	su = (pmap == kernel_pmap);
1545	sync = 0;
1546
1547	//debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
1548	//		"pa=0x%08x prot=0x%08x wired=%d)\n",
1549	//		(u_int32_t)pmap, su, pmap->pm_tid,
1550	//		(u_int32_t)m, va, pa, prot, wired);
1551
1552	if (su) {
1553		KASSERT(((va >= virtual_avail) &&
1554		    (va <= VM_MAX_KERNEL_ADDRESS)),
1555		    ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
1556	} else {
1557		KASSERT((va <= VM_MAXUSER_ADDRESS),
1558		    ("mmu_booke_enter_locked: user pmap, non user va"));
1559	}
1560
1561	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1562
1563	/*
1564	 * If there is an existing mapping, and the physical address has not
1565	 * changed, must be protection or wiring change.
1566	 * changed, this must be a protection or wiring change.
1567	if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
1568	    (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {
1569
1570		/*
1571		 * Before actually updating pte->flags we calculate and
1572		 * prepare its new value in a helper var.
1573		 */
1574		flags = pte->flags;
1575		flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);
1576
1577		/* Wiring change, just update stats. */
1578		if (wired) {
1579			if (!PTE_ISWIRED(pte)) {
1580				flags |= PTE_WIRED;
1581				pmap->pm_stats.wired_count++;
1582			}
1583		} else {
1584			if (PTE_ISWIRED(pte)) {
1585				flags &= ~PTE_WIRED;
1586				pmap->pm_stats.wired_count--;
1587			}
1588		}
1589
1590		if (prot & VM_PROT_WRITE) {
1591			/* Add write permissions. */
1592			flags |= PTE_SW;
1593			if (!su)
1594				flags |= PTE_UW;
1595
1596			vm_page_flag_set(m, PG_WRITEABLE);
1597		} else {
1598			/* Handle modified pages, sense modify status. */
1599
1600			/*
1601			 * The PTE_MODIFIED flag could be set by underlying
1602			 * TLB misses since we last read it (above), and other
1603			 * CPUs could have updated it as well, so check the PTE
1604			 * directly rather than relying on the saved local
1605			 * flags copy.
1606			 */
1607			if (PTE_ISMODIFIED(pte))
1608				vm_page_dirty(m);
1609		}
1610
1611		if (prot & VM_PROT_EXECUTE) {
1612			flags |= PTE_SX;
1613			if (!su)
1614				flags |= PTE_UX;
1615
1616			/*
1617			 * Check existing flags for execute permissions: if we
1618			 * are turning execute permissions on, icache should
1619			 * be flushed.
1620			 */
1621			if ((flags & (PTE_UX | PTE_SX)) == 0)
1622				sync++;
1623		}
1624
1625		flags &= ~PTE_REFERENCED;
1626
1627		/*
1628		 * The new flags value is all calculated -- only now actually
1629		 * update the PTE.
1630		 */
1631		mtx_lock_spin(&tlbivax_mutex);
1632		tlb_miss_lock();
1633
1634		tlb0_flush_entry(va);
1635		pte->flags = flags;
1636
1637		tlb_miss_unlock();
1638		mtx_unlock_spin(&tlbivax_mutex);
1639
1640	} else {
1641		/*
1642		 * If there is an existing mapping, but it's for a different
1643		 * physical address, pte_enter() will delete the old mapping.
1644		 */
1645		//if ((pte != NULL) && PTE_ISVALID(pte))
1646		//	debugf("mmu_booke_enter_locked: replace\n");
1647		//else
1648		//	debugf("mmu_booke_enter_locked: new\n");
1649
1650		/* Now set up the flags and install the new mapping. */
1651		flags = (PTE_SR | PTE_VALID);
1652		flags |= PTE_M;
1653
1654		if (!su)
1655			flags |= PTE_UR;
1656
1657		if (prot & VM_PROT_WRITE) {
1658			flags |= PTE_SW;
1659			if (!su)
1660				flags |= PTE_UW;
1661
1662			vm_page_flag_set(m, PG_WRITEABLE);
1663		}
1664
1665		if (prot & VM_PROT_EXECUTE) {
1666			flags |= PTE_SX;
1667			if (!su)
1668				flags |= PTE_UX;
1669		}
1670
1671		/* If it is wired, update stats. */
1672		if (wired) {
1673			pmap->pm_stats.wired_count++;
1674			flags |= PTE_WIRED;
1675		}
1676
1677		pte_enter(mmu, pmap, m, va, flags);
1678
1679		/* Flush the real memory from the instruction cache. */
1680		if (prot & VM_PROT_EXECUTE)
1681			sync++;
1682	}
1683
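	/*
	 * If an executable mapping was added or upgraded, flush the
	 * instruction cache, but only when the affected address space is the
	 * one currently visible (kernel pmap or the active user pmap).
	 */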
1684	if (sync && (su || pmap == PCPU_GET(curpmap))) {
1685		__syncicache((void *)va, PAGE_SIZE);
1686		sync = 0;
1687	}
1688}
1689
1690/*
1691 * Maps a sequence of resident pages belonging to the same object.
1692 * The sequence begins with the given page m_start.  This page is
1693 * mapped at the given virtual address start.  Each subsequent page is
1694 * mapped at a virtual address that is offset from start by the same
1695 * amount as the page is offset from m_start within the object.  The
1696 * last page in the sequence is the page with the largest offset from
1697 * m_start that can be mapped at a virtual address less than the given
1698 * virtual address end.  Not every virtual page between start and end
1699 * is mapped; only those for which a resident page exists with the
1700 * corresponding offset from m_start are mapped.
1701 */
1702static void
1703mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
1704    vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
1705{
1706	vm_page_t m;
1707	vm_pindex_t diff, psize;
1708
1709	psize = atop(end - start);
1710	m = m_start;
1711	PMAP_LOCK(pmap);
1712	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1713		mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
1714		    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1715		m = TAILQ_NEXT(m, listq);
1716	}
1717	PMAP_UNLOCK(pmap);
1718}
1719
1720static void
1721mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1722    vm_prot_t prot)
1723{
1724
1725	PMAP_LOCK(pmap);
1726	mmu_booke_enter_locked(mmu, pmap, va, m,
1727	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1728	PMAP_UNLOCK(pmap);
1729}
1730
1731/*
1732 * Remove the given range of addresses from the specified map.
1733 *
1734 * It is assumed that the start and end are properly rounded to the page size.
1735 */
1736static void
1737mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
1738{
1739	pte_t *pte;
1740	uint8_t hold_flag;
1741
1742	int su = (pmap == kernel_pmap);
1743
1744	//debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
1745	//		su, (u_int32_t)pmap, pmap->pm_tid, va, endva);
1746
1747	if (su) {
1748		KASSERT(((va >= virtual_avail) &&
1749		    (va <= VM_MAX_KERNEL_ADDRESS)),
1750		    ("mmu_booke_remove: kernel pmap, non kernel va"));
1751	} else {
1752		KASSERT((va <= VM_MAXUSER_ADDRESS),
1753		    ("mmu_booke_remove: user pmap, non user va"));
1754	}
1755
1756	if (PMAP_REMOVE_DONE(pmap)) {
1757		//debugf("mmu_booke_remove: e (empty)\n");
1758		return;
1759	}
1760
1761	hold_flag = PTBL_HOLD_FLAG(pmap);
1762	//debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);
1763
1764	vm_page_lock_queues();
1765	PMAP_LOCK(pmap);
1766	for (; va < endva; va += PAGE_SIZE) {
1767		pte = pte_find(mmu, pmap, va);
1768		if ((pte != NULL) && PTE_ISVALID(pte))
1769			pte_remove(mmu, pmap, va, hold_flag);
1770	}
1771	PMAP_UNLOCK(pmap);
1772	vm_page_unlock_queues();
1773
1774	//debugf("mmu_booke_remove: e\n");
1775}
1776
1777/*
1778 * Remove physical page from all pmaps in which it resides.
1779 */
1780static void
1781mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
1782{
1783	pv_entry_t pv, pvn;
1784	uint8_t hold_flag;
1785
1786	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1787
1788	for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
1789		pvn = TAILQ_NEXT(pv, pv_link);
1790
1791		PMAP_LOCK(pv->pv_pmap);
1792		hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
1793		pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
1794		PMAP_UNLOCK(pv->pv_pmap);
1795	}
1796	vm_page_flag_clear(m, PG_WRITEABLE);
1797}
1798
1799/*
1800 * Map a range of physical addresses into kernel virtual address space.
1801 */
1802static vm_offset_t
1803mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
1804    vm_offset_t pa_end, int prot)
1805{
1806	vm_offset_t sva = *virt;
1807	vm_offset_t va = sva;
1808
1809	//debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n",
1810	//		sva, pa_start, pa_end);
1811
1812	while (pa_start < pa_end) {
1813		mmu_booke_kenter(mmu, va, pa_start);
1814		va += PAGE_SIZE;
1815		pa_start += PAGE_SIZE;
1816	}
1817	*virt = va;
1818
1819	//debugf("mmu_booke_map: e (va = 0x%08x)\n", va);
1820	return (sva);
1821}
1822
1823/*
1824 * The pmap must be activated before its address space can be accessed in any
1825 * way.
1826 */
1827static void
1828mmu_booke_activate(mmu_t mmu, struct thread *td)
1829{
1830	pmap_t pmap;
1831
1832	pmap = &td->td_proc->p_vmspace->vm_pmap;
1833
1834	CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)",
1835	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
1836
1837	KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));
1838
1839	mtx_lock_spin(&sched_lock);
1840
1841	atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
1842	PCPU_SET(curpmap, pmap);
1843
1844	if (pmap->pm_tid[PCPU_GET(cpuid)] == TID_NONE)
1845		tid_alloc(pmap);
1846
1847	/* Load PID0 register with pmap tid value. */
1848	mtspr(SPR_PID0, pmap->pm_tid[PCPU_GET(cpuid)]);
1849	__asm __volatile("isync");
1850
1851	mtx_unlock_spin(&sched_lock);
1852
1853	CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__,
1854	    pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm);
1855}
1856
1857/*
1858 * Deactivate the specified process's address space.
1859 */
1860static void
1861mmu_booke_deactivate(mmu_t mmu, struct thread *td)
1862{
1863	pmap_t pmap;
1864
1865	pmap = &td->td_proc->p_vmspace->vm_pmap;
1866
1867	CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x",
1868	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
1869
1870	atomic_clear_int(&pmap->pm_active, PCPU_GET(cpumask));
1871	PCPU_SET(curpmap, NULL);
1872}
1873
1874/*
1875 * Copy the range specified by src_addr/len
1876 * from the source map to the range dst_addr/len
1877 * in the destination map.
1878 *
1879 * This routine is only advisory and need not do anything.
1880 */
1881static void
1882mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
1883    vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
1884{
1885
1886}
1887
1888/*
1889 * Set the physical protection on the specified range of this map as requested.
1890 */
1891static void
1892mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
1893    vm_prot_t prot)
1894{
1895	vm_offset_t va;
1896	vm_page_t m;
1897	pte_t *pte;
1898
1899	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1900		mmu_booke_remove(mmu, pmap, sva, eva);
1901		return;
1902	}
1903
1904	if (prot & VM_PROT_WRITE)
1905		return;
1906
1907	vm_page_lock_queues();
1908	PMAP_LOCK(pmap);
1909	for (va = sva; va < eva; va += PAGE_SIZE) {
1910		if ((pte = pte_find(mmu, pmap, va)) != NULL) {
1911			if (PTE_ISVALID(pte)) {
1912				m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1913
1914				mtx_lock_spin(&tlbivax_mutex);
1915				tlb_miss_lock();
1916
1917				/* Handle modified pages. */
1918				if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte))
1919					vm_page_dirty(m);
1920
1921				tlb0_flush_entry(va);
1922				pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
1923
1924				tlb_miss_unlock();
1925				mtx_unlock_spin(&tlbivax_mutex);
1926			}
1927		}
1928	}
1929	PMAP_UNLOCK(pmap);
1930	vm_page_unlock_queues();
1931}
1932
1933/*
1934 * Clear the write and modified bits in each of the given page's mappings.
1935 */
1936static void
1937mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
1938{
1939	pv_entry_t pv;
1940	pte_t *pte;
1941
1942	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1943	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
1944	    (m->flags & PG_WRITEABLE) == 0)
1945		return;
1946
1947	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
1948		PMAP_LOCK(pv->pv_pmap);
1949		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
1950			if (PTE_ISVALID(pte)) {
1951				m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1952
1953				mtx_lock_spin(&tlbivax_mutex);
1954				tlb_miss_lock();
1955
1956				/* Handle modified pages. */
1957				if (PTE_ISMODIFIED(pte))
1958					vm_page_dirty(m);
1959
1960				/* Flush mapping from TLB0. */
1961				pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);
1962
1963				tlb_miss_unlock();
1964				mtx_unlock_spin(&tlbivax_mutex);
1965			}
1966		}
1967		PMAP_UNLOCK(pv->pv_pmap);
1968	}
1969	vm_page_flag_clear(m, PG_WRITEABLE);
1970}
1971
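/*
 * Synchronize the instruction cache for the given address range. If the
 * target pmap is neither the kernel pmap nor the currently active pmap,
 * each valid page is temporarily entered into the active pmap so that
 * __syncicache() can reach it, and removed again afterwards.
 */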
1972static void
1973mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
1974{
1975	pte_t *pte;
1976	pmap_t pmap;
1977	vm_page_t m;
1978	vm_offset_t addr;
1979	vm_paddr_t pa;
1980	int active, valid;
1981
1982	va = trunc_page(va);
1983	sz = round_page(sz);
1984
1985	vm_page_lock_queues();
1986	pmap = PCPU_GET(curpmap);
1987	active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
1988	while (sz > 0) {
1989		PMAP_LOCK(pm);
1990		pte = pte_find(mmu, pm, va);
1991		valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
1992		if (valid)
1993			pa = PTE_PA(pte);
1994		PMAP_UNLOCK(pm);
1995		if (valid) {
1996			if (!active) {
1997				/* Create a mapping in the active pmap. */
1998				addr = 0;
1999				m = PHYS_TO_VM_PAGE(pa);
2000				PMAP_LOCK(pmap);
2001				pte_enter(mmu, pmap, m, addr,
2002				    PTE_SR | PTE_VALID | PTE_UR);
2003				__syncicache((void *)addr, PAGE_SIZE);
2004				pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
2005				PMAP_UNLOCK(pmap);
2006			} else
2007				__syncicache((void *)va, PAGE_SIZE);
2008		}
2009		va += PAGE_SIZE;
2010		sz -= PAGE_SIZE;
2011	}
2012	vm_page_unlock_queues();
2013}
2014
2015/*
2016 * Atomically extract and hold the physical page with the given
2017 * pmap and virtual address pair if that mapping permits the given
2018 * protection.
2019 */
2020static vm_page_t
2021mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
2022    vm_prot_t prot)
2023{
2024	pte_t *pte;
2025	vm_page_t m;
2026	uint32_t pte_wbit;
2027	vm_paddr_t pa;
2028
2029	m = NULL;
2030	pa = 0;
2031	PMAP_LOCK(pmap);
2032retry:
2033	pte = pte_find(mmu, pmap, va);
2034	if ((pte != NULL) && PTE_ISVALID(pte)) {
2035		if (pmap == kernel_pmap)
2036			pte_wbit = PTE_SW;
2037		else
2038			pte_wbit = PTE_UW;
2039
2040		if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
2041			if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
2042				goto retry;
2043			m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2044			vm_page_hold(m);
2045		}
2046	}
2047
2048	PA_UNLOCK_COND(pa);
2049	PMAP_UNLOCK(pmap);
2050	return (m);
2051}
2052
2053/*
2054 * Initialize a vm_page's machine-dependent fields.
2055 */
2056static void
2057mmu_booke_page_init(mmu_t mmu, vm_page_t m)
2058{
2059
2060	TAILQ_INIT(&m->md.pv_list);
2061}
2062
2063/*
2064 * mmu_booke_zero_page_area zeros the specified hardware page by
2065 * mapping it into virtual memory and using bzero to clear
2066 * its contents.
2067 *
2068 * off and size must reside within a single page.
2069 */
2070static void
2071mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
2072{
2073	vm_offset_t va;
2074
2075	/* XXX KASSERT off and size are within a single page? */
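	/*
	 * Such an assertion could look like the following (sketch only, not
	 * present in the original code):
	 *
	 *	KASSERT(off + size <= PAGE_SIZE,
	 *	    ("mmu_booke_zero_page_area: off/size crosses page boundary"));
	 */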
2076
2077	mtx_lock(&zero_page_mutex);
2078	va = zero_page_va;
2079
2080	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2081	bzero((caddr_t)va + off, size);
2082	mmu_booke_kremove(mmu, va);
2083
2084	mtx_unlock(&zero_page_mutex);
2085}
2086
2087/*
2088 * mmu_booke_zero_page zeros the specified hardware page.
2089 */
2090static void
2091mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
2092{
2093
2094	mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE);
2095}
2096
2097/*
2098 * mmu_booke_copy_page copies the specified (machine independent) page by
2099 * mapping the page into virtual memory and using memcpy to copy the page,
2100 * one machine dependent page at a time.
2101 */
2102static void
2103mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
2104{
2105	vm_offset_t sva, dva;
2106
2107	sva = copy_page_src_va;
2108	dva = copy_page_dst_va;
2109
2110	mtx_lock(&copy_page_mutex);
2111	mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
2112	mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
2113	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
2114	mmu_booke_kremove(mmu, dva);
2115	mmu_booke_kremove(mmu, sva);
2116	mtx_unlock(&copy_page_mutex);
2117}
2118
2119/*
2120 * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
2121 * into virtual memory and using bzero to clear its contents. This is intended
2122 * to be called from the vm_pagezero process only and outside of Giant. No
2123 * lock is required.
2124 */
2125static void
2126mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
2127{
2128	vm_offset_t va;
2129
2130	va = zero_page_idle_va;
2131	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2132	bzero((caddr_t)va, PAGE_SIZE);
2133	mmu_booke_kremove(mmu, va);
2134}
2135
2136/*
2137 * Return whether or not the specified physical page was modified
2138 * in any physical maps.
2139 */
2140static boolean_t
2141mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
2142{
2143	pte_t *pte;
2144	pv_entry_t pv;
2145
2146	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2147	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2148		return (FALSE);
2149
2150	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2151		PMAP_LOCK(pv->pv_pmap);
2152		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2153			if (!PTE_ISVALID(pte))
2154				goto make_sure_to_unlock;
2155
2156			if (PTE_ISMODIFIED(pte)) {
2157				PMAP_UNLOCK(pv->pv_pmap);
2158				return (TRUE);
2159			}
2160		}
2161make_sure_to_unlock:
2162		PMAP_UNLOCK(pv->pv_pmap);
2163	}
2164	return (FALSE);
2165}
2166
2167/*
2168 * Return whether or not the specified virtual address is eligible
2169 * for prefault.
2170 */
2171static boolean_t
2172mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
2173{
2174
2175	return (FALSE);
2176}
2177
2178/*
2179 * Return whether or not the specified physical page was referenced
2180 * in any physical maps.
2181 */
2182static boolean_t
2183mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
2184{
2185	pte_t *pte;
2186	pv_entry_t pv;
2187	boolean_t rv;
2188
2189	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2190	rv = FALSE;
2191	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2192		return (rv);
2193	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2194		PMAP_LOCK(pv->pv_pmap);
2195		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
2196		    PTE_ISVALID(pte))
2197			rv = PTE_ISREFERENCED(pte) ? TRUE : FALSE;
2198		PMAP_UNLOCK(pv->pv_pmap);
2199		if (rv)
2200			break;
2201	}
2202	return (rv);
2203}
2204
2205/*
2206 * Clear the modify bits on the specified physical page.
2207 */
2208static void
2209mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
2210{
2211	pte_t *pte;
2212	pv_entry_t pv;
2213
2214	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2215	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2216		return;
2217
2218	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2219		PMAP_LOCK(pv->pv_pmap);
2220		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2221			if (!PTE_ISVALID(pte))
2222				goto make_sure_to_unlock;
2223
2224			mtx_lock_spin(&tlbivax_mutex);
2225			tlb_miss_lock();
2226
2227			if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
2228				tlb0_flush_entry(pv->pv_va);
2229				pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
2230				    PTE_REFERENCED);
2231			}
2232
2233			tlb_miss_unlock();
2234			mtx_unlock_spin(&tlbivax_mutex);
2235		}
2236make_sure_to_unlock:
2237		PMAP_UNLOCK(pv->pv_pmap);
2238	}
2239}
2240
2241/*
2242 * Return a count of reference bits for a page, clearing those bits.
2243 * It is not necessary for every reference bit to be cleared, but it
2244 * is necessary that 0 only be returned when there are truly no
2245 * reference bits set.
2246 *
2247 * XXX: The exact number of bits to check and clear is a matter that
2248 * should be tested and standardized at some point in the future for
2249 * optimal aging of shared pages.
2250 */
2251static int
2252mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
2253{
2254	pte_t *pte;
2255	pv_entry_t pv;
2256	int count;
2257
2258	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2259	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2260		return (0);
2261
2262	count = 0;
2263	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2264		PMAP_LOCK(pv->pv_pmap);
2265		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2266			if (!PTE_ISVALID(pte))
2267				goto make_sure_to_unlock;
2268
2269			if (PTE_ISREFERENCED(pte)) {
2270				mtx_lock_spin(&tlbivax_mutex);
2271				tlb_miss_lock();
2272
2273				tlb0_flush_entry(pv->pv_va);
2274				pte->flags &= ~PTE_REFERENCED;
2275
2276				tlb_miss_unlock();
2277				mtx_unlock_spin(&tlbivax_mutex);
2278
2279				if (++count > 4) {
2280					PMAP_UNLOCK(pv->pv_pmap);
2281					break;
2282				}
2283			}
2284		}
2285make_sure_to_unlock:
2286		PMAP_UNLOCK(pv->pv_pmap);
2287	}
2288	return (count);
2289}
2290
2291/*
2292 * Clear the reference bit on the specified physical page.
2293 */
2294static void
2295mmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
2296{
2297	pte_t *pte;
2298	pv_entry_t pv;
2299
2300	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2301	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2302		return;
2303
2304	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2305		PMAP_LOCK(pv->pv_pmap);
2306		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2307			if (!PTE_ISVALID(pte))
2308				goto make_sure_to_unlock;
2309
2310			if (PTE_ISREFERENCED(pte)) {
2311				mtx_lock_spin(&tlbivax_mutex);
2312				tlb_miss_lock();
2313
2314				tlb0_flush_entry(pv->pv_va);
2315				pte->flags &= ~PTE_REFERENCED;
2316
2317				tlb_miss_unlock();
2318				mtx_unlock_spin(&tlbivax_mutex);
2319			}
2320		}
2321make_sure_to_unlock:
2322		PMAP_UNLOCK(pv->pv_pmap);
2323	}
2324}
2325
2326/*
2327 * Change wiring attribute for a map/virtual-address pair.
2328 */
2329static void
2330mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired)
2331{
2332	pte_t *pte;
2333
2334	PMAP_LOCK(pmap);
2335	if ((pte = pte_find(mmu, pmap, va)) != NULL) {
2336		if (wired) {
2337			if (!PTE_ISWIRED(pte)) {
2338				pte->flags |= PTE_WIRED;
2339				pmap->pm_stats.wired_count++;
2340			}
2341		} else {
2342			if (PTE_ISWIRED(pte)) {
2343				pte->flags &= ~PTE_WIRED;
2344				pmap->pm_stats.wired_count--;
2345			}
2346		}
2347	}
2348	PMAP_UNLOCK(pmap);
2349}
2350
2351/*
2352 * Return true if the pmap's pv is one of the first 16 pvs linked to from this
2353 * page.  This count may be changed upwards or downwards in the future; it is
2354 * only necessary that true be returned for a small subset of pmaps for proper
2355 * page aging.
2356 */
2357static boolean_t
2358mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
2359{
2360	pv_entry_t pv;
2361	int loops;
2362
2363	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2364	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2365		return (FALSE);
2366
2367	loops = 0;
2368	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2369		if (pv->pv_pmap == pmap)
2370			return (TRUE);
2371
2372		if (++loops >= 16)
2373			break;
2374	}
2375	return (FALSE);
2376}
2377
2378/*
2379 * Return the number of managed mappings to the given physical page that are
2380 * wired.
2381 */
2382static int
2383mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
2384{
2385	pv_entry_t pv;
2386	pte_t *pte;
2387	int count = 0;
2388
2389	if ((m->flags & PG_FICTITIOUS) != 0)
2390		return (count);
2391	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2392
2393	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2394		PMAP_LOCK(pv->pv_pmap);
2395		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
2396			if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
2397				count++;
2398		PMAP_UNLOCK(pv->pv_pmap);
2399	}
2400
2401	return (count);
2402}
2403
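/*
 * Return 0 if the physical range [pa, pa + size) is covered by one of the
 * existing TLB1 I/O mappings (i.e. the device memory is already directly
 * accessible), EFAULT otherwise.
 */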
2404static int
2405mmu_booke_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2406{
2407	int i;
2408	vm_offset_t va;
2409
2410	/*
2411	 * This currently does not work for entries that
2412	 * overlap TLB1 entries.
2413	 */
2414	for (i = 0; i < tlb1_idx; i ++) {
2415		if (tlb1_iomapped(i, pa, size, &va) == 0)
2416			return (0);
2417	}
2418
2419	return (EFAULT);
2420}
2421
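/*
 * Map a chunk of dump memory and return its virtual address. Raw physical
 * dumps get a temporary 256MB TLB1 window; minidump chunks are already
 * mapped, so only the usable size (*sz) may be clipped.
 */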
2422vm_offset_t
2423mmu_booke_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
2424    vm_size_t *sz)
2425{
2426	vm_paddr_t pa, ppa;
2427	vm_offset_t va;
2428	vm_size_t gran;
2429
2430	/* Raw physical memory dumps don't have a virtual address. */
2431	if (md->md_vaddr == ~0UL) {
2432		/* We always map a 256MB page at 256M. */
2433		gran = 256 * 1024 * 1024;
2434		pa = md->md_paddr + ofs;
2435		ppa = pa & ~(gran - 1);
2436		ofs = pa - ppa;
2437		va = gran;
2438		tlb1_set_entry(va, ppa, gran, _TLB_ENTRY_IO);
2439		if (*sz > (gran - ofs))
2440			*sz = gran - ofs;
2441		return (va + ofs);
2442	}
2443
2444	/* Minidumps are based on virtual memory addresses. */
2445	va = md->md_vaddr + ofs;
2446	if (va >= kernstart + kernsize) {
2447		gran = PAGE_SIZE - (va & PAGE_MASK);
2448		if (*sz > gran)
2449			*sz = gran;
2450	}
2451	return (va);
2452}
2453
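/*
 * Tear down a mapping created by mmu_booke_dumpsys_map(). Only the temporary
 * TLB1 window used for raw physical dumps needs to be removed; minidump
 * chunks use existing kernel mappings and require no work.
 */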
2454void
2455mmu_booke_dumpsys_unmap(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
2456    vm_offset_t va)
2457{
2458
2459	/* Raw physical memory dumps don't have a virtual address. */
2460	if (md->md_vaddr == ~0UL) {
2461		tlb1_idx--;
2462		tlb1[tlb1_idx].mas1 = 0;
2463		tlb1[tlb1_idx].mas2 = 0;
2464		tlb1[tlb1_idx].mas3 = 0;
2465		tlb1_write_entry(tlb1_idx);
2466		return;
2467	}
2468
2469	/* Minidumps are based on virtual memory addresses. */
2470	/* Nothing to do... */
2471}
2472
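/*
 * Enumerate the memory chunks to include in a kernel dump. For minidumps the
 * kernel virtual regions are walked (.data/.bss, the bootstrap area, and the
 * valid KVA ranges, skipping the buffer cache); for full dumps the physical
 * memory regions from bootinfo are returned one at a time.
 */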
2473struct pmap_md *
2474mmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev)
2475{
2476	static struct pmap_md md;
2477	struct bi_mem_region *mr;
2478	pte_t *pte;
2479	vm_offset_t va;
2480
2481	if (dumpsys_minidump) {
2482		md.md_paddr = ~0UL;	/* Minidumps use virtual addresses. */
2483		if (prev == NULL) {
2484			/* 1st: kernel .data and .bss. */
2485			md.md_index = 1;
2486			md.md_vaddr = trunc_page((uintptr_t)_etext);
2487			md.md_size = round_page((uintptr_t)_end) - md.md_vaddr;
2488			return (&md);
2489		}
2490		switch (prev->md_index) {
2491		case 1:
2492			/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
2493			md.md_index = 2;
2494			md.md_vaddr = data_start;
2495			md.md_size = data_end - data_start;
2496			break;
2497		case 2:
2498			/* 3rd: kernel VM. */
2499			va = prev->md_vaddr + prev->md_size;
2500			/* Find start of next chunk (from va). */
2501			while (va < virtual_end) {
2502				/* Don't dump the buffer cache. */
2503				if (va >= kmi.buffer_sva &&
2504				    va < kmi.buffer_eva) {
2505					va = kmi.buffer_eva;
2506					continue;
2507				}
2508				pte = pte_find(mmu, kernel_pmap, va);
2509				if (pte != NULL && PTE_ISVALID(pte))
2510					break;
2511				va += PAGE_SIZE;
2512			}
2513			if (va < virtual_end) {
2514				md.md_vaddr = va;
2515				va += PAGE_SIZE;
2516				/* Find last page in chunk. */
2517				while (va < virtual_end) {
2518					/* Don't run into the buffer cache. */
2519					if (va == kmi.buffer_sva)
2520						break;
2521					pte = pte_find(mmu, kernel_pmap, va);
2522					if (pte == NULL || !PTE_ISVALID(pte))
2523						break;
2524					va += PAGE_SIZE;
2525				}
2526				md.md_size = va - md.md_vaddr;
2527				break;
2528			}
2529			md.md_index = 3;
2530			/* FALLTHROUGH */
2531		default:
2532			return (NULL);
2533		}
2534	} else { /* Full dump: walk physical memory regions. */
2535		mr = bootinfo_mr();
2536		if (prev == NULL) {
2537			/* first physical chunk. */
2538			md.md_paddr = mr->mem_base;
2539			md.md_size = mr->mem_size;
2540			md.md_vaddr = ~0UL;
2541			md.md_index = 1;
2542		} else if (md.md_index < bootinfo->bi_mem_reg_no) {
2543			md.md_paddr = mr[md.md_index].mem_base;
2544			md.md_size = mr[md.md_index].mem_size;
2545			md.md_vaddr = ~0UL;
2546			md.md_index++;
2547		} else {
2548			/* There's no next physical chunk. */
2549			return (NULL);
2550		}
2551	}
2552
2553	return (&md);
2554}
2555
2556/*
2557 * Map a set of physical memory pages into the kernel virtual address space.
2558 * Return a pointer to where it is mapped. This routine is intended to be used
2559 * for mapping device memory, NOT real memory.
2560 */
2561static void *
2562mmu_booke_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2563{
2564	void *res;
2565	uintptr_t va;
2566	vm_size_t sz;
2567
2568	va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa);
2569	res = (void *)va;
2570
2571	do {
2572		sz = 1 << (ilog2(size) & ~1);
2573		if (bootverbose)
2574			printf("Wiring VA=%x to PA=%x (size=%x), "
2575			    "using TLB1[%d]\n", va, pa, sz, tlb1_idx);
2576		tlb1_set_entry(va, pa, sz, _TLB_ENTRY_IO);
2577		size -= sz;
2578		pa += sz;
2579		va += sz;
2580	} while (size > 0);
2581
2582	return (res);
2583}
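
/*
 * Since TLB1 entry sizes must be powers of 4, the loop above wires the region
 * using the largest power-of-4 chunks that fit. For example, a 0x120000 byte
 * (1MB + 128KB) region would be covered by three entries of 1MB, 64KB and
 * 64KB.
 */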
2584
2585/*
2586 * 'Unmap' a range mapped by mmu_booke_mapdev().
2587 */
2588static void
2589mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
2590{
2591	vm_offset_t base, offset;
2592
2593	/*
2594	 * Unmap only if this is inside kernel virtual space.
2595	 */
2596	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
2597		base = trunc_page(va);
2598		offset = va & PAGE_MASK;
2599		size = roundup(offset + size, PAGE_SIZE);
2600		kmem_free(kernel_map, base, size);
2601	}
2602}
2603
2604/*
2605 * mmu_booke_object_init_pt preloads the ptes for a given object into the
2606 * specified pmap. This eliminates the blast of soft faults on process startup
2607 * and immediately after an mmap.
2608 */
2609static void
2610mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
2611    vm_object_t object, vm_pindex_t pindex, vm_size_t size)
2612{
2613
2614	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2615	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
2616	    ("mmu_booke_object_init_pt: non-device object"));
2617}
2618
2619/*
2620 * Perform the pmap work for mincore.
2621 */
2622static int
2623mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
2624{
2625
2626	TODO;
2627	return (0);
2628}
2629
2630/**************************************************************************/
2631/* TID handling */
2632/**************************************************************************/
2633
2634/*
2635 * Allocate a TID. If necessary, steal one from someone else.
2636 * The new TID is flushed from the TLB before returning.
2637 */
2638static tlbtid_t
2639tid_alloc(pmap_t pmap)
2640{
2641	tlbtid_t tid;
2642	int thiscpu;
2643
2644	KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));
2645
2646	CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap);
2647
2648	thiscpu = PCPU_GET(cpuid);
2649
2650	tid = PCPU_GET(tid_next);
2651	if (tid > TID_MAX)
2652		tid = TID_MIN;
2653	PCPU_SET(tid_next, tid + 1);
2654
2655	/* If we are stealing TID then clear the relevant pmap's field */
2656	if (tidbusy[thiscpu][tid] != NULL) {
2657
2658		CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid);
2659
2660		tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;
2661
2662		/* Flush all entries from TLB0 matching this TID. */
2663		tid_flush(tid);
2664	}
2665
2666	tidbusy[thiscpu][tid] = pmap;
2667	pmap->pm_tid[thiscpu] = tid;
2668	__asm __volatile("msync; isync");
2669
2670	CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid,
2671	    PCPU_GET(tid_next));
2672
2673	return (tid);
2674}
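
/*
 * TID allocation is round-robin per CPU: tid_next wraps from TID_MAX back to
 * TID_MIN. If the chosen TID still belongs to another pmap, that pmap is
 * dispossessed (its pm_tid entry is reset to TID_NONE) and every TLB0 entry
 * tagged with the TID is flushed, so the victim simply allocates a fresh TID
 * the next time it is activated.
 */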
2675
2676/**************************************************************************/
2677/* TLB0 handling */
2678/**************************************************************************/
2679
2680static void
2681tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
2682    uint32_t mas7)
2683{
2684	int as;
2685	char desc[3];
2686	tlbtid_t tid;
2687	vm_size_t size;
2688	unsigned int tsize;
2689
2690	desc[2] = '\0';
2691	if (mas1 & MAS1_VALID)
2692		desc[0] = 'V';
2693	else
2694		desc[0] = ' ';
2695
2696	if (mas1 & MAS1_IPROT)
2697		desc[1] = 'P';
2698	else
2699		desc[1] = ' ';
2700
2701	as = (mas1 & MAS1_TS_MASK) ? 1 : 0;
2702	tid = MAS1_GETTID(mas1);
2703
2704	tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
2705	size = 0;
2706	if (tsize)
2707		size = tsize2size(tsize);
2708
2709	debugf("%3d: (%s) [AS=%d] "
2710	    "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x "
2711	    "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n",
2712	    i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7);
2713}
2714
2715/* Convert TLB0 va and way number to tlb0[] table index. */
2716static inline unsigned int
2717tlb0_tableidx(vm_offset_t va, unsigned int way)
2718{
2719	unsigned int idx;
2720
2721	idx = (way * TLB0_ENTRIES_PER_WAY);
2722	idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
2723	return (idx);
2724}
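
/*
 * For example, assuming a TLB0 organized as 128 entries per way (as on e500
 * cores), way 2 together with a VA whose entry index bits select set 5 yields
 * idx = 2 * 128 + 5 = 261.
 */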
2725
2726/*
2727 * Invalidate TLB0 entry.
2728 */
2729static inline void
2730tlb0_flush_entry(vm_offset_t va)
2731{
2732
2733	CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);
2734
2735	mtx_assert(&tlbivax_mutex, MA_OWNED);
2736
2737	__asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
2738	__asm __volatile("isync; msync");
2739	__asm __volatile("tlbsync; msync");
2740
2741	CTR1(KTR_PMAP, "%s: e", __func__);
2742}
2743
2744/* Print out contents of the MAS registers for each TLB0 entry */
2745void
2746tlb0_print_tlbentries(void)
2747{
2748	uint32_t mas0, mas1, mas2, mas3, mas7;
2749	int entryidx, way, idx;
2750
2751	debugf("TLB0 entries:\n");
2752	for (way = 0; way < TLB0_WAYS; way ++)
2753		for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {
2754
2755			mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
2756			mtspr(SPR_MAS0, mas0);
2757			__asm __volatile("isync");
2758
2759			mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
2760			mtspr(SPR_MAS2, mas2);
2761
2762			__asm __volatile("isync; tlbre");
2763
2764			mas1 = mfspr(SPR_MAS1);
2765			mas2 = mfspr(SPR_MAS2);
2766			mas3 = mfspr(SPR_MAS3);
2767			mas7 = mfspr(SPR_MAS7);
2768
2769			idx = tlb0_tableidx(mas2, way);
2770			tlb_print_entry(idx, mas1, mas2, mas3, mas7);
2771		}
2772}
2773
2774/**************************************************************************/
2775/* TLB1 handling */
2776/**************************************************************************/
2777
2778/*
2779 * TLB1 mapping notes:
2780 *
2781 * TLB1[0]	CCSRBAR
2782 * TLB1[1]	Kernel text and data.
2783 * TLB1[2-15]	Additional kernel text and data mappings (if required), PCI
2784 *		windows, other devices mappings.
2785 */
2786
2787/*
2788 * Write the given entry to the TLB1 hardware.
2789 * Use a 32-bit pa; the 4 high-order bits of the RPN (MAS7) are cleared.
2790 */
2791static void
2792tlb1_write_entry(unsigned int idx)
2793{
2794	uint32_t mas0, mas7;
2795
2796	//debugf("tlb1_write_entry: s\n");
2797
2798	/* Clear high order RPN bits */
2799	mas7 = 0;
2800
2801	/* Select entry */
2802	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx);
2803	//debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0);
2804
2805	mtspr(SPR_MAS0, mas0);
2806	__asm __volatile("isync");
2807	mtspr(SPR_MAS1, tlb1[idx].mas1);
2808	__asm __volatile("isync");
2809	mtspr(SPR_MAS2, tlb1[idx].mas2);
2810	__asm __volatile("isync");
2811	mtspr(SPR_MAS3, tlb1[idx].mas3);
2812	__asm __volatile("isync");
2813	mtspr(SPR_MAS7, mas7);
2814	__asm __volatile("isync; tlbwe; isync; msync");
2815
2816	//debugf("tlb1_write_entry: e\n");
2817}
2818
2819/*
2820 * Return the largest uint value log such that 2^log <= num.
2821 */
2822static unsigned int
2823ilog2(unsigned int num)
2824{
2825	int lz;
2826
2827	__asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
2828	return (31 - lz);
2829}
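
/*
 * For example, ilog2(0x500000) (5MB): cntlzw returns 9, so the result is
 * 31 - 9 = 22, and indeed 2^22 = 4MB <= 5MB < 2^23.
 */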
2830
2831/*
2832 * Convert TLB TSIZE value to mapped region size.
2833 */
2834static vm_size_t
2835tsize2size(unsigned int tsize)
2836{
2837
2838	/*
2839	 * size = 4^tsize KB
2840	 * size = 4^tsize * 2^10 = 2^(2 * tsize - 10)
2841	 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10) bytes
2842
2843	return ((1 << (2 * tsize)) * 1024);
2844}
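
/*
 * For example, tsize2size(1) = 4KB and tsize2size(7) = 4^7 KB = 16MB,
 * i.e. (1 << 14) * 1024 bytes.
 */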
2845
2846/*
2847 * Convert region size (must be power of 4) to TLB TSIZE value.
2848 */
2849static unsigned int
2850size2tsize(vm_size_t size)
2851{
2852
2853	return (ilog2(size) / 2 - 5);
2854}
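
/*
 * This is the inverse of tsize2size(): e.g. size2tsize(16MB) = 24 / 2 - 5 = 7
 * and size2tsize(4KB) = 12 / 2 - 5 = 1.
 */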
2855
2856/*
2857 * Register a permanent kernel mapping in TLB1.
2858 *
2859 * Entries are created starting from index 0 (current free entry is
2860 * kept in tlb1_idx) and are not supposed to be invalidated.
2861 */
2862static int
2863tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size,
2864    uint32_t flags)
2865{
2866	uint32_t ts, tid;
2867	int tsize;
2868
2869	if (tlb1_idx >= TLB1_ENTRIES) {
2870		printf("tlb1_set_entry: TLB1 full!\n");
2871		return (-1);
2872	}
2873
2874	/* Convert size to TSIZE */
2875	tsize = size2tsize(size);
2876
2877	tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK;
2878	/* XXX TS is hard-coded to 0 for now as we only use a single address space. */
2879	ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;
2880
2881	/* XXX LOCK tlb1[] */
2882
2883	tlb1[tlb1_idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
2884	tlb1[tlb1_idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
2885	tlb1[tlb1_idx].mas2 = (va & MAS2_EPN_MASK) | flags;
2886
2887	/* Set supervisor RWX permission bits */
2888	tlb1[tlb1_idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
2889
2890	tlb1_write_entry(tlb1_idx++);
2891
2892	/* XXX UNLOCK tlb1[] */
2893
2894	/*
2895	 * XXX in general TLB1 updates should be propagated between CPUs,
2896	 * since current design assumes to have the same TLB1 set-up on all
2897	 * cores.
2898	 */
2899	return (0);
2900}
2901
2902static int
2903tlb1_entry_size_cmp(const void *a, const void *b)
2904{
2905	const vm_size_t *sza;
2906	const vm_size_t *szb;
2907
2908	sza = a;
2909	szb = b;
2910	if (*sza > *szb)
2911		return (-1);
2912	else if (*sza < *szb)
2913		return (1);
2914	else
2915		return (0);
2916}
2917
2918/*
2919 * Map in a contiguous RAM region into TLB1 using at most
2920 * KERNEL_REGION_MAX_TLB_ENTRIES entries.
2921 *
2922 * If necessary, round up the last entry size and return the total size
2923 * used by all allocated entries.
2924 */
2925vm_size_t
2926tlb1_mapin_region(vm_offset_t va, vm_offset_t pa, vm_size_t size)
2927{
2928	vm_size_t entry_size[KERNEL_REGION_MAX_TLB_ENTRIES];
2929	vm_size_t mapped_size, sz, esz;
2930	unsigned int log;
2931	int i;
2932
2933	CTR4(KTR_PMAP, "%s: region size = 0x%08x va = 0x%08x pa = 0x%08x",
2934	    __func__, size, va, pa);
2935
2936	mapped_size = 0;
2937	sz = size;
2938	memset(entry_size, 0, sizeof(entry_size));
2939
2940	/* Calculate entry sizes. */
2941	for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES && sz > 0; i++) {
2942
2943		/* Largest region that is power of 4 and fits within size */
2944		log = ilog2(sz) / 2;
2945		esz = 1 << (2 * log);
2946
2947		/* If this is last entry cover remaining size. */
2948		if (i ==  KERNEL_REGION_MAX_TLB_ENTRIES - 1) {
2949			while (esz < sz)
2950				esz = esz << 2;
2951		}
2952
2953		entry_size[i] = esz;
2954		mapped_size += esz;
2955		if (esz < sz)
2956			sz -= esz;
2957		else
2958			sz = 0;
2959	}
2960
2961	/* Sort entry sizes, required to get proper entry address alignment. */
2962	qsort(entry_size, KERNEL_REGION_MAX_TLB_ENTRIES,
2963	    sizeof(vm_size_t), tlb1_entry_size_cmp);
2964
2965	/* Load TLB1 entries. */
2966	for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES; i++) {
2967		esz = entry_size[i];
2968		if (!esz)
2969			break;
2970
2971		CTR5(KTR_PMAP, "%s: entry %d: sz  = 0x%08x (va = 0x%08x "
2972		    "pa = 0x%08x)", __func__, tlb1_idx, esz, va, pa);
2973
2974		tlb1_set_entry(va, pa, esz, _TLB_ENTRY_MEM);
2975
2976		va += esz;
2977		pa += esz;
2978	}
2979
2980	CTR3(KTR_PMAP, "%s: mapped size 0x%08x (wasted space 0x%08x)",
2981	    __func__, mapped_size, mapped_size - size);
2982
2983	return (mapped_size);
2984}
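
/*
 * For example, assuming KERNEL_REGION_MAX_TLB_ENTRIES is 4, a 40MB region is
 * split into 16MB + 16MB + 4MB + 4MB entries, covering the region exactly.
 * Only when the remainder cannot be expressed by the last entry is its size
 * rounded up, which overmaps part of the following physical memory.
 */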
2985
2986/*
2987 * TLB1 initialization routine, to be called after the very first
2988 * assembler level setup done in locore.S.
2989 */
2990void
2991tlb1_init(vm_offset_t ccsrbar)
2992{
2993	uint32_t mas0;
2994
2995	/* TLB1[1] is used to map the kernel. Save that entry. */
2996	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(1);
2997	mtspr(SPR_MAS0, mas0);
2998	__asm __volatile("isync; tlbre");
2999
3000	tlb1[1].mas1 = mfspr(SPR_MAS1);
3001	tlb1[1].mas2 = mfspr(SPR_MAS2);
3002	tlb1[1].mas3 = mfspr(SPR_MAS3);
3003
3004	/* Map in CCSRBAR in TLB1[0] */
3005	tlb1_idx = 0;
3006	tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO);
3007	/*
3008	 * Set the next available TLB1 entry index. Note that TLB1[1] is
3009	 * reserved for the initial mapping of kernel text+data, which was set
3010	 * up early in locore, so we need to skip this [busy] entry.
3011	 */
3012	tlb1_idx = 2;
3013
3014	/* Setup TLB miss defaults */
3015	set_mas4_defaults();
3016}
3017
3018/*
3019 * Setup MAS4 defaults.
3020 * These values are loaded to MAS0-2 on a TLB miss.
3021 */
3022static void
3023set_mas4_defaults(void)
3024{
3025	uint32_t mas4;
3026
3027	/* Defaults: TLB0, PID0, TSIZED=4K */
3028	mas4 = MAS4_TLBSELD0;
3029	mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
3030#ifdef SMP
3031	mas4 |= MAS4_MD;
3032#endif
3033	mtspr(SPR_MAS4, mas4);
3034	__asm __volatile("isync");
3035}
3036
3037/*
3038 * Print out contents of the MAS registers for each TLB1 entry
3039 */
3040void
3041tlb1_print_tlbentries(void)
3042{
3043	uint32_t mas0, mas1, mas2, mas3, mas7;
3044	int i;
3045
3046	debugf("TLB1 entries:\n");
3047	for (i = 0; i < TLB1_ENTRIES; i++) {
3048
3049		mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
3050		mtspr(SPR_MAS0, mas0);
3051
3052		__asm __volatile("isync; tlbre");
3053
3054		mas1 = mfspr(SPR_MAS1);
3055		mas2 = mfspr(SPR_MAS2);
3056		mas3 = mfspr(SPR_MAS3);
3057		mas7 = mfspr(SPR_MAS7);
3058
3059		tlb_print_entry(i, mas1, mas2, mas3, mas7);
3060	}
3061}
3062
3063/*
3064 * Print out contents of the in-ram tlb1 table.
3065 */
3066void
3067tlb1_print_entries(void)
3068{
3069	int i;
3070
3071	debugf("tlb1[] table entries:\n");
3072	for (i = 0; i < TLB1_ENTRIES; i++)
3073		tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0);
3074}
3075
3076/*
3077 * Return 0 if the physical IO range is encompassed by one of the
3078 * TLB1 entries, otherwise return the related error code.
3079 */
3080static int
3081tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
3082{
3083	uint32_t prot;
3084	vm_paddr_t pa_start;
3085	vm_paddr_t pa_end;
3086	unsigned int entry_tsize;
3087	vm_size_t entry_size;
3088
3089	*va = (vm_offset_t)NULL;
3090
3091	/* Skip invalid entries */
3092	if (!(tlb1[i].mas1 & MAS1_VALID))
3093		return (EINVAL);
3094
3095	/*
3096	 * The entry must be cache-inhibited, guarded, and r/w
3097	 * so it can function as an i/o page.
3098	 */
3099	prot = tlb1[i].mas2 & (MAS2_I | MAS2_G);
3100	if (prot != (MAS2_I | MAS2_G))
3101		return (EPERM);
3102
3103	prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW);
3104	if (prot != (MAS3_SR | MAS3_SW))
3105		return (EPERM);
3106
3107	/* The address should be within the entry range. */
3108	entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
3109	KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));
3110
3111	entry_size = tsize2size(entry_tsize);
3112	pa_start = tlb1[i].mas3 & MAS3_RPN;
3113	pa_end = pa_start + entry_size - 1;
3114
3115	if ((pa < pa_start) || ((pa + size) > pa_end))
3116		return (ERANGE);
3117
3118	/* Return virtual address of this mapping. */
3119	*va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start);
3120	return (0);
3121}
3122