/*-
 * Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 */

 /*
  * VM layout notes:
  *
  * Kernel and user threads run within one common virtual address space
  * defined by AS=0.
  *
  * Virtual address space layout:
  * -----------------------------
  * 0x0000_0000 - 0xafff_ffff	: user process
  * 0xb000_0000 - 0xbfff_ffff	: pmap_mapdev()-ed area (PCI/PCIE etc.)
  * 0xc000_0000 - 0xc0ff_ffff	: kernel reserved
  *   0xc000_0000 - data_end	: kernel code+data, env, metadata etc.
  * 0xc100_0000 - 0xfeef_ffff	: KVA
  *   0xc100_0000 - 0xc100_3fff : reserved for page zero/copy
  *   0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs
  *   0xc200_4000 - 0xc200_8fff : guard page + kstack0
  *   0xc200_9000 - 0xfeef_ffff	: actual free KVA space
  * 0xfef0_0000 - 0xffff_ffff	: I/O devices region
  */
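
 /*
  * For orientation, the split above works out to: 2.75 GB of user
  * address space, a 256 MB pmap_mapdev() window, 16 MB of kernel
  * reserved space, roughly 975 MB of actual free KVA
  * (0xfef0_0000 - 0xc200_9000 = 0x3cef_7000) and a 17 MB I/O region.
  */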

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/booke/pmap.c 192067 2009-05-14 00:34:26Z nwhitehorn $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/msgbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/bootinfo.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/platform.h>

#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/vmparam.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#include "mmu_if.h"

#define DEBUG
#undef DEBUG

#ifdef  DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

#define TODO			panic("%s: not implemented", __func__);

#include "opt_sched.h"
#ifndef SCHED_4BSD
#error "e500 only works with SCHED_4BSD which uses a global scheduler lock."
#endif
extern struct mtx sched_lock;

extern int dumpsys_minidump;

extern unsigned char _etext[];
extern unsigned char _end[];

/* Kernel physical load address. */
extern uint32_t kernload;
vm_offset_t kernstart;
vm_size_t kernsize;

/* Message buffer and tables. */
static vm_offset_t data_start;
static vm_size_t data_end;

/* Phys/avail memory regions. */
static struct mem_region *availmem_regions;
static int availmem_regions_sz;
static struct mem_region *physmem_regions;
static int physmem_regions_sz;

/* Reserved KVA space and mutex for mmu_booke_zero_page. */
static vm_offset_t zero_page_va;
static struct mtx zero_page_mutex;

static struct mtx tlbivax_mutex;

/*
 * Reserved KVA space for mmu_booke_zero_page_idle. This is used
 * by the idle thread only; no lock is required.
 */
static vm_offset_t zero_page_idle_va;

/* Reserved KVA space and mutex for mmu_booke_copy_page. */
static vm_offset_t copy_page_src_va;
static vm_offset_t copy_page_dst_va;
static struct mtx copy_page_mutex;

/**************************************************************************/
/* PMAP */
/**************************************************************************/

static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, boolean_t);

unsigned int kptbl_min;		/* Index of the first kernel ptbl. */
unsigned int kernel_ptbls;	/* Number of KVA ptbls. */

static int pagedaemon_waken;

/*
 * If a user pmap is processed with mmu_booke_remove and the resident count
 * drops to 0, there are no more pages to remove, so we need not continue.
 */
#define PMAP_REMOVE_DONE(pmap) \
	((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)
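
/*
 * Intended use (sketch): a removal loop can bail out early once the
 * last resident page of a user pmap is gone; hold_flag here stands for
 * the caller's ptbl hold policy:
 *
 *	for (va = sva; va < eva; va += PAGE_SIZE) {
 *		pte_remove(mmu, pmap, va, hold_flag);
 *		if (PMAP_REMOVE_DONE(pmap))
 *			break;
 *	}
 */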

extern void tlb_lock(uint32_t *);
extern void tlb_unlock(uint32_t *);
extern void tid_flush(tlbtid_t);

/**************************************************************************/
/* TLB and TID handling */
/**************************************************************************/

/* Translation ID busy table */
static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];

/*
 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
 * core revisions and should be read from h/w registers during early config.
 */
uint32_t tlb0_entries;
uint32_t tlb0_ways;
uint32_t tlb0_entries_per_way;

#define TLB0_ENTRIES		(tlb0_entries)
#define TLB0_WAYS		(tlb0_ways)
#define TLB0_ENTRIES_PER_WAY	(tlb0_entries_per_way)

#define TLB1_ENTRIES 16

/* In-ram copy of the TLB1 */
static tlb_entry_t tlb1[TLB1_ENTRIES];

/* Next free entry in the TLB1 */
static unsigned int tlb1_idx;
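
/*
 * Note: on e500 cores TLB1 is a fully associative, 16-entry array of
 * variable-size entries (hence TLB1_ENTRIES above); tlb1[] is kept as
 * an in-ram shadow for bookkeeping, and tlb1_idx acts as a simple bump
 * allocator over the free slots.
 */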

static tlbtid_t tid_alloc(struct pmap *);

static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);

static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t);
static void tlb1_write_entry(unsigned int);
static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
static vm_size_t tlb1_mapin_region(vm_offset_t, vm_offset_t, vm_size_t);

static vm_size_t tsize2size(unsigned int);
static unsigned int size2tsize(vm_size_t);
static unsigned int ilog2(unsigned int);

static void set_mas4_defaults(void);

static inline void tlb0_flush_entry(vm_offset_t);
static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);

/**************************************************************************/
/* Page table management */
/**************************************************************************/

/* Data for the pv entry allocation mechanism */
static uma_zone_t pvzone;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;

#define PV_ENTRY_ZONE_MIN	2048	/* min pv entries in uma zone */

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC	200
#endif

static void ptbl_init(void);
static struct ptbl_buf *ptbl_buf_alloc(void);
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);

static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int);
static void ptbl_free(mmu_t, pmap_t, unsigned int);
static void ptbl_hold(mmu_t, pmap_t, unsigned int);
static int ptbl_unhold(mmu_t, pmap_t, unsigned int);

static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);

static pv_entry_t pv_alloc(void);
static void pv_free(pv_entry_t);
static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
static void pv_remove(pmap_t, vm_offset_t, vm_page_t);

/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
#define PTBL_BUFS		(128 * 16)
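
/*
 * Sizing sketch: PTBL_BUFS is 2048 bufs; assuming PTBL_PAGES == 2 and
 * 4 KB pages (i.e. 1024 eight-byte PTEs per ptbl), the reserved KVA
 * works out to PTBL_BUFS * PTBL_PAGES * PAGE_SIZE = 16 MB, matching
 * the ptbl bufs window in the VM layout notes at the top of the file.
 */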

struct ptbl_buf {
	TAILQ_ENTRY(ptbl_buf) link;	/* list link */
	vm_offset_t kva;		/* va of mapping */
};

/* ptbl free list and a lock used for access synchronization. */
static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
static struct mtx ptbl_buf_freelist_lock;

/* Base address of kva space allocated for ptbl bufs. */
static vm_offset_t ptbl_buf_pool_vabase;

/* Pointer to ptbl_buf structures. */
static struct ptbl_buf *ptbl_bufs;

/*
 * Kernel MMU interface
 */
static void		mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
static void		mmu_booke_clear_modify(mmu_t, vm_page_t);
static void		mmu_booke_clear_reference(mmu_t, vm_page_t);
static void		mmu_booke_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t,
    vm_offset_t);
static void		mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
static void		mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, boolean_t);
static void		mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_page_t, vm_prot_t);
static void		mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t);
static vm_paddr_t	mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
static vm_page_t	mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
    vm_prot_t);
static void		mmu_booke_init(mmu_t);
static boolean_t	mmu_booke_is_modified(mmu_t, vm_page_t);
static boolean_t	mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
static boolean_t	mmu_booke_ts_referenced(mmu_t, vm_page_t);
static vm_offset_t	mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t,
    int);
static int		mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t);
static void		mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
    vm_object_t, vm_pindex_t, vm_size_t);
static boolean_t	mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
static void		mmu_booke_page_init(mmu_t, vm_page_t);
static int		mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
static void		mmu_booke_pinit(mmu_t, pmap_t);
static void		mmu_booke_pinit0(mmu_t, pmap_t);
static void		mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_prot_t);
static void		mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
static void		mmu_booke_qremove(mmu_t, vm_offset_t, int);
static void		mmu_booke_release(mmu_t, pmap_t);
static void		mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
static void		mmu_booke_remove_all(mmu_t, vm_page_t);
static void		mmu_booke_remove_write(mmu_t, vm_page_t);
static void		mmu_booke_zero_page(mmu_t, vm_page_t);
static void		mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
static void		mmu_booke_zero_page_idle(mmu_t, vm_page_t);
static void		mmu_booke_activate(mmu_t, struct thread *);
static void		mmu_booke_deactivate(mmu_t, struct thread *);
static void		mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
static void		*mmu_booke_mapdev(mmu_t, vm_offset_t, vm_size_t);
static void		mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
static vm_offset_t	mmu_booke_kextract(mmu_t, vm_offset_t);
static void		mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t);
static void		mmu_booke_kremove(mmu_t, vm_offset_t);
static boolean_t	mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
static boolean_t	mmu_booke_page_executable(mmu_t, vm_page_t);
static vm_offset_t	mmu_booke_dumpsys_map(mmu_t, struct pmap_md *,
    vm_size_t, vm_size_t *);
static void		mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *,
    vm_size_t, vm_offset_t);
static struct pmap_md	*mmu_booke_scan_md(mmu_t, struct pmap_md *);

static mmu_method_t mmu_booke_methods[] = {
	/* pmap dispatcher interface */
	MMUMETHOD(mmu_change_wiring,	mmu_booke_change_wiring),
	MMUMETHOD(mmu_clear_modify,	mmu_booke_clear_modify),
	MMUMETHOD(mmu_clear_reference,	mmu_booke_clear_reference),
	MMUMETHOD(mmu_copy,		mmu_booke_copy),
	MMUMETHOD(mmu_copy_page,	mmu_booke_copy_page),
	MMUMETHOD(mmu_enter,		mmu_booke_enter),
	MMUMETHOD(mmu_enter_object,	mmu_booke_enter_object),
	MMUMETHOD(mmu_enter_quick,	mmu_booke_enter_quick),
	MMUMETHOD(mmu_extract,		mmu_booke_extract),
	MMUMETHOD(mmu_extract_and_hold,	mmu_booke_extract_and_hold),
	MMUMETHOD(mmu_init,		mmu_booke_init),
	MMUMETHOD(mmu_is_modified,	mmu_booke_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	mmu_booke_is_prefaultable),
	MMUMETHOD(mmu_ts_referenced,	mmu_booke_ts_referenced),
	MMUMETHOD(mmu_map,		mmu_booke_map),
	MMUMETHOD(mmu_mincore,		mmu_booke_mincore),
	MMUMETHOD(mmu_object_init_pt,	mmu_booke_object_init_pt),
	MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
	MMUMETHOD(mmu_page_init,	mmu_booke_page_init),
	MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		mmu_booke_pinit),
	MMUMETHOD(mmu_pinit0,		mmu_booke_pinit0),
	MMUMETHOD(mmu_protect,		mmu_booke_protect),
	MMUMETHOD(mmu_qenter,		mmu_booke_qenter),
	MMUMETHOD(mmu_qremove,		mmu_booke_qremove),
	MMUMETHOD(mmu_release,		mmu_booke_release),
	MMUMETHOD(mmu_remove,		mmu_booke_remove),
	MMUMETHOD(mmu_remove_all,	mmu_booke_remove_all),
	MMUMETHOD(mmu_remove_write,	mmu_booke_remove_write),
	MMUMETHOD(mmu_zero_page,	mmu_booke_zero_page),
	MMUMETHOD(mmu_zero_page_area,	mmu_booke_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	mmu_booke_zero_page_idle),
	MMUMETHOD(mmu_activate,		mmu_booke_activate),
	MMUMETHOD(mmu_deactivate,	mmu_booke_deactivate),

	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	mmu_booke_bootstrap),
	MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
	MMUMETHOD(mmu_mapdev,		mmu_booke_mapdev),
	MMUMETHOD(mmu_kenter,		mmu_booke_kenter),
	MMUMETHOD(mmu_kextract,		mmu_booke_kextract),
/*	MMUMETHOD(mmu_kremove,		mmu_booke_kremove),	*/
	MMUMETHOD(mmu_page_executable,	mmu_booke_page_executable),
	MMUMETHOD(mmu_unmapdev,		mmu_booke_unmapdev),

	/* dumpsys() support */
	MMUMETHOD(mmu_dumpsys_map,	mmu_booke_dumpsys_map),
	MMUMETHOD(mmu_dumpsys_unmap,	mmu_booke_dumpsys_unmap),
	MMUMETHOD(mmu_scan_md,		mmu_booke_scan_md),

	{ 0, 0 }
};

static mmu_def_t booke_mmu = {
	MMU_TYPE_BOOKE,
	mmu_booke_methods,
	0
};
MMU_DEF(booke_mmu);

/* Read TLB0 configuration (number of entries, associativity) from h/w. */
static __inline void
tlb0_get_tlbconf(void)
{
	uint32_t tlb0_cfg;

	tlb0_cfg = mfspr(SPR_TLB0CFG);
	tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
	tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
	tlb0_entries_per_way = tlb0_entries / tlb0_ways;
}
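
/*
 * Example: an e500v2 core reports a 512-entry, 4-way set associative
 * TLB0 in TLB0CFG, in which case the above yields tlb0_entries = 512,
 * tlb0_ways = 4 and tlb0_entries_per_way = 128.
 */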

/* Initialize pool of kva ptbl buffers. */
static void
ptbl_init(void)
{
	int i;

	CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
	    (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
	CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
	    __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);

	mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
	TAILQ_INIT(&ptbl_buf_freelist);

	for (i = 0; i < PTBL_BUFS; i++) {
		ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
	}
}

/* Get a ptbl_buf from the freelist. */
static struct ptbl_buf *
ptbl_buf_alloc(void)
{
	struct ptbl_buf *buf;

	mtx_lock(&ptbl_buf_freelist_lock);
	buf = TAILQ_FIRST(&ptbl_buf_freelist);
	if (buf != NULL)
		TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	return (buf);
}

/* Return ptbl buf to the free pool. */
static void
ptbl_buf_free(struct ptbl_buf *buf)
{

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	mtx_lock(&ptbl_buf_freelist_lock);
	TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);
}

/*
 * Search the pmap's list of allocated ptbl bufs for the buf that maps the
 * given ptbl, remove it from that list and release it to the free pool.
 */
static void
ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
{
	struct ptbl_buf *pbuf;

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
		if (pbuf->kva == (vm_offset_t)ptbl) {
			/* Remove from pmap ptbl buf list. */
			TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);

			/* Free corresponding ptbl buf. */
			ptbl_buf_free(pbuf);
			break;
		}
}

/* Allocate page table. */
static pte_t *
ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	vm_page_t mtbl[PTBL_PAGES];
	vm_page_t m;
	struct ptbl_buf *pbuf;
	unsigned int pidx;
	pte_t *ptbl;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_alloc: invalid pdir_idx"));
	KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
	    ("pte_alloc: valid ptbl entry exists!"));

	pbuf = ptbl_buf_alloc();
	if (pbuf == NULL)
		panic("pte_alloc: couldn't alloc kernel virtual memory");

	ptbl = (pte_t *)pbuf->kva;

	CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);

	/* Allocate ptbl pages, this will sleep! */
	for (i = 0; i < PTBL_PAGES; i++) {
		pidx = (PTBL_PAGES * pdir_idx) + i;
		while ((m = vm_page_alloc(NULL, pidx,
		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {

			PMAP_UNLOCK(pmap);
			vm_page_unlock_queues();
			VM_WAIT;
			vm_page_lock_queues();
			PMAP_LOCK(pmap);
		}
		mtbl[i] = m;
	}

	/* Map allocated pages into kernel_pmap. */
	mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);

	/* Zero whole ptbl. */
	bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);

	/* Add pbuf to the pmap ptbl bufs list. */
	TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);

	return (ptbl);
}

/* Free ptbl pages and invalidate pdir entry. */
static void
ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_offset_t va;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_free: invalid pdir_idx"));

	ptbl = pmap->pm_pdir[pdir_idx];

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));

	/*
	 * Invalidate the pdir entry as soon as possible, so that other CPUs
	 * don't attempt to look up the page tables we are releasing.
	 */
	mtx_lock_spin(&tlbivax_mutex);

	pmap->pm_pdir[pdir_idx] = NULL;

	mtx_unlock_spin(&tlbivax_mutex);

	for (i = 0; i < PTBL_PAGES; i++) {
		va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
		pa = pte_vatopa(mmu, kernel_pmap, va);
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_free_zero(m);
		atomic_subtract_int(&cnt.v_wire_count, 1);
		mmu_booke_kremove(mmu, va);
	}

	ptbl_free_pmap_ptbl(pmap, ptbl);
}

/*
 * Decrement ptbl pages hold count and attempt to free ptbl pages.
 * Called when removing a pte entry from the ptbl.
 *
 * Return 1 if ptbl pages were freed.
 */
static int
ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_unhold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_unhold: unholding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	//debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
	KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
	    ("ptbl_unhold: non kva ptbl"));

	/* decrement hold count */
	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(mmu, kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->wire_count--;
	}

	/*
	 * Free ptbl pages if there are no pte entries in this ptbl.
	 * wire_count has the same value for all ptbl pages, so check the last
	 * page.
	 */
	if (m->wire_count == 0) {
		ptbl_free(mmu, pmap, pdir_idx);

		//debugf("ptbl_unhold: e (freed ptbl)\n");
		return (1);
	}

	return (0);
}

/*
 * Increment hold count for ptbl pages. This routine is used when a new pte
 * entry is being inserted into the ptbl.
 */
static void
ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	vm_paddr_t pa;
	pte_t *ptbl;
	vm_page_t m;
	int i;

	CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
	    pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_hold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_hold: holding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));

	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(mmu, kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->wire_count++;
	}
}
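
/*
 * Note on the hold scheme: for user pmaps the wire_count of each ptbl
 * page effectively counts the valid PTEs in its ptbl: ptbl_alloc()
 * wires the pages on behalf of the first PTE, every further PTE adds a
 * reference via ptbl_hold() and pte_remove() drops one via
 * ptbl_unhold(), which frees the ptbl once the count reaches zero.
 */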

/* Allocate pv_entry structure. */
pv_entry_t
pv_alloc(void)
{
	pv_entry_t pv;

	pv_entry_count++;
	if ((pv_entry_count > pv_entry_high_water) &&
	    (pagedaemon_waken == 0)) {
		pagedaemon_waken = 1;
		wakeup(&vm_pages_needed);
	}
	pv = uma_zalloc(pvzone, M_NOWAIT);

	return (pv);
}

/* Free pv_entry structure. */
static __inline void
pv_free(pv_entry_t pve)
{

	pv_entry_count--;
	uma_zfree(pvzone, pve);
}


/* Allocate and initialize pv_entry structure. */
static void
pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pve;

	//int su = (pmap == kernel_pmap);
	//debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
	//	(u_int32_t)pmap, va, (u_int32_t)m);

	pve = pv_alloc();
	if (pve == NULL)
		panic("pv_insert: no pv entries!");

	pve->pv_pmap = pmap;
	pve->pv_va = va;

	/* add to pv_list */
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);

	//debugf("pv_insert: e\n");
}

/* Destroy pv entry. */
static void
pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pve;

	//int su = (pmap == kernel_pmap);
	//debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	/* find pv entry */
	TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
		if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
			/* remove from pv_list */
			TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
			if (TAILQ_EMPTY(&m->md.pv_list))
				vm_page_flag_clear(m, PG_WRITEABLE);

			/* free pv entry struct */
			pv_free(pve);
			break;
		}
	}

	//debugf("pv_remove: e\n");
}

/*
 * Clean pte entry, try to free page table page if requested.
 *
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	vm_page_t m;
	pte_t *ptbl;
	pte_t *pte;

	//int su = (pmap == kernel_pmap);
	//debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
	//		su, (u_int32_t)pmap, va, flags);

	ptbl = pmap->pm_pdir[pdir_idx];
	KASSERT(ptbl, ("pte_remove: null ptbl"));

	pte = &ptbl[ptbl_idx];

	if (pte == NULL || !PTE_ISVALID(pte))
		return (0);

	if (PTE_ISWIRED(pte))
		pmap->pm_stats.wired_count--;

	/* Handle managed entry. */
	if (PTE_ISMANAGED(pte)) {
		/* Get vm_page_t for mapped pte. */
		m = PHYS_TO_VM_PAGE(PTE_PA(pte));

		if (PTE_ISMODIFIED(pte))
			vm_page_dirty(m);

		if (PTE_ISREFERENCED(pte))
			vm_page_flag_set(m, PG_REFERENCED);

		pv_remove(pmap, va, m);
	}

	mtx_lock_spin(&tlbivax_mutex);

	tlb0_flush_entry(va);
	pte->flags = 0;
	pte->rpn = 0;

	mtx_unlock_spin(&tlbivax_mutex);

	pmap->pm_stats.resident_count--;

	if (flags & PTBL_UNHOLD) {
		//debugf("pte_remove: e (unhold)\n");
		return (ptbl_unhold(mmu, pmap, pdir_idx));
	}

	//debugf("pte_remove: e\n");
	return (0);
}

/*
 * Insert PTE for a given page and virtual address.
 */
static void
pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *ptbl, *pte;

	CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
	    pmap == kernel_pmap, pmap, va);

	/* Get the page table pointer. */
	ptbl = pmap->pm_pdir[pdir_idx];

	if (ptbl == NULL) {
		/* Allocate page table pages. */
		ptbl = ptbl_alloc(mmu, pmap, pdir_idx);
	} else {
		/*
		 * Check if there is valid mapping for requested
		 * va, if there is, remove it.
		 */
		pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
		if (PTE_ISVALID(pte)) {
			pte_remove(mmu, pmap, va, PTBL_HOLD);
		} else {
			/*
			 * pte is not used, increment hold count
			 * for ptbl pages.
			 */
			if (pmap != kernel_pmap)
				ptbl_hold(mmu, pmap, pdir_idx);
		}
	}

	/*
	 * Insert pv_entry into pv_list for mapped page if part of managed
	 * memory.
	 */
	if ((m->flags & PG_FICTITIOUS) == 0) {
		if ((m->flags & PG_UNMANAGED) == 0) {
			flags |= PTE_MANAGED;

			/* Create and insert pv entry. */
			pv_insert(pmap, va, m);
		}
	}

	pmap->pm_stats.resident_count++;

	mtx_lock_spin(&tlbivax_mutex);

	tlb0_flush_entry(va);
	if (pmap->pm_pdir[pdir_idx] == NULL) {
		/*
		 * If we just allocated a new page table, hook it in
		 * the pdir.
		 */
		pmap->pm_pdir[pdir_idx] = ptbl;
	}
	pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
	pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK;
	pte->flags |= (PTE_VALID | flags);

	mtx_unlock_spin(&tlbivax_mutex);
}

/* Return the pa for the given pmap/va. */
static vm_paddr_t
pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa = 0;
	pte_t *pte;

	pte = pte_find(mmu, pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte))
		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
	return (pa);
}

/* Get a pointer to a PTE in a page table. */
static pte_t *
pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	if (pmap->pm_pdir[pdir_idx])
		return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));

	return (NULL);
}
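
/*
 * Lookup sketch: a va selects a ptbl through its upper bits (PDIR_IDX)
 * and a pte within that ptbl through its middle bits (PTBL_IDX).
 * Assuming the usual 1024 x 1024 split with 4 KB pages:
 *
 *	va       = 0xc1004000
 *	pdir_idx = va >> 22           = 0x304
 *	ptbl_idx = (va >> 12) & 0x3ff = 0x004
 *	pte      = &pmap->pm_pdir[0x304][0x004]
 */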

/**************************************************************************/
/* PMAP related */
/**************************************************************************/

/*
 * This is called during e500_init, before the system is really initialized.
 */
static void
mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
{
	vm_offset_t phys_kernelend;
	struct mem_region *mp, *mp1;
	int cnt, i, j;
	u_int s, e, sz;
	u_int phys_avail_count;
	vm_size_t physsz, hwphyssz, kstack0_sz;
	vm_offset_t kernel_pdir, kstack0;
	vm_paddr_t kstack0_phys;

	debugf("mmu_booke_bootstrap: entered\n");

	/* Initialize invalidation mutex */
	mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);

	/* Read TLB0 size and associativity. */
	tlb0_get_tlbconf();

	/* Align kernel start and end address (kernel image). */
	kernstart = trunc_page(start);
	data_start = round_page(kernelend);
	kernsize = data_start - kernstart;

	data_end = data_start;

	/* Allocate space for the message buffer. */
	msgbufp = (struct msgbuf *)data_end;
	data_end += MSGBUF_SIZE;
	debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp,
	    data_end);

	data_end = round_page(data_end);

	/* Allocate space for ptbl_bufs. */
	ptbl_bufs = (struct ptbl_buf *)data_end;
	data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
	debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs,
	    data_end);

	data_end = round_page(data_end);

	/* Allocate PTE tables for kernel KVA. */
	kernel_pdir = data_end;
	kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS +
	    PDIR_SIZE - 1) / PDIR_SIZE;
	data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
	debugf(" kernel ptbls: %d\n", kernel_ptbls);
	debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end);

	debugf(" data_end: 0x%08x\n", data_end);
	if (data_end - kernstart > 0x1000000) {
		data_end = (data_end + 0x3fffff) & ~0x3fffff;
		tlb1_mapin_region(kernstart + 0x1000000,
		    kernload + 0x1000000, data_end - kernstart - 0x1000000);
	} else
		data_end = (data_end + 0xffffff) & ~0xffffff;
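
	/*
	 * Note: TLB1 entries map power-of-4 sized regions, so the
	 * rounding above keeps the kernel region coverable: a kernel up
	 * to 16 MB fits the initial 16 MB boot-time mapping, while a
	 * bigger one keeps that mapping and covers the excess in
	 * 4 MB-aligned chunks via tlb1_mapin_region() above.
	 */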

	debugf(" updated data_end: 0x%08x\n", data_end);

	kernsize += data_end - data_start;

	/*
	 * Clear the structures - note we can only do it safely after the
	 * possible additional TLB1 translations are in place (above) so that
	 * the whole range up to the currently calculated 'data_end' is
	 * covered.
	 */
	memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
	memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);

	/*******************************************************/
	/* Set the start and end of kva. */
	/*******************************************************/
	virtual_avail = round_page(data_end);
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/* Allocate KVA space for page zero/copy operations. */
	zero_page_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	zero_page_idle_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	copy_page_src_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	copy_page_dst_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	debugf("zero_page_va = 0x%08x\n", zero_page_va);
	debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va);
	debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
	debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);

	/* Initialize page zero/copy mutexes. */
	mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
	mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);

	/* Allocate KVA space for ptbl bufs. */
	ptbl_buf_pool_vabase = virtual_avail;
	virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
	debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n",
	    ptbl_buf_pool_vabase, virtual_avail);

	/* Calculate corresponding physical addresses for the kernel region. */
	phys_kernelend = kernload + kernsize;
	debugf("kernel image and allocated data:\n");
	debugf(" kernload    = 0x%08x\n", kernload);
	debugf(" kernstart   = 0x%08x\n", kernstart);
	debugf(" kernsize    = 0x%08x\n", kernsize);

	if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
		panic("mmu_booke_bootstrap: phys_avail too small");

	/*
	 * Remove kernel physical address range from avail regions list. Page
	 * align all regions.  Non-page aligned memory isn't very interesting
	 * to us.  Also, sort the entries for ascending addresses.
	 */

	/* Retrieve phys/avail mem regions */
	mem_regions(&physmem_regions, &physmem_regions_sz,
	    &availmem_regions, &availmem_regions_sz);
	sz = 0;
	cnt = availmem_regions_sz;
	debugf("processing avail regions:\n");
	for (mp = availmem_regions; mp->mr_size; mp++) {
		s = mp->mr_start;
		e = mp->mr_start + mp->mr_size;
		debugf(" %08x-%08x -> ", s, e);
		/* Check whether this region holds all of the kernel. */
		if (s < kernload && e > phys_kernelend) {
			availmem_regions[cnt].mr_start = phys_kernelend;
			availmem_regions[cnt++].mr_size = e - phys_kernelend;
			e = kernload;
		}
		/* Look whether this region starts within the kernel. */
		if (s >= kernload && s < phys_kernelend) {
			if (e <= phys_kernelend)
				goto empty;
			s = phys_kernelend;
		}
		/* Now look whether this region ends within the kernel. */
		if (e > kernload && e <= phys_kernelend) {
			if (s >= kernload)
				goto empty;
			e = kernload;
		}
		/* Now page align the start and size of the region. */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s)
			e = s;
		sz = e - s;
		debugf("%08x-%08x = %x\n", s, e, sz);

		/* Check whether some memory is left here. */
		if (sz == 0) {
		empty:
			memmove(mp, mp + 1,
			    (cnt - (mp - availmem_regions)) * sizeof(*mp));
			cnt--;
			mp--;
			continue;
		}

		/* Do an insertion sort. */
		for (mp1 = availmem_regions; mp1 < mp; mp1++)
			if (s < mp1->mr_start)
				break;
		if (mp1 < mp) {
			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
			mp1->mr_start = s;
			mp1->mr_size = sz;
		} else {
			mp->mr_start = s;
			mp->mr_size = sz;
		}
	}
	availmem_regions_sz = cnt;

	/*******************************************************/
	/* Steal physical memory for kernel stack from the end */
	/* of the first avail region                           */
	/*******************************************************/
	kstack0_sz = KSTACK_PAGES * PAGE_SIZE;
	kstack0_phys = availmem_regions[0].mr_start +
	    availmem_regions[0].mr_size;
	kstack0_phys -= kstack0_sz;
	availmem_regions[0].mr_size -= kstack0_sz;

	/*******************************************************/
	/* Fill in phys_avail table, based on availmem_regions */
	/*******************************************************/
	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);

	debugf("fill in phys_avail:\n");
	for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {

		debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
		    availmem_regions[i].mr_start,
		    availmem_regions[i].mr_start +
		        availmem_regions[i].mr_size,
		    availmem_regions[i].mr_size);

		if (hwphyssz != 0 &&
		    (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
			debugf(" hw.physmem adjust\n");
			if (physsz < hwphyssz) {
				phys_avail[j] = availmem_regions[i].mr_start;
				phys_avail[j + 1] =
				    availmem_regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}

		phys_avail[j] = availmem_regions[i].mr_start;
		phys_avail[j + 1] = availmem_regions[i].mr_start +
		    availmem_regions[i].mr_size;
		phys_avail_count++;
		physsz += availmem_regions[i].mr_size;
	}
	physmem = btoc(physsz);

	/* Calculate the last available physical address. */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	debugf("Maxmem = 0x%08lx\n", Maxmem);
	debugf("phys_avail_count = %d\n", phys_avail_count);
	debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem,
	    physmem);

	/*******************************************************/
	/* Initialize (statically allocated) kernel pmap. */
	/*******************************************************/
	PMAP_LOCK_INIT(kernel_pmap);
	kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;

	debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap);
	debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls);
	debugf("kernel pdir range: 0x%08x - 0x%08x\n",
	    kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1);

	/* Initialize kernel pdir */
	for (i = 0; i < kernel_ptbls; i++)
		kernel_pmap->pm_pdir[kptbl_min + i] =
		    (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES));

	for (i = 0; i < MAXCPU; i++) {
		kernel_pmap->pm_tid[i] = TID_KERNEL;

		/* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
		tidbusy[i][0] = kernel_pmap;
	}
	/* Mark kernel_pmap active on all CPUs */
	kernel_pmap->pm_active = ~0;

	/*******************************************************/
	/* Final setup */
	/*******************************************************/

	/* Enter kstack0 into kernel map, provide guard page */
	kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	thread0.td_kstack = kstack0;
	thread0.td_kstack_pages = KSTACK_PAGES;

	debugf("kstack_sz = 0x%08x\n", kstack0_sz);
	debugf("kstack0_phys at 0x%08x - 0x%08x\n",
	    kstack0_phys, kstack0_phys + kstack0_sz);
	debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz);

	virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
	for (i = 0; i < KSTACK_PAGES; i++) {
		mmu_booke_kenter(mmu, kstack0, kstack0_phys);
		kstack0 += PAGE_SIZE;
		kstack0_phys += PAGE_SIZE;
	}

	debugf("virtual_avail = %08x\n", virtual_avail);
	debugf("virtual_end   = %08x\n", virtual_end);

	debugf("mmu_booke_bootstrap: exit\n");
}

/*
 * Get the physical page address for the given pmap/virtual address.
 */
static vm_paddr_t
mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa;

	PMAP_LOCK(pmap);
	pa = pte_vatopa(mmu, pmap, va);
	PMAP_UNLOCK(pmap);

	return (pa);
}

/*
 * Extract the physical page address associated with the given
 * kernel virtual address.
 */
static vm_paddr_t
mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
{

	return (pte_vatopa(mmu, kernel_pmap, va));
}

/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */
static void
mmu_booke_init(mmu_t mmu)
{
	int shpgperproc = PMAP_SHPGPERPROC;

	/*
	 * Initialize the address space (zone) for the pv entries.  Set a
	 * high water mark so that the system can recover from excessive
	 * numbers of pv entries.
	 */
	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);

	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;

	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
	pv_entry_high_water = 9 * (pv_entry_max / 10);

	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);

	/* Pre-fill pvzone with initial number of pv entries. */
	uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);

	/* Initialize ptbl allocation. */
	ptbl_init();
}
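
/*
 * Example sizing (hypothetical numbers): with the default
 * PMAP_SHPGPERPROC of 200, maxproc = 1000 and 65536 physical pages,
 * pv_entry_max = 200 * 1000 + 65536 = 265536 and
 * pv_entry_high_water = 9 * (265536 / 10) = 238977.
 */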

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
static void
mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by mmu_booke_qenter.
 */
static void
mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		mmu_booke_kremove(mmu, va);
		va += PAGE_SIZE;
	}
}
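
/*
 * Usage sketch (hypothetical caller): map a batch of pages, access
 * them through the temporary KVA window, then tear it down:
 *
 *	mmu_booke_qenter(mmu, sva, m, npages);
 *	bcopy((void *)sva, buf, npages * PAGE_SIZE);
 *	mmu_booke_qremove(mmu, sva, npages);
 */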

/*
 * Map a wired page into kernel virtual address space.
 */
static void
mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	uint32_t flags;
	pte_t *pte;

	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
	    (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));

	flags = 0;
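	/*
	 * Kernel mappings are supervisor-mode r/w/x and wired; PTE_M
	 * (memory coherence required) keeps them coherent across CPUs.
	 */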
1300176771Sraj	flags |= (PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID);
1301187149Sraj	flags |= PTE_M;
1302176771Sraj
1303176771Sraj	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);
1304176771Sraj
1305187149Sraj	mtx_lock_spin(&tlbivax_mutex);
1306187149Sraj
1307176771Sraj	if (PTE_ISVALID(pte)) {
1308187149Sraj
1309187149Sraj		CTR1(KTR_PMAP, "%s: replacing entry!", __func__);
1310176771Sraj
1311176771Sraj		/* Flush entry from TLB0 */
1312187149Sraj		tlb0_flush_entry(va);
1313176771Sraj	}
1314176771Sraj
1315176771Sraj	pte->rpn = pa & ~PTE_PA_MASK;
1316176771Sraj	pte->flags = flags;
1317176771Sraj
1318176771Sraj	//debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
1319176771Sraj	//		"pa=0x%08x rpn=0x%08x flags=0x%08x\n",
1320176771Sraj	//		pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);
1321176771Sraj
1322176771Sraj	/* Flush the real memory from the instruction cache. */
1323176771Sraj	if ((flags & (PTE_I | PTE_G)) == 0) {
1324176771Sraj		__syncicache((void *)va, PAGE_SIZE);
1325176771Sraj	}
1326176771Sraj
1327187149Sraj	mtx_unlock_spin(&tlbivax_mutex);
1328176771Sraj}
1329176771Sraj
1330176771Sraj/*
1331176771Sraj * Remove a page from kernel page table.
1332176771Sraj */
1333176771Srajstatic void
1334176771Srajmmu_booke_kremove(mmu_t mmu, vm_offset_t va)
1335176771Sraj{
1336176771Sraj	unsigned int pdir_idx = PDIR_IDX(va);
1337176771Sraj	unsigned int ptbl_idx = PTBL_IDX(va);
1338176771Sraj	pte_t *pte;
1339176771Sraj
1340187149Sraj//	CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va));
1341176771Sraj
1342187149Sraj	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
1343187149Sraj	    (va <= VM_MAX_KERNEL_ADDRESS)),
1344176771Sraj	    ("mmu_booke_kremove: invalid va"));
1345176771Sraj
1346176771Sraj	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);
1347176771Sraj
1348176771Sraj	if (!PTE_ISVALID(pte)) {
1349187149Sraj
1350187149Sraj		CTR1(KTR_PMAP, "%s: invalid pte", __func__);
1351187149Sraj
1352176771Sraj		return;
1353176771Sraj	}
1354176771Sraj
1355187149Sraj	mtx_lock_spin(&tlbivax_mutex);
1356176771Sraj
1357187149Sraj	/* Invalidate entry in TLB0, update PTE. */
1358187149Sraj	tlb0_flush_entry(va);
1359176771Sraj	pte->flags = 0;
1360176771Sraj	pte->rpn = 0;
1361176771Sraj
1362187149Sraj	mtx_unlock_spin(&tlbivax_mutex);
1363176771Sraj}
1364176771Sraj
1365176771Sraj/*
1366176771Sraj * Initialize pmap associated with process 0.
1367176771Sraj */
1368176771Srajstatic void
1369176771Srajmmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
1370176771Sraj{
1371187151Sraj
1372176771Sraj	mmu_booke_pinit(mmu, pmap);
1373176771Sraj	PCPU_SET(curpmap, pmap);
1374176771Sraj}
1375176771Sraj
1376176771Sraj/*
1377176771Sraj * Initialize a preallocated and zeroed pmap structure,
1378176771Sraj * such as one in a vmspace structure.
1379176771Sraj */
1380176771Srajstatic void
1381176771Srajmmu_booke_pinit(mmu_t mmu, pmap_t pmap)
1382176771Sraj{
1383187149Sraj	int i;
1384176771Sraj
1385187149Sraj	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
1386187149Sraj	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);
1387176771Sraj
1388187149Sraj	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));
1389176771Sraj
1390176771Sraj	PMAP_LOCK_INIT(pmap);
1391187149Sraj	for (i = 0; i < MAXCPU; i++)
1392187149Sraj		pmap->pm_tid[i] = TID_NONE;
1393176771Sraj	pmap->pm_active = 0;
1394176771Sraj	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1395176771Sraj	bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
1396187149Sraj	TAILQ_INIT(&pmap->pm_ptbl_list);
1397176771Sraj}
1398176771Sraj
1399176771Sraj/*
1400176771Sraj * Release any resources held by the given physical map.
1401176771Sraj * Called when a pmap initialized by mmu_booke_pinit is being released.
1402176771Sraj * Should only be called if the map contains no valid mappings.
1403176771Sraj */
1404176771Srajstatic void
1405176771Srajmmu_booke_release(mmu_t mmu, pmap_t pmap)
1406176771Sraj{
1407176771Sraj
1408187151Sraj	printf("mmu_booke_release: s\n");
1409176771Sraj
1410187151Sraj	KASSERT(pmap->pm_stats.resident_count == 0,
1411187151Sraj	    ("pmap_release: pmap resident count %ld != 0",
1412187151Sraj	    pmap->pm_stats.resident_count));
1413187151Sraj
1414176771Sraj	PMAP_LOCK_DESTROY(pmap);
1415176771Sraj}
1416176771Sraj
1417176771Sraj/*
1418176771Sraj * Insert the given physical page at the specified virtual address in the
1419176771Sraj * target physical map with the protection requested. If specified, the page
1420176771Sraj * will be wired down.
1421176771Sraj */
1422176771Srajstatic void
1423176771Srajmmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1424176771Sraj    vm_prot_t prot, boolean_t wired)
1425176771Sraj{
1426187151Sraj
1427176771Sraj	vm_page_lock_queues();
1428176771Sraj	PMAP_LOCK(pmap);
1429176771Sraj	mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired);
1430176771Sraj	vm_page_unlock_queues();
1431176771Sraj	PMAP_UNLOCK(pmap);
1432176771Sraj}
1433176771Sraj
1434176771Srajstatic void
1435176771Srajmmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1436176771Sraj    vm_prot_t prot, boolean_t wired)
1437176771Sraj{
1438176771Sraj	pte_t *pte;
1439176771Sraj	vm_paddr_t pa;
1440187151Sraj	uint32_t flags;
1441176771Sraj	int su, sync;
1442176771Sraj
1443176771Sraj	pa = VM_PAGE_TO_PHYS(m);
1444176771Sraj	su = (pmap == kernel_pmap);
1445176771Sraj	sync = 0;
1446176771Sraj
1447176771Sraj	//debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
1448176771Sraj	//		"pa=0x%08x prot=0x%08x wired=%d)\n",
1449176771Sraj	//		(u_int32_t)pmap, su, pmap->pm_tid,
1450176771Sraj	//		(u_int32_t)m, va, pa, prot, wired);
1451176771Sraj
1452176771Sraj	if (su) {
1453187151Sraj		KASSERT(((va >= virtual_avail) &&
1454187151Sraj		    (va <= VM_MAX_KERNEL_ADDRESS)),
1455187151Sraj		    ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
1456176771Sraj	} else {
1457176771Sraj		KASSERT((va <= VM_MAXUSER_ADDRESS),
1458187151Sraj		    ("mmu_booke_enter_locked: user pmap, non user va"));
1459176771Sraj	}
1460176771Sraj
1461176771Sraj	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1462176771Sraj
1463176771Sraj	/*
1464176771Sraj	 * If there is an existing mapping, and the physical address has not
1465176771Sraj	 * changed, it must be a protection or wiring change.
1466176771Sraj	 */
1467176771Sraj	if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
1468176771Sraj	    (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {
1469187149Sraj
1470187149Sraj		/*
1471187149Sraj		 * Before actually updating pte->flags we calculate and
1472187149Sraj		 * prepare its new value in a helper var.
1473187149Sraj		 */
1474187149Sraj		flags = pte->flags;
1475187149Sraj		flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);
1476176771Sraj
1477176771Sraj		/* Wiring change, just update stats. */
1478176771Sraj		if (wired) {
1479176771Sraj			if (!PTE_ISWIRED(pte)) {
1480187149Sraj				flags |= PTE_WIRED;
1481176771Sraj				pmap->pm_stats.wired_count++;
1482176771Sraj			}
1483176771Sraj		} else {
1484176771Sraj			if (PTE_ISWIRED(pte)) {
1485187149Sraj				flags &= ~PTE_WIRED;
1486176771Sraj				pmap->pm_stats.wired_count--;
1487176771Sraj			}
1488176771Sraj		}
1489176771Sraj
1490176771Sraj		if (prot & VM_PROT_WRITE) {
1491176771Sraj			/* Add write permissions. */
1492187149Sraj			flags |= PTE_SW;
1493176771Sraj			if (!su)
1494187149Sraj				flags |= PTE_UW;
1495176771Sraj		} else {
1496176771Sraj			/* Handle modified pages, sense modify status. */
1497187149Sraj
1498187149Sraj			/*
1499187149Sraj			 * The PTE_MODIFIED flag could be set by underlying
1500187149Sraj			 * TLB misses since we last read it (above), possibly
1501187149Sraj			 * TLB misses since we last read it (above), and
1502187149Sraj			 * other CPUs could have updated it in the meantime,
1503187149Sraj			 * so check the PTE directly rather than relying on
1504187149Sraj			 * the saved local flags copy.
1505178626Smarcel			if (PTE_ISMODIFIED(pte))
1506178626Smarcel				vm_page_dirty(m);
1507176771Sraj		}
1508176771Sraj
1509176771Sraj		if (prot & VM_PROT_EXECUTE) {
1510187149Sraj			flags |= PTE_SX;
1511176771Sraj			if (!su)
1512187149Sraj				flags |= PTE_UX;
1513176771Sraj
1514187149Sraj			/*
1515187149Sraj			 * Check existing flags for execute permissions: if we
1516187149Sraj			 * are turning execute permissions on, icache should
1517187149Sraj			 * be flushed.
1518187149Sraj			 */
1519176771Sraj			if ((flags & (PTE_UX | PTE_SX)) == 0)
1520176771Sraj				sync++;
1521176771Sraj		}
1522176771Sraj
1523187149Sraj		flags &= ~PTE_REFERENCED;
1524187149Sraj
1525187149Sraj		/*
1526187149Sraj		 * The new flags value is all calculated -- only now actually
1527187149Sraj		 * update the PTE.
1528187149Sraj		 */
1529187149Sraj		mtx_lock_spin(&tlbivax_mutex);
1530187149Sraj
1531187149Sraj		tlb0_flush_entry(va);
1532187149Sraj		pte->flags = flags;
1533187149Sraj
1534187149Sraj		mtx_unlock_spin(&tlbivax_mutex);
1535187149Sraj
1536176771Sraj	} else {
1537176771Sraj		/*
1538187149Sraj		 * If there is an existing mapping, but it's for a different
1539176771Sraj		 * physical address, pte_enter() will delete the old mapping.
1540176771Sraj		 */
1541176771Sraj		//if ((pte != NULL) && PTE_ISVALID(pte))
1542176771Sraj		//	debugf("mmu_booke_enter_locked: replace\n");
1543176771Sraj		//else
1544176771Sraj		//	debugf("mmu_booke_enter_locked: new\n");
1545176771Sraj
1546176771Sraj		/* Now set up the flags and install the new mapping. */
1547176771Sraj		flags = (PTE_SR | PTE_VALID);
1548187149Sraj		flags |= PTE_M;
1549176771Sraj
1550176771Sraj		if (!su)
1551176771Sraj			flags |= PTE_UR;
1552176771Sraj
1553176771Sraj		if (prot & VM_PROT_WRITE) {
1554176771Sraj			flags |= PTE_SW;
1555176771Sraj			if (!su)
1556176771Sraj				flags |= PTE_UW;
1557176771Sraj		}
1558176771Sraj
1559176771Sraj		if (prot & VM_PROT_EXECUTE) {
1560176771Sraj			flags |= PTE_SX;
1561176771Sraj			if (!su)
1562176771Sraj				flags |= PTE_UX;
1563176771Sraj		}
1564176771Sraj
1565176771Sraj		/* If it's wired, update stats. */
1566176771Sraj		if (wired) {
1567176771Sraj			pmap->pm_stats.wired_count++;
1568176771Sraj			flags |= PTE_WIRED;
1569176771Sraj		}
1570176771Sraj
1571176771Sraj		pte_enter(mmu, pmap, m, va, flags);
1572176771Sraj
1573176771Sraj		/* Flush the real memory from the instruction cache. */
1574176771Sraj		if (prot & VM_PROT_EXECUTE)
1575176771Sraj			sync++;
1576176771Sraj	}
1577176771Sraj
1578176771Sraj	if (sync && (su || pmap == PCPU_GET(curpmap))) {
1579176771Sraj		__syncicache((void *)va, PAGE_SIZE);
1580176771Sraj		sync = 0;
1581176771Sraj	}
1582176771Sraj
1583176771Sraj	if (sync) {
1584176771Sraj		/* Create a temporary mapping. */
1585176771Sraj		pmap = PCPU_GET(curpmap);
1586176771Sraj
1587176771Sraj		va = 0;
1588176771Sraj		pte = pte_find(mmu, pmap, va);
1589176771Sraj		KASSERT(pte == NULL, ("%s:%d", __func__, __LINE__));
1590176771Sraj
1591187149Sraj		flags = PTE_SR | PTE_VALID | PTE_UR | PTE_M;
1592187149Sraj
1593176771Sraj		pte_enter(mmu, pmap, m, va, flags);
1594176771Sraj		__syncicache((void *)va, PAGE_SIZE);
1595176771Sraj		pte_remove(mmu, pmap, va, PTBL_UNHOLD);
1596176771Sraj	}
1597176771Sraj}
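
/*
 * Trace of the flag construction above for one common case: a new
 * mapping in a user pmap (su == 0) with VM_PROT_READ | VM_PROT_WRITE
 * and wired == FALSE.  Derived directly from the code above;
 * illustrative only, not compiled.
 */
#if 0
	flags = PTE_SR | PTE_VALID;	/* base: supervisor read, valid */
	flags |= PTE_M;			/* memory coherency required */
	flags |= PTE_UR;		/* !su: add user read */
	flags |= PTE_SW | PTE_UW;	/* VM_PROT_WRITE: both modes */
	/* No VM_PROT_EXECUTE: PTE_SX/PTE_UX stay clear, no icache sync. */
#endif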
1598176771Sraj
1599176771Sraj/*
1600176771Sraj * Maps a sequence of resident pages belonging to the same object.
1601176771Sraj * The sequence begins with the given page m_start.  This page is
1602176771Sraj * mapped at the given virtual address start.  Each subsequent page is
1603176771Sraj * mapped at a virtual address that is offset from start by the same
1604176771Sraj * amount as the page is offset from m_start within the object.  The
1605176771Sraj * last page in the sequence is the page with the largest offset from
1606176771Sraj * m_start that can be mapped at a virtual address less than the given
1607176771Sraj * virtual address end.  Not every virtual page between start and end
1608176771Sraj * is mapped; only those for which a resident page exists with the
1609176771Sraj * corresponding offset from m_start are mapped.
1610176771Sraj */
1611176771Srajstatic void
1612176771Srajmmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
1613176771Sraj    vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
1614176771Sraj{
1615176771Sraj	vm_page_t m;
1616176771Sraj	vm_pindex_t diff, psize;
1617176771Sraj
1618176771Sraj	psize = atop(end - start);
1619176771Sraj	m = m_start;
1620176771Sraj	PMAP_LOCK(pmap);
1621176771Sraj	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1622187151Sraj		mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
1623187151Sraj		    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1624176771Sraj		m = TAILQ_NEXT(m, listq);
1625176771Sraj	}
1626176771Sraj	PMAP_UNLOCK(pmap);
1627176771Sraj}
1628176771Sraj
1629176771Srajstatic void
1630176771Srajmmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1631176771Sraj    vm_prot_t prot)
1632176771Sraj{
1633176771Sraj
1634176771Sraj	PMAP_LOCK(pmap);
1635176771Sraj	mmu_booke_enter_locked(mmu, pmap, va, m,
1636176771Sraj	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1637176771Sraj	PMAP_UNLOCK(pmap);
1638176771Sraj}
1639176771Sraj
1640176771Sraj/*
1641176771Sraj * Remove the given range of addresses from the specified map.
1642176771Sraj *
1643176771Sraj * It is assumed that the start and end are properly rounded to the page size.
1644176771Sraj */
1645176771Srajstatic void
1646176771Srajmmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
1647176771Sraj{
1648176771Sraj	pte_t *pte;
1649187151Sraj	uint8_t hold_flag;
1650176771Sraj
1651176771Sraj	int su = (pmap == kernel_pmap);
1652176771Sraj
1653176771Sraj	//debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
1654176771Sraj	//		su, (u_int32_t)pmap, pmap->pm_tid, va, endva);
1655176771Sraj
1656176771Sraj	if (su) {
1657187151Sraj		KASSERT(((va >= virtual_avail) &&
1658187151Sraj		    (va <= VM_MAX_KERNEL_ADDRESS)),
1659187151Sraj		    ("mmu_booke_remove: kernel pmap, non kernel va"));
1660176771Sraj	} else {
1661176771Sraj		KASSERT((va <= VM_MAXUSER_ADDRESS),
1662187151Sraj		    ("mmu_booke_remove: user pmap, non user va"));
1663176771Sraj	}
1664176771Sraj
1665176771Sraj	if (PMAP_REMOVE_DONE(pmap)) {
1666176771Sraj		//debugf("mmu_booke_remove: e (empty)\n");
1667176771Sraj		return;
1668176771Sraj	}
1669176771Sraj
1670176771Sraj	hold_flag = PTBL_HOLD_FLAG(pmap);
1671176771Sraj	//debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);
1672176771Sraj
1673176771Sraj	vm_page_lock_queues();
1674176771Sraj	PMAP_LOCK(pmap);
1675176771Sraj	for (; va < endva; va += PAGE_SIZE) {
1676176771Sraj		pte = pte_find(mmu, pmap, va);
1677187149Sraj		if ((pte != NULL) && PTE_ISVALID(pte))
1678176771Sraj			pte_remove(mmu, pmap, va, hold_flag);
1679176771Sraj	}
1680176771Sraj	PMAP_UNLOCK(pmap);
1681176771Sraj	vm_page_unlock_queues();
1682176771Sraj
1683176771Sraj	//debugf("mmu_booke_remove: e\n");
1684176771Sraj}
1685176771Sraj
1686176771Sraj/*
1687176771Sraj * Remove physical page from all pmaps in which it resides.
1688176771Sraj */
1689176771Srajstatic void
1690176771Srajmmu_booke_remove_all(mmu_t mmu, vm_page_t m)
1691176771Sraj{
1692176771Sraj	pv_entry_t pv, pvn;
1693187151Sraj	uint8_t hold_flag;
1694176771Sraj
1695176771Sraj	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1696176771Sraj
1697176771Sraj	for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
1698176771Sraj		pvn = TAILQ_NEXT(pv, pv_link);
1699176771Sraj
1700176771Sraj		PMAP_LOCK(pv->pv_pmap);
1701176771Sraj		hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
1702176771Sraj		pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
1703176771Sraj		PMAP_UNLOCK(pv->pv_pmap);
1704176771Sraj	}
1705176771Sraj	vm_page_flag_clear(m, PG_WRITEABLE);
1706176771Sraj}
1707176771Sraj
1708176771Sraj/*
1709176771Sraj * Map a range of physical addresses into kernel virtual address space.
1710176771Sraj */
1711176771Srajstatic vm_offset_t
1712176771Srajmmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
1713176771Sraj    vm_offset_t pa_end, int prot)
1714176771Sraj{
1715176771Sraj	vm_offset_t sva = *virt;
1716176771Sraj	vm_offset_t va = sva;
1717176771Sraj
1718176771Sraj	//debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n",
1719176771Sraj	//		sva, pa_start, pa_end);
1720176771Sraj
1721176771Sraj	while (pa_start < pa_end) {
1722176771Sraj		mmu_booke_kenter(mmu, va, pa_start);
1723176771Sraj		va += PAGE_SIZE;
1724176771Sraj		pa_start += PAGE_SIZE;
1725176771Sraj	}
1726176771Sraj	*virt = va;
1727176771Sraj
1728176771Sraj	//debugf("mmu_booke_map: e (va = 0x%08x)\n", va);
1729176771Sraj	return (sva);
1730176771Sraj}
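
/*
 * A usage sketch for mmu_booke_map(): map a physical range during
 * bootstrap and advance the caller's free-VA cursor.  The physical
 * addresses are hypothetical; illustrative only, not compiled.
 */
#if 0
	vm_offset_t va;

	/* Maps 16 pages; 'virtual_avail' is advanced past the mapping. */
	va = mmu_booke_map(mmu, &virtual_avail, 0xf8000000, 0xf8010000,
	    VM_PROT_READ | VM_PROT_WRITE);
#endif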
1731176771Sraj
1732176771Sraj/*
1733176771Sraj * The pmap must be activated before its address space can be accessed in any
1734176771Sraj * way.
1735176771Sraj */
1736176771Srajstatic void
1737176771Srajmmu_booke_activate(mmu_t mmu, struct thread *td)
1738176771Sraj{
1739176771Sraj	pmap_t pmap;
1740176771Sraj
1741176771Sraj	pmap = &td->td_proc->p_vmspace->vm_pmap;
1742176771Sraj
1743187149Sraj	CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)",
1744187149Sraj	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
1745176771Sraj
1746176771Sraj	KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));
1747176771Sraj
1748176771Sraj	mtx_lock_spin(&sched_lock);
1749176771Sraj
1750187149Sraj	atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
1751176771Sraj	PCPU_SET(curpmap, pmap);
1752187149Sraj
1753187149Sraj	if (pmap->pm_tid[PCPU_GET(cpuid)] == TID_NONE)
1754176771Sraj		tid_alloc(pmap);
1755176771Sraj
1756176771Sraj	/* Load PID0 register with pmap tid value. */
1757187149Sraj	mtspr(SPR_PID0, pmap->pm_tid[PCPU_GET(cpuid)]);
1758187149Sraj	__asm __volatile("isync");
1759176771Sraj
1760176771Sraj	mtx_unlock_spin(&sched_lock);
1761176771Sraj
1762187149Sraj	CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__,
1763187149Sraj	    pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm);
1764176771Sraj}
1765176771Sraj
1766176771Sraj/*
1767176771Sraj * Deactivate the specified process's address space.
1768176771Sraj */
1769176771Srajstatic void
1770176771Srajmmu_booke_deactivate(mmu_t mmu, struct thread *td)
1771176771Sraj{
1772176771Sraj	pmap_t pmap;
1773176771Sraj
1774176771Sraj	pmap = &td->td_proc->p_vmspace->vm_pmap;
1775187149Sraj
1776187149Sraj	CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x",
1777187149Sraj	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
1778187149Sraj
1779187149Sraj	atomic_clear_int(&pmap->pm_active, PCPU_GET(cpumask));
1780176771Sraj	PCPU_SET(curpmap, NULL);
1781176771Sraj}
1782176771Sraj
1783176771Sraj/*
1784176771Sraj * Copy the range specified by src_addr/len
1785176771Sraj * from the source map to the range dst_addr/len
1786176771Sraj * in the destination map.
1787176771Sraj *
1788176771Sraj * This routine is only advisory and need not do anything.
1789176771Sraj */
1790176771Srajstatic void
1791176771Srajmmu_booke_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
1792176771Sraj    vm_size_t len, vm_offset_t src_addr)
1793176771Sraj{
1794176771Sraj
1795176771Sraj}
1796176771Sraj
1797176771Sraj/*
1798176771Sraj * Set the physical protection on the specified range of this map as requested.
1799176771Sraj */
1800176771Srajstatic void
1801176771Srajmmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
1802176771Sraj    vm_prot_t prot)
1803176771Sraj{
1804176771Sraj	vm_offset_t va;
1805176771Sraj	vm_page_t m;
1806176771Sraj	pte_t *pte;
1807176771Sraj
1808176771Sraj	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1809176771Sraj		mmu_booke_remove(mmu, pmap, sva, eva);
1810176771Sraj		return;
1811176771Sraj	}
1812176771Sraj
1813176771Sraj	if (prot & VM_PROT_WRITE)
1814176771Sraj		return;
1815176771Sraj
1816176771Sraj	vm_page_lock_queues();
1817176771Sraj	PMAP_LOCK(pmap);
1818176771Sraj	for (va = sva; va < eva; va += PAGE_SIZE) {
1819176771Sraj		if ((pte = pte_find(mmu, pmap, va)) != NULL) {
1820176771Sraj			if (PTE_ISVALID(pte)) {
1821176771Sraj				m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1822176771Sraj
1823187149Sraj				mtx_lock_spin(&tlbivax_mutex);
1824187149Sraj
1825176771Sraj				/* Handle modified pages. */
1826178626Smarcel				if (PTE_ISMODIFIED(pte))
1827178626Smarcel					vm_page_dirty(m);
1828176771Sraj
1829176771Sraj				/* Referenced pages. */
1830176771Sraj				if (PTE_ISREFERENCED(pte))
1831176771Sraj					vm_page_flag_set(m, PG_REFERENCED);
1832176771Sraj
1833187149Sraj				tlb0_flush_entry(va);
1834176771Sraj				pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED |
1835176771Sraj				    PTE_REFERENCED);
1836187149Sraj
1837187149Sraj				mtx_unlock_spin(&tlbivax_mutex);
1838176771Sraj			}
1839176771Sraj		}
1840176771Sraj	}
1841176771Sraj	PMAP_UNLOCK(pmap);
1842176771Sraj	vm_page_unlock_queues();
1843176771Sraj}
1844176771Sraj
1845176771Sraj/*
1846176771Sraj * Clear the write and modified bits in each of the given page's mappings.
1847176771Sraj */
1848176771Srajstatic void
1849176771Srajmmu_booke_remove_write(mmu_t mmu, vm_page_t m)
1850176771Sraj{
1851176771Sraj	pv_entry_t pv;
1852176771Sraj	pte_t *pte;
1853176771Sraj
1854176771Sraj	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1855176771Sraj	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
1856176771Sraj	    (m->flags & PG_WRITEABLE) == 0)
1857176771Sraj		return;
1858176771Sraj
1859176771Sraj	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
1860176771Sraj		PMAP_LOCK(pv->pv_pmap);
1861176771Sraj		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
1862176771Sraj			if (PTE_ISVALID(pte)) {
1863176771Sraj				m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1864176771Sraj
1865187149Sraj				mtx_lock_spin(&tlbivax_mutex);
1866187149Sraj
1867176771Sraj				/* Handle modified pages. */
1868178626Smarcel				if (PTE_ISMODIFIED(pte))
1869178626Smarcel					vm_page_dirty(m);
1870176771Sraj
1871176771Sraj				/* Referenced pages. */
1872176771Sraj				if (PTE_ISREFERENCED(pte))
1873176771Sraj					vm_page_flag_set(m, PG_REFERENCED);
1874176771Sraj
1875176771Sraj				/* Flush mapping from TLB0. */
1876176771Sraj				pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED |
1877176771Sraj				    PTE_REFERENCED);
1878187149Sraj
1879187149Sraj				mtx_unlock_spin(&tlbivax_mutex);
1880176771Sraj			}
1881176771Sraj		}
1882176771Sraj		PMAP_UNLOCK(pv->pv_pmap);
1883176771Sraj	}
1884176771Sraj	vm_page_flag_clear(m, PG_WRITEABLE);
1885176771Sraj}
1886176771Sraj
1887176771Srajstatic boolean_t
1888176771Srajmmu_booke_page_executable(mmu_t mmu, vm_page_t m)
1889176771Sraj{
1890176771Sraj	pv_entry_t pv;
1891176771Sraj	pte_t *pte;
1892176771Sraj	boolean_t executable;
1893176771Sraj
1894176771Sraj	executable = FALSE;
1895176771Sraj	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
1896176771Sraj		PMAP_LOCK(pv->pv_pmap);
1897176771Sraj		pte = pte_find(mmu, pv->pv_pmap, pv->pv_va);
1898176771Sraj		if (pte != NULL && PTE_ISVALID(pte) && (pte->flags & PTE_UX))
1899176771Sraj			executable = TRUE;
1900176771Sraj		PMAP_UNLOCK(pv->pv_pmap);
1901176771Sraj		if (executable)
1902176771Sraj			break;
1903176771Sraj	}
1904176771Sraj
1905176771Sraj	return (executable);
1906176771Sraj}
1907176771Sraj
1908176771Sraj/*
1909176771Sraj * Atomically extract and hold the physical page with the given
1910176771Sraj * pmap and virtual address pair if that mapping permits the given
1911176771Sraj * protection.
1912176771Sraj */
1913176771Srajstatic vm_page_t
1914176771Srajmmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
1915176771Sraj    vm_prot_t prot)
1916176771Sraj{
1917176771Sraj	pte_t *pte;
1918176771Sraj	vm_page_t m;
1919187151Sraj	uint32_t pte_wbit;
1920176771Sraj
1921176771Sraj	m = NULL;
1922176771Sraj	vm_page_lock_queues();
1923176771Sraj	PMAP_LOCK(pmap);
1924187151Sraj
1925176771Sraj	pte = pte_find(mmu, pmap, va);
1926176771Sraj	if ((pte != NULL) && PTE_ISVALID(pte)) {
1927176771Sraj		if (pmap == kernel_pmap)
1928176771Sraj			pte_wbit = PTE_SW;
1929176771Sraj		else
1930176771Sraj			pte_wbit = PTE_UW;
1931176771Sraj
1932176771Sraj		if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
1933176771Sraj			m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1934176771Sraj			vm_page_hold(m);
1935176771Sraj		}
1936176771Sraj	}
1937176771Sraj
1938176771Sraj	vm_page_unlock_queues();
1939176771Sraj	PMAP_UNLOCK(pmap);
1940176771Sraj	return (m);
1941176771Sraj}
1942176771Sraj
1943176771Sraj/*
1944176771Sraj * Initialize a vm_page's machine-dependent fields.
1945176771Sraj */
1946176771Srajstatic void
1947176771Srajmmu_booke_page_init(mmu_t mmu, vm_page_t m)
1948176771Sraj{
1949176771Sraj
1950176771Sraj	TAILQ_INIT(&m->md.pv_list);
1951176771Sraj}
1952176771Sraj
1953176771Sraj/*
1954176771Sraj * mmu_booke_zero_page_area zeros the specified hardware page by
1955176771Sraj * mapping it into virtual memory and using bzero to clear
1956176771Sraj * its contents.
1957176771Sraj *
1958176771Sraj * off and size must reside within a single page.
1959176771Sraj */
1960176771Srajstatic void
1961176771Srajmmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
1962176771Sraj{
1963176771Sraj	vm_offset_t va;
1964176771Sraj
1965187151Sraj	/* XXX KASSERT off and size are within a single page? */
1966176771Sraj
1967176771Sraj	mtx_lock(&zero_page_mutex);
1968176771Sraj	va = zero_page_va;
1969176771Sraj
1970176771Sraj	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
1971176771Sraj	bzero((caddr_t)va + off, size);
1972176771Sraj	mmu_booke_kremove(mmu, va);
1973176771Sraj
1974176771Sraj	mtx_unlock(&zero_page_mutex);
1975176771Sraj}
1976176771Sraj
1977176771Sraj/*
1978176771Sraj * mmu_booke_zero_page zeros the specified hardware page.
1979176771Sraj */
1980176771Srajstatic void
1981176771Srajmmu_booke_zero_page(mmu_t mmu, vm_page_t m)
1982176771Sraj{
1983176771Sraj
1984176771Sraj	mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE);
1985176771Sraj}
1986176771Sraj
1987176771Sraj/*
1988176771Sraj * mmu_booke_copy_page copies the specified (machine independent) page by
1989176771Sraj * mapping the page into virtual memory and using memcopy to copy the page,
1990176771Sraj * one machine dependent page at a time.
1991176771Sraj */
1992176771Srajstatic void
1993176771Srajmmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
1994176771Sraj{
1995176771Sraj	vm_offset_t sva, dva;
1996176771Sraj
1997176771Sraj	sva = copy_page_src_va;
1998176771Sraj	dva = copy_page_dst_va;
1999176771Sraj
2000187149Sraj	mtx_lock(&copy_page_mutex);
2001176771Sraj	mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
2002176771Sraj	mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
2003176771Sraj	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
2004176771Sraj	mmu_booke_kremove(mmu, dva);
2005176771Sraj	mmu_booke_kremove(mmu, sva);
2006176771Sraj	mtx_unlock(&copy_page_mutex);
2007176771Sraj}
2008176771Sraj
2009176771Sraj/*
2010176771Sraj * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
2011176771Sraj * into virtual memory and using bzero to clear its contents. This is intended
2012176771Sraj * to be called from the vm_pagezero process only and outside of Giant. No
2013176771Sraj * lock is required.
2014176771Sraj */
2015176771Srajstatic void
2016176771Srajmmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
2017176771Sraj{
2018176771Sraj	vm_offset_t va;
2019176771Sraj
2020176771Sraj	va = zero_page_idle_va;
2021176771Sraj	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2022176771Sraj	bzero((caddr_t)va, PAGE_SIZE);
2023176771Sraj	mmu_booke_kremove(mmu, va);
2024176771Sraj}
2025176771Sraj
2026176771Sraj/*
2027176771Sraj * Return whether or not the specified physical page was modified
2028176771Sraj * in any of the physical maps.
2029176771Sraj */
2030176771Srajstatic boolean_t
2031176771Srajmmu_booke_is_modified(mmu_t mmu, vm_page_t m)
2032176771Sraj{
2033176771Sraj	pte_t *pte;
2034176771Sraj	pv_entry_t pv;
2035176771Sraj
2036176771Sraj	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2037176771Sraj	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2038176771Sraj		return (FALSE);
2039176771Sraj
2040176771Sraj	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2041176771Sraj		PMAP_LOCK(pv->pv_pmap);
2042176771Sraj		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2043176771Sraj			if (!PTE_ISVALID(pte))
2044176771Sraj				goto make_sure_to_unlock;
2045176771Sraj
2046176771Sraj			if (PTE_ISMODIFIED(pte)) {
2047176771Sraj				PMAP_UNLOCK(pv->pv_pmap);
2048176771Sraj				return (TRUE);
2049176771Sraj			}
2050176771Sraj		}
2051176771Srajmake_sure_to_unlock:
2052176771Sraj		PMAP_UNLOCK(pv->pv_pmap);
2053176771Sraj	}
2054176771Sraj	return (FALSE);
2055176771Sraj}
2056176771Sraj
2057176771Sraj/*
2058187151Sraj * Return whether or not the specified virtual address is eligible
2059176771Sraj * for prefault.
2060176771Sraj */
2061176771Srajstatic boolean_t
2062176771Srajmmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
2063176771Sraj{
2064176771Sraj
2065176771Sraj	return (FALSE);
2066176771Sraj}
2067176771Sraj
2068176771Sraj/*
2069176771Sraj * Clear the modify bits on the specified physical page.
2070176771Sraj */
2071176771Srajstatic void
2072176771Srajmmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
2073176771Sraj{
2074176771Sraj	pte_t *pte;
2075176771Sraj	pv_entry_t pv;
2076176771Sraj
2077176771Sraj	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2078176771Sraj	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2079176771Sraj		return;
2080176771Sraj
2081176771Sraj	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2082176771Sraj		PMAP_LOCK(pv->pv_pmap);
2083176771Sraj		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2084176771Sraj			if (!PTE_ISVALID(pte))
2085176771Sraj				goto make_sure_to_unlock;
2086176771Sraj
2087187149Sraj			mtx_lock_spin(&tlbivax_mutex);
2088187149Sraj
2089176771Sraj			if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
2090187149Sraj				tlb0_flush_entry(pv->pv_va);
2091176771Sraj				pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
2092176771Sraj				    PTE_REFERENCED);
2093176771Sraj			}
2094187149Sraj
2095187149Sraj			mtx_unlock_spin(&tlbivax_mutex);
2096176771Sraj		}
2097176771Srajmake_sure_to_unlock:
2098176771Sraj		PMAP_UNLOCK(pv->pv_pmap);
2099176771Sraj	}
2100176771Sraj}
2101176771Sraj
2102176771Sraj/*
2103176771Sraj * Return a count of reference bits for a page, clearing those bits.
2104176771Sraj * It is not necessary for every reference bit to be cleared, but it
2105176771Sraj * is necessary that 0 only be returned when there are truly no
2106176771Sraj * reference bits set.
2107176771Sraj *
2108176771Sraj * XXX: The exact number of bits to check and clear is a matter that
2109176771Sraj * should be tested and standardized at some point in the future for
2110176771Sraj * optimal aging of shared pages.
2111176771Sraj */
2112176771Srajstatic int
2113176771Srajmmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
2114176771Sraj{
2115176771Sraj	pte_t *pte;
2116176771Sraj	pv_entry_t pv;
2117176771Sraj	int count;
2118176771Sraj
2119176771Sraj	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2120176771Sraj	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2121176771Sraj		return (0);
2122176771Sraj
2123176771Sraj	count = 0;
2124176771Sraj	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2125176771Sraj		PMAP_LOCK(pv->pv_pmap);
2126176771Sraj		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2127176771Sraj			if (!PTE_ISVALID(pte))
2128176771Sraj				goto make_sure_to_unlock;
2129176771Sraj
2130176771Sraj			if (PTE_ISREFERENCED(pte)) {
2131187149Sraj				mtx_lock_spin(&tlbivax_mutex);
2132187149Sraj
2133187149Sraj				tlb0_flush_entry(pv->pv_va);
2134176771Sraj				pte->flags &= ~PTE_REFERENCED;
2135176771Sraj
2136187149Sraj				mtx_unlock_spin(&tlbivax_mutex);
2137187149Sraj
2138176771Sraj				if (++count > 4) {
2139176771Sraj					PMAP_UNLOCK(pv->pv_pmap);
2140176771Sraj					break;
2141176771Sraj				}
2142176771Sraj			}
2143176771Sraj		}
2144176771Srajmake_sure_to_unlock:
2145176771Sraj		PMAP_UNLOCK(pv->pv_pmap);
2146176771Sraj	}
2147176771Sraj	return (count);
2148176771Sraj}
2149176771Sraj
2150176771Sraj/*
2151176771Sraj * Clear the reference bit on the specified physical page.
2152176771Sraj */
2153176771Srajstatic void
2154176771Srajmmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
2155176771Sraj{
2156176771Sraj	pte_t *pte;
2157176771Sraj	pv_entry_t pv;
2158176771Sraj
2159176771Sraj	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2160176771Sraj	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2161176771Sraj		return;
2162176771Sraj
2163176771Sraj	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2164176771Sraj		PMAP_LOCK(pv->pv_pmap);
2165176771Sraj		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2166176771Sraj			if (!PTE_ISVALID(pte))
2167176771Sraj				goto make_sure_to_unlock;
2168176771Sraj
2169176771Sraj			if (PTE_ISREFERENCED(pte)) {
2170187149Sraj				mtx_lock_spin(&tlbivax_mutex);
2171187149Sraj
2172187149Sraj				tlb0_flush_entry(pv->pv_va);
2173176771Sraj				pte->flags &= ~PTE_REFERENCED;
2174187149Sraj
2175187149Sraj				mtx_unlock_spin(&tlbivax_mutex);
2176176771Sraj			}
2177176771Sraj		}
2178176771Srajmake_sure_to_unlock:
2179176771Sraj		PMAP_UNLOCK(pv->pv_pmap);
2180176771Sraj	}
2181176771Sraj}
2182176771Sraj
2183176771Sraj/*
2184176771Sraj * Change wiring attribute for a map/virtual-address pair.
2185176771Sraj */
2186176771Srajstatic void
2187176771Srajmmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired)
2188176771Sraj{
2189176771Sraj	pte_t *pte;;
2190176771Sraj
2191176771Sraj	PMAP_LOCK(pmap);
2192176771Sraj	if ((pte = pte_find(mmu, pmap, va)) != NULL) {
2193176771Sraj		if (wired) {
2194176771Sraj			if (!PTE_ISWIRED(pte)) {
2195176771Sraj				pte->flags |= PTE_WIRED;
2196176771Sraj				pmap->pm_stats.wired_count++;
2197176771Sraj			}
2198176771Sraj		} else {
2199176771Sraj			if (PTE_ISWIRED(pte)) {
2200176771Sraj				pte->flags &= ~PTE_WIRED;
2201176771Sraj				pmap->pm_stats.wired_count--;
2202176771Sraj			}
2203176771Sraj		}
2204176771Sraj	}
2205176771Sraj	PMAP_UNLOCK(pmap);
2206176771Sraj}
2207176771Sraj
2208176771Sraj/*
2209176771Sraj * Return true if the pmap's pv is one of the first 16 pvs linked to from this
2210176771Sraj * page.  This count may be changed upwards or downwards in the future; it is
2211176771Sraj * only necessary that true be returned for a small subset of pmaps for proper
2212176771Sraj * page aging.
2213176771Sraj */
2214176771Srajstatic boolean_t
2215176771Srajmmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
2216176771Sraj{
2217176771Sraj	pv_entry_t pv;
2218176771Sraj	int loops;
2219176771Sraj
2220176771Sraj	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2221176771Sraj	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2222176771Sraj		return (FALSE);
2223176771Sraj
2224176771Sraj	loops = 0;
2225176771Sraj	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2226176771Sraj		if (pv->pv_pmap == pmap)
2227176771Sraj			return (TRUE);
2228176771Sraj
2229176771Sraj		if (++loops >= 16)
2230176771Sraj			break;
2231176771Sraj	}
2232176771Sraj	return (FALSE);
2233176771Sraj}
2234176771Sraj
2235176771Sraj/*
2236176771Sraj * Return the number of managed mappings to the given physical page that are
2237176771Sraj * wired.
2238176771Sraj */
2239176771Srajstatic int
2240176771Srajmmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
2241176771Sraj{
2242176771Sraj	pv_entry_t pv;
2243176771Sraj	pte_t *pte;
2244176771Sraj	int count = 0;
2245176771Sraj
2246176771Sraj	if ((m->flags & PG_FICTITIOUS) != 0)
2247176771Sraj		return (count);
2248176771Sraj	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2249176771Sraj
2250176771Sraj	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2251176771Sraj		PMAP_LOCK(pv->pv_pmap);
2252176771Sraj		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
2253176771Sraj			if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
2254176771Sraj				count++;
2255176771Sraj		PMAP_UNLOCK(pv->pv_pmap);
2256176771Sraj	}
2257176771Sraj
2258176771Sraj	return (count);
2259176771Sraj}
2260176771Sraj
2261176771Srajstatic int
2262176771Srajmmu_booke_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2263176771Sraj{
2264176771Sraj	int i;
2265176771Sraj	vm_offset_t va;
2266176771Sraj
2267176771Sraj	/*
2268176771Sraj	 * This currently does not work for entries that
2269176771Sraj	 * overlap TLB1 entries.
2270176771Sraj	 */
2271176771Sraj	for (i = 0; i < tlb1_idx; i++) {
2272176771Sraj		if (tlb1_iomapped(i, pa, size, &va) == 0)
2273176771Sraj			return (0);
2274176771Sraj	}
2275176771Sraj
2276176771Sraj	return (EFAULT);
2277176771Sraj}
2278176771Sraj
2279190701Smarcelvm_offset_t
2280190701Smarcelmmu_booke_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
2281190701Smarcel    vm_size_t *sz)
2282190701Smarcel{
2283190701Smarcel	vm_paddr_t pa, ppa;
2284190701Smarcel	vm_offset_t va;
2285190701Smarcel	vm_size_t gran;
2286190701Smarcel
2287190701Smarcel	/* Raw physical memory dumps don't have a virtual address. */
2288190701Smarcel	if (md->md_vaddr == ~0UL) {
2289190701Smarcel		/* We always map a 256MB page at 256M. */
2290190701Smarcel		gran = 256 * 1024 * 1024;
2291190701Smarcel		pa = md->md_paddr + ofs;
2292190701Smarcel		ppa = pa & ~(gran - 1);
2293190701Smarcel		ofs = pa - ppa;
2294190701Smarcel		va = gran;
2295190701Smarcel		tlb1_set_entry(va, ppa, gran, _TLB_ENTRY_IO);
2296190701Smarcel		if (*sz > (gran - ofs))
2297190701Smarcel			*sz = gran - ofs;
2298190701Smarcel		return (va + ofs);
2299190701Smarcel	}
2300190701Smarcel
2301190701Smarcel	/* Minidumps are based on virtual memory addresses. */
2302190701Smarcel	va = md->md_vaddr + ofs;
2303190701Smarcel	if (va >= kernstart + kernsize) {
2304190701Smarcel		gran = PAGE_SIZE - (va & PAGE_MASK);
2305190701Smarcel		if (*sz > gran)
2306190701Smarcel			*sz = gran;
2307190701Smarcel	}
2308190701Smarcel	return (va);
2309190701Smarcel}
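
/*
 * Worked example of the 256MB-window arithmetic above, as a standalone
 * userland program.  The sample physical address is hypothetical;
 * illustrative only.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t gran = 256 * 1024 * 1024;	/* 0x1000_0000 */
	uint32_t pa = 0x12345678;		/* md_paddr + ofs */
	uint32_t ppa = pa & ~(gran - 1);	/* 0x1000_0000 */
	uint32_t ofs = pa - ppa;		/* 0x0234_5678 */

	/* The window is wired at va == gran, so the dump reads from: */
	printf("va = 0x%08x\n", gran + ofs);	/* prints 0x12345678 */
	return (0);
}
#endif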
2310190701Smarcel
2311190701Smarcelvoid
2312190701Smarcelmmu_booke_dumpsys_unmap(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
2313190701Smarcel    vm_offset_t va)
2314190701Smarcel{
2315190701Smarcel
2316190701Smarcel	/* Raw physical memory dumps don't have a virtual address. */
2317190701Smarcel	if (md->md_vaddr == ~0UL) {
2318190701Smarcel		tlb1_idx--;
2319190701Smarcel		tlb1[tlb1_idx].mas1 = 0;
2320190701Smarcel		tlb1[tlb1_idx].mas2 = 0;
2321190701Smarcel		tlb1[tlb1_idx].mas3 = 0;
2322190701Smarcel		tlb1_write_entry(tlb1_idx);
2323190701Smarcel		return;
2324190701Smarcel	}
2325190701Smarcel
2326190701Smarcel	/* Minidumps are based on virtual memory addresses. */
2327190701Smarcel	/* Nothing to do... */
2328190701Smarcel}
2329190701Smarcel
2330190701Smarcelstruct pmap_md *
2331190701Smarcelmmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev)
2332190701Smarcel{
2333190701Smarcel	static struct pmap_md md;
2334190701Smarcel	struct bi_mem_region *mr;
2335190701Smarcel	pte_t *pte;
2336190701Smarcel	vm_offset_t va;
2337190701Smarcel
2338190701Smarcel	if (dumpsys_minidump) {
2339190701Smarcel		md.md_paddr = ~0UL;	/* Minidumps use virtual addresses. */
2340190701Smarcel		if (prev == NULL) {
2341190701Smarcel			/* 1st: kernel .data and .bss. */
2342190701Smarcel			md.md_index = 1;
2343190701Smarcel			md.md_vaddr = trunc_page((uintptr_t)_etext);
2344190701Smarcel			md.md_size = round_page((uintptr_t)_end) - md.md_vaddr;
2345190701Smarcel			return (&md);
2346190701Smarcel		}
2347190701Smarcel		switch (prev->md_index) {
2348190701Smarcel		case 1:
2349190701Smarcel			/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
2350190701Smarcel			md.md_index = 2;
2351190701Smarcel			md.md_vaddr = data_start;
2352190701Smarcel			md.md_size = data_end - data_start;
2353190701Smarcel			break;
2354190701Smarcel		case 2:
2355190701Smarcel			/* 3rd: kernel VM. */
2356190701Smarcel			va = prev->md_vaddr + prev->md_size;
2357190701Smarcel			/* Find start of next chunk (from va). */
2358190701Smarcel			while (va < virtual_end) {
2359190701Smarcel				/* Don't dump the buffer cache. */
2360190701Smarcel				if (va >= kmi.buffer_sva &&
2361190701Smarcel				    va < kmi.buffer_eva) {
2362190701Smarcel					va = kmi.buffer_eva;
2363190701Smarcel					continue;
2364190701Smarcel				}
2365190701Smarcel				pte = pte_find(mmu, kernel_pmap, va);
2366190701Smarcel				if (pte != NULL && PTE_ISVALID(pte))
2367190701Smarcel					break;
2368190701Smarcel				va += PAGE_SIZE;
2369190701Smarcel			}
2370190701Smarcel			if (va < virtual_end) {
2371190701Smarcel				md.md_vaddr = va;
2372190701Smarcel				va += PAGE_SIZE;
2373190701Smarcel				/* Find last page in chunk. */
2374190701Smarcel				while (va < virtual_end) {
2375190701Smarcel					/* Don't run into the buffer cache. */
2376190701Smarcel					if (va == kmi.buffer_sva)
2377190701Smarcel						break;
2378190701Smarcel					pte = pte_find(mmu, kernel_pmap, va);
2379190701Smarcel					if (pte == NULL || !PTE_ISVALID(pte))
2380190701Smarcel						break;
2381190701Smarcel					va += PAGE_SIZE;
2382190701Smarcel				}
2383190701Smarcel				md.md_size = va - md.md_vaddr;
2384190701Smarcel				break;
2385190701Smarcel			}
2386190701Smarcel			md.md_index = 3;
2387190701Smarcel			/* FALLTHROUGH */
2388190701Smarcel		default:
2389190701Smarcel			return (NULL);
2390190701Smarcel		}
2391190701Smarcel	} else { /* minidumps */
2392190701Smarcel		mr = bootinfo_mr();
2393190701Smarcel		if (prev == NULL) {
2394190701Smarcel			/* first physical chunk. */
2395190701Smarcel			md.md_paddr = mr->mem_base;
2396190701Smarcel			md.md_size = mr->mem_size;
2397190701Smarcel			md.md_vaddr = ~0UL;
2398190701Smarcel			md.md_index = 1;
2399190701Smarcel		} else if (md.md_index < bootinfo->bi_mem_reg_no) {
2400190701Smarcel			md.md_paddr = mr[md.md_index].mem_base;
2401190701Smarcel			md.md_size = mr[md.md_index].mem_size;
2402190701Smarcel			md.md_vaddr = ~0UL;
2403190701Smarcel			md.md_index++;
2404190701Smarcel		} else {
2405190701Smarcel			/* There's no next physical chunk. */
2406190701Smarcel			return (NULL);
2407190701Smarcel		}
2408190701Smarcel	}
2409190701Smarcel
2410190701Smarcel	return (&md);
2411190701Smarcel}
2412190701Smarcel
2413176771Sraj/*
2414176771Sraj * Map a set of physical memory pages into the kernel virtual address space.
2415176771Sraj * Return a pointer to where it is mapped. This routine is intended to be used
2416176771Sraj * for mapping device memory, NOT real memory.
2417176771Sraj */
2418176771Srajstatic void *
2419176771Srajmmu_booke_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2420176771Sraj{
2421184244Smarcel	void *res;
2422176771Sraj	uintptr_t va;
2423184244Smarcel	vm_size_t sz;
2424176771Sraj
2425176771Sraj	va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa);
2426184244Smarcel	res = (void *)va;
2427184244Smarcel
2428184244Smarcel	do {
2429184244Smarcel		sz = 1 << (ilog2(size) & ~1);
2430184244Smarcel		if (bootverbose)
2431184244Smarcel			printf("Wiring VA=%x to PA=%x (size=%x), "
2432184244Smarcel			    "using TLB1[%d]\n", va, pa, sz, tlb1_idx);
2433184244Smarcel		tlb1_set_entry(va, pa, sz, _TLB_ENTRY_IO);
2434184244Smarcel		size -= sz;
2435184244Smarcel		pa += sz;
2436184244Smarcel		va += sz;
2437184244Smarcel	} while (size > 0);
2438184244Smarcel
2439184244Smarcel	return (res);
2440176771Sraj}
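
/*
 * Worked example of the power-of-4 chunking loop in mmu_booke_mapdev(),
 * as a standalone userland program.  'ilog2_demo' portably re-implements
 * ilog2() above; the 9MB window size is hypothetical.  Illustrative only.
 */
#if 0
#include <stdio.h>

static unsigned int
ilog2_demo(unsigned long num)
{
	unsigned int log = 0;

	while ((num >>= 1) != 0)
		log++;
	return (log);
}

int
main(void)
{
	unsigned long size = 0x900000;	/* 9MB device window */
	unsigned long sz;

	while (size > 0) {
		/* Clearing the low exponent bit makes sz a power of 4. */
		sz = 1UL << (ilog2_demo(size) & ~1);
		printf("TLB1 chunk: 0x%lx\n", sz);	/* 4MB, 4MB, 1MB */
		size -= sz;
	}
	return (0);
}
#endif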
2441176771Sraj
2442176771Sraj/*
2443176771Sraj * 'Unmap' a range mapped by mmu_booke_mapdev().
2444176771Sraj */
2445176771Srajstatic void
2446176771Srajmmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
2447176771Sraj{
2448176771Sraj	vm_offset_t base, offset;
2449176771Sraj
2450176771Sraj	/*
2451176771Sraj	 * Unmap only if this is inside kernel virtual space.
2452176771Sraj	 */
2453176771Sraj	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
2454176771Sraj		base = trunc_page(va);
2455176771Sraj		offset = va & PAGE_MASK;
2456176771Sraj		size = roundup(offset + size, PAGE_SIZE);
2457176771Sraj		kmem_free(kernel_map, base, size);
2458176771Sraj	}
2459176771Sraj}
2460176771Sraj
2461176771Sraj/*
2462187151Sraj * mmu_booke_object_init_pt preloads the ptes for a given object into the
2463187151Sraj * specified pmap. This eliminates the blast of soft faults on process startup
2464187151Sraj * and immediately after an mmap.
2465176771Sraj */
2466176771Srajstatic void
2467176771Srajmmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
2468176771Sraj    vm_object_t object, vm_pindex_t pindex, vm_size_t size)
2469176771Sraj{
2470187151Sraj
2471176771Sraj	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2472176771Sraj	KASSERT(object->type == OBJT_DEVICE,
2473176771Sraj	    ("mmu_booke_object_init_pt: non-device object"));
2474176771Sraj}
2475176771Sraj
2476176771Sraj/*
2477176771Sraj * Perform the pmap work for mincore.
2478176771Sraj */
2479176771Srajstatic int
2480176771Srajmmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
2481176771Sraj{
2482176771Sraj
2483176771Sraj	TODO;
2484176771Sraj	return (0);
2485176771Sraj}
2486176771Sraj
2487176771Sraj/**************************************************************************/
2488176771Sraj/* TID handling */
2489176771Sraj/**************************************************************************/
2490176771Sraj
2491176771Sraj/*
2492176771Sraj * Allocate a TID. If necessary, steal one from someone else.
2493176771Sraj * The new TID is flushed from the TLB before returning.
2494176771Sraj */
2495176771Srajstatic tlbtid_t
2496176771Srajtid_alloc(pmap_t pmap)
2497176771Sraj{
2498176771Sraj	tlbtid_t tid;
2499187149Sraj	int thiscpu;
2500176771Sraj
2501187149Sraj	KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));
2502176771Sraj
2503187149Sraj	CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap);
2504176771Sraj
2505187149Sraj	thiscpu = PCPU_GET(cpuid);
2506176771Sraj
2507187149Sraj	tid = PCPU_GET(tid_next);
2508187149Sraj	if (tid > TID_MAX)
2509187149Sraj		tid = TID_MIN;
2510187149Sraj	PCPU_SET(tid_next, tid + 1);
2511176771Sraj
2512187149Sraj	/* If we are stealing a TID, clear the relevant pmap's field. */
2513187149Sraj	if (tidbusy[thiscpu][tid] != NULL) {
2514176771Sraj
2515187149Sraj		CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid);
2516187149Sraj
2517187149Sraj		tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;
2518176771Sraj
2519187149Sraj		/* Flush all entries from TLB0 matching this TID. */
2520187149Sraj		tid_flush(tid);
2521176771Sraj	}
2522176771Sraj
2523187149Sraj	tidbusy[thiscpu][tid] = pmap;
2524187149Sraj	pmap->pm_tid[thiscpu] = tid;
2525187149Sraj	__asm __volatile("msync; isync");
2526176771Sraj
2527187149Sraj	CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid,
2528187149Sraj	    PCPU_GET(tid_next));
2529176771Sraj
2530176771Sraj	return (tid);
2531176771Sraj}
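
/*
 * Standalone sketch of the round-robin allocation policy above.  The
 * DEMO_TID_MIN/DEMO_TID_MAX values are assumptions for illustration
 * (the real TID_MIN/TID_MAX come from the TLB headers); illustrative
 * only.
 */
#if 0
#include <stdio.h>

#define DEMO_TID_MIN	2
#define DEMO_TID_MAX	255

static int tid_next = DEMO_TID_MIN;
static void *tidbusy_demo[DEMO_TID_MAX + 1];

static int
tid_alloc_demo(void *pmap)
{
	int tid;

	tid = tid_next;
	if (tid > DEMO_TID_MAX)		/* wrap around, start stealing */
		tid = DEMO_TID_MIN;
	tid_next = tid + 1;
	if (tidbusy_demo[tid] != NULL)
		printf("stealing tid %d from its owner\n", tid);
	tidbusy_demo[tid] = pmap;
	return (tid);
}

int
main(void)
{
	int a = tid_alloc_demo(&tid_next);	/* 2 */
	int b = tid_alloc_demo(&tid_next);	/* 3 */

	printf("%d %d\n", a, b);
	return (0);
}
#endif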
2532176771Sraj
2533176771Sraj/**************************************************************************/
2534176771Sraj/* TLB0 handling */
2535176771Sraj/**************************************************************************/
2536176771Sraj
2537176771Srajstatic void
2538187149Srajtlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
2539187149Sraj    uint32_t mas7)
2540176771Sraj{
2541176771Sraj	int as;
2542176771Sraj	char desc[3];
2543176771Sraj	tlbtid_t tid;
2544176771Sraj	vm_size_t size;
2545176771Sraj	unsigned int tsize;
2546176771Sraj
2547176771Sraj	desc[2] = '\0';
2548176771Sraj	if (mas1 & MAS1_VALID)
2549176771Sraj		desc[0] = 'V';
2550176771Sraj	else
2551176771Sraj		desc[0] = ' ';
2552176771Sraj
2553176771Sraj	if (mas1 & MAS1_IPROT)
2554176771Sraj		desc[1] = 'P';
2555176771Sraj	else
2556176771Sraj		desc[1] = ' ';
2557176771Sraj
2558187149Sraj	as = (mas1 & MAS1_TS_MASK) ? 1 : 0;
2559176771Sraj	tid = MAS1_GETTID(mas1);
2560176771Sraj
2561176771Sraj	tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
2562176771Sraj	size = 0;
2563176771Sraj	if (tsize)
2564176771Sraj		size = tsize2size(tsize);
2565176771Sraj
2566176771Sraj	debugf("%3d: (%s) [AS=%d] "
2567176771Sraj	    "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x "
2568176771Sraj	    "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n",
2569176771Sraj	    i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7);
2570176771Sraj}
2571176771Sraj
2572176771Sraj/* Convert TLB0 va and way number to tlb0[] table index. */
2573176771Srajstatic inline unsigned int
2574176771Srajtlb0_tableidx(vm_offset_t va, unsigned int way)
2575176771Sraj{
2576176771Sraj	unsigned int idx;
2577176771Sraj
2578176771Sraj	idx = (way * TLB0_ENTRIES_PER_WAY);
2579176771Sraj	idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
2580176771Sraj	return (idx);
2581176771Sraj}
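
/*
 * Worked example of tlb0_tableidx().  Assumes a 2-way, 256-entry TLB0
 * (128 entries per way) with the set index taken from VA bits [18:12];
 * these geometry constants are assumptions for illustration.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t va = 0xc1004000;
	unsigned int way = 1;
	unsigned int set = (va >> 12) & 0x7f;	/* VA bits [18:12] */
	unsigned int idx = way * 128 + set;

	/* set = 4, so idx = 128 + 4 = 132 */
	printf("tlb0[] index = %u\n", idx);
	return (0);
}
#endif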
2582176771Sraj
2583176771Sraj/*
2584187149Sraj * Invalidate TLB0 entry.
2585176771Sraj */
2586187149Srajstatic inline void
2587187149Srajtlb0_flush_entry(vm_offset_t va)
2588176771Sraj{
2589176771Sraj
2590187149Sraj	CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);
2591176771Sraj
2592187149Sraj	mtx_assert(&tlbivax_mutex, MA_OWNED);
2593176771Sraj
2594187149Sraj	__asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
2595187149Sraj	__asm __volatile("isync; msync");
2596187149Sraj	__asm __volatile("tlbsync; msync");
2597176771Sraj
2598187149Sraj	CTR1(KTR_PMAP, "%s: e", __func__);
2599176771Sraj}
2600176771Sraj
2601176771Sraj/* Print out contents of the MAS registers for each TLB0 entry */
2602187149Srajvoid
2603176771Srajtlb0_print_tlbentries(void)
2604176771Sraj{
2605187149Sraj	uint32_t mas0, mas1, mas2, mas3, mas7;
2606176771Sraj	int entryidx, way, idx;
2607176771Sraj
2608176771Sraj	debugf("TLB0 entries:\n");
2609187149Sraj	for (way = 0; way < TLB0_WAYS; way++)
2610176771Sraj		for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {
2611176771Sraj
2612176771Sraj			mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
2613176771Sraj			mtspr(SPR_MAS0, mas0);
2614187149Sraj			__asm __volatile("isync");
2615176771Sraj
2616176771Sraj			mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
2617176771Sraj			mtspr(SPR_MAS2, mas2);
2618176771Sraj
2619187149Sraj			__asm __volatile("isync; tlbre");
2620176771Sraj
2621176771Sraj			mas1 = mfspr(SPR_MAS1);
2622176771Sraj			mas2 = mfspr(SPR_MAS2);
2623176771Sraj			mas3 = mfspr(SPR_MAS3);
2624176771Sraj			mas7 = mfspr(SPR_MAS7);
2625176771Sraj
2626176771Sraj			idx = tlb0_tableidx(mas2, way);
2627176771Sraj			tlb_print_entry(idx, mas1, mas2, mas3, mas7);
2628176771Sraj		}
2629176771Sraj}
2630176771Sraj
2631176771Sraj/**************************************************************************/
2632176771Sraj/* TLB1 handling */
2633176771Sraj/**************************************************************************/
2634187149Sraj
2635176771Sraj/*
2636187149Sraj * TLB1 mapping notes:
2637187149Sraj *
2638187149Sraj * TLB1[0]	CCSRBAR
2639187149Sraj * TLB1[1]	Kernel text and data.
2640187149Sraj * TLB1[2-15]	Additional kernel text and data mappings (if required), PCI
2641187149Sraj *		windows, other devices mappings.
2642187149Sraj */
2643187149Sraj
2644187149Sraj/*
2645176771Sraj * Write the given entry to TLB1 hardware.
2646176771Sraj * A 32-bit pa is used, so the 4 high-order RPN bits (MAS7) are cleared.
2647176771Sraj */
2648176771Srajstatic void
2649176771Srajtlb1_write_entry(unsigned int idx)
2650176771Sraj{
2651187151Sraj	uint32_t mas0, mas7;
2652176771Sraj
2653176771Sraj	//debugf("tlb1_write_entry: s\n");
2654176771Sraj
2655176771Sraj	/* Clear high order RPN bits */
2656176771Sraj	mas7 = 0;
2657176771Sraj
2658176771Sraj	/* Select entry */
2659176771Sraj	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx);
2660176771Sraj	//debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0);
2661176771Sraj
2662176771Sraj	mtspr(SPR_MAS0, mas0);
2663187151Sraj	__asm __volatile("isync");
2664176771Sraj	mtspr(SPR_MAS1, tlb1[idx].mas1);
2665187151Sraj	__asm __volatile("isync");
2666176771Sraj	mtspr(SPR_MAS2, tlb1[idx].mas2);
2667187151Sraj	__asm __volatile("isync");
2668176771Sraj	mtspr(SPR_MAS3, tlb1[idx].mas3);
2669187151Sraj	__asm __volatile("isync");
2670176771Sraj	mtspr(SPR_MAS7, mas7);
2671187151Sraj	__asm __volatile("isync; tlbwe; isync; msync");
2672176771Sraj
2673176771Sraj	//debugf("tlb1_write_entry: e\n");
2674176771Sraj}
2675176771Sraj
2676176771Sraj/*
2677176771Sraj * Return the largest uint value log such that 2^log <= num.
2678176771Sraj */
2679176771Srajstatic unsigned int
2680176771Srajilog2(unsigned int num)
2681176771Sraj{
2682176771Sraj	int lz;
2683176771Sraj
2684176771Sraj	__asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
2685176771Sraj	return (31 - lz);
2686176771Sraj}
2687176771Sraj
2688176771Sraj/*
2689176771Sraj * Convert TLB TSIZE value to mapped region size.
2690176771Sraj */
2691176771Srajstatic vm_size_t
2692176771Srajtsize2size(unsigned int tsize)
2693176771Sraj{
2694176771Sraj
2695176771Sraj	/*
2696176771Sraj	 * size = 4^tsize KB
2697176771Sraj	 * size = 4^tsize * 2^10 = 2^(2 * tsize - 10)
2698176771Sraj	 */
2699176771Sraj
2700176771Sraj	return ((1 << (2 * tsize)) * 1024);
2701176771Sraj}
2702176771Sraj
2703176771Sraj/*
2704176771Sraj * Convert region size (must be a power of 4) to TLB TSIZE value.
2705176771Sraj */
2706176771Srajstatic unsigned int
2707176771Srajsize2tsize(vm_size_t size)
2708176771Sraj{
2709176771Sraj
2710176771Sraj	return (ilog2(size) / 2 - 5);
2711176771Sraj}
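
/*
 * Worked example of the TSIZE <-> size arithmetic above: TSIZE encodes
 * sizes of 4^tsize KB, so e.g. tsize 7 maps 16MB.  Standalone userland
 * program; illustrative only.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned int tsize = 7;
	unsigned long size = (1UL << (2 * tsize)) * 1024;

	/* 4^7 KB = 16384 KB = 16MB */
	printf("tsize %u -> %lu bytes\n", tsize, size);
	/* And back: ilog2(16MB) == 24, and 24 / 2 - 5 == 7. */
	return (0);
}
#endif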
2712176771Sraj
2713176771Sraj/*
2714187149Sraj * Register a permanent kernel mapping in TLB1.
2715176771Sraj *
2716187149Sraj * Entries are created starting from index 0 (current free entry is
2717187149Sraj * kept in tlb1_idx) and are not supposed to be invalidated.
2718176771Sraj */
2719187149Srajstatic int
2720187149Srajtlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size,
2721187149Sraj    uint32_t flags)
2722176771Sraj{
2723187149Sraj	uint32_t ts, tid;
2724176771Sraj	int tsize;
2725187149Sraj
2726187149Sraj	if (tlb1_idx >= TLB1_ENTRIES) {
2727187149Sraj		printf("tlb1_set_entry: TLB1 full!\n");
2728187149Sraj		return (-1);
2729187149Sraj	}
2730176771Sraj
2731176771Sraj	/* Convert size to TSIZE */
2732176771Sraj	tsize = size2tsize(size);
2733176771Sraj
2734187149Sraj	tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK;
2735187149Sraj	/* XXX TS is hard-coded to 0 for now as we only use a single address space. */
2736187149Sraj	ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;
2737176771Sraj
2738187149Sraj	/* XXX LOCK tlb1[] */
2739176771Sraj
2740187149Sraj	tlb1[tlb1_idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
2741187149Sraj	tlb1[tlb1_idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
2742187149Sraj	tlb1[tlb1_idx].mas2 = (va & MAS2_EPN_MASK) | flags;
2743176771Sraj
2744187149Sraj	/* Set supervisor RWX permission bits */
2745187149Sraj	tlb1[tlb1_idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
2746176771Sraj
2747187149Sraj	tlb1_write_entry(tlb1_idx++);
2748176771Sraj
2749187149Sraj	/* XXX UNLOCK tlb1[] */
2750176771Sraj
2751187149Sraj	/*
2752187149Sraj	 * XXX in general TLB1 updates should be propagated between CPUs,
2753187149Sraj	 * since the current design assumes the same TLB1 set-up on all
2754187149Sraj	 * cores.
2755187149Sraj	 */
2756176771Sraj	return (0);
2757176771Sraj}
2758176771Sraj
2759176771Srajstatic int
2760176771Srajtlb1_entry_size_cmp(const void *a, const void *b)
2761176771Sraj{
2762176771Sraj	const vm_size_t *sza;
2763176771Sraj	const vm_size_t *szb;
2764176771Sraj
2765176771Sraj	sza = a;
2766176771Sraj	szb = b;
2767176771Sraj	if (*sza > *szb)
2768176771Sraj		return (-1);
2769176771Sraj	else if (*sza < *szb)
2770176771Sraj		return (1);
2771176771Sraj	else
2772176771Sraj		return (0);
2773176771Sraj}
2774176771Sraj
2775176771Sraj/*
2776187151Sraj * Map a contiguous RAM region into TLB1 using at most
2777176771Sraj * KERNEL_REGION_MAX_TLB_ENTRIES entries.
2778176771Sraj *
2779187151Sraj * If necessary, round up the last entry size and return the total size
2780176771Sraj * used by all allocated entries.
2781176771Sraj */
2782176771Srajvm_size_t
2783176771Srajtlb1_mapin_region(vm_offset_t va, vm_offset_t pa, vm_size_t size)
2784176771Sraj{
2785176771Sraj	vm_size_t entry_size[KERNEL_REGION_MAX_TLB_ENTRIES];
2786176771Sraj	vm_size_t mapped_size, sz, esz;
2787176771Sraj	unsigned int log;
2788176771Sraj	int i;
2789176771Sraj
2790187151Sraj	CTR4(KTR_PMAP, "%s: region size = 0x%08x va = 0x%08x pa = 0x%08x",
2791187151Sraj	    __func__, size, va, pa);
2792176771Sraj
2793176771Sraj	mapped_size = 0;
2794176771Sraj	sz = size;
2795176771Sraj	memset(entry_size, 0, sizeof(entry_size));
2796176771Sraj
2797176771Sraj	/* Calculate entry sizes. */
2798176771Sraj	for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES && sz > 0; i++) {
2799176771Sraj
2800176771Sraj		/* Largest region that is a power of 4 and fits within size. */
2801187149Sraj		log = ilog2(sz) / 2;
2802176771Sraj		esz = 1 << (2 * log);
2803176771Sraj
2804176771Sraj		/* If this is the last entry, cover the remaining size. */
2805176771Sraj		if (i == KERNEL_REGION_MAX_TLB_ENTRIES - 1) {
2806176771Sraj			while (esz < sz)
2807176771Sraj				esz = esz << 2;
2808176771Sraj		}
2809176771Sraj
2810176771Sraj		entry_size[i] = esz;
2811176771Sraj		mapped_size += esz;
2812176771Sraj		if (esz < sz)
2813176771Sraj			sz -= esz;
2814176771Sraj		else
2815176771Sraj			sz = 0;
2816176771Sraj	}
2817176771Sraj
2818176771Sraj	/* Sort entry sizes, required to get proper entry address alignment. */
2819176771Sraj	qsort(entry_size, KERNEL_REGION_MAX_TLB_ENTRIES,
2820176771Sraj	    sizeof(vm_size_t), tlb1_entry_size_cmp);
2821176771Sraj
2822176771Sraj	/* Load TLB1 entries. */
2823176771Sraj	for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES; i++) {
2824176771Sraj		esz = entry_size[i];
2825176771Sraj		if (!esz)
2826176771Sraj			break;
2827187151Sraj
2828187151Sraj		CTR5(KTR_PMAP, "%s: entry %d: sz  = 0x%08x (va = 0x%08x "
2829187151Sraj		    "pa = 0x%08x)", __func__, tlb1_idx, esz, va, pa);
2830187151Sraj
2831176771Sraj		tlb1_set_entry(va, pa, esz, _TLB_ENTRY_MEM);
2832176771Sraj
2833176771Sraj		va += esz;
2834176771Sraj		pa += esz;
2835176771Sraj	}
2836176771Sraj
2837187151Sraj	CTR3(KTR_PMAP, "%s: mapped size 0x%08x (wasted space 0x%08x)",
2838187151Sraj	    __func__, mapped_size, mapped_size - size);
2839176771Sraj
2840176771Sraj	return (mapped_size);
2841176771Sraj}
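
/*
 * Standalone sketch of the entry-size calculation in tlb1_mapin_region(),
 * assuming KERNEL_REGION_MAX_TLB_ENTRIES == 4 (an assumption for
 * illustration).  For a 26MB region it selects 16MB + 4MB + 4MB + 4MB
 * entries, i.e. 28MB mapped with 2MB of wasted space.
 */
#if 0
#include <stdio.h>

static unsigned int
ilog2_demo(unsigned long num)
{
	unsigned int log = 0;

	while ((num >>= 1) != 0)
		log++;
	return (log);
}

int
main(void)
{
	unsigned long sz = 26UL * 1024 * 1024;	/* 26MB region */
	unsigned long esz;
	int i;

	for (i = 0; i < 4 && sz > 0; i++) {
		/* Largest power of 4 that fits in the remainder. */
		esz = 1UL << (2 * (ilog2_demo(sz) / 2));
		if (i == 3)	/* last entry: round up to cover the rest */
			while (esz < sz)
				esz <<= 2;
		printf("entry %d: %lu MB\n", i, esz >> 20);
		sz -= (esz < sz) ? esz : sz;
	}
	return (0);
}
#endif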
2842176771Sraj
2843176771Sraj/*
2844176771Sraj * TLB1 initialization routine, to be called after the very first
2845176771Sraj * assembler level setup done in locore.S.
2846176771Sraj */
2847176771Srajvoid
2848176771Srajtlb1_init(vm_offset_t ccsrbar)
2849176771Sraj{
2850176771Sraj	uint32_t mas0;
2851176771Sraj
2852187151Sraj	/* TLB1[1] is used to map the kernel. Save that entry. */
2853176771Sraj	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(1);
2854176771Sraj	mtspr(SPR_MAS0, mas0);
2855176771Sraj	__asm __volatile("isync; tlbre");
2856176771Sraj
	tlb1[1].mas1 = mfspr(SPR_MAS1);
	tlb1[1].mas2 = mfspr(SPR_MAS2);
	tlb1[1].mas3 = mfspr(SPR_MAS3);

	/* Map in CCSRBAR in TLB1[0]. */
	tlb1_idx = 0;
	tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO);
	/*
	 * Set the next available TLB1 entry index. Note that TLB1[1] is
	 * reserved for the initial mapping of kernel text+data, which was
	 * set up early in locore.S, so we need to skip over this [busy]
	 * entry.
	 */
	tlb1_idx = 2;

	/* Setup TLB miss defaults. */
	set_mas4_defaults();
}
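
/*
 * Resulting TLB1 layout after tlb1_init() (illustrative):
 *
 *	entry 0: CCSRBAR_VA -> ccsrbar (CCSRBAR_SIZE, I/O)
 *	entry 1: kernel text+data, as set up in locore.S
 *	entry 2 and up: free; tlb1_idx == 2 marks the next slot to use
 */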

/*
 * Setup MAS4 defaults.
 * These values are loaded into MAS0-MAS2 by the hardware on a TLB miss.
 */
static void
set_mas4_defaults(void)
{
	uint32_t mas4;

	/* Defaults: TLB0, PID0, TSIZED=4K */
	mas4 = MAS4_TLBSELD0;
	mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;

	mtspr(SPR_MAS4, mas4);
	__asm __volatile("isync");
}
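
/*
 * Effect (illustrative): on a TLB miss the core seeds the MAS registers
 * from these defaults, so the miss handler's tlbwe installs the new
 * mapping into TLB0 as a 4K page unless it overrides those fields.
 */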

/*
 * Print out contents of the MAS registers for each TLB1 entry.
 */
void
tlb1_print_tlbentries(void)
{
	uint32_t mas0, mas1, mas2, mas3, mas7;
	int i;

	debugf("TLB1 entries:\n");
	for (i = 0; i < TLB1_ENTRIES; i++) {
		mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
		mtspr(SPR_MAS0, mas0);

		__asm __volatile("isync; tlbre");

		mas1 = mfspr(SPR_MAS1);
		mas2 = mfspr(SPR_MAS2);
		mas3 = mfspr(SPR_MAS3);
		mas7 = mfspr(SPR_MAS7);

		tlb_print_entry(i, mas1, mas2, mas3, mas7);
	}
}

/*
 * Print out contents of the in-RAM tlb1[] table.
 */
void
tlb1_print_entries(void)
{
	int i;

	debugf("tlb1[] table entries:\n");
	for (i = 0; i < TLB1_ENTRIES; i++)
		tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0);
}

/*
 * Return 0 if the physical I/O range is encompassed by one of the
 * TLB1 entries, otherwise return the related error code.
 */
static int
tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
{
	uint32_t prot;
	vm_paddr_t pa_start;
	vm_paddr_t pa_end;
	unsigned int entry_tsize;
	vm_size_t entry_size;

	*va = (vm_offset_t)NULL;

	/* Skip invalid entries. */
	if (!(tlb1[i].mas1 & MAS1_VALID))
		return (EINVAL);

	/*
	 * The entry must be cache-inhibited, guarded, and r/w
	 * so it can function as an I/O page.
	 */
	prot = tlb1[i].mas2 & (MAS2_I | MAS2_G);
	if (prot != (MAS2_I | MAS2_G))
		return (EPERM);

	prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW);
	if (prot != (MAS3_SR | MAS3_SW))
		return (EPERM);

	/* The address should be within the entry range. */
	entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
	KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));

	entry_size = tsize2size(entry_tsize);
	pa_start = tlb1[i].mas3 & MAS3_RPN;
	pa_end = pa_start + entry_size - 1;

	/* Note pa_end is the inclusive last byte covered by the entry. */
	if ((pa < pa_start) || ((pa + size - 1) > pa_end))
		return (ERANGE);

	/* Return virtual address of this mapping. */
	*va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start);
	return (0);
}
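
/*
 * Usage sketch (hypothetical caller): a pmap_mapdev()-style routine can
 * scan the TLB1 slots already handed out for an entry that covers the
 * requested physical range before creating a fresh mapping:
 *
 *	vm_offset_t va;
 *	int i;
 *
 *	for (i = 0; i < tlb1_idx; i++)
 *		if (tlb1_iomapped(i, pa, size, &va) == 0)
 *			return ((void *)va);
 */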