/*-
 * Copyright (c) 2008-2015 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/powerpc/aim/mmu_oea64.c 323968 2017-09-24 12:53:33Z markj $");

/*
 * Manages physical address maps.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * or reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_compat.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/cpuset.h>
#include <sys/kerneldump.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#include <sys/smp.h>

#include <sys/kdb.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/platform.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/bat.h>
#include <machine/hid.h>
#include <machine/pte.h>
#include <machine/sr.h>
#include <machine/trap.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"

void moea64_release_vsid(uint64_t vsid);
uintptr_t moea64_get_unique_vsid(void);
#define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define ENABLE_TRANS(msr)	mtmsr(msr)

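/*
 * A VSID packs a 4-bit segment number in its low bits with a 20-bit hash
 * above it; VSID_TO_HASH() recovers the hash from a VSID.
 */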
#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
#define	VSID_HASH_MASK		0x0000007fffffffffULL

/*
 * Locking semantics:
 *
 * There are two locks of interest: the page locks and the pmap locks, which
 * protect their individual PVO lists and are locked in that order. The contents
 * of all PVO entries are protected by the locks of their respective pmaps.
 * The pmap of any PVO is guaranteed not to change so long as the PVO is linked
 * into any list.
 *
 */

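/*
 * A page's PV list lock is selected by hashing its physical address into a
 * fixed array of padded mutexes.
 */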
#define PV_LOCK_COUNT	PA_LOCK_COUNT*3
static struct mtx_padalign pv_lock[PV_LOCK_COUNT];

#define PV_LOCKPTR(pa)	((struct mtx *)(&pv_lock[pa_index(pa) % PV_LOCK_COUNT]))
#define PV_LOCK(pa)		mtx_lock(PV_LOCKPTR(pa))
#define PV_UNLOCK(pa)		mtx_unlock(PV_LOCKPTR(pa))
#define PV_LOCKASSERT(pa)	mtx_assert(PV_LOCKPTR(pa), MA_OWNED)
#define PV_PAGE_LOCK(m)		PV_LOCK(VM_PAGE_TO_PHYS(m))
#define PV_PAGE_UNLOCK(m)	PV_UNLOCK(VM_PAGE_TO_PHYS(m))
#define PV_PAGE_LOCKASSERT(m)	PV_LOCKASSERT(VM_PAGE_TO_PHYS(m))

struct ofw_map {
	cell_t	om_va;
	cell_t	om_len;
	uint64_t om_pa;
	cell_t	om_mode;
};

extern unsigned char _etext[];
extern unsigned char _end[];

/*
 * Map of physical memory regions.
 */
static struct	mem_region *regions;
static struct	mem_region *pregions;
static u_int	phys_avail_count;
static int	regions_sz, pregions_sz;

extern void bs_remap_earlyboot(void);

/*
 * Lock for the SLB tables.
 */
struct mtx	moea64_slb_mutex;

/*
 * PTEG data.
 */
u_int		moea64_pteg_count;
u_int		moea64_pteg_mask;

/*
 * PVO data.
 */

uma_zone_t	moea64_pvo_zone; /* zone for pvo entries */

static struct	pvo_entry *moea64_bpvo_pool;
static int	moea64_bpvo_pool_index = 0;
static int	moea64_bpvo_pool_size = 327680;
TUNABLE_INT("machdep.moea64_bpvo_pool_size", &moea64_bpvo_pool_size);
SYSCTL_INT(_machdep, OID_AUTO, moea64_allocated_bpvo_entries, CTLFLAG_RD,
    &moea64_bpvo_pool_index, 0, "");

#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
#ifdef __powerpc64__
#define	NVSIDS		(NPMAPS * 16)
#define VSID_HASHMASK	0xffffffffUL
#else
#define NVSIDS		NPMAPS
#define VSID_HASHMASK	0xfffffUL
#endif
static u_int	moea64_vsid_bitmap[NVSIDS / VSID_NBPW];

static boolean_t moea64_initialized = FALSE;

/*
 * Statistics.
 */
u_int	moea64_pte_valid = 0;
u_int	moea64_pte_overflow = 0;
u_int	moea64_pvo_entries = 0;
u_int	moea64_pvo_enter_calls = 0;
u_int	moea64_pvo_remove_calls = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
    &moea64_pte_valid, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
    &moea64_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
    &moea64_pvo_entries, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
    &moea64_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
    &moea64_pvo_remove_calls, 0, "");

vm_offset_t	moea64_scratchpage_va[2];
struct pvo_entry *moea64_scratchpage_pvo[2];
struct	mtx	moea64_scratchpage_mtx;

uint64_t	moea64_large_page_mask = 0;
uint64_t	moea64_large_page_size = 0;
int		moea64_large_page_shift = 0;

/*
 * PVO calls.
 */
static int	moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo,
		    struct pvo_head *pvo_head);
static void	moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo);
static void	moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo);
static struct	pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);

/*
 * Utility routines.
 */
static boolean_t	moea64_query_bit(mmu_t, vm_page_t, uint64_t);
static u_int		moea64_clear_bit(mmu_t, vm_page_t, uint64_t);
static void		moea64_kremove(mmu_t, vm_offset_t);
static void		moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
			    vm_paddr_t pa, vm_size_t sz);
static void		moea64_pmap_init_qpages(void);

/*
 * Kernel MMU interface
 */
void moea64_clear_modify(mmu_t, vm_page_t);
void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize);
int moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
    u_int flags, int8_t psind);
void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea64_init(mmu_t);
boolean_t moea64_is_modified(mmu_t, vm_page_t);
boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
boolean_t moea64_is_referenced(mmu_t, vm_page_t);
int moea64_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
void moea64_page_init(mmu_t, vm_page_t);
int moea64_page_wired_mappings(mmu_t, vm_page_t);
void moea64_pinit(mmu_t, pmap_t);
void moea64_pinit0(mmu_t, pmap_t);
void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
void moea64_qremove(mmu_t, vm_offset_t, int);
void moea64_release(mmu_t, pmap_t);
void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_remove_pages(mmu_t, pmap_t);
void moea64_remove_all(mmu_t, vm_page_t);
void moea64_remove_write(mmu_t, vm_page_t);
void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_zero_page(mmu_t, vm_page_t);
void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
void moea64_zero_page_idle(mmu_t, vm_page_t);
void moea64_activate(mmu_t, struct thread *);
void moea64_deactivate(mmu_t, struct thread *);
void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t);
void *moea64_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_paddr_t moea64_kextract(mmu_t, vm_offset_t);
void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
void moea64_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t ma);
void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t);
boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
void moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz,
    void **va);
void moea64_scan_init(mmu_t mmu);
vm_offset_t moea64_quick_enter_page(mmu_t mmu, vm_page_t m);
void moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr);

static mmu_method_t moea64_methods[] = {
	MMUMETHOD(mmu_clear_modify,	moea64_clear_modify),
	MMUMETHOD(mmu_copy_page,	moea64_copy_page),
	MMUMETHOD(mmu_copy_pages,	moea64_copy_pages),
	MMUMETHOD(mmu_enter,		moea64_enter),
	MMUMETHOD(mmu_enter_object,	moea64_enter_object),
	MMUMETHOD(mmu_enter_quick,	moea64_enter_quick),
	MMUMETHOD(mmu_extract,		moea64_extract),
	MMUMETHOD(mmu_extract_and_hold,	moea64_extract_and_hold),
	MMUMETHOD(mmu_init,		moea64_init),
	MMUMETHOD(mmu_is_modified,	moea64_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	moea64_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	moea64_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	moea64_ts_referenced),
	MMUMETHOD(mmu_map,     		moea64_map),
	MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
	MMUMETHOD(mmu_page_init,	moea64_page_init),
	MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		moea64_pinit),
	MMUMETHOD(mmu_pinit0,		moea64_pinit0),
	MMUMETHOD(mmu_protect,		moea64_protect),
	MMUMETHOD(mmu_qenter,		moea64_qenter),
	MMUMETHOD(mmu_qremove,		moea64_qremove),
	MMUMETHOD(mmu_release,		moea64_release),
	MMUMETHOD(mmu_remove,		moea64_remove),
	MMUMETHOD(mmu_remove_pages,	moea64_remove_pages),
	MMUMETHOD(mmu_remove_all,      	moea64_remove_all),
	MMUMETHOD(mmu_remove_write,	moea64_remove_write),
	MMUMETHOD(mmu_sync_icache,	moea64_sync_icache),
	MMUMETHOD(mmu_unwire,		moea64_unwire),
	MMUMETHOD(mmu_zero_page,       	moea64_zero_page),
	MMUMETHOD(mmu_zero_page_area,	moea64_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	moea64_zero_page_idle),
	MMUMETHOD(mmu_activate,		moea64_activate),
	MMUMETHOD(mmu_deactivate,      	moea64_deactivate),
	MMUMETHOD(mmu_page_set_memattr,	moea64_page_set_memattr),
	MMUMETHOD(mmu_quick_enter_page, moea64_quick_enter_page),
	MMUMETHOD(mmu_quick_remove_page, moea64_quick_remove_page),

	/* Internal interfaces */
	MMUMETHOD(mmu_mapdev,		moea64_mapdev),
	MMUMETHOD(mmu_mapdev_attr,	moea64_mapdev_attr),
	MMUMETHOD(mmu_unmapdev,		moea64_unmapdev),
	MMUMETHOD(mmu_kextract,		moea64_kextract),
	MMUMETHOD(mmu_kenter,		moea64_kenter),
	MMUMETHOD(mmu_kenter_attr,	moea64_kenter_attr),
	MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
	MMUMETHOD(mmu_scan_init,	moea64_scan_init),
	MMUMETHOD(mmu_dumpsys_map,	moea64_dumpsys_map),

	{ 0, 0 }
};

MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0);

static struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	mtx_assert(PV_LOCKPTR(VM_PAGE_TO_PHYS(m)), MA_OWNED);
	return (&m->md.mdpg_pvoh);
}

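/*
 * Allocate a PVO entry. Before the VM system is initialized (or when the
 * caller explicitly requests it), entries come from the static bootstrap
 * pool; afterwards they come from the UMA zone.
 */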
static struct pvo_entry *
alloc_pvo_entry(int bootstrap)
{
	struct pvo_entry *pvo;

	if (!moea64_initialized || bootstrap) {
		if (moea64_bpvo_pool_index >= moea64_bpvo_pool_size) {
			panic("moea64_enter: bpvo pool exhausted, %d, %d, %zd",
			      moea64_bpvo_pool_index, moea64_bpvo_pool_size,
			      moea64_bpvo_pool_size * sizeof(struct pvo_entry));
		}
		pvo = &moea64_bpvo_pool[
		    atomic_fetchadd_int(&moea64_bpvo_pool_index, 1)];
		bzero(pvo, sizeof(*pvo));
		pvo->pvo_vaddr = PVO_BOOTSTRAP;
	} else {
		pvo = uma_zalloc(moea64_pvo_zone, M_NOWAIT);
		bzero(pvo, sizeof(*pvo));
	}

	return (pvo);
}


static void
init_pvo_entry(struct pvo_entry *pvo, pmap_t pmap, vm_offset_t va)
{
	uint64_t vsid;
	uint64_t hash;
	int shift;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	pvo->pvo_pmap = pmap;
	va &= ~ADDR_POFF;
	pvo->pvo_vaddr |= va;
	vsid = va_to_vsid(pmap, va);
	pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
	    | (vsid << 16);
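
	/*
	 * Compute the slot of this mapping's primary page table entry group
	 * (PTEG): the hash folds the VSID together with the virtual page
	 * index, and each PTEG holds eight PTEs (hence the shift by 3).
	 */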
	shift = (pvo->pvo_vaddr & PVO_LARGE) ? moea64_large_page_shift :
	    ADDR_PIDX_SHFT;
	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)va & ADDR_PIDX) >> shift);
	pvo->pvo_pte.slot = (hash & moea64_pteg_mask) << 3;
}

static void
free_pvo_entry(struct pvo_entry *pvo)
{

	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
		uma_zfree(moea64_pvo_zone, pvo);
}

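/*
 * Build the architected 64-bit PTE image for a PVO: the abbreviated virtual
 * page number (AVPN) and flag bits form the high doubleword, while the
 * physical address, WIMG and page-protection bits form the low doubleword.
 */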
void
moea64_pte_from_pvo(const struct pvo_entry *pvo, struct lpte *lpte)
{

	lpte->pte_hi = (pvo->pvo_vpn >> (ADDR_API_SHFT64 - ADDR_PIDX_SHFT)) &
	    LPTE_AVPN_MASK;
	lpte->pte_hi |= LPTE_VALID;

	if (pvo->pvo_vaddr & PVO_LARGE)
		lpte->pte_hi |= LPTE_BIG;
	if (pvo->pvo_vaddr & PVO_WIRED)
		lpte->pte_hi |= LPTE_WIRED;
	if (pvo->pvo_vaddr & PVO_HID)
		lpte->pte_hi |= LPTE_HID;

	lpte->pte_lo = pvo->pvo_pte.pa; /* Includes WIMG bits */
	if (pvo->pvo_pte.prot & VM_PROT_WRITE)
		lpte->pte_lo |= LPTE_BW;
	else
		lpte->pte_lo |= LPTE_BR;

	if (!(pvo->pvo_pte.prot & VM_PROT_EXECUTE))
		lpte->pte_lo |= LPTE_NOEXEC;
}

static __inline uint64_t
moea64_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
{
	uint64_t pte_lo;
	int i;

	if (ma != VM_MEMATTR_DEFAULT) {
		switch (ma) {
		case VM_MEMATTR_UNCACHEABLE:
			return (LPTE_I | LPTE_G);
		case VM_MEMATTR_CACHEABLE:
			return (LPTE_M);
		case VM_MEMATTR_WRITE_COMBINING:
		case VM_MEMATTR_WRITE_BACK:
		case VM_MEMATTR_PREFETCHABLE:
			return (LPTE_I);
		case VM_MEMATTR_WRITE_THROUGH:
			return (LPTE_W | LPTE_M);
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	pte_lo = LPTE_I | LPTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(LPTE_I | LPTE_G);
			pte_lo |= LPTE_M;
			break;
		}
	}

	return pte_lo;
}

/*
 * Quick sort callout for comparing memory regions.
 */
static int	om_cmp(const void *a, const void *b);

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa < mapb->om_pa)
		return (-1);
	else if (mapa->om_pa > mapb->om_pa)
		return (1);
	else
		return (0);
}

static void
moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
{
	struct ofw_map	translations[sz/(4*sizeof(cell_t))]; /*>= 4 cells per */
	pcell_t		acells, trans_cells[sz/sizeof(cell_t)];
	struct pvo_entry *pvo;
	register_t	msr;
	vm_offset_t	off;
	vm_paddr_t	pa_base;
	int		i, j;

	bzero(translations, sz);
	OF_getencprop(OF_finddevice("/"), "#address-cells", &acells,
	    sizeof(acells));
	if (OF_getencprop(mmu, "translations", trans_cells, sz) == -1)
		panic("moea64_bootstrap: can't get ofw translations");

	CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
	sz /= sizeof(cell_t);
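	/*
	 * Each translation entry is (va, len, pa, mode); pa occupies two
	 * cells when the root's #address-cells property is 2.
	 */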
	for (i = 0, j = 0; i < sz; j++) {
		translations[j].om_va = trans_cells[i++];
		translations[j].om_len = trans_cells[i++];
		translations[j].om_pa = trans_cells[i++];
		if (acells == 2) {
			translations[j].om_pa <<= 32;
			translations[j].om_pa |= trans_cells[i++];
		}
		translations[j].om_mode = trans_cells[i++];
	}
	KASSERT(i == sz, ("Translations map has incorrect cell count (%d/%zd)",
	    i, sz));

	sz = j;
	qsort(translations, sz, sizeof (*translations), om_cmp);

	for (i = 0; i < sz; i++) {
		pa_base = translations[i].om_pa;
	      #ifndef __powerpc64__
		if ((translations[i].om_pa >> 32) != 0)
			panic("OFW translations above 32-bit boundary!");
	      #endif

		if (pa_base % PAGE_SIZE)
			panic("OFW translation not page-aligned (phys)!");
		if (translations[i].om_va % PAGE_SIZE)
			panic("OFW translation not page-aligned (virt)!");

		CTR3(KTR_PMAP, "translation: pa=%#zx va=%#x len=%#x",
		    pa_base, translations[i].om_va, translations[i].om_len);

		/* Now enter the pages for this mapping */

		DISABLE_TRANS(msr);
		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
			/* If this address is direct-mapped, skip remapping */
			if (hw_direct_map && translations[i].om_va == pa_base &&
			    moea64_calc_wimg(pa_base + off, VM_MEMATTR_DEFAULT)
			    == LPTE_M)
				continue;

			PMAP_LOCK(kernel_pmap);
			pvo = moea64_pvo_find_va(kernel_pmap,
			    translations[i].om_va + off);
			PMAP_UNLOCK(kernel_pmap);
			if (pvo != NULL)
				continue;

			moea64_kenter(mmup, translations[i].om_va + off,
			    pa_base + off);
		}
		ENABLE_TRANS(msr);
	}
}

#ifdef __powerpc64__
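/*
 * Probe for large-page (16 MB) support and, on 970-class CPUs where it is
 * disabled by default via HID4, switch it on.
 */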
static void
moea64_probe_large_page(void)
{
	uint16_t pvr = mfpvr() >> 16;

	switch (pvr) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		powerpc_sync(); isync();
		mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG);
		powerpc_sync(); isync();

		/* FALLTHROUGH */
	default:
		moea64_large_page_size = 0x1000000; /* 16 MB */
		moea64_large_page_shift = 24;
	}

	moea64_large_page_mask = moea64_large_page_size - 1;
}

static void
moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
{
	struct slb *cache;
	struct slb entry;
	uint64_t esid, slbe;
	uint64_t i;

	cache = PCPU_GET(slb);
	esid = va >> ADDR_SR_SHFT;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	for (i = 0; i < 64; i++) {
		if (cache[i].slbe == (slbe | i))
			return;
	}

	entry.slbe = slbe;
	entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
	if (large)
		entry.slbv |= SLBV_L;

	slb_insert_kernel(entry.slbe, entry.slbv);
}
#endif

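/*
 * With large-page support, install a 1:1 direct map of all physical memory;
 * otherwise map only the bootstrap PVO pool and the kernel image page-by-page.
 */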
static void
moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	struct pvo_entry *pvo;
	register_t msr;
	vm_paddr_t pa;
	vm_offset_t size, off;
	uint64_t pte_lo;
	int i;

	if (moea64_large_page_size == 0)
		hw_direct_map = 0;

	DISABLE_TRANS(msr);
	if (hw_direct_map) {
		PMAP_LOCK(kernel_pmap);
		for (i = 0; i < pregions_sz; i++) {
		  for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
		     pregions[i].mr_size; pa += moea64_large_page_size) {
			pte_lo = LPTE_M;

			pvo = alloc_pvo_entry(1 /* bootstrap */);
			pvo->pvo_vaddr |= PVO_WIRED | PVO_LARGE;
			init_pvo_entry(pvo, kernel_pmap, pa);

			/*
			 * Set memory access as guarded if prefetch within
			 * the page could exit the available physmem area.
			 */
			if (pa & moea64_large_page_mask) {
				pa &= ~moea64_large_page_mask;
				pte_lo |= LPTE_G;
			}
			if (pa + moea64_large_page_size >
			    pregions[i].mr_start + pregions[i].mr_size)
				pte_lo |= LPTE_G;

			pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE |
			    VM_PROT_EXECUTE;
			pvo->pvo_pte.pa = pa | pte_lo;
			moea64_pvo_enter(mmup, pvo, NULL);
		  }
		}
		PMAP_UNLOCK(kernel_pmap);
	} else {
		size = moea64_bpvo_pool_size*sizeof(struct pvo_entry);
		off = (vm_offset_t)(moea64_bpvo_pool);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);

		/*
		 * Map certain important things, like ourselves.
		 *
		 * NOTE: We do not map the exception vector space. That code is
		 * used only in real mode, and leaving it unmapped allows us to
		 * catch NULL pointer dereferences, instead of making NULL a
		 * valid address.
		 */

		for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
		    pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
	}
	ENABLE_TRANS(msr);

	/*
	 * Allow user to override unmapped_buf_allowed for testing.
	 * XXXKIB Only direct map implementation was tested.
	 */
	if (!TUNABLE_INT_FETCH("vfs.unmapped_buf_allowed",
	    &unmapped_buf_allowed))
		unmapped_buf_allowed = hw_direct_map;
}

void
moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	int		i, j;
	vm_size_t	physsz, hwphyssz;

#ifndef __powerpc64__
	/* We don't have a direct map since there is no BAT */
	hw_direct_map = 0;

	/* Make sure battable is zero, since we have no BAT */
	for (i = 0; i < 16; i++) {
		battable[i].batu = 0;
		battable[i].batl = 0;
	}
#else
	moea64_probe_large_page();

	/* Use a direct map if we have large page support */
	if (moea64_large_page_size > 0)
		hw_direct_map = 1;
	else
		hw_direct_map = 0;
#endif

	/* Get physical memory regions from firmware */
	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");

	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
		panic("moea64_bootstrap: phys_avail too small");

	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#zx - %#zx (%#zx)",
		    regions[i].mr_start, regions[i].mr_start +
		    regions[i].mr_size, regions[i].mr_size);
		if (hwphyssz != 0 &&
		    (physsz + regions[i].mr_size) >= hwphyssz) {
			if (physsz < hwphyssz) {
				phys_avail[j] = regions[i].mr_start;
				phys_avail[j + 1] = regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}

	/* Check for overlap with the kernel and exception vectors */
	for (j = 0; j < 2*phys_avail_count; j+=2) {
		if (phys_avail[j] < EXC_LAST)
			phys_avail[j] += EXC_LAST;

		if (kernelstart >= phys_avail[j] &&
		    kernelstart < phys_avail[j+1]) {
			if (kernelend < phys_avail[j+1]) {
				phys_avail[2*phys_avail_count] =
				    (kernelend & ~PAGE_MASK) + PAGE_SIZE;
				phys_avail[2*phys_avail_count + 1] =
				    phys_avail[j+1];
				phys_avail_count++;
			}

			phys_avail[j+1] = kernelstart & ~PAGE_MASK;
		}

		if (kernelend >= phys_avail[j] &&
		    kernelend < phys_avail[j+1]) {
			if (kernelstart > phys_avail[j]) {
				phys_avail[2*phys_avail_count] = phys_avail[j];
				phys_avail[2*phys_avail_count + 1] =
				    kernelstart & ~PAGE_MASK;
				phys_avail_count++;
			}

			phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
		}
	}

	physmem = btoc(physsz);

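	/*
	 * Size the page table: choose a power-of-two PTEG count scaled to
	 * the number of physical pages (the architecture requires the hash
	 * table size to be a power of two).
	 */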
#ifdef PTEGCOUNT
	moea64_pteg_count = PTEGCOUNT;
#else
	moea64_pteg_count = 0x1000;

	while (moea64_pteg_count < physmem)
		moea64_pteg_count <<= 1;

	moea64_pteg_count >>= 1;
#endif /* PTEGCOUNT */
}

void
moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	int		i;

	/*
	 * Set PTEG mask
	 */
	moea64_pteg_mask = moea64_pteg_count - 1;

	/*
	 * Initialize SLB table lock and page locks
	 */
	mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);
	for (i = 0; i < PV_LOCK_COUNT; i++)
		mtx_init(&pv_lock[i], "page pv", NULL, MTX_DEF);

	/*
	 * Initialise the bootstrap pvo pool.
	 */
	moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
		moea64_bpvo_pool_size*sizeof(struct pvo_entry), 0);
	moea64_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	#ifndef __powerpc64__
	moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	moea64_vsid_bitmap[0] |= 1;
	#endif

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	#ifdef __powerpc64__
	for (i = 0; i < 64; i++) {
		pcpup->pc_slb[i].slbv = 0;
		pcpup->pc_slb[i].slbe = 0;
	}
	#else
	for (i = 0; i < 16; i++)
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
	#endif

	kernel_pmap->pmap_phys = kernel_pmap;
	CPU_FILL(&kernel_pmap->pm_active);
	RB_INIT(&kernel_pmap->pmap_pvo);

	PMAP_LOCK_INIT(kernel_pmap);

	/*
	 * Now map in all the other buffers we allocated earlier
	 */

	moea64_setup_direct_map(mmup, kernelstart, kernelend);
}

void
moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	mmui;
	phandle_t	chosen;
	phandle_t	mmu;
	ssize_t		sz;
	int		i;
	vm_offset_t	pa, va;
	void		*dpcpu;

	/*
	 * Set up the Open Firmware pmap and add its mappings if not in real
	 * mode.
	 */

	chosen = OF_finddevice("/chosen");
	if (chosen != -1 && OF_getencprop(chosen, "mmu", &mmui, 4) != -1) {
		mmu = OF_instance_to_package(mmui);
		if (mmu == -1 ||
		    (sz = OF_getproplen(mmu, "translations")) == -1)
			sz = 0;
		if (sz > 6144 /* tmpstksz - 2 KB headroom */)
			panic("moea64_bootstrap: too many ofw translations");

		if (sz > 0)
			moea64_add_ofw_mappings(mmup, mmu, sz);
	}

	/*
	 * Calculate the last available physical address.
	 */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	/*
	 * Initialize MMU and remap early physical mappings
	 */
	MMU_CPU_BOOTSTRAP(mmup,0);
	mtmsr(mfmsr() | PSL_DR | PSL_IR);
	pmap_bootstrapped++;
	bs_remap_earlyboot();

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;

	/*
	 * Map the entire KVA range into the SLB. We must not fault there.
	 */
	#ifdef __powerpc64__
	for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
		moea64_bootstrap_slb_prefault(va, 0);
	#endif

	/*
	 * Figure out how far we can extend virtual_end into segment 16
	 * without running into existing mappings. Segment 16 is guaranteed
	 * to contain neither RAM nor devices (at least on Apple hardware),
	 * but will generally contain some OFW mappings we should not
	 * step on.
	 */

	#ifndef __powerpc64__	/* KVA is in high memory on PPC64 */
	PMAP_LOCK(kernel_pmap);
	while (virtual_end < VM_MAX_KERNEL_ADDRESS &&
	    moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL)
		virtual_end += PAGE_SIZE;
	PMAP_UNLOCK(kernel_pmap);
	#endif

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = moea64_bootstrap_alloc(kstack_pages * PAGE_SIZE, PAGE_SIZE);
	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	virtual_avail = va + kstack_pages * PAGE_SIZE;
	CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
	thread0.td_kstack = va;
	thread0.td_kstack_pages = kstack_pages;
	for (i = 0; i < kstack_pages; i++) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE);
	msgbufp = (struct msgbuf *)virtual_avail;
	va = virtual_avail;
	virtual_avail += round_page(msgbufsize);
	while (va < virtual_avail) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the dynamic percpu area.
	 */
	pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
	dpcpu = (void *)virtual_avail;
	va = virtual_avail;
	virtual_avail += DPCPU_SIZE;
	while (va < virtual_avail) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	dpcpu_init(dpcpu, 0);

	/*
	 * Allocate some things for page zeroing. We put this directly
	 * in the page table and use MOEA64_PTE_REPLACE to avoid any
	 * of the PVO book-keeping or other parts of the VM system
	 * from even knowing that this hack exists.
	 */

	if (!hw_direct_map) {
		mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
		    MTX_DEF);
		for (i = 0; i < 2; i++) {
			moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
			virtual_end -= PAGE_SIZE;

			moea64_kenter(mmup, moea64_scratchpage_va[i], 0);

			PMAP_LOCK(kernel_pmap);
			moea64_scratchpage_pvo[i] = moea64_pvo_find_va(
			    kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]);
			PMAP_UNLOCK(kernel_pmap);
		}
	}
}

static void
moea64_pmap_init_qpages(void)
{
	struct pcpu *pc;
	int i;

	if (hw_direct_map)
		return;

	CPU_FOREACH(i) {
		pc = pcpu_find(i);
		pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
		if (pc->pc_qmap_addr == 0)
			panic("pmap_init_qpages: unable to allocate KVA");
		PMAP_LOCK(kernel_pmap);
		pc->pc_qmap_pvo = moea64_pvo_find_va(kernel_pmap, pc->pc_qmap_addr);
		PMAP_UNLOCK(kernel_pmap);
		mtx_init(&pc->pc_qmap_lock, "qmap lock", NULL, MTX_DEF);
	}
}

SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, moea64_pmap_init_qpages, NULL);

/*
 * Activate a user pmap.  This mostly involves setting some non-CPU
 * state.
 */
void
moea64_activate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_SET(PCPU_GET(cpuid), &pm->pm_active);

	#ifdef __powerpc64__
	PCPU_SET(userslb, pm->pm_slb);
	__asm __volatile("slbmte %0, %1; isync" ::
	    "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
	#else
	PCPU_SET(curpmap, pm->pmap_phys);
	mtsrin(USER_SR << ADDR_SR_SHFT, td->td_pcb->pcb_cpu.aim.usr_vsid);
	#endif
}

void
moea64_deactivate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

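	/* Evict the user-segment SLB entry installed by moea64_activate(). */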
	__asm __volatile("isync; slbie %0" :: "r"(USER_ADDR));

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
	#ifdef __powerpc64__
	PCPU_SET(userslb, NULL);
	#else
	PCPU_SET(curpmap, NULL);
	#endif
}

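/*
 * Clear PVO_WIRED on every mapping in [sva, eva), updating the hardware
 * PTEs and transferring any referenced/changed bits to the affected pages.
 */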
void
moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct	pvo_entry key, *pvo;
	vm_page_t m;
	int64_t	refchg;

	key.pvo_vaddr = sva;
	PMAP_LOCK(pm);
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva;
	    pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
		if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
			panic("moea64_unwire: pvo %p is missing PVO_WIRED",
			    pvo);
		pvo->pvo_vaddr &= ~PVO_WIRED;
		refchg = MOEA64_PTE_REPLACE(mmu, pvo, 0 /* No invalidation */);
		if ((pvo->pvo_vaddr & PVO_MANAGED) &&
		    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
			if (refchg < 0)
				refchg = LPTE_CHG;
			m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);

			refchg |= atomic_readandclear_32(&m->md.mdpg_attrs);
			if (refchg & LPTE_CHG)
				vm_page_dirty(m);
			if (refchg & LPTE_REF)
				vm_page_aflag_set(m, PGA_REFERENCED);
		}
		pm->pm_stats.wired_count--;
	}
	PMAP_UNLOCK(pm);
}

/*
 * This goes through and sets the physical address of our
 * special scratch PTE to the PA we want to zero or copy. Because
 * of locking issues (this can get called in pvo_enter() by
 * the UMA allocator), we can't use most other utility functions here
 */

static __inline
void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_paddr_t pa) {

	KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
	mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);

	moea64_scratchpage_pvo[which]->pvo_pte.pa =
	    moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
	MOEA64_PTE_REPLACE(mmup, moea64_scratchpage_pvo[which],
	    MOEA64_PTE_INVALIDATE);
	isync();
}

void
moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t	dst;
	vm_offset_t	src;

	dst = VM_PAGE_TO_PHYS(mdst);
	src = VM_PAGE_TO_PHYS(msrc);

	if (hw_direct_map) {
		bcopy((void *)src, (void *)dst, PAGE_SIZE);
	} else {
		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(mmu, 0, src);
		moea64_set_scratchpage_pa(mmu, 1, dst);

		bcopy((void *)moea64_scratchpage_va[0],
		    (void *)moea64_scratchpage_va[1], PAGE_SIZE);

		mtx_unlock(&moea64_scratchpage_mtx);
	}
}

static inline void
moea64_copy_pages_dmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) +
		    a_pg_offset;
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) +
		    b_pg_offset;
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
}

static inline void
moea64_copy_pages_nodmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	mtx_lock(&moea64_scratchpage_mtx);
	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		moea64_set_scratchpage_pa(mmu, 0,
		    VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
		a_cp = (char *)moea64_scratchpage_va[0] + a_pg_offset;
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		moea64_set_scratchpage_pa(mmu, 1,
		    VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
		b_cp = (char *)moea64_scratchpage_va[1] + b_pg_offset;
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
	mtx_unlock(&moea64_scratchpage_mtx);
}

void
moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{

	if (hw_direct_map) {
		moea64_copy_pages_dmap(mmu, ma, a_offset, mb, b_offset,
		    xfersize);
	} else {
		moea64_copy_pages_nodmap(mmu, ma, a_offset, mb, b_offset,
		    xfersize);
	}
}

void
moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);

	if (size + off > PAGE_SIZE)
		panic("moea64_zero_page_area: size + off > PAGE_SIZE");
1199
1200	if (hw_direct_map) {
1201		bzero((caddr_t)pa + off, size);
1202	} else {
1203		mtx_lock(&moea64_scratchpage_mtx);
1204		moea64_set_scratchpage_pa(mmu, 0, pa);
1205		bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
1206		mtx_unlock(&moea64_scratchpage_mtx);
1207	}
1208}
1209
1210/*
1211 * Zero a page of physical memory by temporarily mapping it
1212 */
1213void
1214moea64_zero_page(mmu_t mmu, vm_page_t m)
1215{
1216	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
1217	vm_offset_t va, off;
1218
1219	if (!hw_direct_map) {
1220		mtx_lock(&moea64_scratchpage_mtx);
1221
1222		moea64_set_scratchpage_pa(mmu, 0, pa);
1223		va = moea64_scratchpage_va[0];
1224	} else {
1225		va = pa;
1226	}
1227
1228	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
1229		__asm __volatile("dcbz 0,%0" :: "r"(va + off));
1230
1231	if (!hw_direct_map)
1232		mtx_unlock(&moea64_scratchpage_mtx);
1233}
1234
1235void
1236moea64_zero_page_idle(mmu_t mmu, vm_page_t m)
1237{
1238
1239	moea64_zero_page(mmu, m);
1240}
1241
1242vm_offset_t
1243moea64_quick_enter_page(mmu_t mmu, vm_page_t m)
1244{
1245	struct pvo_entry *pvo;
1246	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
1247
1248	if (hw_direct_map)
1249		return (pa);
1250
1251	/*
1252 	 * MOEA64_PTE_REPLACE does some locking, so we can't just grab
1253	 * a critical section and access the PCPU data like on i386.
1254	 * Instead, pin the thread and grab the PCPU lock to prevent
1255	 * a preempting thread from using the same PCPU data.
1256	 */
1257	sched_pin();
1258
1259	mtx_assert(PCPU_PTR(qmap_lock), MA_NOTOWNED);
1260	pvo = PCPU_GET(qmap_pvo);
1261
1262	mtx_lock(PCPU_PTR(qmap_lock));
1263	pvo->pvo_pte.pa = moea64_calc_wimg(pa, pmap_page_get_memattr(m)) |
1264	    (uint64_t)pa;
1265	MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_INVALIDATE);
1266	isync();
1267
1268	return (PCPU_GET(qmap_addr));
1269}
1270
1271void
1272moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr)
1273{
1274	if (hw_direct_map)
1275		return;
1276
1277	mtx_assert(PCPU_PTR(qmap_lock), MA_OWNED);
1278	KASSERT(PCPU_GET(qmap_addr) == addr,
1279	    ("moea64_quick_remove_page: invalid address"));
1280	mtx_unlock(PCPU_PTR(qmap_lock));
1281	sched_unpin();
1282}
1283
1284/*
1285 * Map the given physical page at the specified virtual address in the
1286 * target pmap with the protection requested.  If specified the page
1287 * will be wired down.
1288 */
1289
1290int
1291moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1292    vm_prot_t prot, u_int flags, int8_t psind)
1293{
1294	struct		pvo_entry *pvo, *oldpvo;
1295	struct		pvo_head *pvo_head;
1296	uint64_t	pte_lo;
1297	int		error;
1298
1299	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
1300		VM_OBJECT_ASSERT_LOCKED(m->object);
1301
1302	pvo = alloc_pvo_entry(0);
1303	pvo->pvo_pmap = NULL; /* to be filled in later */
1304	pvo->pvo_pte.prot = prot;
1305
1306	pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));
1307	pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | pte_lo;
1308
1309	if ((flags & PMAP_ENTER_WIRED) != 0)
1310		pvo->pvo_vaddr |= PVO_WIRED;
1311
1312	if ((m->oflags & VPO_UNMANAGED) != 0 || !moea64_initialized) {
1313		pvo_head = NULL;
1314	} else {
1315		pvo_head = &m->md.mdpg_pvoh;
1316		pvo->pvo_vaddr |= PVO_MANAGED;
1317	}
1318
1319	for (;;) {
1320		PV_PAGE_LOCK(m);
1321		PMAP_LOCK(pmap);
1322		if (pvo->pvo_pmap == NULL)
1323			init_pvo_entry(pvo, pmap, va);
1324		if (prot & VM_PROT_WRITE)
1325			if (pmap_bootstrapped &&
1326			    (m->oflags & VPO_UNMANAGED) == 0)
1327				vm_page_aflag_set(m, PGA_WRITEABLE);
1328
1329		oldpvo = moea64_pvo_find_va(pmap, va);
1330		if (oldpvo != NULL) {
1331			if (oldpvo->pvo_vaddr == pvo->pvo_vaddr &&
1332			    oldpvo->pvo_pte.pa == pvo->pvo_pte.pa &&
1333			    oldpvo->pvo_pte.prot == prot) {
1334				/* Identical mapping already exists */
1335				error = 0;
1336
1337				/* If not in page table, reinsert it */
1338				if (MOEA64_PTE_SYNCH(mmu, oldpvo) < 0) {
1339					moea64_pte_overflow--;
1340					MOEA64_PTE_INSERT(mmu, oldpvo);
1341				}
1342
1343				/* Then just clean up and go home */
1344				PV_PAGE_UNLOCK(m);
1345				PMAP_UNLOCK(pmap);
1346				free_pvo_entry(pvo);
1347				break;
1348			}
1349
1350			/* Otherwise, need to kill it first */
1351			KASSERT(oldpvo->pvo_pmap == pmap, ("pmap of old "
1352			    "mapping does not match new mapping"));
1353			moea64_pvo_remove_from_pmap(mmu, oldpvo);
1354		}
1355		error = moea64_pvo_enter(mmu, pvo, pvo_head);
1356		PV_PAGE_UNLOCK(m);
1357		PMAP_UNLOCK(pmap);
1358
1359		/* Free any dead pages */
1360		if (oldpvo != NULL) {
1361			PV_LOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
1362			moea64_pvo_remove_from_page(mmu, oldpvo);
1363			PV_UNLOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
1364			free_pvo_entry(oldpvo);
1365		}
1366
1367		if (error != ENOMEM)
1368			break;
1369		if ((flags & PMAP_ENTER_NOSLEEP) != 0)
1370			return (KERN_RESOURCE_SHORTAGE);
1371		VM_OBJECT_ASSERT_UNLOCKED(m->object);
1372		VM_WAIT;
1373	}
1374
1375	/*
1376	 * Flush the page from the instruction cache if this page is
1377	 * mapped executable and cacheable.
1378	 */
1379	if (pmap != kernel_pmap && !(m->aflags & PGA_EXECUTABLE) &&
1380	    (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
1381		vm_page_aflag_set(m, PGA_EXECUTABLE);
1382		moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
1383	}
1384	return (KERN_SUCCESS);
1385}
1386
1387static void
1388moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1389    vm_size_t sz)
1390{
1391
1392	/*
1393	 * This is much trickier than on older systems because
1394	 * we can't sync the icache on physical addresses directly
1395	 * without a direct map. Instead we check a couple of cases
1396	 * where the memory is already mapped in and, failing that,
1397	 * use the same trick we use for page zeroing to create
1398	 * a temporary mapping for this physical address.
1399	 */
1400
1401	if (!pmap_bootstrapped) {
1402		/*
1403		 * If PMAP is not bootstrapped, we are likely to be
1404		 * in real mode.
1405		 */
1406		__syncicache((void *)pa, sz);
1407	} else if (pmap == kernel_pmap) {
1408		__syncicache((void *)va, sz);
1409	} else if (hw_direct_map) {
1410		__syncicache((void *)pa, sz);
1411	} else {
1412		/* Use the scratch page to set up a temp mapping */
1413
1414		mtx_lock(&moea64_scratchpage_mtx);
1415
1416		moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF);
1417		__syncicache((void *)(moea64_scratchpage_va[1] +
1418		    (va & ADDR_POFF)), sz);
1419
1420		mtx_unlock(&moea64_scratchpage_mtx);
1421	}
1422}
1423
1424/*
1425 * Maps a sequence of resident pages belonging to the same object.
1426 * The sequence begins with the given page m_start.  This page is
1427 * mapped at the given virtual address start.  Each subsequent page is
1428 * mapped at a virtual address that is offset from start by the same
1429 * amount as the page is offset from m_start within the object.  The
1430 * last page in the sequence is the page with the largest offset from
1431 * m_start that can be mapped at a virtual address less than the given
1432 * virtual address end.  Not every virtual page between start and end
1433 * is mapped; only those for which a resident page exists with the
1434 * corresponding offset from m_start are mapped.
1435 */
1436void
1437moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
1438    vm_page_t m_start, vm_prot_t prot)
1439{
1440	vm_page_t m;
1441	vm_pindex_t diff, psize;
1442
1443	VM_OBJECT_ASSERT_LOCKED(m_start->object);
1444
1445	psize = atop(end - start);
1446	m = m_start;
1447	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1448		moea64_enter(mmu, pm, start + ptoa(diff), m, prot &
1449		    (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP, 0);
1450		m = TAILQ_NEXT(m, listq);
1451	}
1452}
1453
1454void
1455moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
1456    vm_prot_t prot)
1457{
1458
1459	moea64_enter(mmu, pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
1460	    PMAP_ENTER_NOSLEEP, 0);
1461}
1462
1463vm_paddr_t
1464moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
1465{
1466	struct	pvo_entry *pvo;
1467	vm_paddr_t pa;
1468
1469	PMAP_LOCK(pm);
1470	pvo = moea64_pvo_find_va(pm, va);
1471	if (pvo == NULL)
1472		pa = 0;
1473	else
1474		pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va - PVO_VADDR(pvo));
1475	PMAP_UNLOCK(pm);
1476
1477	return (pa);
1478}
1479
1480/*
1481 * Atomically extract and hold the physical page with the given
1482 * pmap and virtual address pair if that mapping permits the given
1483 * protection.
1484 */
1485vm_page_t
1486moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1487{
1488	struct	pvo_entry *pvo;
1489	vm_page_t m;
1490        vm_paddr_t pa;
1491
1492	m = NULL;
1493	pa = 0;
1494	PMAP_LOCK(pmap);
1495retry:
1496	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1497	if (pvo != NULL && (pvo->pvo_pte.prot & prot) == prot) {
1498		if (vm_page_pa_tryrelock(pmap,
1499		    pvo->pvo_pte.pa & LPTE_RPGN, &pa))
1500			goto retry;
1501		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
1502		vm_page_hold(m);
1503	}
1504	PA_UNLOCK_COND(pa);
1505	PMAP_UNLOCK(pmap);
1506	return (m);
1507}
1508
1509static mmu_t installed_mmu;
1510
1511static void *
1512moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags,
1513    int wait)
1514{
1515	struct pvo_entry *pvo;
1516        vm_offset_t va;
1517        vm_page_t m;
1518        int pflags, needed_lock;
1519
1520	/*
1521	 * This entire routine is a horrible hack to avoid bothering kmem
1522	 * for new KVA addresses. Because this can get called from inside
1523	 * kmem allocation routines, calling kmem for a new address here
1524	 * can lead to multiply locking non-recursive mutexes.
1525	 */
1526
1527	*flags = UMA_SLAB_PRIV;
1528	needed_lock = !PMAP_LOCKED(kernel_pmap);
1529	pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;
1530
1531        for (;;) {
1532                m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
1533                if (m == NULL) {
1534                        if (wait & M_NOWAIT)
1535                                return (NULL);
1536                        VM_WAIT;
1537                } else
1538                        break;
1539        }
1540
1541	va = VM_PAGE_TO_PHYS(m);
1542
1543	pvo = alloc_pvo_entry(1 /* bootstrap */);
1544
1545	pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE;
1546	pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | LPTE_M;
1547
1548	if (needed_lock)
1549		PMAP_LOCK(kernel_pmap);
1550
1551	init_pvo_entry(pvo, kernel_pmap, va);
1552	pvo->pvo_vaddr |= PVO_WIRED;
1553
1554	moea64_pvo_enter(installed_mmu, pvo, NULL);
1555
1556	if (needed_lock)
1557		PMAP_UNLOCK(kernel_pmap);
1558
1559	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
1560                bzero((void *)va, PAGE_SIZE);
1561
1562	return (void *)va;
1563}
1564
1565extern int elf32_nxstack;
1566
1567void
1568moea64_init(mmu_t mmu)
1569{
1570
1571	CTR0(KTR_PMAP, "moea64_init");
1572
1573	moea64_pvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
1574	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1575	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
1576
1577	if (!hw_direct_map) {
1578		installed_mmu = mmu;
1579		uma_zone_set_allocf(moea64_pvo_zone,moea64_uma_page_alloc);
1580	}
1581
1582#ifdef COMPAT_FREEBSD32
1583	elf32_nxstack = 1;
1584#endif
1585
1586	moea64_initialized = TRUE;
1587}
1588
1589boolean_t
1590moea64_is_referenced(mmu_t mmu, vm_page_t m)
1591{
1592
1593	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1594	    ("moea64_is_referenced: page %p is not managed", m));
1595
1596	return (moea64_query_bit(mmu, m, LPTE_REF));
1597}
1598
1599boolean_t
1600moea64_is_modified(mmu_t mmu, vm_page_t m)
1601{
1602
1603	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1604	    ("moea64_is_modified: page %p is not managed", m));
1605
1606	/*
1607	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
1608	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
1609	 * is clear, no PTEs can have LPTE_CHG set.
1610	 */
1611	VM_OBJECT_ASSERT_LOCKED(m->object);
1612	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
1613		return (FALSE);
1614	return (moea64_query_bit(mmu, m, LPTE_CHG));
1615}
1616
1617boolean_t
1618moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1619{
1620	struct pvo_entry *pvo;
1621	boolean_t rv = TRUE;
1622
1623	PMAP_LOCK(pmap);
1624	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1625	if (pvo != NULL)
1626		rv = FALSE;
1627	PMAP_UNLOCK(pmap);
1628	return (rv);
1629}
1630
1631void
1632moea64_clear_modify(mmu_t mmu, vm_page_t m)
1633{
1634
1635	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1636	    ("moea64_clear_modify: page %p is not managed", m));
1637	VM_OBJECT_ASSERT_WLOCKED(m->object);
1638	KASSERT(!vm_page_xbusied(m),
1639	    ("moea64_clear_modify: page %p is exclusive busied", m));
1640
1641	/*
1642	 * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG
1643	 * set.  If the object containing the page is locked and the page is
1644	 * not exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
1645	 */
1646	if ((m->aflags & PGA_WRITEABLE) == 0)
1647		return;
1648	moea64_clear_bit(mmu, m, LPTE_CHG);
1649}
1650
1651/*
1652 * Clear the write and modified bits in each of the given page's mappings.
1653 */
1654void
1655moea64_remove_write(mmu_t mmu, vm_page_t m)
1656{
1657	struct	pvo_entry *pvo;
1658	int64_t	refchg, ret;
1659	pmap_t	pmap;
1660
1661	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1662	    ("moea64_remove_write: page %p is not managed", m));
1663
1664	/*
1665	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
1666	 * set by another thread while the object is locked.  Thus,
1667	 * if PGA_WRITEABLE is clear, no page table entries need updating.
1668	 */
1669	VM_OBJECT_ASSERT_WLOCKED(m->object);
1670	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
1671		return;
1672	powerpc_sync();
1673	PV_PAGE_LOCK(m);
1674	refchg = 0;
1675	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1676		pmap = pvo->pvo_pmap;
1677		PMAP_LOCK(pmap);
1678		if (!(pvo->pvo_vaddr & PVO_DEAD) &&
1679		    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
1680			pvo->pvo_pte.prot &= ~VM_PROT_WRITE;
1681			ret = MOEA64_PTE_REPLACE(mmu, pvo,
1682			    MOEA64_PTE_PROT_UPDATE);
1683			if (ret < 0)
1684				ret = LPTE_CHG;
1685			refchg |= ret;
1686			if (pvo->pvo_pmap == kernel_pmap)
1687				isync();
1688		}
1689		PMAP_UNLOCK(pmap);
1690	}
1691	if ((refchg | atomic_readandclear_32(&m->md.mdpg_attrs)) & LPTE_CHG)
1692		vm_page_dirty(m);
1693	vm_page_aflag_clear(m, PGA_WRITEABLE);
1694	PV_PAGE_UNLOCK(m);
1695}
1696
1697/*
1698 *	moea64_ts_referenced:
1699 *
1700 *	Return a count of reference bits for a page, clearing those bits.
1701 *	It is not necessary for every reference bit to be cleared, but it
1702 *	is necessary that 0 only be returned when there are truly no
1703 *	reference bits set.
1704 *
1705 *	XXX: The exact number of bits to check and clear is a matter that
1706 *	should be tested and standardized at some point in the future for
1707 *	optimal aging of shared pages.
1708 */
1709int
1710moea64_ts_referenced(mmu_t mmu, vm_page_t m)
1711{
1712
1713	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1714	    ("moea64_ts_referenced: page %p is not managed", m));
1715	return (moea64_clear_bit(mmu, m, LPTE_REF));
1716}
1717
1718/*
1719 * Modify the WIMG settings of all mappings for a page.
1720 */
1721void
1722moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
1723{
1724	struct	pvo_entry *pvo;
1725	int64_t	refchg;
1726	pmap_t	pmap;
1727	uint64_t lo;
1728
1729	if ((m->oflags & VPO_UNMANAGED) != 0) {
1730		m->md.mdpg_cache_attrs = ma;
1731		return;
1732	}
1733
1734	lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
1735
1736	PV_PAGE_LOCK(m);
1737	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1738		pmap = pvo->pvo_pmap;
1739		PMAP_LOCK(pmap);
1740		if (!(pvo->pvo_vaddr & PVO_DEAD)) {
1741			pvo->pvo_pte.pa &= ~LPTE_WIMG;
1742			pvo->pvo_pte.pa |= lo;
1743			refchg = MOEA64_PTE_REPLACE(mmu, pvo,
1744			    MOEA64_PTE_INVALIDATE);
1745			if (refchg < 0)
1746				refchg = (pvo->pvo_pte.prot & VM_PROT_WRITE) ?
1747				    LPTE_CHG : 0;
1748			if ((pvo->pvo_vaddr & PVO_MANAGED) &&
1749			    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
1750				refchg |=
1751				    atomic_readandclear_32(&m->md.mdpg_attrs);
1752				if (refchg & LPTE_CHG)
1753					vm_page_dirty(m);
1754				if (refchg & LPTE_REF)
1755					vm_page_aflag_set(m, PGA_REFERENCED);
1756			}
1757			if (pvo->pvo_pmap == kernel_pmap)
1758				isync();
1759		}
1760		PMAP_UNLOCK(pmap);
1761	}
1762	m->md.mdpg_cache_attrs = ma;
1763	PV_PAGE_UNLOCK(m);
1764}
1765
1766/*
1767 * Map a wired page into kernel virtual address space.
1768 */
1769void
1770moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
1771{
1772	int		error;
1773	struct pvo_entry *pvo, *oldpvo;
1774
1775	pvo = alloc_pvo_entry(0);
1776	pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
1777	pvo->pvo_pte.pa = (pa & ~ADDR_POFF) | moea64_calc_wimg(pa, ma);
1778	pvo->pvo_vaddr |= PVO_WIRED;
1779
1780	PMAP_LOCK(kernel_pmap);
1781	oldpvo = moea64_pvo_find_va(kernel_pmap, va);
1782	if (oldpvo != NULL)
1783		moea64_pvo_remove_from_pmap(mmu, oldpvo);
1784	init_pvo_entry(pvo, kernel_pmap, va);
1785	error = moea64_pvo_enter(mmu, pvo, NULL);
1786	PMAP_UNLOCK(kernel_pmap);
1787
1788	/* Free any dead pages */
1789	if (oldpvo != NULL) {
1790		PV_LOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
1791		moea64_pvo_remove_from_page(mmu, oldpvo);
1792		PV_UNLOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
1793		free_pvo_entry(oldpvo);
1794	}
1795
1796	if (error != 0 && error != ENOENT)
1797		panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va,
1798		    pa, error);
1799}
1800
1801void
1802moea64_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
1803{
1804
1805	moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
1806}
1807
1808/*
1809 * Extract the physical page address associated with the given kernel virtual
1810 * address.
1811 */
1812vm_paddr_t
1813moea64_kextract(mmu_t mmu, vm_offset_t va)
1814{
1815	struct		pvo_entry *pvo;
1816	vm_paddr_t pa;
1817
1818	/*
1819	 * Shortcut the direct-mapped case when applicable.  We never put
1820	 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS.
1821	 */
1822	if (va < VM_MIN_KERNEL_ADDRESS)
1823		return (va);
1824
1825	PMAP_LOCK(kernel_pmap);
1826	pvo = moea64_pvo_find_va(kernel_pmap, va);
1827	KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
1828	    va));
1829	pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va - PVO_VADDR(pvo));
1830	PMAP_UNLOCK(kernel_pmap);
1831	return (pa);
1832}
1833
1834/*
1835 * Remove a wired page from kernel virtual address space.
1836 */
1837void
1838moea64_kremove(mmu_t mmu, vm_offset_t va)
1839{
1840	moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
1841}
1842
1843/*
1844 * Map a range of physical addresses into kernel virtual address space.
1845 *
1846 * The value passed in *virt is a suggested virtual address for the mapping.
1847 * Architectures which can support a direct-mapped physical to virtual region
1848 * can return the appropriate address within that region, leaving '*virt'
1849 * unchanged.  Other architectures should map the pages starting at '*virt' and
1850 * update '*virt' with the first usable address after the mapped region.
1851 */
1852vm_offset_t
1853moea64_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
1854    vm_paddr_t pa_end, int prot)
1855{
1856	vm_offset_t	sva, va;
1857
1858	if (hw_direct_map) {
1859		/*
1860		 * Check if every page in the region is covered by the direct
1861		 * map.  The direct map covers all of physical memory, so use
1862		 * moea64_calc_wimg() as a shortcut: it returns LPTE_M exactly
1863		 * for the pages that are in physical memory.
1864		 */
1865		for (va = pa_start; va < pa_end; va += PAGE_SIZE)
1866			if (moea64_calc_wimg(va, VM_MEMATTR_DEFAULT) != LPTE_M)
1867				break;
1868		if (va == pa_end)
1869			return (pa_start);
1870	}
1871	sva = *virt;
1872	va = sva;
1873	/* XXX respect prot argument */
1874	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
1875		moea64_kenter(mmu, va, pa_start);
1876	*virt = va;
1877
1878	return (sva);
1879}
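
/*
 * Usage sketch (hypothetical caller): map a physical window during boot,
 * consuming kernel virtual address space only when the direct map cannot
 * be used.
 *
 *	vm_offset_t va = virtual_avail;
 *	vm_offset_t sva = moea64_map(mmu, &va, pa_start, pa_end,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *	virtual_avail = va;	(unchanged if the direct map was used)
 */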
1880
1881/*
1882 * Returns true if the pmap's pv is one of the first
1883 * 16 pvs linked to from this page.  This count may
1884 * be changed upwards or downwards in the future; it
1885 * is only necessary that true be returned for a small
1886 * subset of pmaps for proper page aging.
1887 */
1888boolean_t
1889moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
1890{
1891	int loops;
1892	struct pvo_entry *pvo;
1893	boolean_t rv;
1894
1895	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1896	    ("moea64_page_exists_quick: page %p is not managed", m));
1897	loops = 0;
1898	rv = FALSE;
1899	PV_PAGE_LOCK(m);
1900	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1901		if (!(pvo->pvo_vaddr & PVO_DEAD) && pvo->pvo_pmap == pmap) {
1902			rv = TRUE;
1903			break;
1904		}
1905		if (++loops >= 16)
1906			break;
1907	}
1908	PV_PAGE_UNLOCK(m);
1909	return (rv);
1910}
1911
1912void
1913moea64_page_init(mmu_t mmu __unused, vm_page_t m)
1914{
1915
1916	m->md.mdpg_attrs = 0;
1917	m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT;
1918	LIST_INIT(&m->md.mdpg_pvoh);
1919}
1920
1921/*
1922 * Return the number of managed mappings to the given physical page
1923 * that are wired.
1924 */
1925int
1926moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
1927{
1928	struct pvo_entry *pvo;
1929	int count;
1930
1931	count = 0;
1932	if ((m->oflags & VPO_UNMANAGED) != 0)
1933		return (count);
1934	PV_PAGE_LOCK(m);
1935	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
1936		if ((pvo->pvo_vaddr & (PVO_DEAD | PVO_WIRED)) == PVO_WIRED)
1937			count++;
1938	PV_PAGE_UNLOCK(m);
1939	return (count);
1940}
1941
1942static uintptr_t	moea64_vsidcontext;
1943
1944uintptr_t
1945	moea64_get_unique_vsid(void)
{
1946	u_int entropy;
1947	register_t hash;
1948	uint32_t mask;
1949	int i;
1950
1951	entropy = 0;
1952	__asm __volatile("mftb %0" : "=r"(entropy));
1953
1954	mtx_lock(&moea64_slb_mutex);
1955	for (i = 0; i < NVSIDS; i += VSID_NBPW) {
1956		u_int	n;
1957
1958		/*
1959		 * Create a new value by mutiplying by a prime and adding in
1960		 * Create a new value by multiplying by a prime and adding in
1961		 * entropy from the timebase register.  This is to make the
1962		 * VSID more random so that the PT hash function collides
1963		 * less often.  (Note that the prime causes gcc to do shifts
1964		 */
1965		moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
1966		hash = moea64_vsidcontext & (NVSIDS - 1);
1967		if (hash == 0)		/* 0 is special, avoid it */
1968			continue;
1969		n = hash >> 5;
1970		mask = 1 << (hash & (VSID_NBPW - 1));
1971		hash = (moea64_vsidcontext & VSID_HASHMASK);
1972		if (moea64_vsid_bitmap[n] & mask) {	/* collision? */
1973			/* anything free in this bucket? */
1974			if (moea64_vsid_bitmap[n] == 0xffffffff) {
1975				entropy = (moea64_vsidcontext >> 20);
1976				continue;
1977			}
1978			i = ffs(~moea64_vsid_bitmap[n]) - 1;
1979			mask = 1 << i;
1980			hash &= rounddown2(VSID_HASHMASK, VSID_NBPW);
1981			hash |= i;
1982		}
1983		if (hash == VSID_VRMA)	/* also special, avoid this too */
1984			continue;
1985		KASSERT(!(moea64_vsid_bitmap[n] & mask),
1986		    ("Allocating in-use VSID %#zx\n", hash));
1987		moea64_vsid_bitmap[n] |= mask;
1988		mtx_unlock(&moea64_slb_mutex);
1989		return (hash);
1990	}
1991
1992	mtx_unlock(&moea64_slb_mutex);
1993	panic("%s: out of segments", __func__);
1994}
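
/*
 * Worked example of the bitmap indexing above (illustrative; assumes
 * VSID_NBPW is 32 bits per word): a candidate hash of 77 (0x4d) selects
 * word n = 77 >> 5 = 2 and mask 1 << (77 & 31) = 1 << 13.  On a collision
 * in a word that still has free bits, ffs(~word) - 1 picks the lowest
 * clear bit and the low bits of the hash are rewritten to match it.
 */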
1995
1996#ifdef __powerpc64__
1997void
1998moea64_pinit(mmu_t mmu, pmap_t pmap)
1999{
2000
2001	RB_INIT(&pmap->pmap_pvo);
2002
2003	pmap->pm_slb_tree_root = slb_alloc_tree();
2004	pmap->pm_slb = slb_alloc_user_cache();
2005	pmap->pm_slb_len = 0;
2006}
2007#else
2008void
2009moea64_pinit(mmu_t mmu, pmap_t pmap)
2010{
2011	int	i;
2012	uint32_t hash;
2013
2014	RB_INIT(&pmap->pmap_pvo);
2015
2016	if (pmap_bootstrapped)
2017		pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
2018		    (vm_offset_t)pmap);
2019	else
2020		pmap->pmap_phys = pmap;
2021
2022	/*
2023	 * Allocate some segment registers for this pmap.
2024	 */
2025	hash = moea64_get_unique_vsid();
2026
2027	for (i = 0; i < 16; i++)
2028		pmap->pm_sr[i] = VSID_MAKE(i, hash);
2029
2030	KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0"));
2031}
2032#endif
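
/*
 * Example (32-bit case, illustrative): with a freshly allocated VSID hash
 * 'h', segment register N covers effective addresses 0xN0000000-0xNFFFFFFF,
 * so an EA of 0x30001000 translates through pm_sr[3] == VSID_MAKE(3, h).
 */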
2033
2034/*
2035 * Initialize the pmap associated with process 0.
2036 */
2037void
2038moea64_pinit0(mmu_t mmu, pmap_t pm)
2039{
2040
2041	PMAP_LOCK_INIT(pm);
2042	moea64_pinit(mmu, pm);
2043	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
2044}
2045
2046/*
2047 * Set the physical protection on the specified range of this map as requested.
2048 */
2049static void
2050	moea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
2051{
2052	struct vm_page *pg;
2053	vm_prot_t oldprot;
2054	int32_t refchg;
2055
2056	PMAP_LOCK_ASSERT(pm, MA_OWNED);
2057
2058	/*
2059	 * Change the protection of the page.
2060	 */
2061	oldprot = pvo->pvo_pte.prot;
2062	pvo->pvo_pte.prot = prot;
2063	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
2064
2065	/*
2066	 * If the PVO is in the page table, update mapping
2067	 */
2068	refchg = MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_PROT_UPDATE);
2069	if (refchg < 0)
2070		refchg = (oldprot & VM_PROT_WRITE) ? LPTE_CHG : 0;
2071
2072	if (pm != kernel_pmap && pg != NULL && !(pg->aflags & PGA_EXECUTABLE) &&
2073	    (pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
2074		if ((pg->oflags & VPO_UNMANAGED) == 0)
2075			vm_page_aflag_set(pg, PGA_EXECUTABLE);
2076		moea64_syncicache(mmu, pm, PVO_VADDR(pvo),
2077		    pvo->pvo_pte.pa & LPTE_RPGN, PAGE_SIZE);
2078	}
2079
2080	/*
2081	 * Update vm about the REF/CHG bits if the page is managed and we have
2082	 * removed write access.
2083	 */
2084	if (pg != NULL && (pvo->pvo_vaddr & PVO_MANAGED) &&
2085	    (oldprot & VM_PROT_WRITE)) {
2086		refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs);
2087		if (refchg & LPTE_CHG)
2088			vm_page_dirty(pg);
2089		if (refchg & LPTE_REF)
2090			vm_page_aflag_set(pg, PGA_REFERENCED);
2091	}
2092}
2093
2094void
2095moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
2096    vm_prot_t prot)
2097{
2098	struct	pvo_entry *pvo, *tpvo, key;
2099
2100	CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm,
2101	    sva, eva, prot);
2102
2103	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
2104	    ("moea64_protect: non current pmap"));
2105
2106	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2107		moea64_remove(mmu, pm, sva, eva);
2108		return;
2109	}
2110
2111	PMAP_LOCK(pm);
2112	key.pvo_vaddr = sva;
2113	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
2114	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
2115		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
2116		moea64_pvo_protect(mmu, pm, pvo, prot);
2117	}
2118	PMAP_UNLOCK(pm);
2119}
2120
2121/*
2122 * Map a list of wired pages into kernel virtual address space.  This is
2123 * intended for temporary mappings which do not need page modification or
2124 * references recorded.  Existing mappings in the region are overwritten.
2125 */
2126void
2127moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
2128{
2129	while (count-- > 0) {
2130		moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
2131		va += PAGE_SIZE;
2132		m++;
2133	}
2134}
2135
2136/*
2137 * Remove page mappings from kernel virtual address space.  Intended for
2138 * temporary mappings entered by moea64_qenter.
2139 */
2140void
2141moea64_qremove(mmu_t mmu, vm_offset_t va, int count)
2142{
2143	while (count-- > 0) {
2144		moea64_kremove(mmu, va);
2145		va += PAGE_SIZE;
2146	}
2147}
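
/*
 * Usage sketch (illustrative): enter a batch of pages temporarily, then
 * tear the mappings down with the matching qremove:
 *
 *	moea64_qenter(mmu, sva, m, npages);
 *	... access the pages through sva ...
 *	moea64_qremove(mmu, sva, npages);
 */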
2148
2149void
2150moea64_release_vsid(uint64_t vsid)
2151{
2152	int idx, mask;
2153
2154	mtx_lock(&moea64_slb_mutex);
2155	idx = vsid & (NVSIDS-1);
2156	mask = 1 << (idx % VSID_NBPW);
2157	idx /= VSID_NBPW;
2158	KASSERT(moea64_vsid_bitmap[idx] & mask,
2159	    ("Freeing unallocated VSID %#jx", vsid));
2160	moea64_vsid_bitmap[idx] &= ~mask;
2161	mtx_unlock(&moea64_slb_mutex);
2162}
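
/*
 * Continuing the example from moea64_get_unique_vsid() (VSID_NBPW == 32
 * assumed): releasing the VSID whose hash is 77 clears mask
 * 1 << (77 % 32) = 1 << 13 in word 77 / 32 = 2, exactly the slot the
 * allocator marked in use.
 */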
2163
2165void
2166moea64_release(mmu_t mmu, pmap_t pmap)
2167{
2168
2169	/*
2170	 * Free segment registers' VSIDs
2171	 */
2172	#ifdef __powerpc64__
2173	slb_free_tree(pmap);
2174	slb_free_user_cache(pmap->pm_slb);
2175	#else
2176	KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0"));
2177
2178	moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0]));
2179	#endif
2180}
2181
2182/*
2183 * Remove all pages mapped by the specified pmap
2184 */
2185void
2186moea64_remove_pages(mmu_t mmu, pmap_t pm)
2187{
2188	struct pvo_entry *pvo, *tpvo;
2189	struct pvo_tree tofree;
2190
2191	RB_INIT(&tofree);
2192
2193	PMAP_LOCK(pm);
2194	RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) {
2195		if (pvo->pvo_vaddr & PVO_WIRED)
2196			continue;
2197
2198		/*
2199		 * For locking reasons, remove this from the page table and
2200		 * pmap, but save delinking from the vm_page for a second
2201		 * pass
2202		 */
2203		moea64_pvo_remove_from_pmap(mmu, pvo);
2204		RB_INSERT(pvo_tree, &tofree, pvo);
2205	}
2206	PMAP_UNLOCK(pm);
2207
2208	RB_FOREACH_SAFE(pvo, pvo_tree, &tofree, tpvo) {
2209		PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN);
2210		moea64_pvo_remove_from_page(mmu, pvo);
2211		PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
2212		RB_REMOVE(pvo_tree, &tofree, pvo);
2213		free_pvo_entry(pvo);
2214	}
2215}
2216
2217/*
2218 * Remove the given range of addresses from the specified map.
2219 */
2220void
2221moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
2222{
2223	struct  pvo_entry *pvo, *tpvo, key;
2224	struct pvo_tree tofree;
2225
2226	/*
2227	 * Perform an unsynchronized read of resident_count; this is safe, as
	 * an empty pmap cannot concurrently gain mappings this call must see.
2228	 */
2229	if (pm->pm_stats.resident_count == 0)
2230		return;
2231
2232	key.pvo_vaddr = sva;
2233
2234	RB_INIT(&tofree);
2235
2236	PMAP_LOCK(pm);
2237	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
2238	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
2239		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
2240
2241		/*
2242		 * For locking reasons, remove this from the page table and
2243		 * pmap, but save delinking from the vm_page for a second
2244		 * pass
2245		 */
2246		moea64_pvo_remove_from_pmap(mmu, pvo);
2247		RB_INSERT(pvo_tree, &tofree, pvo);
2248	}
2249	PMAP_UNLOCK(pm);
2250
2251	RB_FOREACH_SAFE(pvo, pvo_tree, &tofree, tpvo) {
2252		PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN);
2253		moea64_pvo_remove_from_page(mmu, pvo);
2254		PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
2255		RB_REMOVE(pvo_tree, &tofree, pvo);
2256		free_pvo_entry(pvo);
2257	}
2258}
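
/*
 * Both removal paths above use the same two-pass scheme: PVOs are unlinked
 * from the page table and the pmap under the pmap lock and parked on
 * 'tofree'; only afterwards are they delinked from their vm_page under the
 * PV lock.  This respects the PV-before-pmap lock order established by
 * moea64_remove_all() below.
 */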
2259
2260/*
2261 * Remove a physical page from all pmaps in which it resides.
2262 * moea64_pvo_remove_from_pmap() syncs PTE REF/CHG bits back to the vm_page.
2263 */
2264void
2265moea64_remove_all(mmu_t mmu, vm_page_t m)
2266{
2267	struct	pvo_entry *pvo, *next_pvo;
2268	struct	pvo_head freequeue;
2269	int	wasdead;
2270	pmap_t	pmap;
2271
2272	LIST_INIT(&freequeue);
2273
2274	PV_PAGE_LOCK(m);
2275	LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) {
2276		pmap = pvo->pvo_pmap;
2277		PMAP_LOCK(pmap);
2278		wasdead = (pvo->pvo_vaddr & PVO_DEAD);
2279		if (!wasdead)
2280			moea64_pvo_remove_from_pmap(mmu, pvo);
2281		moea64_pvo_remove_from_page(mmu, pvo);
2282		if (!wasdead)
2283			LIST_INSERT_HEAD(&freequeue, pvo, pvo_vlink);
2284		PMAP_UNLOCK(pmap);
2286	}
2287	KASSERT(!pmap_page_is_mapped(m), ("Page still has mappings"));
2288	KASSERT(!(m->aflags & PGA_WRITEABLE), ("Page still writable"));
2289	PV_PAGE_UNLOCK(m);
2290
2291	/* Clean up UMA allocations */
2292	LIST_FOREACH_SAFE(pvo, &freequeue, pvo_vlink, next_pvo)
2293		free_pvo_entry(pvo);
2294}
2295
2296/*
2297 * Allocate a physical page of memory directly from the phys_avail map.
2298 * Can only be called from moea64_bootstrap before avail start and end are
2299 * calculated.
2300 */
2301vm_offset_t
2302moea64_bootstrap_alloc(vm_size_t size, u_int align)
2303{
2304	vm_offset_t	s, e;
2305	int		i, j;
2306
2307	size = round_page(size);
2308	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
2309		if (align != 0)
2310			s = roundup2(phys_avail[i], align);
2311		else
2312			s = phys_avail[i];
2313		e = s + size;
2314
2315		if (s < phys_avail[i] || e > phys_avail[i + 1])
2316			continue;
2317
2318		if (s + size > platform_real_maxaddr())
2319			continue;
2320
2321		if (s == phys_avail[i]) {
2322			phys_avail[i] += size;
2323		} else if (e == phys_avail[i + 1]) {
2324			phys_avail[i + 1] -= size;
2325		} else {
2326			for (j = phys_avail_count * 2; j > i; j -= 2) {
2327				phys_avail[j] = phys_avail[j - 2];
2328				phys_avail[j + 1] = phys_avail[j - 1];
2329			}
2330
2331			phys_avail[i + 3] = phys_avail[i + 1];
2332			phys_avail[i + 1] = s;
2333			phys_avail[i + 2] = e;
2334			phys_avail_count++;
2335		}
2336
2337		return (s);
2338	}
2339	panic("moea64_bootstrap_alloc: could not allocate memory");
2340}
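
/*
 * Worked example (made-up numbers): with phys_avail[i] = 0x3000 and
 * phys_avail[i + 1] = 0x10000, a request for size 0x2000 at align 0x4000
 * gives s = 0x4000 and e = 0x6000.  Neither end coincides with the
 * region's edges, so the final case splits it into [0x3000, 0x4000) and
 * [0x6000, 0x10000) and returns 0x4000.
 */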
2341
2342static int
2343moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo, struct pvo_head *pvo_head)
2344{
2345	int first = 0, err;	/* 'first' is set only when pvo_head is given */
2346
2347	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
2348	KASSERT(moea64_pvo_find_va(pvo->pvo_pmap, PVO_VADDR(pvo)) == NULL,
2349	    ("Existing mapping for VA %#jx", (uintmax_t)PVO_VADDR(pvo)));
2350
2351	moea64_pvo_enter_calls++;
2352
2353	/*
2354	 * Add to pmap list
2355	 */
2356	RB_INSERT(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
2357
2358	/*
2359	 * Remember if the list was empty; if so, this PVO will be the first
2360	 * item on it.
2361	 */
2362	if (pvo_head != NULL) {
2363		if (LIST_FIRST(pvo_head) == NULL)
2364			first = 1;
2365		LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
2366	}
2367
2368	if (pvo->pvo_vaddr & PVO_WIRED)
2369		pvo->pvo_pmap->pm_stats.wired_count++;
2370	pvo->pvo_pmap->pm_stats.resident_count++;
2371
2372	/*
2373	 * Insert it into the hardware page table
2374	 */
2375	err = MOEA64_PTE_INSERT(mmu, pvo);
2376	if (err != 0) {
2377		panic("moea64_pvo_enter: overflow");
2378	}
2379
2380	moea64_pvo_entries++;
2381
2382	if (pvo->pvo_pmap == kernel_pmap)
2383		isync();
2384
2385#ifdef __powerpc64__
2386	/*
2387	 * Make sure all our bootstrap mappings are in the SLB as soon
2388	 * as virtual memory is switched on.
2389	 */
2390	if (!pmap_bootstrapped)
2391		moea64_bootstrap_slb_prefault(PVO_VADDR(pvo),
2392		    pvo->pvo_vaddr & PVO_LARGE);
2393#endif
2394
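	/*
	 * ENOENT is not an error here: it tells the caller that this was
	 * the first mapping entered for the page.
	 */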
2395	return (first ? ENOENT : 0);
2396}
2397
2398static void
2399moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo)
2400{
2401	struct	vm_page *pg;
2402	int32_t refchg;
2403
2404	KASSERT(pvo->pvo_pmap != NULL, ("Trying to remove PVO with no pmap"));
2405	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
2406	KASSERT(!(pvo->pvo_vaddr & PVO_DEAD), ("Trying to remove dead PVO"));
2407
2408	/*
2409	 * If there is an active pte entry, we need to deactivate it
2410	 */
2411	refchg = MOEA64_PTE_UNSET(mmu, pvo);
2412	if (refchg < 0) {
2413		/*
2414		 * If it was evicted from the page table, be pessimistic and
2415		 * dirty the page.
2416		 */
2417		if (pvo->pvo_pte.prot & VM_PROT_WRITE)
2418			refchg = LPTE_CHG;
2419		else
2420			refchg = 0;
2421	}
2422
2423	/*
2424	 * Update our statistics.
2425	 */
2426	pvo->pvo_pmap->pm_stats.resident_count--;
2427	if (pvo->pvo_vaddr & PVO_WIRED)
2428		pvo->pvo_pmap->pm_stats.wired_count--;
2429
2430	/*
2431	 * Remove this PVO from the pmap list.
2432	 */
2433	RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
2434
2435	/*
2436	 * Mark this for the next sweep
2437	 */
2438	pvo->pvo_vaddr |= PVO_DEAD;
2439
2440	/* Send RC bits to VM */
2441	if ((pvo->pvo_vaddr & PVO_MANAGED) &&
2442	    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
2443		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
2444		if (pg != NULL) {
2445			refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs);
2446			if (refchg & LPTE_CHG)
2447				vm_page_dirty(pg);
2448			if (refchg & LPTE_REF)
2449				vm_page_aflag_set(pg, PGA_REFERENCED);
2450		}
2451	}
2452}
2453
2454static void
2455moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo)
2456{
2457	struct	vm_page *pg;
2458
2459	KASSERT(pvo->pvo_vaddr & PVO_DEAD, ("Trying to delink live page"));
2460
2461	/* Use NULL pmaps as a sentinel for races in page deletion */
2462	if (pvo->pvo_pmap == NULL)
2463		return;
2464	pvo->pvo_pmap = NULL;
2465
2466	/*
2467	 * Update vm about page writeability/executability if managed
2468	 */
2469	PV_LOCKASSERT(pvo->pvo_pte.pa & LPTE_RPGN);
2470	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
2471
2472	if ((pvo->pvo_vaddr & PVO_MANAGED) && pg != NULL) {
2473		LIST_REMOVE(pvo, pvo_vlink);
2474		if (LIST_EMPTY(vm_page_to_pvoh(pg)))
2475			vm_page_aflag_clear(pg, PGA_WRITEABLE | PGA_EXECUTABLE);
2476	}
2477
2478	moea64_pvo_entries--;
2479	moea64_pvo_remove_calls++;
2480}
2481
2482static struct pvo_entry *
2483moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
2484{
2485	struct pvo_entry key;
2486
2487	PMAP_LOCK_ASSERT(pm, MA_OWNED);
2488
2489	key.pvo_vaddr = va & ~ADDR_POFF;
2490	return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key));
2491}
2492
2493static boolean_t
2494moea64_query_bit(mmu_t mmu, vm_page_t m, uint64_t ptebit)
2495{
2496	struct	pvo_entry *pvo;
2497	int64_t ret;
2498	boolean_t rv;
2499
2500	/*
2501	 * See if this bit is stored in the page already.
2502	 */
2503	if (m->md.mdpg_attrs & ptebit)
2504		return (TRUE);
2505
2506	/*
2507	 * Examine each PTE.  Sync so that any pending REF/CHG bits are
2508	 * flushed to the PTEs.
2509	 */
2510	rv = FALSE;
2511	powerpc_sync();
2512	PV_PAGE_LOCK(m);
2513	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2514		ret = 0;
2515
2516		/*
2517		 * See if this PVO has a valid PTE.  If so, fetch the
2518		 * REF/CHG bits from the valid PTE.  If the appropriate
2519		 * ptebit is set, return success.
2520		 */
2521		PMAP_LOCK(pvo->pvo_pmap);
2522		if (!(pvo->pvo_vaddr & PVO_DEAD))
2523			ret = MOEA64_PTE_SYNCH(mmu, pvo);
2524		PMAP_UNLOCK(pvo->pvo_pmap);
2525
2526		if (ret > 0) {
2527			atomic_set_32(&m->md.mdpg_attrs,
2528			    ret & (LPTE_CHG | LPTE_REF));
2529			if (ret & ptebit) {
2530				rv = TRUE;
2531				break;
2532			}
2533		}
2534	}
2535	PV_PAGE_UNLOCK(m);
2536
2537	return (rv);
2538}
2539
2540static u_int
2541	moea64_clear_bit(mmu_t mmu, vm_page_t m, uint64_t ptebit)
2542{
2543	u_int	count;
2544	struct	pvo_entry *pvo;
2545	int64_t ret;
2546
2547	/*
2548	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
2549	 * we can reset the right ones).
2550	 */
2551	powerpc_sync();
2552
2553	/*
2554	 * For each pvo entry, clear the pte's ptebit.
2555	 */
2556	count = 0;
2557	PV_PAGE_LOCK(m);
2558	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2559		ret = 0;
2560
2561		PMAP_LOCK(pvo->pvo_pmap);
2562		if (!(pvo->pvo_vaddr & PVO_DEAD))
2563			ret = MOEA64_PTE_CLEAR(mmu, pvo, ptebit);
2564		PMAP_UNLOCK(pvo->pvo_pmap);
2565
2566		if (ret > 0 && (ret & ptebit))
2567			count++;
2568	}
2569	atomic_clear_32(&m->md.mdpg_attrs, ptebit);
2570	PV_PAGE_UNLOCK(m);
2571
2572	return (count);
2573}
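
/*
 * moea64_query_bit() and moea64_clear_bit() back the MI reference/modify
 * interface; callers pass LPTE_REF or LPTE_CHG.  Bits salvaged from
 * evicted PTEs accumulate in m->md.mdpg_attrs, so a bit may test true even
 * when no currently valid PTE carries it.
 */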
2574
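/*
 * Check whether every page of the physical range [pa, pa + size) is mapped
 * 1:1 by a wired entry in the kernel pmap.  Returns 0 if so and EFAULT
 * otherwise, despite the boolean_t return type.
 */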
2575boolean_t
2576moea64_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
2577{
2578	struct pvo_entry *pvo, key;
2579	vm_offset_t ppa;
2580	int error = 0;
2581
2582	PMAP_LOCK(kernel_pmap);
2583	key.pvo_vaddr = ppa = pa & ~ADDR_POFF;
2584	for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key);
2585	    ppa < pa + size; ppa += PAGE_SIZE,
2586	    pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) {
2587		if (pvo == NULL || (pvo->pvo_pte.pa & LPTE_RPGN) != ppa) {
2588			error = EFAULT;
2589			break;
2590		}
2591	}
2592	PMAP_UNLOCK(kernel_pmap);
2593
2594	return (error);
2595}
2596
2597/*
2598 * Map a set of physical memory pages into the kernel virtual
2599 * address space. Return a pointer to where it is mapped. This
2600 * routine is intended to be used for mapping device memory,
2601 * NOT real memory.
2602 */
2603void *
2604moea64_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
2605{
2606	vm_offset_t va, tmpva, ppa, offset;
2607
2608	ppa = trunc_page(pa);
2609	offset = pa & PAGE_MASK;
2610	size = roundup2(offset + size, PAGE_SIZE);
2611
2612	va = kva_alloc(size);
2613
2614	if (va == 0)
2615		panic("moea64_mapdev: Couldn't alloc kernel virtual memory");
2616
2617	for (tmpva = va; size > 0;) {
2618		moea64_kenter_attr(mmu, tmpva, ppa, ma);
2619		size -= PAGE_SIZE;
2620		tmpva += PAGE_SIZE;
2621		ppa += PAGE_SIZE;
2622	}
2623
2624	return ((void *)(va + offset));
2625}
2626
2627void *
2628moea64_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
2629{
2630
2631	return moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT);
2632}
2633
2634void
2635moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
2636{
2637	vm_offset_t base, offset;
2638
2639	base = trunc_page(va);
2640	offset = va & PAGE_MASK;
2641	size = roundup2(offset + size, PAGE_SIZE);
2642
2643	kva_free(base, size);
2644}
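
/*
 * Usage sketch (the register address is made up): map 4KB of device
 * registers uncached, then release the KVA when finished:
 *
 *	void *regs = moea64_mapdev_attr(mmu, 0xf4000000UL, PAGE_SIZE,
 *	    VM_MEMATTR_UNCACHEABLE);
 *	...
 *	moea64_unmapdev(mmu, (vm_offset_t)regs, PAGE_SIZE);
 */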
2645
2646void
2647moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
2648{
2649	struct pvo_entry *pvo;
2650	vm_offset_t lim;
2651	vm_paddr_t pa;
2652	vm_size_t len;
2653
2654	PMAP_LOCK(pm);
2655	while (sz > 0) {
2656		lim = round_page(va + 1);	/* ensure progress for page-aligned va */
2657		len = MIN(lim - va, sz);
2658		pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
2659		if (pvo != NULL && !(pvo->pvo_pte.pa & LPTE_I)) {
2660			pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va & ADDR_POFF);
2661			moea64_syncicache(mmu, pm, va, pa, len);
2662		}
2663		va += len;
2664		sz -= len;
2665	}
2666	PMAP_UNLOCK(pm);
2667}
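
/*
 * Example of the chunking above (illustrative): for va = 0x10f80 and
 * sz = 0x100, the first iteration syncs len = 0x80 up to the page boundary
 * at 0x11000, and the second syncs the remaining 0x80 from there.
 */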
2668
2669void
2670moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
2671{
2672
2673	*va = (void *)pa;
2674}
2675
2676extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
2677
2678void
2679moea64_scan_init(mmu_t mmu)
2680{
2681	struct pvo_entry *pvo;
2682	vm_offset_t va;
2683	int i;
2684
2685	if (!do_minidump) {
2686		/* Initialize phys. segments for dumpsys(). */
2687		memset(&dump_map, 0, sizeof(dump_map));
2688		mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
2689		for (i = 0; i < pregions_sz; i++) {
2690			dump_map[i].pa_start = pregions[i].mr_start;
2691			dump_map[i].pa_size = pregions[i].mr_size;
2692		}
2693		return;
2694	}
2695
2696	/* Virtual segments for minidumps: */
2697	memset(&dump_map, 0, sizeof(dump_map));
2698
2699	/* 1st: kernel .data and .bss. */
2700	dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
2701	dump_map[0].pa_size = round_page((uintptr_t)_end) -
2702	    dump_map[0].pa_start;
2703
2704	/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
2705	dump_map[1].pa_start = (vm_paddr_t)msgbufp->msg_ptr;
2706	dump_map[1].pa_size = round_page(msgbufp->msg_size);
2707
2708	/* 3rd: kernel VM. */
2709	va = dump_map[1].pa_start + dump_map[1].pa_size;
2710	/* Find start of next chunk (from va). */
2711	while (va < virtual_end) {
2712		/* Don't dump the buffer cache. */
2713		if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
2714			va = kmi.buffer_eva;
2715			continue;
2716		}
2717		pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF);
2718		if (pvo != NULL && !(pvo->pvo_vaddr & PVO_DEAD))
2719			break;
2720		va += PAGE_SIZE;
2721	}
2722	if (va < virtual_end) {
2723		dump_map[2].pa_start = va;
2724		va += PAGE_SIZE;
2725		/* Find last page in chunk. */
2726		while (va < virtual_end) {
2727			/* Don't run into the buffer cache. */
2728			if (va == kmi.buffer_sva)
2729				break;
2730			pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF);
2731			if (pvo != NULL && !(pvo->pvo_vaddr & PVO_DEAD))
2732				break;
2733			va += PAGE_SIZE;
2734		}
2735		dump_map[2].pa_size = va - dump_map[2].pa_start;
2736	}
2737}
2738