1/*-
2 * Copyright (c) 2001 The NetBSD Foundation, Inc.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to The NetBSD Foundation
6 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *        This product includes software developed by the NetBSD
19 *        Foundation, Inc. and its contributors.
20 * 4. Neither the name of The NetBSD Foundation nor the names of its
21 *    contributors may be used to endorse or promote products derived
22 *    from this software without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36/*-
37 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
38 * Copyright (C) 1995, 1996 TooLs GmbH.
39 * All rights reserved.
40 *
41 * Redistribution and use in source and binary forms, with or without
42 * modification, are permitted provided that the following conditions
43 * are met:
44 * 1. Redistributions of source code must retain the above copyright
45 *    notice, this list of conditions and the following disclaimer.
46 * 2. Redistributions in binary form must reproduce the above copyright
47 *    notice, this list of conditions and the following disclaimer in the
48 *    documentation and/or other materials provided with the distribution.
49 * 3. All advertising materials mentioning features or use of this software
50 *    must display the following acknowledgement:
51 *	This product includes software developed by TooLs GmbH.
52 * 4. The name of TooLs GmbH may not be used to endorse or promote products
53 *    derived from this software without specific prior written permission.
54 *
55 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
60 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
61 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
62 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
63 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
64 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 *
66 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
67 */
68/*-
69 * Copyright (C) 2001 Benno Rice.
70 * All rights reserved.
71 *
72 * Redistribution and use in source and binary forms, with or without
73 * modification, are permitted provided that the following conditions
74 * are met:
75 * 1. Redistributions of source code must retain the above copyright
76 *    notice, this list of conditions and the following disclaimer.
77 * 2. Redistributions in binary form must reproduce the above copyright
78 *    notice, this list of conditions and the following disclaimer in the
79 *    documentation and/or other materials provided with the distribution.
80 *
81 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
82 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
83 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
84 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
85 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
86 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
87 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
88 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
89 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
90 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91 */
92
93#include <sys/cdefs.h>
94__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea.c 201758 2010-01-07 21:01:37Z mbr $");
95
96/*
97 * Manages physical address maps.
98 *
99 * In addition to hardware address maps, this module is called upon to
100 * provide software-use-only maps which may or may not be stored in the
101 * same form as hardware maps.  These pseudo-maps are used to store
102 * intermediate results from copy operations to and from address spaces.
103 *
104 * Since the information managed by this module is also stored by the
105 * logical address mapping module, this module may throw away valid virtual
106 * to physical mappings at almost any time.  However, invalidations of
107 * mappings must be done as requested.
108 *
109 * In order to cope with hardware architectures which make virtual to
110 * physical map invalidates expensive, this module may delay invalidate
111 * reduced protection operations until such time as they are actually
112 * necessary.  This module is given full information as to which processors
113 * are currently using which maps, and to when physical maps must be made
114 * correct.
115 */
116
117#include "opt_kstack_pages.h"
118
119#include <sys/param.h>
120#include <sys/kernel.h>
121#include <sys/ktr.h>
122#include <sys/lock.h>
123#include <sys/msgbuf.h>
124#include <sys/mutex.h>
125#include <sys/proc.h>
126#include <sys/sysctl.h>
127#include <sys/systm.h>
128#include <sys/vmmeter.h>
129
130#include <dev/ofw/openfirm.h>
131
132#include <vm/vm.h>
133#include <vm/vm_param.h>
134#include <vm/vm_kern.h>
135#include <vm/vm_page.h>
136#include <vm/vm_map.h>
137#include <vm/vm_object.h>
138#include <vm/vm_extern.h>
139#include <vm/vm_pageout.h>
140#include <vm/vm_pager.h>
141#include <vm/uma.h>
142
143#include <machine/cpu.h>
144#include <machine/platform.h>
145#include <machine/bat.h>
146#include <machine/frame.h>
147#include <machine/md_var.h>
148#include <machine/psl.h>
149#include <machine/pte.h>
150#include <machine/smp.h>
151#include <machine/sr.h>
152#include <machine/mmuvar.h>
153
154#include "mmu_if.h"
155
156#define	MOEA_DEBUG
157
158#define TODO	panic("%s: not implemented", __func__);
159
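/*
 * A VSID encodes its segment register number in the low 4 bits and a
 * 20-bit hash value above that; the macros below pack and unpack it.
 */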
160#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
161#define	VSID_TO_SR(vsid)	((vsid) & 0xf)
162#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
163
164#define	PVO_PTEGIDX_MASK	0x007		/* which PTEG slot */
165#define	PVO_PTEGIDX_VALID	0x008		/* slot is valid */
166#define	PVO_WIRED		0x010		/* PVO entry is wired */
167#define	PVO_MANAGED		0x020		/* PVO entry is managed */
168#define	PVO_EXECUTABLE		0x040		/* PVO entry is executable */
169#define	PVO_BOOTSTRAP		0x080		/* PVO entry allocated during
170						   bootstrap */
171#define PVO_FAKE		0x100		/* fictitious phys page */
172#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
173#define	PVO_ISEXECUTABLE(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
174#define PVO_ISFAKE(pvo)		((pvo)->pvo_vaddr & PVO_FAKE)
175#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
176#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
177#define	PVO_PTEGIDX_CLR(pvo)	\
178	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
179#define	PVO_PTEGIDX_SET(pvo, i)	\
180	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
181
182#define	MOEA_PVO_CHECK(pvo)
183
184struct ofw_map {
185	vm_offset_t	om_va;
186	vm_size_t	om_len;
187	vm_offset_t	om_pa;
188	u_int		om_mode;
189};
190
191/*
192 * Map of physical memory regions.
193 */
194static struct	mem_region *regions;
195static struct	mem_region *pregions;
196u_int           phys_avail_count;
197int		regions_sz, pregions_sz;
198static struct	ofw_map *translations;
199
200extern struct pmap ofw_pmap;
201
202/*
203 * Lock for the pteg and pvo tables.
204 */
205struct mtx	moea_table_mutex;
206
207/* tlbie instruction synchronization */
208static struct mtx tlbie_mtx;
209
210/*
211 * PTEG data.
212 */
213static struct	pteg *moea_pteg_table;
214u_int		moea_pteg_count;
215u_int		moea_pteg_mask;
216
217/*
218 * PVO data.
219 */
220struct	pvo_head *moea_pvo_table;		/* pvo entries by pteg index */
221struct	pvo_head moea_pvo_kunmanaged =
222    LIST_HEAD_INITIALIZER(moea_pvo_kunmanaged);	/* list of unmanaged pages */
223struct	pvo_head moea_pvo_unmanaged =
224    LIST_HEAD_INITIALIZER(moea_pvo_unmanaged);	/* list of unmanaged pages */
225
226uma_zone_t	moea_upvo_zone;	/* zone for pvo entries for unmanaged pages */
227uma_zone_t	moea_mpvo_zone;	/* zone for pvo entries for managed pages */
228
229#define	BPVO_POOL_SIZE	32768
230static struct	pvo_entry *moea_bpvo_pool;
231static int	moea_bpvo_pool_index = 0;
232
233#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
234static u_int	moea_vsid_bitmap[NPMAPS / VSID_NBPW];
235
236static boolean_t moea_initialized = FALSE;
237
238/*
239 * Statistics.
240 */
241u_int	moea_pte_valid = 0;
242u_int	moea_pte_overflow = 0;
243u_int	moea_pte_replacements = 0;
244u_int	moea_pvo_entries = 0;
245u_int	moea_pvo_enter_calls = 0;
246u_int	moea_pvo_remove_calls = 0;
247u_int	moea_pte_spills = 0;
248SYSCTL_INT(_machdep, OID_AUTO, moea_pte_valid, CTLFLAG_RD, &moea_pte_valid,
249    0, "");
250SYSCTL_INT(_machdep, OID_AUTO, moea_pte_overflow, CTLFLAG_RD,
251    &moea_pte_overflow, 0, "");
252SYSCTL_INT(_machdep, OID_AUTO, moea_pte_replacements, CTLFLAG_RD,
253    &moea_pte_replacements, 0, "");
254SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_entries, CTLFLAG_RD, &moea_pvo_entries,
255    0, "");
256SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_enter_calls, CTLFLAG_RD,
257    &moea_pvo_enter_calls, 0, "");
258SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_remove_calls, CTLFLAG_RD,
259    &moea_pvo_remove_calls, 0, "");
260SYSCTL_INT(_machdep, OID_AUTO, moea_pte_spills, CTLFLAG_RD,
261    &moea_pte_spills, 0, "");
262
263/*
264 * Allocate physical memory for use in moea_bootstrap.
265 */
266static vm_offset_t	moea_bootstrap_alloc(vm_size_t, u_int);
267
268/*
269 * PTE calls.
270 */
271static int		moea_pte_insert(u_int, struct pte *);
272
273/*
274 * PVO calls.
275 */
276static int	moea_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
277		    vm_offset_t, vm_offset_t, u_int, int);
278static void	moea_pvo_remove(struct pvo_entry *, int);
279static struct	pvo_entry *moea_pvo_find_va(pmap_t, vm_offset_t, int *);
280static struct	pte *moea_pvo_to_pte(const struct pvo_entry *, int);
281
282/*
283 * Utility routines.
284 */
285static void		moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
286			    vm_prot_t, boolean_t);
287static void		moea_syncicache(vm_offset_t, vm_size_t);
288static boolean_t	moea_query_bit(vm_page_t, int);
289static u_int		moea_clear_bit(vm_page_t, int, int *);
290static void		moea_kremove(mmu_t, vm_offset_t);
291int		moea_pte_spill(vm_offset_t);
292
293/*
294 * Kernel MMU interface
295 */
296void moea_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
297void moea_clear_modify(mmu_t, vm_page_t);
298void moea_clear_reference(mmu_t, vm_page_t);
299void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
300void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
301void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
302    vm_prot_t);
303void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
304vm_paddr_t moea_extract(mmu_t, pmap_t, vm_offset_t);
305vm_page_t moea_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
306void moea_init(mmu_t);
307boolean_t moea_is_modified(mmu_t, vm_page_t);
308boolean_t moea_ts_referenced(mmu_t, vm_page_t);
309vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
310boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
311int moea_page_wired_mappings(mmu_t, vm_page_t);
312void moea_pinit(mmu_t, pmap_t);
313void moea_pinit0(mmu_t, pmap_t);
314void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
315void moea_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
316void moea_qremove(mmu_t, vm_offset_t, int);
317void moea_release(mmu_t, pmap_t);
318void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
319void moea_remove_all(mmu_t, vm_page_t);
320void moea_remove_write(mmu_t, vm_page_t);
321void moea_zero_page(mmu_t, vm_page_t);
322void moea_zero_page_area(mmu_t, vm_page_t, int, int);
323void moea_zero_page_idle(mmu_t, vm_page_t);
324void moea_activate(mmu_t, struct thread *);
325void moea_deactivate(mmu_t, struct thread *);
326void moea_cpu_bootstrap(mmu_t, int);
327void moea_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
328void *moea_mapdev(mmu_t, vm_offset_t, vm_size_t);
329void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t);
330vm_offset_t moea_kextract(mmu_t, vm_offset_t);
331void moea_kenter(mmu_t, vm_offset_t, vm_offset_t);
332boolean_t moea_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
333static void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
334
335static mmu_method_t moea_methods[] = {
336	MMUMETHOD(mmu_change_wiring,	moea_change_wiring),
337	MMUMETHOD(mmu_clear_modify,	moea_clear_modify),
338	MMUMETHOD(mmu_clear_reference,	moea_clear_reference),
339	MMUMETHOD(mmu_copy_page,	moea_copy_page),
340	MMUMETHOD(mmu_enter,		moea_enter),
341	MMUMETHOD(mmu_enter_object,	moea_enter_object),
342	MMUMETHOD(mmu_enter_quick,	moea_enter_quick),
343	MMUMETHOD(mmu_extract,		moea_extract),
344	MMUMETHOD(mmu_extract_and_hold,	moea_extract_and_hold),
345	MMUMETHOD(mmu_init,		moea_init),
346	MMUMETHOD(mmu_is_modified,	moea_is_modified),
347	MMUMETHOD(mmu_ts_referenced,	moea_ts_referenced),
348	MMUMETHOD(mmu_map,     		moea_map),
349	MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick),
350	MMUMETHOD(mmu_page_wired_mappings,moea_page_wired_mappings),
351	MMUMETHOD(mmu_pinit,		moea_pinit),
352	MMUMETHOD(mmu_pinit0,		moea_pinit0),
353	MMUMETHOD(mmu_protect,		moea_protect),
354	MMUMETHOD(mmu_qenter,		moea_qenter),
355	MMUMETHOD(mmu_qremove,		moea_qremove),
356	MMUMETHOD(mmu_release,		moea_release),
357	MMUMETHOD(mmu_remove,		moea_remove),
358	MMUMETHOD(mmu_remove_all,      	moea_remove_all),
359	MMUMETHOD(mmu_remove_write,	moea_remove_write),
360	MMUMETHOD(mmu_sync_icache,	moea_sync_icache),
361	MMUMETHOD(mmu_zero_page,       	moea_zero_page),
362	MMUMETHOD(mmu_zero_page_area,	moea_zero_page_area),
363	MMUMETHOD(mmu_zero_page_idle,	moea_zero_page_idle),
364	MMUMETHOD(mmu_activate,		moea_activate),
365	MMUMETHOD(mmu_deactivate,      	moea_deactivate),
366
367	/* Internal interfaces */
368	MMUMETHOD(mmu_bootstrap,       	moea_bootstrap),
369	MMUMETHOD(mmu_cpu_bootstrap,   	moea_cpu_bootstrap),
370	MMUMETHOD(mmu_mapdev,		moea_mapdev),
371	MMUMETHOD(mmu_unmapdev,		moea_unmapdev),
372	MMUMETHOD(mmu_kextract,		moea_kextract),
373	MMUMETHOD(mmu_kenter,		moea_kenter),
374	MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped),
375
376	{ 0, 0 }
377};
378
379static mmu_def_t oea_mmu = {
380	MMU_TYPE_OEA,
381	moea_methods,
382	0
383};
384MMU_DEF(oea_mmu);
385
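/*
 * Invalidate the TLB entry for a single virtual address.  The tlbie/tlbsync
 * sequence is serialized with a spin mutex since only one tlbie may be in
 * flight at a time.
 */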
386static void
387tlbie(vm_offset_t va)
388{
389
390	mtx_lock_spin(&tlbie_mtx);
391	__asm __volatile("tlbie %0" :: "r"(va));
392	__asm __volatile("tlbsync");
393	powerpc_sync();
394	mtx_unlock_spin(&tlbie_mtx);
395}
396
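/*
 * Invalidate the entire TLB by issuing tlbie over a 256KB stride of
 * effective addresses, which covers every TLB congruence class.
 */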
397static void
398tlbia(void)
399{
400	vm_offset_t va;
401
402	for (va = 0; va < 0x00040000; va += 0x00001000) {
403		__asm __volatile("tlbie %0" :: "r"(va));
404		powerpc_sync();
405	}
406	__asm __volatile("tlbsync");
407	powerpc_sync();
408}
409
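/*
 * Return the segment register value that maps the given virtual address
 * (the top four bits of the address select one of the 16 registers).
 */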
410static __inline int
411va_to_sr(u_int *sr, vm_offset_t va)
412{
413	return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
414}
415
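/*
 * Compute the primary PTEG index for a virtual address by XORing the VSID
 * with the page index of the address, as defined by the OEA hashed page
 * table architecture.
 */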
416static __inline u_int
417va_to_pteg(u_int sr, vm_offset_t addr)
418{
419	u_int hash;
420
421	hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
422	    ADDR_PIDX_SHFT);
423	return (hash & moea_pteg_mask);
424}
425
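/*
 * Return the PVO list head for a physical address, optionally returning the
 * vm_page as well.  Pages without a vm_page structure fall back to the
 * shared unmanaged list.
 */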
426static __inline struct pvo_head *
427pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p)
428{
429	struct	vm_page *pg;
430
431	pg = PHYS_TO_VM_PAGE(pa);
432
433	if (pg_p != NULL)
434		*pg_p = pg;
435
436	if (pg == NULL)
437		return (&moea_pvo_unmanaged);
438
439	return (&pg->md.mdpg_pvoh);
440}
441
442static __inline struct pvo_head *
443vm_page_to_pvoh(vm_page_t m)
444{
445
446	return (&m->md.mdpg_pvoh);
447}
448
449static __inline void
450moea_attr_clear(vm_page_t m, int ptebit)
451{
452
453	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
454	m->md.mdpg_attrs &= ~ptebit;
455}
456
457static __inline int
458moea_attr_fetch(vm_page_t m)
459{
460
461	return (m->md.mdpg_attrs);
462}
463
464static __inline void
465moea_attr_save(vm_page_t m, int ptebit)
466{
467
468	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
469	m->md.mdpg_attrs |= ptebit;
470}
471
472static __inline int
473moea_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
474{
475	if (pt->pte_hi == pvo_pt->pte_hi)
476		return (1);
477
478	return (0);
479}
480
481static __inline int
482moea_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
483{
484	return (pt->pte_hi & ~PTE_VALID) ==
485	    (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
486	    ((va >> ADDR_API_SHFT) & PTE_API) | which);
487}
488
489static __inline void
490moea_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
491{
492
493	mtx_assert(&moea_table_mutex, MA_OWNED);
494
495	/*
496	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
497	 * set when the real pte is set in memory.
498	 *
499	 * Note: Don't set the valid bit for correct operation of tlb update.
500	 */
501	pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
502	    (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
503	pt->pte_lo = pte_lo;
504}
505
506static __inline void
507moea_pte_synch(struct pte *pt, struct pte *pvo_pt)
508{
509
510	mtx_assert(&moea_table_mutex, MA_OWNED);
511	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
512}
513
514static __inline void
515moea_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
516{
517
518	mtx_assert(&moea_table_mutex, MA_OWNED);
519
520	/*
521	 * As shown in Section 7.6.3.2.3
522	 */
523	pt->pte_lo &= ~ptebit;
524	tlbie(va);
525}
526
527static __inline void
528moea_pte_set(struct pte *pt, struct pte *pvo_pt)
529{
530
531	mtx_assert(&moea_table_mutex, MA_OWNED);
532	pvo_pt->pte_hi |= PTE_VALID;
533
534	/*
535	 * Update the PTE as defined in section 7.6.3.1.
536	 * Note that the REF/CHG bits are from pvo_pt and thus should have
537	 * been saved so this routine can restore them (if desired).
538	 */
539	pt->pte_lo = pvo_pt->pte_lo;
540	powerpc_sync();
541	pt->pte_hi = pvo_pt->pte_hi;
542	powerpc_sync();
543	moea_pte_valid++;
544}
545
546static __inline void
547moea_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
548{
549
550	mtx_assert(&moea_table_mutex, MA_OWNED);
551	pvo_pt->pte_hi &= ~PTE_VALID;
552
553	/*
554	 * Force the ref & chg bits back into the PTEs.
555	 */
556	powerpc_sync();
557
558	/*
559	 * Invalidate the pte.
560	 */
561	pt->pte_hi &= ~PTE_VALID;
562
563	tlbie(va);
564
565	/*
566	 * Save the ref & chg bits.
567	 */
568	moea_pte_synch(pt, pvo_pt);
569	moea_pte_valid--;
570}
571
572static __inline void
573moea_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
574{
575
576	/*
577	 * Invalidate the PTE
578	 */
579	moea_pte_unset(pt, pvo_pt, va);
580	moea_pte_set(pt, pvo_pt);
581}
582
583/*
584 * Quick sort callout for comparing memory regions.
585 */
586static int	mr_cmp(const void *a, const void *b);
587static int	om_cmp(const void *a, const void *b);
588
589static int
590mr_cmp(const void *a, const void *b)
591{
592	const struct	mem_region *regiona;
593	const struct	mem_region *regionb;
594
595	regiona = a;
596	regionb = b;
597	if (regiona->mr_start < regionb->mr_start)
598		return (-1);
599	else if (regiona->mr_start > regionb->mr_start)
600		return (1);
601	else
602		return (0);
603}
604
605static int
606om_cmp(const void *a, const void *b)
607{
608	const struct	ofw_map *mapa;
609	const struct	ofw_map *mapb;
610
611	mapa = a;
612	mapb = b;
613	if (mapa->om_pa < mapb->om_pa)
614		return (-1);
615	else if (mapa->om_pa > mapb->om_pa)
616		return (1);
617	else
618		return (0);
619}
620
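/*
 * Per-CPU MMU setup: load the BAT registers, initialize the segment
 * registers, point SDR1 at the PTEG table and flush the TLB.  Called for
 * the boot processor and again for each AP that is brought up.
 */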
621void
622moea_cpu_bootstrap(mmu_t mmup, int ap)
623{
624	u_int sdr;
625	int i;
626
627	if (ap) {
628		powerpc_sync();
629		__asm __volatile("mtdbatu 0,%0" :: "r"(battable[0].batu));
630		__asm __volatile("mtdbatl 0,%0" :: "r"(battable[0].batl));
631		isync();
632		__asm __volatile("mtibatu 0,%0" :: "r"(battable[0].batu));
633		__asm __volatile("mtibatl 0,%0" :: "r"(battable[0].batl));
634		isync();
635	}
636
637	__asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu));
638	__asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
639	isync();
640
641	__asm __volatile("mtibatu 1,%0" :: "r"(0));
642	__asm __volatile("mtdbatu 2,%0" :: "r"(0));
643	__asm __volatile("mtibatu 2,%0" :: "r"(0));
644	__asm __volatile("mtdbatu 3,%0" :: "r"(0));
645	__asm __volatile("mtibatu 3,%0" :: "r"(0));
646	isync();
647
648	for (i = 0; i < 16; i++)
649		mtsrin(i << ADDR_SR_SHFT, EMPTY_SEGMENT);
650
651	__asm __volatile("mtsr %0,%1" :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
652	__asm __volatile("mtsr %0,%1" :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT));
653	powerpc_sync();
654
655	sdr = (u_int)moea_pteg_table | (moea_pteg_mask >> 10);
656	__asm __volatile("mtsdr1 %0" :: "r"(sdr));
657	isync();
658
659	tlbia();
660}
661
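/*
 * Bootstrap the MMU: set up BAT mappings for RAM and device space, size and
 * allocate the PTEG and PVO tables, import the Open Firmware translations,
 * initialize the kernel pmap, and carve out early virtual address space for
 * the thread0 kstack, the message buffer and the dynamic per-CPU area.
 */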
662void
663moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
664{
665	ihandle_t	mmui;
666	phandle_t	chosen, mmu;
667	int		sz;
668	int		i, j;
669	int		ofw_mappings;
670	vm_size_t	size, physsz, hwphyssz;
671	vm_offset_t	pa, va, off;
672	void		*dpcpu;
673
674        /*
675         * Set up BAT0 to map the lowest 256 MB area
676         */
677        battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
678        battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
679
680        /*
681         * Map PCI memory space.
682         */
683        battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
684        battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
685
686        battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
687        battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);
688
689        battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW);
690        battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs);
691
692        battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW);
693        battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs);
694
695        /*
696         * Map obio devices.
697         */
698        battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW);
699        battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs);
700
701	/*
702	 * Use an IBAT and a DBAT to map the bottom segment of memory
703	 * where we are.
704	 */
705	__asm (".balign 32; \n"
706	       "mtibatu 0,%0; mtibatl 0,%1; isync; \n"
707	       "mtdbatu 0,%0; mtdbatl 0,%1; isync"
708	    :: "r"(battable[0].batu), "r"(battable[0].batl));
709
710	/* map pci space */
711	__asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu));
712	__asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
713	isync();
714
715	/* set global direct map flag */
716	hw_direct_map = 1;
717
718	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
719	CTR0(KTR_PMAP, "moea_bootstrap: physical memory");
720
721	qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp);
722	for (i = 0; i < pregions_sz; i++) {
723		vm_offset_t pa;
724		vm_offset_t end;
725
726		CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)",
727			pregions[i].mr_start,
728			pregions[i].mr_start + pregions[i].mr_size,
729			pregions[i].mr_size);
730		/*
731		 * Install entries into the BAT table to allow all
732		 * of physmem to be covered by on-demand BAT entries.
733		 * The loop will sometimes set the same battable element
734		 * twice, but that's fine since they won't be used for
735		 * a while yet.
736		 */
737		pa = pregions[i].mr_start & 0xf0000000;
738		end = pregions[i].mr_start + pregions[i].mr_size;
739		do {
740                        u_int n = pa >> ADDR_SR_SHFT;
741
742			battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW);
743			battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs);
744			pa += SEGMENT_LENGTH;
745		} while (pa < end);
746	}
747
748	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
749		panic("moea_bootstrap: phys_avail too small");
750	qsort(regions, regions_sz, sizeof(*regions), mr_cmp);
751	phys_avail_count = 0;
752	physsz = 0;
753	hwphyssz = 0;
754	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
755	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
756		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
757		    regions[i].mr_start + regions[i].mr_size,
758		    regions[i].mr_size);
759		if (hwphyssz != 0 &&
760		    (physsz + regions[i].mr_size) >= hwphyssz) {
761			if (physsz < hwphyssz) {
762				phys_avail[j] = regions[i].mr_start;
763				phys_avail[j + 1] = regions[i].mr_start +
764				    hwphyssz - physsz;
765				physsz = hwphyssz;
766				phys_avail_count++;
767			}
768			break;
769		}
770		phys_avail[j] = regions[i].mr_start;
771		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
772		phys_avail_count++;
773		physsz += regions[i].mr_size;
774	}
775	physmem = btoc(physsz);
776
777	/*
778	 * Allocate PTEG table.
779	 */
780#ifdef PTEGCOUNT
781	moea_pteg_count = PTEGCOUNT;
782#else
783	moea_pteg_count = 0x1000;
784
785	while (moea_pteg_count < physmem)
786		moea_pteg_count <<= 1;
787
788	moea_pteg_count >>= 1;
789#endif /* PTEGCOUNT */
790
791	size = moea_pteg_count * sizeof(struct pteg);
792	CTR2(KTR_PMAP, "moea_bootstrap: %d PTEGs, %d bytes", moea_pteg_count,
793	    size);
794	moea_pteg_table = (struct pteg *)moea_bootstrap_alloc(size, size);
795	CTR1(KTR_PMAP, "moea_bootstrap: PTEG table at %p", moea_pteg_table);
796	bzero((void *)moea_pteg_table, moea_pteg_count * sizeof(struct pteg));
797	moea_pteg_mask = moea_pteg_count - 1;
798
799	/*
800	 * Allocate pv/overflow lists.
801	 */
802	size = sizeof(struct pvo_head) * moea_pteg_count;
803	moea_pvo_table = (struct pvo_head *)moea_bootstrap_alloc(size,
804	    PAGE_SIZE);
805	CTR1(KTR_PMAP, "moea_bootstrap: PVO table at %p", moea_pvo_table);
806	for (i = 0; i < moea_pteg_count; i++)
807		LIST_INIT(&moea_pvo_table[i]);
808
809	/*
810	 * Initialize the lock that synchronizes access to the pteg and pvo
811	 * tables.
812	 */
813	mtx_init(&moea_table_mutex, "pmap table", NULL, MTX_DEF |
814	    MTX_RECURSE);
815
816	mtx_init(&tlbie_mtx, "tlbie", NULL, MTX_SPIN);
817
818	/*
819	 * Initialise the unmanaged pvo pool.
820	 */
821	moea_bpvo_pool = (struct pvo_entry *)moea_bootstrap_alloc(
822		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
823	moea_bpvo_pool_index = 0;
824
825	/*
826	 * Make sure kernel vsid is allocated as well as VSID 0.
827	 */
828	moea_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
829		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
830	moea_vsid_bitmap[0] |= 1;
831
832	/*
833	 * Set up the Open Firmware pmap and add its mappings.
834	 */
835	moea_pinit(mmup, &ofw_pmap);
836	ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
837	ofw_pmap.pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT;
838	if ((chosen = OF_finddevice("/chosen")) == -1)
839		panic("moea_bootstrap: can't find /chosen");
840	OF_getprop(chosen, "mmu", &mmui, 4);
841	if ((mmu = OF_instance_to_package(mmui)) == -1)
842		panic("moea_bootstrap: can't get mmu package");
843	if ((sz = OF_getproplen(mmu, "translations")) == -1)
844		panic("moea_bootstrap: can't get ofw translation count");
845	translations = NULL;
846	for (i = 0; phys_avail[i] != 0; i += 2) {
847		if (phys_avail[i + 1] >= sz) {
848			translations = (struct ofw_map *)phys_avail[i];
849			break;
850		}
851	}
852	if (translations == NULL)
853		panic("moea_bootstrap: no space to copy translations");
854	bzero(translations, sz);
855	if (OF_getprop(mmu, "translations", translations, sz) == -1)
856		panic("moea_bootstrap: can't get ofw translations");
857	CTR0(KTR_PMAP, "moea_bootstrap: translations");
858	sz /= sizeof(*translations);
859	qsort(translations, sz, sizeof (*translations), om_cmp);
860	for (i = 0, ofw_mappings = 0; i < sz; i++) {
861		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
862		    translations[i].om_pa, translations[i].om_va,
863		    translations[i].om_len);
864
865		/*
866		 * If the mapping is 1:1, let the RAM and device on-demand
867		 * BAT tables take care of the translation.
868		 */
869		if (translations[i].om_va == translations[i].om_pa)
870			continue;
871
872		/* Enter the pages */
873		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
874			struct	vm_page m;
875
876			m.phys_addr = translations[i].om_pa + off;
877			PMAP_LOCK(&ofw_pmap);
878			moea_enter_locked(&ofw_pmap,
879				   translations[i].om_va + off, &m,
880				   VM_PROT_ALL, 1);
881			PMAP_UNLOCK(&ofw_pmap);
882			ofw_mappings++;
883		}
884	}
885
886	/*
887	 * Calculate the last available physical address.
888	 */
889	for (i = 0; phys_avail[i + 2] != 0; i += 2)
890		;
891	Maxmem = powerpc_btop(phys_avail[i + 1]);
892
893	/*
894	 * Initialize the kernel pmap (which is statically allocated).
895	 */
896	PMAP_LOCK_INIT(kernel_pmap);
897	for (i = 0; i < 16; i++) {
898		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
899	}
900	kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
901	kernel_pmap->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT;
902	kernel_pmap->pm_active = ~0;
903
904	moea_cpu_bootstrap(mmup, 0);
905
906	pmap_bootstrapped++;
907
908	/*
909	 * Set the start and end of kva.
910	 */
911	virtual_avail = VM_MIN_KERNEL_ADDRESS;
912	virtual_end = VM_MAX_KERNEL_ADDRESS;
913
914	/*
915	 * Allocate a kernel stack with a guard page for thread0 and map it
916	 * into the kernel page map.
917	 */
918	pa = moea_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
919	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
920	virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
921	CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va);
922	thread0.td_kstack = va;
923	thread0.td_kstack_pages = KSTACK_PAGES;
924	for (i = 0; i < KSTACK_PAGES; i++) {
925		moea_kenter(mmup, va, pa);
926		pa += PAGE_SIZE;
927		va += PAGE_SIZE;
928	}
929
930	/*
931	 * Allocate virtual address space for the message buffer.
932	 */
933	pa = msgbuf_phys = moea_bootstrap_alloc(MSGBUF_SIZE, PAGE_SIZE);
934	msgbufp = (struct msgbuf *)virtual_avail;
935	va = virtual_avail;
936	virtual_avail += round_page(MSGBUF_SIZE);
937	while (va < virtual_avail) {
938		moea_kenter(mmup, va, pa);
939		pa += PAGE_SIZE;
940		va += PAGE_SIZE;
941	}
942
943	/*
944	 * Allocate virtual address space for the dynamic percpu area.
945	 */
946	pa = moea_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
947	dpcpu = (void *)virtual_avail;
948	va = virtual_avail;
949	virtual_avail += DPCPU_SIZE;
950	while (va < virtual_avail) {
951		moea_kenter(mmup, va, pa);
952		pa += PAGE_SIZE;
953		va += PAGE_SIZE;
954	}
955	dpcpu_init(dpcpu, 0);
956}
957
958/*
959 * Activate a user pmap.  The pmap must be activated before its address
960 * space can be accessed in any way.
961 */
962void
963moea_activate(mmu_t mmu, struct thread *td)
964{
965	pmap_t	pm, pmr;
966
967	/*
968	 * Load all the data we need up front to encourage the compiler to
969	 * not issue any loads while we have interrupts disabled below.
970	 */
971	pm = &td->td_proc->p_vmspace->vm_pmap;
972	pmr = pm->pmap_phys;
973
974	pm->pm_active |= PCPU_GET(cpumask);
975	PCPU_SET(curpmap, pmr);
976}
977
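/*
 * Deactivate a pmap: remove the current CPU from its active set and clear
 * the cached pmap pointer.
 */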
978void
979moea_deactivate(mmu_t mmu, struct thread *td)
980{
981	pmap_t	pm;
982
983	pm = &td->td_proc->p_vmspace->vm_pmap;
984	pm->pm_active &= ~PCPU_GET(cpumask);
985	PCPU_SET(curpmap, NULL);
986}
987
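/*
 * Set or clear the wired attribute on the mapping for the given virtual
 * address, keeping the pmap's wired page count in sync.
 */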
988void
989moea_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
990{
991	struct	pvo_entry *pvo;
992
993	PMAP_LOCK(pm);
994	pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
995
996	if (pvo != NULL) {
997		if (wired) {
998			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
999				pm->pm_stats.wired_count++;
1000			pvo->pvo_vaddr |= PVO_WIRED;
1001		} else {
1002			if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
1003				pm->pm_stats.wired_count--;
1004			pvo->pvo_vaddr &= ~PVO_WIRED;
1005		}
1006	}
1007	PMAP_UNLOCK(pm);
1008}
1009
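/*
 * Copy the contents of one physical page to another through the 1:1
 * mapping of physical memory.
 */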
1010void
1011moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
1012{
1013	vm_offset_t	dst;
1014	vm_offset_t	src;
1015
1016	dst = VM_PAGE_TO_PHYS(mdst);
1017	src = VM_PAGE_TO_PHYS(msrc);
1018
1019	kcopy((void *)src, (void *)dst, PAGE_SIZE);
1020}
1021
1022/*
1023 * Zero a page of physical memory via the 1:1 direct mapping.
1024 */
1025void
1026moea_zero_page(mmu_t mmu, vm_page_t m)
1027{
1028	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
1029	void *va = (void *)pa;
1030
1031	bzero(va, PAGE_SIZE);
1032}
1033
1034void
1035moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
1036{
1037	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
1038	void *va = (void *)(pa + off);
1039
1040	bzero(va, size);
1041}
1042
1043void
1044moea_zero_page_idle(mmu_t mmu, vm_page_t m)
1045{
1046	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
1047	void *va = (void *)pa;
1048
1049	bzero(va, PAGE_SIZE);
1050}
1051
1052/*
1053 * Map the given physical page at the specified virtual address in the
1054 * target pmap with the protection requested.  If specified the page
1055 * will be wired down.
1056 */
1057void
1058moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
1059	   boolean_t wired)
1060{
1061
1062	vm_page_lock_queues();
1063	PMAP_LOCK(pmap);
1064	moea_enter_locked(pmap, va, m, prot, wired);
1065	vm_page_unlock_queues();
1066	PMAP_UNLOCK(pmap);
1067}
1068
1069/*
1070 * Map the given physical page at the specified virtual address in the
1071 * target pmap with the protection requested.  If specified the page
1072 * will be wired down.
1073 *
1074 * The page queues and pmap must be locked.
1075 */
1076static void
1077moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
1078    boolean_t wired)
1079{
1080	struct		pvo_head *pvo_head;
1081	uma_zone_t	zone;
1082	vm_page_t	pg;
1083	u_int		pte_lo, pvo_flags, was_exec, i;
1084	int		error;
1085
1086	if (!moea_initialized) {
1087		pvo_head = &moea_pvo_kunmanaged;
1088		zone = moea_upvo_zone;
1089		pvo_flags = 0;
1090		pg = NULL;
1091		was_exec = PTE_EXEC;
1092	} else {
1093		pvo_head = vm_page_to_pvoh(m);
1094		pg = m;
1095		zone = moea_mpvo_zone;
1096		pvo_flags = PVO_MANAGED;
1097		was_exec = 0;
1098	}
1099	if (pmap_bootstrapped)
1100		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1101	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1102
1103	/* XXX change the pvo head for fake pages */
1104	if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS) {
1105		pvo_flags &= ~PVO_MANAGED;
1106		pvo_head = &moea_pvo_kunmanaged;
1107		zone = moea_upvo_zone;
1108	}
1109
1110	/*
1111	 * If this is a managed page, and it's the first reference to the page,
1112	 * clear the cached PTE_EXEC attribute.  Otherwise fetch the cached attribute.
1113	 */
1114	if ((pg != NULL) && ((m->flags & PG_FICTITIOUS) == 0)) {
1115		if (LIST_EMPTY(pvo_head)) {
1116			moea_attr_clear(pg, PTE_EXEC);
1117		} else {
1118			was_exec = moea_attr_fetch(pg) & PTE_EXEC;
1119		}
1120	}
1121
1122	/*
1123	 * Assume the page is cache inhibited and access is guarded unless
1124	 * it's in our available memory array.
1125	 */
1126	pte_lo = PTE_I | PTE_G;
1127	for (i = 0; i < pregions_sz; i++) {
1128		if ((VM_PAGE_TO_PHYS(m) >= pregions[i].mr_start) &&
1129		    (VM_PAGE_TO_PHYS(m) <
1130			(pregions[i].mr_start + pregions[i].mr_size))) {
1131			pte_lo = PTE_M;
1132			break;
1133		}
1134	}
1135
1136	if (prot & VM_PROT_WRITE) {
1137		pte_lo |= PTE_BW;
1138		if (pmap_bootstrapped)
1139			vm_page_flag_set(m, PG_WRITEABLE);
1140	} else
1141		pte_lo |= PTE_BR;
1142
1143	if (prot & VM_PROT_EXECUTE)
1144		pvo_flags |= PVO_EXECUTABLE;
1145
1146	if (wired)
1147		pvo_flags |= PVO_WIRED;
1148
1149	if ((m->flags & PG_FICTITIOUS) != 0)
1150		pvo_flags |= PVO_FAKE;
1151
1152	error = moea_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
1153	    pte_lo, pvo_flags);
1154
1155	/*
1156	 * Flush the real page from the instruction cache if this page is
1157	 * mapped executable and cacheable and was not previously mapped (or
1158	 * was not mapped executable).
1159	 */
1160	if (error == 0 && (pvo_flags & PVO_EXECUTABLE) &&
1161	    (pte_lo & PTE_I) == 0 && was_exec == 0) {
1162		/*
1163		 * Flush the real memory from the cache.
1164		 */
1165		moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
1166		if (pg != NULL)
1167			moea_attr_save(pg, PTE_EXEC);
1168	}
1169
1170	/* XXX syncicache always until problems are sorted */
1171	moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
1172}
1173
1174/*
1175 * Maps a sequence of resident pages belonging to the same object.
1176 * The sequence begins with the given page m_start.  This page is
1177 * mapped at the given virtual address start.  Each subsequent page is
1178 * mapped at a virtual address that is offset from start by the same
1179 * amount as the page is offset from m_start within the object.  The
1180 * last page in the sequence is the page with the largest offset from
1181 * m_start that can be mapped at a virtual address less than the given
1182 * virtual address end.  Not every virtual page between start and end
1183 * is mapped; only those for which a resident page exists with the
1184 * corresponding offset from m_start are mapped.
1185 */
1186void
1187moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
1188    vm_page_t m_start, vm_prot_t prot)
1189{
1190	vm_page_t m;
1191	vm_pindex_t diff, psize;
1192
1193	psize = atop(end - start);
1194	m = m_start;
1195	PMAP_LOCK(pm);
1196	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1197		moea_enter_locked(pm, start + ptoa(diff), m, prot &
1198		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1199		m = TAILQ_NEXT(m, listq);
1200	}
1201	PMAP_UNLOCK(pm);
1202}
1203
1204void
1205moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
1206    vm_prot_t prot)
1207{
1208
1209	PMAP_LOCK(pm);
1210	moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
1211	    FALSE);
1212	PMAP_UNLOCK(pm);
1213
1214}
1215
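/*
 * Return the physical address that the given virtual address maps to in the
 * specified pmap, or 0 if there is no mapping.
 */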
1216vm_paddr_t
1217moea_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
1218{
1219	struct	pvo_entry *pvo;
1220	vm_paddr_t pa;
1221
1222	PMAP_LOCK(pm);
1223	pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
1224	if (pvo == NULL)
1225		pa = 0;
1226	else
1227		pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
1228	PMAP_UNLOCK(pm);
1229	return (pa);
1230}
1231
1232/*
1233 * Atomically extract and hold the physical page with the given
1234 * pmap and virtual address pair if that mapping permits the given
1235 * protection.
1236 */
1237vm_page_t
1238moea_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1239{
1240	struct	pvo_entry *pvo;
1241	vm_page_t m;
1242
1243	m = NULL;
1244	vm_page_lock_queues();
1245	PMAP_LOCK(pmap);
1246	pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
1247	if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID) &&
1248	    ((pvo->pvo_pte.pte.pte_lo & PTE_PP) == PTE_RW ||
1249	     (prot & VM_PROT_WRITE) == 0)) {
1250		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN);
1251		vm_page_hold(m);
1252	}
1253	vm_page_unlock_queues();
1254	PMAP_UNLOCK(pmap);
1255	return (m);
1256}
1257
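/*
 * Finish pmap module initialization: create the UMA zones used for PVO
 * entries of unmanaged and managed pages.
 */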
1258void
1259moea_init(mmu_t mmu)
1260{
1261
1262	moea_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
1263	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1264	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
1265	moea_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
1266	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1267	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
1268	moea_initialized = TRUE;
1269}
1270
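/*
 * Return whether the page has been modified, as indicated by the CHG bit in
 * any of its mappings.
 */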
1271boolean_t
1272moea_is_modified(mmu_t mmu, vm_page_t m)
1273{
1274
1275	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1276		return (FALSE);
1277
1278	return (moea_query_bit(m, PTE_CHG));
1279}
1280
1281void
1282moea_clear_reference(mmu_t mmu, vm_page_t m)
1283{
1284
1285	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1286		return;
1287	moea_clear_bit(m, PTE_REF, NULL);
1288}
1289
1290void
1291moea_clear_modify(mmu_t mmu, vm_page_t m)
1292{
1293
1294	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1295		return;
1296	moea_clear_bit(m, PTE_CHG, NULL);
1297}
1298
1299/*
1300 * Clear the write and modified bits in each of the given page's mappings.
1301 */
1302void
1303moea_remove_write(mmu_t mmu, vm_page_t m)
1304{
1305	struct	pvo_entry *pvo;
1306	struct	pte *pt;
1307	pmap_t	pmap;
1308	u_int	lo;
1309
1310	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1311	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
1312	    (m->flags & PG_WRITEABLE) == 0)
1313		return;
1314	lo = moea_attr_fetch(m);
1315	powerpc_sync();
1316	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1317		pmap = pvo->pvo_pmap;
1318		PMAP_LOCK(pmap);
1319		if ((pvo->pvo_pte.pte.pte_lo & PTE_PP) != PTE_BR) {
1320			pt = moea_pvo_to_pte(pvo, -1);
1321			pvo->pvo_pte.pte.pte_lo &= ~PTE_PP;
1322			pvo->pvo_pte.pte.pte_lo |= PTE_BR;
1323			if (pt != NULL) {
1324				moea_pte_synch(pt, &pvo->pvo_pte.pte);
1325				lo |= pvo->pvo_pte.pte.pte_lo;
1326				pvo->pvo_pte.pte.pte_lo &= ~PTE_CHG;
1327				moea_pte_change(pt, &pvo->pvo_pte.pte,
1328				    pvo->pvo_vaddr);
1329				mtx_unlock(&moea_table_mutex);
1330			}
1331		}
1332		PMAP_UNLOCK(pmap);
1333	}
1334	if ((lo & PTE_CHG) != 0) {
1335		moea_attr_clear(m, PTE_CHG);
1336		vm_page_dirty(m);
1337	}
1338	vm_page_flag_clear(m, PG_WRITEABLE);
1339}
1340
1341/*
1342 *	moea_ts_referenced:
1343 *
1344 *	Return a count of reference bits for a page, clearing those bits.
1345 *	It is not necessary for every reference bit to be cleared, but it
1346 *	is necessary that 0 only be returned when there are truly no
1347 *	reference bits set.
1348 *
1349 *	XXX: The exact number of bits to check and clear is a matter that
1350 *	should be tested and standardized at some point in the future for
1351 *	optimal aging of shared pages.
1352 */
1353boolean_t
1354moea_ts_referenced(mmu_t mmu, vm_page_t m)
1355{
1356	int count;
1357
1358	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1359		return (0);
1360
1361	count = moea_clear_bit(m, PTE_REF, NULL);
1362
1363	return (count);
1364}
1365
1366/*
1367 * Map a wired page into kernel virtual address space.
1368 */
1369void
1370moea_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
1371{
1372	u_int		pte_lo;
1373	int		error;
1374	int		i;
1375
1376#if 0
1377	if (va < VM_MIN_KERNEL_ADDRESS)
1378		panic("moea_kenter: attempt to enter non-kernel address %#x",
1379		    va);
1380#endif
1381
1382	pte_lo = PTE_I | PTE_G;
1383	for (i = 0; i < pregions_sz; i++) {
1384		if ((pa >= pregions[i].mr_start) &&
1385		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
1386			pte_lo = PTE_M;
1387			break;
1388		}
1389	}
1390
1391	PMAP_LOCK(kernel_pmap);
1392	error = moea_pvo_enter(kernel_pmap, moea_upvo_zone,
1393	    &moea_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);
1394
1395	if (error != 0 && error != ENOENT)
1396		panic("moea_kenter: failed to enter va %#x pa %#x: %d", va,
1397		    pa, error);
1398
1399	/*
1400	 * Flush the real memory from the instruction cache.
1401	 */
1402	if ((pte_lo & (PTE_I | PTE_G)) == 0) {
1403		moea_syncicache(pa, PAGE_SIZE);
1404	}
1405	PMAP_UNLOCK(kernel_pmap);
1406}
1407
1408/*
1409 * Extract the physical page address associated with the given kernel virtual
1410 * address.
1411 */
1412vm_offset_t
1413moea_kextract(mmu_t mmu, vm_offset_t va)
1414{
1415	struct		pvo_entry *pvo;
1416	vm_paddr_t pa;
1417
1418	/*
1419	 * Allow direct mappings on 32-bit OEA
1420	 */
1421	if (va < VM_MIN_KERNEL_ADDRESS) {
1422		return (va);
1423	}
1424
1425	PMAP_LOCK(kernel_pmap);
1426	pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
1427	KASSERT(pvo != NULL, ("moea_kextract: no addr found"));
1428	pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
1429	PMAP_UNLOCK(kernel_pmap);
1430	return (pa);
1431}
1432
1433/*
1434 * Remove a wired page from kernel virtual address space.
1435 */
1436void
1437moea_kremove(mmu_t mmu, vm_offset_t va)
1438{
1439
1440	moea_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
1441}
1442
1443/*
1444 * Map a range of physical addresses into kernel virtual address space.
1445 *
1446 * The value passed in *virt is a suggested virtual address for the mapping.
1447 * Architectures which can support a direct-mapped physical to virtual region
1448 * can return the appropriate address within that region, leaving '*virt'
1449 * unchanged.  We cannot and therefore do not; *virt is updated with the
1450 * first usable address after the mapped region.
1451 */
1452vm_offset_t
1453moea_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
1454    vm_offset_t pa_end, int prot)
1455{
1456	vm_offset_t	sva, va;
1457
1458	sva = *virt;
1459	va = sva;
1460	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
1461		moea_kenter(mmu, va, pa_start);
1462	*virt = va;
1463	return (sva);
1464}
1465
1466/*
1467 * Returns true if the pmap's pv is one of the first
1468 * 16 pvs linked to from this page.  This count may
1469 * be changed upwards or downwards in the future; it
1470 * is only necessary that true be returned for a small
1471 * subset of pmaps for proper page aging.
1472 */
1473boolean_t
1474moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
1475{
1476        int loops;
1477	struct pvo_entry *pvo;
1478
1479        if (!moea_initialized || (m->flags & PG_FICTITIOUS))
1480                return FALSE;
1481
1482	loops = 0;
1483	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1484		if (pvo->pvo_pmap == pmap)
1485			return (TRUE);
1486		if (++loops >= 16)
1487			break;
1488	}
1489
1490	return (FALSE);
1491}
1492
1493/*
1494 * Return the number of managed mappings to the given physical page
1495 * that are wired.
1496 */
1497int
1498moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
1499{
1500	struct pvo_entry *pvo;
1501	int count;
1502
1503	count = 0;
1504	if (!moea_initialized || (m->flags & PG_FICTITIOUS) != 0)
1505		return (count);
1506	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1507	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
1508		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
1509			count++;
1510	return (count);
1511}
1512
1513static u_int	moea_vsidcontext;
1514
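/*
 * Initialize a pmap: allocate a set of segment register values (VSIDs) for
 * it, using the timebase register as an entropy source to spread the page
 * table hash.
 */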
1515void
1516moea_pinit(mmu_t mmu, pmap_t pmap)
1517{
1518	int	i, mask;
1519	u_int	entropy;
1520
1521	KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("moea_pinit: virt pmap"));
1522	PMAP_LOCK_INIT(pmap);
1523
1524	entropy = 0;
1525	__asm __volatile("mftb %0" : "=r"(entropy));
1526
1527	if ((pmap->pmap_phys = (pmap_t)moea_kextract(mmu, (vm_offset_t)pmap))
1528	    == NULL) {
1529		pmap->pmap_phys = pmap;
1530	}
1531
1532
1533	/*
1534	 * Allocate some segment registers for this pmap.
1535	 */
1536	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
1537		u_int	hash, n;
1538
1539		/*
1540		 * Create a new value by multiplying by a prime and adding in
1541		 * entropy from the timebase register.  This is to make the
1542		 * VSID more random so that the PT hash function collides
1543		 * less often.  (Note that the prime causes gcc to do shifts
1544		 * instead of a multiply.)
1545		 */
1546		moea_vsidcontext = (moea_vsidcontext * 0x1105) + entropy;
1547		hash = moea_vsidcontext & (NPMAPS - 1);
1548		if (hash == 0)		/* 0 is special, avoid it */
1549			continue;
1550		n = hash >> 5;
1551		mask = 1 << (hash & (VSID_NBPW - 1));
1552		hash = (moea_vsidcontext & 0xfffff);
1553		if (moea_vsid_bitmap[n] & mask) {	/* collision? */
1554			/* anything free in this bucket? */
1555			if (moea_vsid_bitmap[n] == 0xffffffff) {
1556				entropy = (moea_vsidcontext >> 20);
1557				continue;
1558			}
1559			i = ffs(~moea_vsid_bitmap[n]) - 1;
1560			mask = 1 << i;
1561			hash &= 0xfffff & ~(VSID_NBPW - 1);
1562			hash |= i;
1563		}
1564		moea_vsid_bitmap[n] |= mask;
1565		for (i = 0; i < 16; i++)
1566			pmap->pm_sr[i] = VSID_MAKE(i, hash);
1567		return;
1568	}
1569
1570	panic("moea_pinit: out of segments");
1571}
1572
1573/*
1574 * Initialize the pmap associated with process 0.
1575 */
1576void
1577moea_pinit0(mmu_t mmu, pmap_t pm)
1578{
1579
1580	moea_pinit(mmu, pm);
1581	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
1582}
1583
1584/*
1585 * Set the physical protection on the specified range of this map as requested.
1586 */
1587void
1588moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
1589    vm_prot_t prot)
1590{
1591	struct	pvo_entry *pvo;
1592	struct	pte *pt;
1593	int	pteidx;
1594
1595	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1596	    ("moea_protect: non current pmap"));
1597
1598	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1599		moea_remove(mmu, pm, sva, eva);
1600		return;
1601	}
1602
1603	vm_page_lock_queues();
1604	PMAP_LOCK(pm);
1605	for (; sva < eva; sva += PAGE_SIZE) {
1606		pvo = moea_pvo_find_va(pm, sva, &pteidx);
1607		if (pvo == NULL)
1608			continue;
1609
1610		if ((prot & VM_PROT_EXECUTE) == 0)
1611			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
1612
1613		/*
1614		 * Grab the PTE pointer before we diddle with the cached PTE
1615		 * copy.
1616		 */
1617		pt = moea_pvo_to_pte(pvo, pteidx);
1618		/*
1619		 * Change the protection of the page.
1620		 */
1621		pvo->pvo_pte.pte.pte_lo &= ~PTE_PP;
1622		pvo->pvo_pte.pte.pte_lo |= PTE_BR;
1623
1624		/*
1625		 * If the PVO is in the page table, update that pte as well.
1626		 */
1627		if (pt != NULL) {
1628			moea_pte_change(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr);
1629			mtx_unlock(&moea_table_mutex);
1630		}
1631	}
1632	vm_page_unlock_queues();
1633	PMAP_UNLOCK(pm);
1634}
1635
1636/*
1637 * Map a list of wired pages into kernel virtual address space.  This is
1638 * intended for temporary mappings which do not need page modification or
1639 * references recorded.  Existing mappings in the region are overwritten.
1640 */
1641void
1642moea_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
1643{
1644	vm_offset_t va;
1645
1646	va = sva;
1647	while (count-- > 0) {
1648		moea_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
1649		va += PAGE_SIZE;
1650		m++;
1651	}
1652}
1653
1654/*
1655 * Remove page mappings from kernel virtual address space.  Intended for
1656 * temporary mappings entered by moea_qenter.
1657 */
1658void
1659moea_qremove(mmu_t mmu, vm_offset_t sva, int count)
1660{
1661	vm_offset_t va;
1662
1663	va = sva;
1664	while (count-- > 0) {
1665		moea_kremove(mmu, va);
1666		va += PAGE_SIZE;
1667	}
1668}
1669
1670void
1671moea_release(mmu_t mmu, pmap_t pmap)
1672{
1673        int idx, mask;
1674
1675	/*
1676	 * Free the VSID allocated to this pmap's segment registers.
1677	 */
1678        if (pmap->pm_sr[0] == 0)
1679                panic("moea_release");
1680
1681        idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
1682        mask = 1 << (idx % VSID_NBPW);
1683        idx /= VSID_NBPW;
1684        moea_vsid_bitmap[idx] &= ~mask;
1685	PMAP_LOCK_DESTROY(pmap);
1686}
1687
1688/*
1689 * Remove the given range of addresses from the specified map.
1690 */
1691void
1692moea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
1693{
1694	struct	pvo_entry *pvo;
1695	int	pteidx;
1696
1697	vm_page_lock_queues();
1698	PMAP_LOCK(pm);
1699	for (; sva < eva; sva += PAGE_SIZE) {
1700		pvo = moea_pvo_find_va(pm, sva, &pteidx);
1701		if (pvo != NULL) {
1702			moea_pvo_remove(pvo, pteidx);
1703		}
1704	}
1705	PMAP_UNLOCK(pm);
1706	vm_page_unlock_queues();
1707}
1708
1709/*
1710 * Remove physical page from all pmaps in which it resides. moea_pvo_remove()
1711 * will reflect changes in pte's back to the vm_page.
1712 */
1713void
1714moea_remove_all(mmu_t mmu, vm_page_t m)
1715{
1716	struct  pvo_head *pvo_head;
1717	struct	pvo_entry *pvo, *next_pvo;
1718	pmap_t	pmap;
1719
1720	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1721
1722	pvo_head = vm_page_to_pvoh(m);
1723	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
1724		next_pvo = LIST_NEXT(pvo, pvo_vlink);
1725
1726		MOEA_PVO_CHECK(pvo);	/* sanity check */
1727		pmap = pvo->pvo_pmap;
1728		PMAP_LOCK(pmap);
1729		moea_pvo_remove(pvo, -1);
1730		PMAP_UNLOCK(pmap);
1731	}
1732	vm_page_flag_clear(m, PG_WRITEABLE);
1733}
1734
1735/*
1736 * Allocate a physical page of memory directly from the phys_avail map.
1737 * Can only be called from moea_bootstrap before avail start and end are
1738 * calculated.
1739 */
1740static vm_offset_t
1741moea_bootstrap_alloc(vm_size_t size, u_int align)
1742{
1743	vm_offset_t	s, e;
1744	int		i, j;
1745
1746	size = round_page(size);
1747	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
1748		if (align != 0)
1749			s = (phys_avail[i] + align - 1) & ~(align - 1);
1750		else
1751			s = phys_avail[i];
1752		e = s + size;
1753
1754		if (s < phys_avail[i] || e > phys_avail[i + 1])
1755			continue;
1756
1757		if (s == phys_avail[i]) {
1758			phys_avail[i] += size;
1759		} else if (e == phys_avail[i + 1]) {
1760			phys_avail[i + 1] -= size;
1761		} else {
1762			for (j = phys_avail_count * 2; j > i; j -= 2) {
1763				phys_avail[j] = phys_avail[j - 2];
1764				phys_avail[j + 1] = phys_avail[j - 1];
1765			}
1766
1767			phys_avail[i + 3] = phys_avail[i + 1];
1768			phys_avail[i + 1] = s;
1769			phys_avail[i + 2] = e;
1770			phys_avail_count++;
1771		}
1772
1773		return (s);
1774	}
1775	panic("moea_bootstrap_alloc: could not allocate memory");
1776}
1777
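/*
 * Synchronize the instruction cache with memory for a range of physical
 * addresses, reached through the 1:1 mapping.
 */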
1778static void
1779moea_syncicache(vm_offset_t pa, vm_size_t len)
1780{
1781	__syncicache((void *)pa, len);
1782}
1783
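/*
 * Create a PVO for the given mapping and attempt to insert its PTE into the
 * page table.  Returns ENOENT if this is the first mapping on the page's
 * list (callers use this to decide whether to synchronize the instruction
 * cache), 0 otherwise, and ENOMEM if no PVO could be allocated.
 */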
1784static int
1785moea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
1786    vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags)
1787{
1788	struct	pvo_entry *pvo;
1789	u_int	sr;
1790	int	first;
1791	u_int	ptegidx;
1792	int	i;
1793	int     bootstrap;
1794
1795	moea_pvo_enter_calls++;
1796	first = 0;
1797	bootstrap = 0;
1798
1799	/*
1800	 * Compute the PTE Group index.
1801	 */
1802	va &= ~ADDR_POFF;
1803	sr = va_to_sr(pm->pm_sr, va);
1804	ptegidx = va_to_pteg(sr, va);
1805
1806	/*
1807	 * Remove any existing mapping for this page.  Reuse the pvo entry if
1808	 * there is a mapping.
1809	 */
1810	mtx_lock(&moea_table_mutex);
1811	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
1812		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1813			if ((pvo->pvo_pte.pte.pte_lo & PTE_RPGN) == pa &&
1814			    (pvo->pvo_pte.pte.pte_lo & PTE_PP) ==
1815			    (pte_lo & PTE_PP)) {
1816				mtx_unlock(&moea_table_mutex);
1817				return (0);
1818			}
1819			moea_pvo_remove(pvo, -1);
1820			break;
1821		}
1822	}
1823
1824	/*
1825	 * If we aren't overwriting a mapping, try to allocate.
1826	 */
1827	if (moea_initialized) {
1828		pvo = uma_zalloc(zone, M_NOWAIT);
1829	} else {
1830		if (moea_bpvo_pool_index >= BPVO_POOL_SIZE) {
1831			panic("moea_pvo_enter: bpvo pool exhausted, %d, %d, %d",
1832			      moea_bpvo_pool_index, BPVO_POOL_SIZE,
1833			      BPVO_POOL_SIZE * sizeof(struct pvo_entry));
1834		}
1835		pvo = &moea_bpvo_pool[moea_bpvo_pool_index];
1836		moea_bpvo_pool_index++;
1837		bootstrap = 1;
1838	}
1839
1840	if (pvo == NULL) {
1841		mtx_unlock(&moea_table_mutex);
1842		return (ENOMEM);
1843	}
1844
1845	moea_pvo_entries++;
1846	pvo->pvo_vaddr = va;
1847	pvo->pvo_pmap = pm;
1848	LIST_INSERT_HEAD(&moea_pvo_table[ptegidx], pvo, pvo_olink);
1849	pvo->pvo_vaddr &= ~ADDR_POFF;
1850	if (flags & VM_PROT_EXECUTE)
1851		pvo->pvo_vaddr |= PVO_EXECUTABLE;
1852	if (flags & PVO_WIRED)
1853		pvo->pvo_vaddr |= PVO_WIRED;
1854	if (pvo_head != &moea_pvo_kunmanaged)
1855		pvo->pvo_vaddr |= PVO_MANAGED;
1856	if (bootstrap)
1857		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
1858	if (flags & PVO_FAKE)
1859		pvo->pvo_vaddr |= PVO_FAKE;
1860
1861	moea_pte_create(&pvo->pvo_pte.pte, sr, va, pa | pte_lo);
1862
1863	/*
1864	 * Remember if the list was empty and therefore will be the first
1865	 * item.
1866	 */
1867	if (LIST_FIRST(pvo_head) == NULL)
1868		first = 1;
1869	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
1870
1871	if (pvo->pvo_vaddr & PVO_WIRED)
1872		pm->pm_stats.wired_count++;
1873	pm->pm_stats.resident_count++;
1874
1875	/*
1876	 * We hope this succeeds but it isn't required.
1877	 */
1878	i = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte);
1879	if (i >= 0) {
1880		PVO_PTEGIDX_SET(pvo, i);
1881	} else {
1882		/* Rely on moea_pte_spill() to recover this mapping on fault. */
1883		moea_pte_overflow++;
1884	}
1885	mtx_unlock(&moea_table_mutex);
1886
1887	return (first ? ENOENT : 0);
1888}
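/*
 * Minimal caller sketch (guarded out): ENOENT from moea_pvo_enter() only
 * reports that this was the first mapping of the page (see 'first' above);
 * it is not a failure.  Only other non-zero returns (ENOMEM) need handling.
 * The virtual and physical addresses and the pte_lo bits here are
 * hypothetical.
 */
#if 0
	int error;

	error = moea_pvo_enter(kernel_pmap, moea_upvo_zone,
	    &moea_pvo_kunmanaged, va, pa, PTE_M, PVO_WIRED);
	if (error != 0 && error != ENOENT)
		panic("example: moea_pvo_enter failed: %d", error);
#endif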
1889
1890static void
1891moea_pvo_remove(struct pvo_entry *pvo, int pteidx)
1892{
1893	struct	pte *pt;
1894
1895	/*
1896	 * If there is an active pte entry, we need to deactivate it (and
1897	 * save the ref & cfg bits).
1898	 * save the ref & chg bits).
1899	pt = moea_pvo_to_pte(pvo, pteidx);
1900	if (pt != NULL) {
1901		moea_pte_unset(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr);
1902		mtx_unlock(&moea_table_mutex);
1903		PVO_PTEGIDX_CLR(pvo);
1904	} else {
1905		moea_pte_overflow--;
1906	}
1907
1908	/*
1909	 * Update our statistics.
1910	 */
1911	pvo->pvo_pmap->pm_stats.resident_count--;
1912	if (pvo->pvo_vaddr & PVO_WIRED)
1913		pvo->pvo_pmap->pm_stats.wired_count--;
1914
1915	/*
1916	 * Save the REF/CHG bits into their cache if the page is managed.
1917	 */
1918	if ((pvo->pvo_vaddr & (PVO_MANAGED|PVO_FAKE)) == PVO_MANAGED) {
1919		struct	vm_page *pg;
1920
1921		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN);
1922		if (pg != NULL) {
1923			moea_attr_save(pg, pvo->pvo_pte.pte.pte_lo &
1924			    (PTE_REF | PTE_CHG));
1925		}
1926	}
1927
1928	/*
1929	 * Remove this PVO from the PV list.
1930	 */
1931	LIST_REMOVE(pvo, pvo_vlink);
1932
1933	/*
1934	 * Remove this from the overflow list and return it to the pool
1935	 * if we aren't going to reuse it.
1936	 */
1937	LIST_REMOVE(pvo, pvo_olink);
1938	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
1939		uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? moea_mpvo_zone :
1940		    moea_upvo_zone, pvo);
1941	moea_pvo_entries--;
1942	moea_pvo_remove_calls++;
1943}
1944
1945static __inline int
1946moea_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
1947{
1948	int	pteidx;
1949
1950	/*
1951	 * We can find the actual pte entry without searching by grabbing
1952	 * the PTEG index from 3 unused bits in pte_lo[11:9] and by
1953	 * noticing the HID bit.
1954	 */
1955	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
1956	if (pvo->pvo_pte.pte.pte_hi & PTE_HID)
1957		pteidx ^= moea_pteg_mask * 8;
1958
1959	return (pteidx);
1960}
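/*
 * Worked example for the arithmetic above, using a hypothetical geometry of
 * 1024 PTEGs (moea_pteg_mask == 0x3ff) and 8 PTEs per group.
 */
#if 0
	/* PVO hashed to primary group 0x123, stored in slot 5: */
	KASSERT(0x123 * 8 + 5 == 0x91d, ("primary index"));
	/*
	 * With PTE_HID set the entry really lives in the secondary group
	 * 0x123 ^ 0x3ff == 0x2dc; the xor above recovers that group while
	 * leaving the slot (the low three bits) alone:
	 */
	KASSERT((0x91d ^ (0x3ff * 8)) == 0x2dc * 8 + 5, ("secondary index"));
#endif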
1961
1962static struct pvo_entry *
1963moea_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
1964{
1965	struct	pvo_entry *pvo;
1966	int	ptegidx;
1967	u_int	sr;
1968
1969	va &= ~ADDR_POFF;
1970	sr = va_to_sr(pm->pm_sr, va);
1971	ptegidx = va_to_pteg(sr, va);
1972
1973	mtx_lock(&moea_table_mutex);
1974	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
1975		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1976			if (pteidx_p)
1977				*pteidx_p = moea_pvo_pte_index(pvo, ptegidx);
1978			break;
1979		}
1980	}
1981	mtx_unlock(&moea_table_mutex);
1982
1983	return (pvo);
1984}
1985
1986static struct pte *
1987moea_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
1988{
1989	struct	pte *pt;
1990
1991	/*
1992	 * If we haven't been supplied the ptegidx, calculate it.
1993	 */
1994	if (pteidx == -1) {
1995		int	ptegidx;
1996		u_int	sr;
1997
1998		sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
1999		ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
2000		pteidx = moea_pvo_pte_index(pvo, ptegidx);
2001	}
2002
2003	pt = &moea_pteg_table[pteidx >> 3].pt[pteidx & 7];
2004	mtx_lock(&moea_table_mutex);
2005
2006	if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
2007		panic("moea_pvo_to_pte: pvo %p has valid pte in pvo but no "
2008		    "valid pte index", pvo);
2009	}
2010
2011	if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
2012		panic("moea_pvo_to_pte: pvo %p has valid pte index in pvo "
2013		    "but no valid pte", pvo);
2014	}
2015
2016	if ((pt->pte_hi ^ (pvo->pvo_pte.pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
2017		if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0) {
2018			panic("moea_pvo_to_pte: pvo %p has valid pte in "
2019			    "moea_pteg_table %p but invalid in pvo", pvo, pt);
2020		}
2021
2022		if (((pt->pte_lo ^ pvo->pvo_pte.pte.pte_lo) & ~(PTE_CHG|PTE_REF))
2023		    != 0) {
2024			panic("moea_pvo_to_pte: pvo %p pte does not match "
2025			    "pte %p in moea_pteg_table", pvo, pt);
2026		}
2027
2028		mtx_assert(&moea_table_mutex, MA_OWNED);
2029		return (pt);
2030	}
2031
2032	if (pvo->pvo_pte.pte.pte_hi & PTE_VALID) {
2033		panic("moea_pvo_to_pte: pvo %p has invalid pte %p in "
2034		    "moea_pteg_table but valid in pvo", pvo, pt);
2035	}
2036
2037	mtx_unlock(&moea_table_mutex);
2038	return (NULL);
2039}
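/*
 * Locking sketch (guarded out): a non-NULL return from moea_pvo_to_pte()
 * leaves moea_table_mutex held, so the caller must drop it once it is done
 * with the PTE, as moea_query_bit() and moea_clear_bit() below do.
 */
#if 0
	pt = moea_pvo_to_pte(pvo, -1);
	if (pt != NULL) {
		moea_pte_synch(pt, &pvo->pvo_pte.pte);
		mtx_unlock(&moea_table_mutex);
	}
#endif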
2040
2041/*
2042 * XXX: THIS STUFF SHOULD BE IN pte.c?
2043 */
2044int
2045moea_pte_spill(vm_offset_t addr)
2046{
2047	struct	pvo_entry *source_pvo, *victim_pvo;
2048	struct	pvo_entry *pvo;
2049	int	ptegidx, i, j;
2050	u_int	sr;
2051	struct	pteg *pteg;
2052	struct	pte *pt;
2053
2054	moea_pte_spills++;
2055
2056	sr = mfsrin(addr);
2057	ptegidx = va_to_pteg(sr, addr);
2058
2059	/*
2060	 * Have to substitute some entry.  Use the primary hash for this.
2061	 * Use low bits of timebase as random generator.
2062	 */
2063	pteg = &moea_pteg_table[ptegidx];
2064	mtx_lock(&moea_table_mutex);
2065	__asm __volatile("mftb %0" : "=r"(i));
2066	i &= 7;
2067	pt = &pteg->pt[i];
2068
2069	source_pvo = NULL;
2070	victim_pvo = NULL;
2071	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
2072		/*
2073		 * We need to find a pvo entry for this address.
2074		 */
2075		MOEA_PVO_CHECK(pvo);
2076		if (source_pvo == NULL &&
2077		    moea_pte_match(&pvo->pvo_pte.pte, sr, addr,
2078		    pvo->pvo_pte.pte.pte_hi & PTE_HID)) {
2079			/*
2080			 * We have found an entry to be spilled into the pteg.
2081			 * Its PTE is marked valid, so the mapping is active.
2082			 */
2083			j = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte);
2084
2085			if (j >= 0) {
2086				PVO_PTEGIDX_SET(pvo, j);
2087				moea_pte_overflow--;
2088				MOEA_PVO_CHECK(pvo);
2089				mtx_unlock(&moea_table_mutex);
2090				return (1);
2091			}
2092
2093			source_pvo = pvo;
2094
2095			if (victim_pvo != NULL)
2096				break;
2097		}
2098
2099		/*
2100		 * We also need the pvo entry of the victim we are replacing
2101		 * so that we can save the R & C bits of the PTE.
2102		 */
2103		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
2104		    moea_pte_compare(pt, &pvo->pvo_pte.pte)) {
2105			victim_pvo = pvo;
2106			if (source_pvo != NULL)
2107				break;
2108		}
2109	}
2110
2111	if (source_pvo == NULL) {
2112		mtx_unlock(&moea_table_mutex);
2113		return (0);
2114	}
2115
2116	if (victim_pvo == NULL) {
2117		if ((pt->pte_hi & PTE_HID) == 0)
2118			panic("moea_pte_spill: victim p-pte (%p) has no pvo"
2119			    " entry", pt);
2120
2121		/*
2122		 * If this is a secondary PTE, we need to search its primary
2123		 * pvo bucket for the matching PVO.
2124		 */
2125		LIST_FOREACH(pvo, &moea_pvo_table[ptegidx ^ moea_pteg_mask],
2126		    pvo_olink) {
2127			MOEA_PVO_CHECK(pvo);
2128			/*
2129			 * We also need the pvo entry of the victim we are
2130			 * replacing so that we can save the R & C bits of the PTE.
2131			 */
2132			if (moea_pte_compare(pt, &pvo->pvo_pte.pte)) {
2133				victim_pvo = pvo;
2134				break;
2135			}
2136		}
2137
2138		if (victim_pvo == NULL)
2139			panic("moea_pte_spill: victim s-pte (%p) has no pvo"
2140			    " entry", pt);
2141	}
2142
2143	/*
2144	 * We are invalidating the TLB entry for the EA we are replacing even
2145	 * though it's valid.  If we don't, we lose any ref/chg bit changes
2146	 * contained in the TLB entry.
2147	 */
2148	source_pvo->pvo_pte.pte.pte_hi &= ~PTE_HID;
2149
2150	moea_pte_unset(pt, &victim_pvo->pvo_pte.pte, victim_pvo->pvo_vaddr);
2151	moea_pte_set(pt, &source_pvo->pvo_pte.pte);
2152
2153	PVO_PTEGIDX_CLR(victim_pvo);
2154	PVO_PTEGIDX_SET(source_pvo, i);
2155	moea_pte_replacements++;
2156
2157	MOEA_PVO_CHECK(victim_pvo);
2158	MOEA_PVO_CHECK(source_pvo);
2159
2160	mtx_unlock(&moea_table_mutex);
2161	return (1);
2162}
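/*
 * Hypothetical use sketch (guarded out): on a hashed-page-table miss the
 * machine-dependent fault code can first offer the faulting address to
 * moea_pte_spill(); a return of 1 means a PVO was re-inserted and the
 * faulting access can simply be retried, while 0 means there is no mapping
 * and the fault must go to the VM system.  This fragment is an
 * illustration, not the actual trap code.
 */
#if 0
	if (moea_pte_spill(eva))
		return;			/* retry the access */
	/* otherwise fall through to vm_fault() handling */
#endif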
2163
2164static int
2165moea_pte_insert(u_int ptegidx, struct pte *pvo_pt)
2166{
2167	struct	pte *pt;
2168	int	i;
2169
2170	mtx_assert(&moea_table_mutex, MA_OWNED);
2171
2172	/*
2173	 * First try primary hash.
2174	 */
2175	for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
2176		if ((pt->pte_hi & PTE_VALID) == 0) {
2177			pvo_pt->pte_hi &= ~PTE_HID;
2178			moea_pte_set(pt, pvo_pt);
2179			return (i);
2180		}
2181	}
2182
2183	/*
2184	 * Now try secondary hash.
2185	 */
2186	ptegidx ^= moea_pteg_mask;
2187
2188	for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
2189		if ((pt->pte_hi & PTE_VALID) == 0) {
2190			pvo_pt->pte_hi |= PTE_HID;
2191			moea_pte_set(pt, pvo_pt);
2192			return (i);
2193		}
2194	}
2195
2196	panic("moea_pte_insert: overflow");
2197	return (-1);
2198}
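/*
 * Sketch (guarded out): the PTE_HID bit set here is what ties an entry back
 * to its bucket later.  An entry placed via the secondary hash lives in
 * group (primary ptegidx ^ moea_pteg_mask), which is exactly the bucket
 * moea_pte_spill() searches when a victim PTE has PTE_HID set.
 */
#if 0
	/* 'primary_idx' is a hypothetical copy taken before the xor above. */
	if (pvo_pt->pte_hi & PTE_HID)
		KASSERT(ptegidx == (primary_idx ^ moea_pteg_mask),
		    ("entry placed in the secondary-hash group"));
#endif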
2199
2200static boolean_t
2201moea_query_bit(vm_page_t m, int ptebit)
2202{
2203	struct	pvo_entry *pvo;
2204	struct	pte *pt;
2205
2206#if 0
2207	if (moea_attr_fetch(m) & ptebit)
2208		return (TRUE);
2209#endif
2210
2211	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2212		MOEA_PVO_CHECK(pvo);	/* sanity check */
2213
2214		/*
2215		 * See if we saved the bit off.  If so, cache it and return
2216		 * success.
2217		 */
2218		if (pvo->pvo_pte.pte.pte_lo & ptebit) {
2219			moea_attr_save(m, ptebit);
2220			MOEA_PVO_CHECK(pvo);	/* sanity check */
2221			return (TRUE);
2222		}
2223	}
2224
2225	/*
2226	 * No luck, now go through the hard part of looking at the PTEs
2227	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
2228	 * the PTEs.
2229	 */
2230	powerpc_sync();
2231	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2232		MOEA_PVO_CHECK(pvo);	/* sanity check */
2233
2234		/*
2235		 * See if this pvo has a valid PTE.  if so, fetch the
2236		 * REF/CHG bits from the valid PTE.  If the appropriate
2237		 * ptebit is set, cache it and return success.
2238		 */
2239		pt = moea_pvo_to_pte(pvo, -1);
2240		if (pt != NULL) {
2241			moea_pte_synch(pt, &pvo->pvo_pte.pte);
2242			mtx_unlock(&moea_table_mutex);
2243			if (pvo->pvo_pte.pte.pte_lo & ptebit) {
2244				moea_attr_save(m, ptebit);
2245				MOEA_PVO_CHECK(pvo);	/* sanity check */
2246				return (TRUE);
2247			}
2248		}
2249	}
2250
2251	return (FALSE);
2252}
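/*
 * Usage sketch (guarded out): a REF/CHG query like this is what the
 * pmap-level "is the page modified/referenced?" operations are expected to
 * reduce to.  Illustration only; 'm' is a hypothetical vm_page_t.
 */
#if 0
	/* "has any mapping of page 'm' dirtied it?" */
	modified = moea_query_bit(m, PTE_CHG);
#endif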
2253
2254static u_int
2255moea_clear_bit(vm_page_t m, int ptebit, int *origbit)
2256{
2257	u_int	count;
2258	struct	pvo_entry *pvo;
2259	struct	pte *pt;
2260	int	rv;
2261
2262	/*
2263	 * Clear the cached value.
2264	 */
2265	rv = moea_attr_fetch(m);
2266	moea_attr_clear(m, ptebit);
2267
2268	/*
2269	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
2270	 * we can reset the right ones).  Note that since the pvo entries and
2271	 * list heads are accessed via BAT0 and are never placed in the page
2272	 * table, we don't have to worry about further accesses setting the
2273	 * REF/CHG bits.
2274	 */
2275	powerpc_sync();
2276
2277	/*
2278	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
2279	 * valid pte clear the ptebit from the valid pte.
2280	 */
2281	count = 0;
2282	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2283		MOEA_PVO_CHECK(pvo);	/* sanity check */
2284		pt = moea_pvo_to_pte(pvo, -1);
2285		if (pt != NULL) {
2286			moea_pte_synch(pt, &pvo->pvo_pte.pte);
2287			if (pvo->pvo_pte.pte.pte_lo & ptebit) {
2288				count++;
2289				moea_pte_clear(pt, PVO_VADDR(pvo), ptebit);
2290			}
2291			mtx_unlock(&moea_table_mutex);
2292		}
2293		rv |= pvo->pvo_pte.pte.pte_lo;
2294		pvo->pvo_pte.pte.pte_lo &= ~ptebit;
2295		MOEA_PVO_CHECK(pvo);	/* sanity check */
2296	}
2297
2298	if (origbit != NULL) {
2299		*origbit = rv;
2300	}
2301
2302	return (count);
2303}
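/*
 * Companion sketch (guarded out): clearing a bit while counting how many
 * PVOs had it set is the natural building block for a "test and clear
 * referenced" style operation.  Illustration only; 'm' is a hypothetical
 * vm_page_t.
 */
#if 0
	/* references accumulated since the last call, clearing PTE_REF */
	refs = moea_clear_bit(m, PTE_REF, NULL);
#endif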
2304
2305/*
2306 * Return 0 if the physical range is covered by battable[idx], else an errno.
2307 */
2308static int
2309moea_bat_mapped(int idx, vm_offset_t pa, vm_size_t size)
2310{
2311	u_int prot;
2312	u_int32_t start;
2313	u_int32_t end;
2314	u_int32_t bat_ble;
2315
2316	/*
2317	 * Return immediately if not a valid mapping
2318	 */
2319	if (!(battable[idx].batu & BAT_Vs))
2320		return (EINVAL);
2321
2322	/*
2323	 * The BAT entry must be cache-inhibited, guarded, and r/w
2324	 * so it can function as an i/o page
2325	 */
2326	prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW);
2327	if (prot != (BAT_I|BAT_G|BAT_PP_RW))
2328		return (EPERM);
2329
2330	/*
2331	 * The address should be within the BAT range. Assume that the
2332	 * start address in the BAT has the correct alignment (thus
2333	 * not requiring masking)
2334	 */
2335	start = battable[idx].batl & BAT_PBS;
2336	bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03;
2337	end = start | (bat_ble << 15) | 0x7fff;
2338
2339	if ((pa < start) || ((pa + size) > end))
2340		return (ERANGE);
2341
2342	return (0);
2343}
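/*
 * Worked example (guarded out) for the range arithmetic above, using a
 * hypothetical 256MB I/O BAT at physical 0xf0000000: the BL field in the
 * upper BAT register is all ones (0x7ff) for a 256MB block, so
 *	start = 0xf0000000
 *	end   = 0xf0000000 | (0x7ff << 17) | 0x1ffff = 0xffffffff
 * (bat_ble carries BL plus the 0x3 valid bits, hence the << 15 rather than
 * << 17 and the extra 0x7fff above.)
 */
#if 0
	KASSERT((0xf0000000 | (0x7ffu << 17) | 0x1ffff) == 0xffffffff,
	    ("256MB BAT covers exactly 0xf0000000-0xffffffff"));
#endif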
2344
2345boolean_t
2346moea_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2347{
2348	int i;
2349
2350	/*
2351	 * This currently does not work for entries that
2352	 * overlap 256M BAT segments.
2353	 */
2354
2355	for (i = 0; i < 16; i++)
2356		if (moea_bat_mapped(i, pa, size) == 0)
2357			return (0);
2358
2359	return (EFAULT);
2360}
2361
2362/*
2363 * Map a set of physical memory pages into the kernel virtual
2364 * address space. Return a pointer to where it is mapped. This
2365 * routine is intended to be used for mapping device memory,
2366 * NOT real memory.
2367 */
2368void *
2369moea_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2370{
2371	vm_offset_t va, tmpva, ppa, offset;
2372	int i;
2373
2374	ppa = trunc_page(pa);
2375	offset = pa & PAGE_MASK;
2376	size = roundup(offset + size, PAGE_SIZE);
2377
2378	GIANT_REQUIRED;
2379
2380	/*
2381	 * If the physical address lies within a valid BAT table entry,
2382	 * return the 1:1 mapping. This currently doesn't work
2383	 * for regions that overlap 256M BAT segments.
2384	 */
2385	for (i = 0; i < 16; i++) {
2386		if (moea_bat_mapped(i, pa, size) == 0)
2387			return ((void *) pa);
2388	}
2389
2390	va = kmem_alloc_nofault(kernel_map, size);
2391	if (!va)
2392		panic("moea_mapdev: Couldn't alloc kernel virtual memory");
2393
2394	for (tmpva = va; size > 0;) {
2395		moea_kenter(mmu, tmpva, ppa);
2396		tlbie(tmpva);
2397		size -= PAGE_SIZE;
2398		tmpva += PAGE_SIZE;
2399		ppa += PAGE_SIZE;
2400	}
2401
2402	return ((void *)(va + offset));
2403}
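/*
 * Usage sketch (guarded out): a typical consumer maps a device's register
 * window for the life of the attachment and unmaps it on detach.  The
 * physical address and size here are hypothetical, and real drivers
 * normally go through bus_space rather than calling these directly.
 */
#if 0
	void *regs;

	regs = pmap_mapdev(0x80000000, PAGE_SIZE);	/* may be a 1:1 BAT hit */
	/* ... access registers through 'regs' ... */
	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
#endif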
2404
2405void
2406moea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
2407{
2408	vm_offset_t base, offset;
2409
2410	/*
2411	 * If this is outside kernel virtual space, then it's a
2412	 * battable entry and doesn't require unmapping
2413	 */
2414	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
2415		base = trunc_page(va);
2416		offset = va & PAGE_MASK;
2417		size = roundup(offset + size, PAGE_SIZE);
2418		kmem_free(kernel_map, base, size);
2419	}
2420}
2421
2422static void
2423moea_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
2424{
2425	struct pvo_entry *pvo;
2426	vm_offset_t lim;
2427	vm_paddr_t pa;
2428	vm_size_t len;
2429
2430	PMAP_LOCK(pm);
2431	while (sz > 0) {
2432		lim = round_page(va + 1);	/* end of this page, even if va is aligned */
2433		len = MIN(lim - va, sz);
2434		pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
2435		if (pvo != NULL) {
2436			pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) |
2437			    (va & ADDR_POFF);
2438			moea_syncicache(pa, len);
2439		}
2440		va += len;
2441		sz -= len;
2442	}
2443	PMAP_UNLOCK(pm);
2444}
2445