mmu_oea.c revision 92847
/*
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef lint
static const char rcsid[] =
  "$FreeBSD: head/sys/powerpc/aim/mmu_oea.c 92847 2002-03-21 01:11:31Z jeff $";
#endif /* not lint */

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this module is called upon to
 * provide software-use-only maps which may or may not be stored in the
 * same form as hardware maps.  These pseudo-maps are used to store
 * intermediate results from copy operations to and from address spaces.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * or reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/bat.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/sr.h>

#define	PMAP_DEBUG

#define TODO	panic("%s: not implemented", __func__);

#define	PMAP_LOCK(pm)
#define	PMAP_UNLOCK(pm)

#define	TLBIE(va)	__asm __volatile("tlbie %0" :: "r"(va))
#define	TLBSYNC()	__asm __volatile("tlbsync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_SR(vsid)	((vsid) & 0xf)
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
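
/*
 * Illustrative sketch (not part of the original file): the VSID macros
 * pack a 4-bit segment register number into the low nibble and a 20-bit
 * hash above it, so the two fields round-trip losslessly.  The helper
 * below is hypothetical and compiled out.
 */
#if 0
static void
vsid_macro_example(void)
{
	u_int vsid;

	vsid = VSID_MAKE(3, 0x12345);	/* yields 0x123453 */
	KASSERT(VSID_TO_SR(vsid) == 3, ("sr field"));
	KASSERT(VSID_TO_HASH(vsid) == 0x12345, ("hash field"));
}
#endif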

#define	PVO_PTEGIDX_MASK	0x0007		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x0008		/* slot is valid */
#define	PVO_WIRED		0x0010		/* PVO entry is wired */
#define	PVO_MANAGED		0x0020		/* PVO entry is managed */
#define	PVO_EXECUTABLE		0x0040		/* PVO entry is executable */
#define	PVO_BOOTSTRAP		0x0080		/* PVO entry allocated during
						   bootstrap (a low value such
						   as 0x0004 would collide with
						   PVO_PTEGIDX_MASK above) */
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define	PVO_ISEXECUTABLE(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo, i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
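
/*
 * Illustrative sketch (not part of the original file): because pvo_vaddr
 * is page aligned, its low 12 bits are free to hold the flags above plus
 * the 3-bit PTEG slot index; PVO_VADDR() masks them back off.  The
 * address and pvo below are hypothetical.
 */
#if 0
static void
pvo_flags_example(struct pvo_entry *pvo)
{

	pvo->pvo_vaddr = 0xd0003000 | PVO_WIRED;
	PVO_PTEGIDX_SET(pvo, 5);
	KASSERT(PVO_VADDR(pvo) == 0xd0003000, ("va unharmed by flags"));
	KASSERT(PVO_PTEGIDX_GET(pvo) == 5, ("slot index recovered"));
}
#endif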

#define	PMAP_PVO_CHECK(pvo)

struct mem_region {
	vm_offset_t	mr_start;
	vm_offset_t	mr_size;
};

struct ofw_map {
	vm_offset_t	om_va;
	vm_size_t	om_len;
	vm_offset_t	om_pa;
	u_int		om_mode;
};

int	pmap_bootstrapped = 0;

/*
 * Virtual and physical address of message buffer.
 */
struct		msgbuf *msgbufp;
vm_offset_t	msgbuf_phys;

/*
 * Physical addresses of first and last available physical page.
 */
vm_offset_t avail_start;
vm_offset_t avail_end;

/*
 * Map of physical memory regions.
 */
vm_offset_t	phys_avail[128];
u_int		phys_avail_count;
static struct	mem_region regions[128];
static struct	ofw_map translations[128];
static int	translations_size;

/*
 * First and last available kernel virtual addresses.
 */
vm_offset_t virtual_avail;
vm_offset_t virtual_end;
vm_offset_t kernel_vm_end;

/*
 * Kernel pmap.
 */
struct pmap kernel_pmap_store;
extern struct pmap ofw_pmap;

/*
 * PTEG data.
 */
static struct	pteg *pmap_pteg_table;
u_int		pmap_pteg_count;
u_int		pmap_pteg_mask;

/*
 * PVO data.
 */
struct	pvo_head *pmap_pvo_table;		/* pvo entries by pteg index */
struct	pvo_head pmap_pvo_kunmanaged =
    LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged);	/* list of unmanaged pages */
struct	pvo_head pmap_pvo_unmanaged =
    LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);	/* list of unmanaged pages */

uma_zone_t	pmap_upvo_zone;	/* zone for pvo entries for unmanaged pages */
uma_zone_t	pmap_mpvo_zone;	/* zone for pvo entries for managed pages */
struct		vm_object pmap_upvo_zone_obj;
struct		vm_object pmap_mpvo_zone_obj;
static vm_object_t	pmap_pvo_obj;
static u_int		pmap_pvo_count;

#define	PMAP_PVO_SIZE	1024
static struct	pvo_entry *pmap_bpvo_pool;
static int	pmap_bpvo_pool_index;
static int	pmap_bpvo_pool_count;

#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
static u_int	pmap_vsid_bitmap[NPMAPS / VSID_NBPW];

static boolean_t pmap_initialized = FALSE;

/*
 * Statistics.
 */
u_int	pmap_pte_valid = 0;
u_int	pmap_pte_overflow = 0;
u_int	pmap_pte_replacements = 0;
u_int	pmap_pvo_entries = 0;
u_int	pmap_pvo_enter_calls = 0;
u_int	pmap_pvo_remove_calls = 0;
u_int	pmap_pte_spills = 0;
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_valid, CTLFLAG_RD, &pmap_pte_valid,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_overflow, CTLFLAG_RD,
    &pmap_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_replacements, CTLFLAG_RD,
    &pmap_pte_replacements, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_entries, CTLFLAG_RD, &pmap_pvo_entries,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_enter_calls, CTLFLAG_RD,
    &pmap_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_remove_calls, CTLFLAG_RD,
    &pmap_pvo_remove_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_spills, CTLFLAG_RD,
    &pmap_pte_spills, 0, "");

struct	pvo_entry *pmap_pvo_zeropage;

vm_offset_t	pmap_rkva_start = VM_MIN_KERNEL_ADDRESS;
u_int		pmap_rkva_count = 4;

/*
 * Allocate physical memory for use in pmap_bootstrap.
 */
static vm_offset_t	pmap_bootstrap_alloc(vm_size_t, u_int);

/*
 * PTE calls.
 */
static int		pmap_pte_insert(u_int, struct pte *);

/*
 * PVO calls.
 */
static int	pmap_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
		    vm_offset_t, vm_offset_t, u_int, int);
static void	pmap_pvo_remove(struct pvo_entry *, int);
static struct	pvo_entry *pmap_pvo_find_va(pmap_t, vm_offset_t, int *);
static struct	pte *pmap_pvo_to_pte(const struct pvo_entry *, int);

/*
 * Utility routines.
 */
static void *		pmap_pvo_allocf(uma_zone_t, int, u_int8_t *, int);
static struct		pvo_entry *pmap_rkva_alloc(void);
static void		pmap_pa_map(struct pvo_entry *, vm_offset_t,
			    struct pte *, int *);
static void		pmap_pa_unmap(struct pvo_entry *, struct pte *, int *);
static void		pmap_syncicache(vm_offset_t, vm_size_t);
static boolean_t	pmap_query_bit(vm_page_t, int);
static boolean_t	pmap_clear_bit(vm_page_t, int);
static void		tlbia(void);

static __inline int
va_to_sr(u_int *sr, vm_offset_t va)
{
	return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
}

static __inline u_int
va_to_pteg(u_int sr, vm_offset_t addr)
{
	u_int hash;

	hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
	    ADDR_PIDX_SHFT);
	return (hash & pmap_pteg_mask);
}
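
/*
 * Worked example (added for illustration, with hypothetical values): for
 * a VSID of 0x123453 and va 0x00401000, the page index is
 * (va & ADDR_PIDX) >> ADDR_PIDX_SHFT = 0x401, so the primary hash is
 * 0x123453 ^ 0x401 = 0x123052; the final mask folds that onto however
 * many PTE groups pmap_bootstrap() actually allocated.
 */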

static __inline struct pvo_head *
pa_to_pvoh(vm_offset_t pa)
{
	struct	vm_page *pg;

	pg = PHYS_TO_VM_PAGE(pa);

	if (pg == NULL)
		return (&pmap_pvo_unmanaged);

	return (&pg->md.mdpg_pvoh);
}

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	return (&m->md.mdpg_pvoh);
}

static __inline void
pmap_attr_clear(vm_page_t m, int ptebit)
{

	m->md.mdpg_attrs &= ~ptebit;
}

static __inline int
pmap_attr_fetch(vm_page_t m)
{

	return (m->md.mdpg_attrs);
}

static __inline void
pmap_attr_save(vm_page_t m, int ptebit)
{

	m->md.mdpg_attrs |= ptebit;
}

static __inline int
pmap_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
{
	if (pt->pte_hi == pvo_pt->pte_hi)
		return (1);

	return (0);
}

static __inline int
pmap_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
{
	return (pt->pte_hi & ~PTE_VALID) ==
	    (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    ((va >> ADDR_API_SHFT) & PTE_API) | which);
}

static __inline void
pmap_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
{
	/*
	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
	 * set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
	pt->pte_lo = pte_lo;
}

static __inline void
pmap_pte_synch(struct pte *pt, struct pte *pvo_pt)
{

	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
}

static __inline void
pmap_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
{

	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	TLBIE(va);
	EIEIO();
	TLBSYNC();
	SYNC();
}

static __inline void
pmap_pte_set(struct pte *pt, struct pte *pvo_pt)
{

	pvo_pt->pte_hi |= PTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1.
	 * Note that the REF/CHG bits are from pvo_pt and thus should have
	 * been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	SYNC();
	pmap_pte_valid++;
}

static __inline void
pmap_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	pvo_pt->pte_hi &= ~PTE_VALID;

	/*
	 * Force the ref & chg bits back into the PTEs.
	 */
	SYNC();

	/*
	 * Invalidate the pte.
	 */
	pt->pte_hi &= ~PTE_VALID;

	SYNC();
	TLBIE(va);
	EIEIO();
	TLBSYNC();
	SYNC();

	/*
	 * Save the ref & chg bits.
	 */
	pmap_pte_synch(pt, pvo_pt);
	pmap_pte_valid--;
}

static __inline void
pmap_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	/*
	 * Invalidate the PTE, then reload it with the updated contents.
	 */
	pmap_pte_unset(pt, pvo_pt, va);
	pmap_pte_set(pt, pvo_pt);
}

/*
 * Quick sort callout for comparing memory regions.
 */
static int	mr_cmp(const void *a, const void *b);
static int	om_cmp(const void *a, const void *b);

static int
mr_cmp(const void *a, const void *b)
{
	const struct	mem_region *regiona;
	const struct	mem_region *regionb;

	regiona = a;
	regionb = b;
	if (regiona->mr_start < regionb->mr_start)
		return (-1);
	else if (regiona->mr_start > regionb->mr_start)
		return (1);
	else
		return (0);
}

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa < mapb->om_pa)
		return (-1);
	else if (mapa->om_pa > mapb->om_pa)
		return (1);
	else
		return (0);
}

void
pmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	pmem, mmui;
	phandle_t	chosen, mmu;
	int		sz;
	int		i, j;
	vm_size_t	size, physsz;
	vm_offset_t	pa, va, off;
	u_int		batl, batu;

	/*
	 * Use an IBAT and a DBAT to map the bottom segment of memory
	 * where we are.
	 */
	batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
	__asm ("mtibatu 0,%0; mtibatl 0,%1; mtdbatu 0,%0; mtdbatl 0,%1"
	    :: "r"(batu), "r"(batl));
#if 0
	batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x80000000, BAT_M, BAT_PP_RW);
	__asm ("mtibatu 1,%0; mtibatl 1,%1; mtdbatu 1,%0; mtdbatl 1,%1"
	    :: "r"(batu), "r"(batl));
#endif

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	if ((pmem = OF_finddevice("/memory")) == -1)
		panic("pmap_bootstrap: can't locate memory device");
	if ((sz = OF_getproplen(pmem, "available")) == -1)
		panic("pmap_bootstrap: can't get length of available memory");
	if (sizeof(phys_avail) < sz)
		panic("pmap_bootstrap: phys_avail too small");
	if (sizeof(regions) < sz)
		panic("pmap_bootstrap: regions too small");
	bzero(regions, sz);
	if (OF_getprop(pmem, "available", regions, sz) == -1)
		panic("pmap_bootstrap: can't get available memory");
	sz /= sizeof(*regions);
	CTR0(KTR_PMAP, "pmap_bootstrap: physical memory");
	qsort(regions, sz, sizeof(*regions), mr_cmp);
	phys_avail_count = 0;
	physsz = 0;
	for (i = 0, j = 0; i < sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
		    regions[i].mr_start + regions[i].mr_size,
		    regions[i].mr_size);
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}
	physmem = btoc(physsz);

	/*
	 * Allocate PTEG table.
	 */
#ifdef PTEGCOUNT
	pmap_pteg_count = PTEGCOUNT;
#else
	pmap_pteg_count = 0x1000;

	while (pmap_pteg_count < physmem)
		pmap_pteg_count <<= 1;

	pmap_pteg_count >>= 1;
#endif /* PTEGCOUNT */
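
	/*
	 * Worked example (added for illustration): with physmem = 0x6000
	 * pages, the loop above doubles 0x1000 to 0x8000 and the final
	 * shift settles on 0x4000 PTE groups -- the largest power of two
	 * not exceeding physmem, i.e. roughly one group (eight PTEs) per
	 * two physical pages.
	 */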

	size = pmap_pteg_count * sizeof(struct pteg);
	CTR2(KTR_PMAP, "pmap_bootstrap: %d PTEGs, %d bytes", pmap_pteg_count,
	    size);
	pmap_pteg_table = (struct pteg *)pmap_bootstrap_alloc(size, size);
	CTR1(KTR_PMAP, "pmap_bootstrap: PTEG table at %p", pmap_pteg_table);
	bzero((void *)pmap_pteg_table, pmap_pteg_count * sizeof(struct pteg));
	pmap_pteg_mask = pmap_pteg_count - 1;

	/*
	 * Allocate PTE overflow lists.
	 */
	size = sizeof(struct pvo_head) * pmap_pteg_count;
	pmap_pvo_table = (struct pvo_head *)pmap_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "pmap_bootstrap: PVO table at %p", pmap_pvo_table);
	for (i = 0; i < pmap_pteg_count; i++)
		LIST_INIT(&pmap_pvo_table[i]);

	/*
	 * Allocate the message buffer.
	 */
	msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE, 0);

	/*
	 * Initialise the unmanaged pvo pool.
	 */
	pmap_bpvo_pool = (struct pvo_entry *)pmap_bootstrap_alloc(PAGE_SIZE, 0);
	pmap_bpvo_pool_index = 0;
	pmap_bpvo_pool_count = (int)PAGE_SIZE / sizeof(struct pvo_entry);

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	pmap_vsid_bitmap[0] |= 1;

	/*
	 * Set up the OpenFirmware pmap and add its mappings.
	 */
	pmap_pinit(&ofw_pmap);
	ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
	if ((chosen = OF_finddevice("/chosen")) == -1)
		panic("pmap_bootstrap: can't find /chosen");
	OF_getprop(chosen, "mmu", &mmui, 4);
	if ((mmu = OF_instance_to_package(mmui)) == -1)
		panic("pmap_bootstrap: can't get mmu package");
	if ((sz = OF_getproplen(mmu, "translations")) == -1)
		panic("pmap_bootstrap: can't get ofw translation count");
	if (sizeof(translations) < sz)
		panic("pmap_bootstrap: translations too small");
	bzero(translations, sz);
	if (OF_getprop(mmu, "translations", translations, sz) == -1)
		panic("pmap_bootstrap: can't get ofw translations");
	CTR0(KTR_PMAP, "pmap_bootstrap: translations");
	/* Convert sz from bytes to a count of entries for the loop below. */
	sz /= sizeof(*translations);
	qsort(translations, sz, sizeof (*translations), om_cmp);
	for (i = 0; i < sz; i++) {
		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
		    translations[i].om_pa, translations[i].om_va,
		    translations[i].om_len);

		/* Drop stuff below something? */

		/* Enter the pages? */
		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
			struct	vm_page m;

			m.phys_addr = translations[i].om_pa + off;
			pmap_enter(&ofw_pmap, translations[i].om_va + off, &m,
			    VM_PROT_ALL, 1);
		}
	}
#ifdef SMP
	TLBSYNC();
#endif

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	for (i = 0; i < 16; i++) {
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
	}
	kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
	kernel_pmap->pm_active = ~0;
	kernel_pmap->pm_count = 1;

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0);
	kstack0_phys = pa;
	kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE);
	CTR2(KTR_PMAP, "pmap_bootstrap: kstack0 at %#x (%#x)", kstack0_phys,
	    kstack0);
	virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE;
	for (i = 0; i < KSTACK_PAGES; i++) {
		pa = kstack0_phys + i * PAGE_SIZE;
		va = kstack0 + i * PAGE_SIZE;
		pmap_kenter(va, pa);
		TLBIE(va);
	}

	/*
	 * Calculate the first and last available physical addresses.
	 */
	avail_start = phys_avail[0];
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	avail_end = phys_avail[i + 1];
	Maxmem = powerpc_btop(avail_end);

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	msgbufp = (struct msgbuf *)virtual_avail;
	virtual_avail += round_page(MSGBUF_SIZE);

	/*
	 * Initialize hardware.
	 */
	for (i = 0; i < 16; i++) {
		__asm __volatile("mtsrin %0,%1"
		    :: "r"(EMPTY_SEGMENT), "r"(i << ADDR_SR_SHFT));
	}
	__asm __volatile ("mtsr %0,%1"
	    :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
	__asm __volatile ("sync; mtsdr1 %0; isync"
	    :: "r"((u_int)pmap_pteg_table | (pmap_pteg_mask >> 10)));
	tlbia();

	pmap_bootstrapped++;
}

/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
pmap_activate(struct thread *td)
{
	pmap_t	pm;
	int	i;

	/*
	 * Load all the data we need up front to encourage the compiler to
	 * not issue any loads while we have interrupts disabled below.
	 */
	pm = &td->td_proc->p_vmspace->vm_pmap;

	KASSERT(pm->pm_active == 0, ("pmap_activate: pmap already active?"));

	pm->pm_active |= PCPU_GET(cpumask);

	/*
	 * XXX: Address this again later?
	 * NetBSD only changes the segment registers on return to userland.
	 */
#if 0
	critical_enter();

	for (i = 0; i < 16; i++) {
		__asm __volatile("mtsr %0,%1" :: "r"(i), "r"(pm->pm_sr[i]));
	}
	__asm __volatile("sync; isync");

	critical_exit();
#endif
}

void
pmap_deactivate(struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	pm->pm_active &= ~(PCPU_GET(cpumask));
}

vm_offset_t
pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size)
{
	TODO;
	return (0);
}

void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{
	TODO;
}

void
pmap_clear_modify(vm_page_t m)
{

	if (m->flags & PG_FICTITIOUS)
		return;
	pmap_clear_bit(m, PTE_CHG);
}

void
pmap_collect(void)
{
	TODO;
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
	  vm_size_t len, vm_offset_t src_addr)
{
	TODO;
}

void
pmap_copy_page(vm_offset_t src, vm_offset_t dst)
{
	TODO;
}

/*
 * Zero a page of physical memory by temporarily mapping it into the tlb.
 */
void
pmap_zero_page(vm_offset_t pa)
{
	caddr_t	va;
	int	i;

	if (pa < SEGMENT_LENGTH) {
		va = (caddr_t) pa;
	} else if (pmap_initialized) {
		if (pmap_pvo_zeropage == NULL)
			pmap_pvo_zeropage = pmap_rkva_alloc();
		pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL);
		va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage);
	} else {
		panic("pmap_zero_page: can't zero pa %#x", pa);
	}

	bzero(va, PAGE_SIZE);

	for (i = PAGE_SIZE / CACHELINESIZE; i > 0; i--) {
		__asm __volatile("dcbz 0,%0" :: "r"(va));
		va += CACHELINESIZE;
	}

	if (pa >= SEGMENT_LENGTH)
		pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL);
}

void
pmap_zero_page_area(vm_offset_t pa, int off, int size)
{
	TODO;
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
	   boolean_t wired)
{
	struct		pvo_head *pvo_head;
	uma_zone_t	zone;
	u_int		pte_lo, pvo_flags;
	int		error;

	if (!pmap_initialized) {
		pvo_head = &pmap_pvo_kunmanaged;
		zone = pmap_upvo_zone;
		pvo_flags = 0;
	} else {
		pvo_head = pa_to_pvoh(m->phys_addr);
		zone = pmap_mpvo_zone;
		pvo_flags = PVO_MANAGED;
	}

	pte_lo = PTE_I | PTE_G;

	if (prot & VM_PROT_WRITE)
		pte_lo |= PTE_BW;
	else
		pte_lo |= PTE_BR;

	if (prot & VM_PROT_EXECUTE)
		pvo_flags |= PVO_EXECUTABLE;

	if (wired)
		pvo_flags |= PVO_WIRED;

	error = pmap_pvo_enter(pmap, zone, pvo_head, va, m->phys_addr, pte_lo,
	    pvo_flags);

	if (error == ENOENT) {
		/*
		 * Flush the real memory from the cache.
		 */
		if ((pvo_flags & PVO_EXECUTABLE) && (pte_lo & PTE_I) == 0) {
			pmap_syncicache(m->phys_addr, PAGE_SIZE);
		}
	}
}

vm_offset_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
	TODO;
	return (0);
}

/*
 * Grow the number of kernel page table entries.  Unneeded.
 */
void
pmap_growkernel(vm_offset_t addr)
{
}

void
pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
{

	CTR(KTR_PMAP, "pmap_init");
}

void
pmap_init2(void)
{

	CTR(KTR_PMAP, "pmap_init2");

	pmap_pvo_obj = vm_object_allocate(OBJT_PHYS, 16);
	pmap_pvo_count = 0;
	pmap_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_zone_set_allocf(pmap_upvo_zone, pmap_pvo_allocf);
	pmap_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_zone_set_allocf(pmap_mpvo_zone, pmap_pvo_allocf);
	pmap_initialized = TRUE;
}

boolean_t
pmap_is_modified(vm_page_t m)
{
	TODO;
	return (0);
}

void
pmap_clear_reference(vm_page_t m)
{
	TODO;
}

/*
 *	pmap_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	XXX: The exact number of bits to check and clear is a matter that
 *	should be tested and standardized at some point in the future for
 *	optimal aging of shared pages.
 */

int
pmap_ts_referenced(vm_page_t m)
{
	TODO;
	return (0);
}

/*
 * Map a wired page into kernel virtual address space.
 */
void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{
	u_int		pte_lo;
	int		error;
	int		i;

#if 0
	if (va < VM_MIN_KERNEL_ADDRESS)
		panic("pmap_kenter: attempt to enter non-kernel address %#x",
		    va);
#endif

	pte_lo = PTE_I | PTE_G | PTE_BW;
	for (i = 0; phys_avail[i + 2] != 0; i += 2) {
		if (pa >= phys_avail[i] && pa < phys_avail[i + 1]) {
			pte_lo &= ~(PTE_I | PTE_G);
			break;
		}
	}

	error = pmap_pvo_enter(kernel_pmap, pmap_upvo_zone,
	    &pmap_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);

	if (error != 0 && error != ENOENT)
		panic("pmap_kenter: failed to enter va %#x pa %#x: %d", va,
		    pa, error);

	/*
	 * Flush the real memory from the instruction cache.
	 */
	if ((pte_lo & (PTE_I | PTE_G)) == 0) {
		pmap_syncicache(pa, PAGE_SIZE);
	}
}

vm_offset_t
pmap_kextract(vm_offset_t va)
{
	TODO;
	return (0);
}

/*
 * Remove a wired page from kernel virtual address space.
 */
void
pmap_kremove(vm_offset_t va)
{

	/*
	 * The end address must be one full page past va; for an aligned
	 * va, roundup(va, PAGE_SIZE) would be a no-op and nothing would
	 * be removed.
	 */
	pmap_remove(kernel_pmap, va, va + PAGE_SIZE);
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 *
 * The value passed in *virt is a suggested virtual address for the mapping.
 * Architectures which can support a direct-mapped physical to virtual region
 * can return the appropriate address within that region, leaving '*virt'
 * unchanged.  We cannot and therefore do not; *virt is updated with the
 * first usable address after the mapped region.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot)
{
	vm_offset_t	sva, va;

	sva = *virt;
	va = sva;
	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
		pmap_kenter(va, pa_start);
	*virt = va;
	return (sva);
}
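
/*
 * Illustrative usage sketch (not part of the original file; buf_pa is
 * hypothetical): wiring a 3-page buffer at boot might look like
 *
 *	va = pmap_map(&virtual_avail, buf_pa, buf_pa + 3 * PAGE_SIZE,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *
 * after which virtual_avail has advanced by three pages and va maps
 * buf_pa through pmap_kenter().
 */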

int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{
	TODO;
	return (0);
}

/*
 * Create the uarea for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
pmap_new_proc(struct proc *p)
{
	vm_object_t	upobj;
	vm_offset_t	up;
	vm_page_t	m;
	u_int		i;

	/*
	 * Allocate the object for the upages.
	 */
	upobj = p->p_upages_obj;
	if (upobj == NULL) {
		upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
		p->p_upages_obj = upobj;
	}

	/*
	 * Get a kernel virtual address for the uarea for this process.
	 */
	up = (vm_offset_t)p->p_uarea;
	if (up == 0) {
		up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
		if (up == 0)
			panic("pmap_new_proc: upage allocation failed");
		p->p_uarea = (struct user *)up;
	}

	for (i = 0; i < UAREA_PAGES; i++) {
		/*
		 * Get a uarea page.
		 */
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

		/*
		 * Wire the page.
		 */
		m->wire_count++;

		/*
		 * Enter the page into the kernel address space.
		 */
		pmap_kenter(up + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));

		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
		m->valid = VM_PAGE_BITS_ALL;
	}
}

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
		    vm_pindex_t pindex, vm_size_t size, int limit)
{
	TODO;
}

/*
 * Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
	struct	pvo_head *pvo_head;
	struct	pvo_entry *pvo, *next_pvo;
	struct	pte *pt;

	/*
	 * Since the routine only downgrades protection, if the
	 * maximal protection is desired, there isn't any change
	 * to be made.
	 */
	if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) ==
	    (VM_PROT_READ|VM_PROT_WRITE))
		return;

	pvo_head = vm_page_to_pvoh(m);
	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
		next_pvo = LIST_NEXT(pvo, pvo_vlink);
		PMAP_PVO_CHECK(pvo);	/* sanity check */

		/*
		 * Downgrading to no mapping at all, we just remove the entry.
		 */
		if ((prot & VM_PROT_READ) == 0) {
			pmap_pvo_remove(pvo, -1);
			continue;
		}

		/*
		 * If EXEC permission is being revoked, just clear the flag
		 * in the PVO.
		 */
		if ((prot & VM_PROT_EXECUTE) == 0)
			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;

		/*
		 * If this entry is already RO, don't diddle with the page
		 * table.
		 */
		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
			PMAP_PVO_CHECK(pvo);
			continue;
		}

		/*
		 * Grab the PTE before we diddle the bits so pvo_to_pte can
		 * verify the pte contents are as expected.
		 */
		pt = pmap_pvo_to_pte(pvo, -1);
		pvo->pvo_pte.pte_lo &= ~PTE_PP;
		pvo->pvo_pte.pte_lo |= PTE_BR;
		if (pt != NULL)
			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
		PMAP_PVO_CHECK(pvo);	/* sanity check */
	}
}

/*
 * Make the specified page pageable (or not).  Unneeded.
 */
void
pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
	      boolean_t pageable)
{
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
	TODO;
	return (0);
}

static u_int	pmap_vsidcontext;

void
pmap_pinit(pmap_t pmap)
{
	int	i, mask;
	u_int	entropy;

	entropy = 0;
	__asm __volatile("mftb %0" : "=r"(entropy));

	/*
	 * Allocate some segment registers for this pmap.
	 */
	pmap->pm_count = 1;
	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
		u_int	hash, n;

		/*
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT hash function collides
		 * less often.  (Note that the prime causes gcc to do shifts
		 * instead of a multiply.)
		 */
		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
		hash = pmap_vsidcontext & (NPMAPS - 1);
		if (hash == 0)		/* 0 is special, avoid it */
			continue;
		n = hash >> 5;
		mask = 1 << (hash & (VSID_NBPW - 1));
		hash = (pmap_vsidcontext & 0xfffff);
		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
			if (pmap_vsid_bitmap[n] == 0xffffffff) {
				entropy = (pmap_vsidcontext >> 20);
				continue;
			}
			/* pick the lowest free slot in bucket n */
			i = ffs(~pmap_vsid_bitmap[n]) - 1;
			mask = 1 << i;
			hash &= 0xfffff & ~(VSID_NBPW - 1);
			hash |= i;
		}
		pmap_vsid_bitmap[n] |= mask;
		for (i = 0; i < 16; i++)
			pmap->pm_sr[i] = VSID_MAKE(i, hash);
		return;
	}

	panic("pmap_pinit: out of segments");
}
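
/*
 * Worked example (added for illustration): a candidate hash of 0x2c3
 * lands in bitmap word n = 0x2c3 >> 5 = 0x16 with mask 1 << (0x2c3 & 31)
 * = 1 << 3.  On a collision, ffs() of the complemented word picks the
 * lowest clear slot and the low five bits of the hash are rewritten to
 * point at it.
 */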

/*
 * Initialize the pmap associated with process 0.
 */
void
pmap_pinit0(pmap_t pm)
{

	pmap_pinit(pm);
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

void
pmap_pinit2(pmap_t pmap)
{
	/* XXX: Remove this stub when no longer called */
}

void
pmap_prefault(pmap_t pm, vm_offset_t va, vm_map_entry_t entry)
{
	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("pmap_prefault: non current pmap"));
	/* XXX */
}

void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
	TODO;
}

vm_offset_t
pmap_phys_address(int ppn)
{
	TODO;
	return (0);
}

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
void
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
{
	int	i;

	for (i = 0; i < count; i++, va += PAGE_SIZE)
		pmap_kenter(va, VM_PAGE_TO_PHYS(m[i]));
}

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by pmap_qenter.
 */
void
pmap_qremove(vm_offset_t va, int count)
{
	int	i;

	for (i = 0; i < count; i++, va += PAGE_SIZE)
		pmap_kremove(va);
}

/*
 * Add a reference to the specified pmap.
 */
void
pmap_reference(pmap_t pm)
{

	if (pm != NULL)
		pm->pm_count++;
}

void
pmap_release(pmap_t pmap)
{
	TODO;
}

/*
 * Remove the given range of addresses from the specified map.
 */
void
pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct	pvo_entry *pvo;
	int	pteidx;

	for (; sva < eva; sva += PAGE_SIZE) {
		pvo = pmap_pvo_find_va(pm, sva, &pteidx);
		if (pvo != NULL) {
			pmap_pvo_remove(pvo, pteidx);
		}
	}
}

void
pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	TODO;
}

void
pmap_swapin_proc(struct proc *p)
{
	TODO;
}

void
pmap_swapout_proc(struct proc *p)
{
	TODO;
}

/*
 * Create the kernel stack and pcb for a new thread.
 * This routine directly affects the fork perf for a process and
 * create performance for a thread.
 */
void
pmap_new_thread(struct thread *td)
{
	vm_object_t	ksobj;
	vm_offset_t	ks;
	vm_page_t	m;
	u_int		i;

	/*
	 * Allocate object for the kstack.
	 */
	ksobj = td->td_kstack_obj;
	if (ksobj == NULL) {
		ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES);
		td->td_kstack_obj = ksobj;
	}

	/*
	 * Get a kernel virtual address for the kstack for this thread.
	 */
	ks = td->td_kstack;
	if (ks == 0) {
		ks = kmem_alloc_nofault(kernel_map,
		    (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE);
		if (ks == 0)
			panic("pmap_new_thread: kstack allocation failed");
		TLBIE(ks);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
		td->td_kstack = ks;
	}

	for (i = 0; i < KSTACK_PAGES; i++) {
		/*
		 * Get a kernel stack page.
		 */
		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

		/*
		 * Wire the page.
		 */
		m->wire_count++;

		/*
		 * Enter the page into the kernel address space.
		 */
		pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));

		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
		m->valid = VM_PAGE_BITS_ALL;
	}
}

void
pmap_dispose_proc(struct proc *p)
{
	TODO;
}

void
pmap_dispose_thread(struct thread *td)
{
	TODO;
}

void
pmap_swapin_thread(struct thread *td)
{
	TODO;
}

void
pmap_swapout_thread(struct thread *td)
{
	TODO;
}

/*
 * Allocate a physical page of memory directly from the phys_avail map.
 * Can only be called from pmap_bootstrap before avail start and end are
 * calculated.
 */
static vm_offset_t
pmap_bootstrap_alloc(vm_size_t size, u_int align)
{
	vm_offset_t	s, e;
	int		i, j;

	size = round_page(size);
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		if (align != 0)
			s = (phys_avail[i] + align - 1) & ~(align - 1);
		else
			s = phys_avail[i];
		e = s + size;

		if (s < phys_avail[i] || e > phys_avail[i + 1])
			continue;

		if (s == phys_avail[i]) {
			phys_avail[i] += size;
		} else if (e == phys_avail[i + 1]) {
			phys_avail[i + 1] -= size;
		} else {
			for (j = phys_avail_count * 2; j > i; j -= 2) {
				phys_avail[j] = phys_avail[j - 2];
				phys_avail[j + 1] = phys_avail[j - 1];
			}

			phys_avail[i + 3] = phys_avail[i + 1];
			phys_avail[i + 1] = s;
			phys_avail[i + 2] = e;
			phys_avail_count++;
		}

		return (s);
	}
	panic("pmap_bootstrap_alloc: could not allocate memory");
}
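
/*
 * Worked example (added for illustration): a one-page allocation from a
 * region [0x100000, 0x200000) trims the front, leaving
 * [0x101000, 0x200000).  If alignment pushes s past the region start
 * (align = 0x10000 on a region starting at 0x101000 gives s = 0x110000),
 * the final else branch splits the region into [0x101000, 0x110000) and
 * [0x111000, 0x200000), shifting the later pairs up to make room.
 */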

/*
 * Return an unmapped pvo for a kernel virtual address.
 * Used by pmap functions that operate on physical pages.
 */
static struct pvo_entry *
pmap_rkva_alloc(void)
{
	struct		pvo_entry *pvo;
	struct		pte *pt;
	vm_offset_t	kva;
	int		pteidx;

	if (pmap_rkva_count == 0)
		panic("pmap_rkva_alloc: no more reserved KVAs");

	kva = pmap_rkva_start + (PAGE_SIZE * --pmap_rkva_count);
	pmap_kenter(kva, 0);

	pvo = pmap_pvo_find_va(kernel_pmap, kva, &pteidx);

	if (pvo == NULL)
		panic("pmap_rkva_alloc: pmap_pvo_find_va failed");

	pt = pmap_pvo_to_pte(pvo, pteidx);

	if (pt == NULL)
		panic("pmap_rkva_alloc: pmap_pvo_to_pte failed");

	pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
	PVO_PTEGIDX_CLR(pvo);

	pmap_pte_overflow++;

	return (pvo);
}

static void
pmap_pa_map(struct pvo_entry *pvo, vm_offset_t pa, struct pte *saved_pt,
    int *depth_p)
{
	struct	pte *pt;

	/*
	 * If this pvo already has a valid pte, we need to save it so it can
	 * be restored later.  We then just reload the new PTE over the old
	 * slot.
	 */
	if (saved_pt != NULL) {
		pt = pmap_pvo_to_pte(pvo, -1);

		if (pt != NULL) {
			pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
			PVO_PTEGIDX_CLR(pvo);
			pmap_pte_overflow++;
		}

		*saved_pt = pvo->pvo_pte;

		pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
	}

	pvo->pvo_pte.pte_lo |= pa;

	if (!pmap_pte_spill(pvo->pvo_vaddr))
		panic("pmap_pa_map: could not spill pvo %p", pvo);

	if (depth_p != NULL)
		(*depth_p)++;
}

static void
pmap_pa_unmap(struct pvo_entry *pvo, struct pte *saved_pt, int *depth_p)
{
	struct	pte *pt;

	pt = pmap_pvo_to_pte(pvo, -1);

	if (pt != NULL) {
		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
		PVO_PTEGIDX_CLR(pvo);
		pmap_pte_overflow++;
	}

	pvo->pvo_pte.pte_lo &= ~PTE_RPGN;

	/*
	 * If there is a saved PTE and it's valid, restore it and return.
	 */
	if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) {
		if (depth_p != NULL && --(*depth_p) == 0)
			panic("pmap_pa_unmap: restoring but depth == 0");

		pvo->pvo_pte = *saved_pt;

		if (!pmap_pte_spill(pvo->pvo_vaddr))
			panic("pmap_pa_unmap: could not spill pvo %p", pvo);
	}
}

static void
pmap_syncicache(vm_offset_t pa, vm_size_t len)
{
	__syncicache((void *)pa, len);
}

static void
tlbia(void)
{
	caddr_t	i;

	SYNC();
	for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) {
		TLBIE(i);
		EIEIO();
	}
	TLBSYNC();
	SYNC();
}
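
/*
 * Note (added for illustration): the loop above issues 64 tlbie
 * operations, one per 4K page up to 0x40000.  Classic 32-bit PowerPC
 * TLBs are indexed by those low effective-address bits, so 64 steps
 * walk every congruence class and thereby flush the entire TLB.
 */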

static int
pmap_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
    vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags)
{
	struct	pvo_entry *pvo;
	u_int	sr;
	int	first;
	u_int	ptegidx;
	int	i;
	int	bootstrap;

	pmap_pvo_enter_calls++;
	bootstrap = 0;

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	sr = va_to_sr(pm->pm_sr, va);
	ptegidx = va_to_pteg(sr, va);

	/*
	 * Remove any existing mapping for this page.  Reuse the pvo entry if
	 * there is a mapping.
	 */
	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if ((pvo->pvo_pte.pte_lo & PTE_RPGN) == pa)
				return (0);
			pmap_pvo_remove(pvo, -1);
			break;
		}
	}

	/*
	 * If we aren't overwriting a mapping, try to allocate.
	 */
	if (pmap_initialized) {
		pvo = uma_zalloc(zone, M_NOWAIT);
	} else {
		if (pmap_bpvo_pool_index >= pmap_bpvo_pool_count) {
			pmap_bpvo_pool = (struct pvo_entry *)
			    pmap_bootstrap_alloc(PAGE_SIZE, 0);
			pmap_bpvo_pool_index = 0;
		}
		pvo = &pmap_bpvo_pool[pmap_bpvo_pool_index];
		pmap_bpvo_pool_index++;
		/*
		 * Record this for later; setting PVO_BOOTSTRAP here would
		 * be clobbered by the "pvo->pvo_vaddr = va" below.
		 */
		bootstrap = 1;
	}

	if (pvo == NULL) {
		return (ENOMEM);
	}

	pmap_pvo_entries++;
	pvo->pvo_vaddr = va;
	pvo->pvo_pmap = pm;
	LIST_INSERT_HEAD(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
	pvo->pvo_vaddr &= ~ADDR_POFF;
	if (flags & VM_PROT_EXECUTE)
		pvo->pvo_vaddr |= PVO_EXECUTABLE;
	if (flags & PVO_WIRED)
		pvo->pvo_vaddr |= PVO_WIRED;
	if (pvo_head != &pmap_pvo_kunmanaged)
		pvo->pvo_vaddr |= PVO_MANAGED;
	if (bootstrap)
		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
	pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo);

	/*
	 * Remember if the list was empty and therefore will be the first
	 * item.
	 */
	first = LIST_FIRST(pvo_head) == NULL;

	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
	if (pvo->pvo_vaddr & PVO_WIRED)	/* flags live in pvo_vaddr, not pte_lo */
		pvo->pvo_pmap->pm_stats.wired_count++;
	pvo->pvo_pmap->pm_stats.resident_count++;

	/*
	 * We hope this succeeds but it isn't required.
	 */
	i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
	if (i >= 0) {
		PVO_PTEGIDX_SET(pvo, i);
	} else {
		/*
		 * XXX: this panic is a temporary debugging aid; while it
		 * remains, the overflow counter below is unreachable.
		 */
		panic("pmap_pvo_enter: overflow");
		pmap_pte_overflow++;
	}

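	/*
	 * By convention (relied on by pmap_enter() above), ENOENT is not an
	 * error: it tells the caller this was the first mapping of the
	 * page, so the instruction cache may need flushing for executable
	 * mappings.
	 */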
	return (first ? ENOENT : 0);
}
1718
1719static void
1720pmap_pvo_remove(struct pvo_entry *pvo, int pteidx)
1721{
1722	struct	pte *pt;
1723
1724	/*
1725	 * If there is an active pte entry, we need to deactivate it (and
1726	 * save the ref & cfg bits).
1727	 */
1728	pt = pmap_pvo_to_pte(pvo, pteidx);
1729	if (pt != NULL) {
1730		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1731		PVO_PTEGIDX_CLR(pvo);
1732	} else {
1733		pmap_pte_overflow--;
1734	}
1735
1736	/*
1737	 * Update our statistics.
1738	 */
1739	pvo->pvo_pmap->pm_stats.resident_count--;
1740	if (pvo->pvo_vaddr & PVO_WIRED)
1741		pvo->pvo_pmap->pm_stats.wired_count--;
1742
1743	/*
1744	 * Save the REF/CHG bits into their cache if the page is managed.
1745	 */
1746	if (pvo->pvo_vaddr & PVO_MANAGED) {
1747		struct	vm_page *pg;
1748
1749		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN);
1750		if (pg != NULL) {
1751			pmap_attr_save(pg, pvo->pvo_pte.pte_lo &
1752			    (PTE_REF | PTE_CHG));
1753		}
1754	}
1755
1756	/*
1757	 * Remove this PVO from the PV list.
1758	 */
1759	LIST_REMOVE(pvo, pvo_vlink);
1760
1761	/*
1762	 * Remove this from the overflow list and return it to the pool
1763	 * if we aren't going to reuse it.
1764	 */
1765	LIST_REMOVE(pvo, pvo_olink);
1766	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
1767		uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? pmap_mpvo_zone :
1768		    pmap_upvo_zone, pvo);
1769	pmap_pvo_entries--;
1770	pmap_pvo_remove_calls++;
1771}
1772
1773static __inline int
1774pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
1775{
1776	int	pteidx;
1777
1778	/*
1779	 * We can find the actual pte entry without searching by grabbing
1780	 * the PTE's slot from 3 unused low bits of pvo_vaddr and by
1781	 * noticing the HID bit.
1782	 */
1783	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
1784	if (pvo->pvo_pte.pte_hi & PTE_HID)
1785		pteidx ^= pmap_pteg_mask * 8;
1786
1787	return (pteidx);
1788}
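
/*
 * Worked example (values assumed for illustration): with eight PTEs
 * per group, a pvo whose primary group is ptegidx == 0x123 and whose
 * saved slot is 5 yields
 *
 *	pteidx = 0x123 * 8 + 5 = 0x91d
 *
 * and, if PTE_HID is set, the entry lives in the alternate group, so
 * the group bits are flipped by the XOR with pmap_pteg_mask * 8.
 */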
1789
1790static struct pvo_entry *
1791pmap_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
1792{
1793	struct	pvo_entry *pvo;
1794	int	ptegidx;
1795	u_int	sr;
1796
1797	va &= ~ADDR_POFF;
1798	sr = va_to_sr(pm->pm_sr, va);
1799	ptegidx = va_to_pteg(sr, va);
1800
1801	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1802		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1803			if (pteidx_p)
1804				*pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
1805			return (pvo);
1806		}
1807	}
1808
1809	return (NULL);
1810}
1811
1812static struct pte *
1813pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
1814{
1815	struct	pte *pt;
1816
1817	/*
1818	 * If we haven't been supplied the ptegidx, calculate it.
1819	 */
1820	if (pteidx == -1) {
1821		int	ptegidx;
1822		u_int	sr;
1823
1824		sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
1825		ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
1826		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
1827	}
1828
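	/*
	 * pteidx encodes both the group and the slot: pteidx >> 3 is
	 * the PTEG and pteidx & 7 is the PTE within that group.
	 */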
1829	pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];
1830
1831	if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
1832		panic("pmap_pvo_to_pte: pvo %p has valid pte in pvo but no "
1833		    "valid pte index", pvo);
1834	}
1835
1836	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
1837		panic("pmap_pvo_to_pte: pvo %p has valid pte index in pvo "
1838		    "but no valid pte", pvo);
1839	}
1840
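	/*
	 * The XOR below cancels every bit that agrees with our soft
	 * copy, so a result of exactly PTE_VALID means the hardware
	 * PTE matches the pvo's PTE and is valid.
	 */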
1841	if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
1842		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
1843			panic("pmap_pvo_to_pte: pvo %p has valid pte in "
1844			    "pmap_pteg_table %p but invalid in pvo", pvo, pt);
1845		}
1846
1847		if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF))
1848		    != 0) {
1849			panic("pmap_pvo_to_pte: pvo %p pte does not match "
1850			    "pte %p in pmap_pteg_table", pvo, pt);
1851		}
1852
1853		return (pt);
1854	}
1855
1856	if (pvo->pvo_pte.pte_hi & PTE_VALID) {
1857		panic("pmap_pvo_to_pte: pvo %p has invalid pte %p in "
1858		    "pmap_pteg_table but valid in pvo", pvo, pt);
1859	}
1860
1861	return (NULL);
1862}
1863
1864static void *
1865pmap_pvo_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
1866{
1867	vm_page_t	m;
1868
1869	if (bytes != PAGE_SIZE)
1870		panic("pmap_pvo_allocf: benno was shortsighted.  hit him.");
1871
1872	*flags = UMA_SLAB_PRIV;
1873	m = vm_page_alloc(pmap_pvo_obj, pmap_pvo_count, VM_ALLOC_SYSTEM);
1874	if (m == NULL)
1875		return (NULL);
1876	pmap_pvo_count++;
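	/*
	 * Returning the physical address as the slab address assumes
	 * physical memory is mapped 1:1 (via BAT) wherever these pvo
	 * pages are later dereferenced.
	 */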
1877	return ((void *)VM_PAGE_TO_PHYS(m));
1878}
1879
1880/*
1881 * XXX: THIS STUFF SHOULD BE IN pte.c?
1882 */
1883int
1884pmap_pte_spill(vm_offset_t addr)
1885{
1886	struct	pvo_entry *source_pvo, *victim_pvo;
1887	struct	pvo_entry *pvo;
1888	int	ptegidx, i, j;
1889	u_int	sr;
1890	struct	pteg *pteg;
1891	struct	pte *pt;
1892
1893	pmap_pte_spills++;
1894
1895	__asm __volatile("mfsrin %0,%1" : "=r"(sr) : "r"(addr));
1896	ptegidx = va_to_pteg(sr, addr);
1897
1898	/*
1899	 * Have to substitute some entry.  Use the primary hash for this.
1900	 * Use low bits of timebase as random generator.
1901	 */
1902	pteg = &pmap_pteg_table[ptegidx];
1903	__asm __volatile("mftb %0" : "=r"(i));
1904	i &= 7;
1905	pt = &pteg->pt[i];
1906
1907	source_pvo = NULL;
1908	victim_pvo = NULL;
1909	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1910		/*
1911		 * We need to find a pvo entry for this address.
1912		 */
1913		PMAP_PVO_CHECK(pvo);
1914		if (source_pvo == NULL &&
1915		    pmap_pte_match(&pvo->pvo_pte, sr, addr,
1916		    pvo->pvo_pte.pte_hi & PTE_HID)) {
1917			/*
1918			 * Now found an entry to be spilled into the pteg.
1919			 * The PTE is now valid, so we know it's active.
1920			 */
1921			j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
1922
1923			if (j >= 0) {
1924				PVO_PTEGIDX_SET(pvo, j);
1925				pmap_pte_overflow--;
1926				PMAP_PVO_CHECK(pvo);
1927				return (1);
1928			}
1929
1930			source_pvo = pvo;
1931
1932			if (victim_pvo != NULL)
1933				break;
1934		}
1935
1936		/*
1937		 * We also need the pvo entry of the victim we are replacing,
1938		 * so save the R & C bits of the PTE.
1939		 */
1940		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
1941		    pmap_pte_compare(pt, &pvo->pvo_pte)) {
1942			victim_pvo = pvo;
1943			if (source_pvo != NULL)
1944				break;
1945		}
1946	}
1947
1948	if (source_pvo == NULL)
1949		return (0);
1950
1951	if (victim_pvo == NULL) {
1952		if ((pt->pte_hi & PTE_HID) == 0)
1953			panic("pmap_pte_spill: victim p-pte (%p) has no pvo "
1954			    "entry", pt);
1955
1956		/*
1957		 * If this is a secondary PTE, we need to search its primary
1958		 * pvo bucket for the matching PVO.
1959		 */
1960		LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx ^ pmap_pteg_mask],
1961		    pvo_olink) {
1962			PMAP_PVO_CHECK(pvo);
1963			/*
1964			 * We also need the pvo entry of the victim we are
1965			 * replacing, so save the R & C bits of the PTE.
1966			 */
1967			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
1968				victim_pvo = pvo;
1969				break;
1970			}
1971		}
1972
1973		if (victim_pvo == NULL)
1974			panic("pmap_pte_spill: victim s-pte (%p) has no pvo "
1975			    "entry", pt);
1976	}
1977
1978	/*
1979	 * We are invalidating the TLB entry for the EA we are replacing even
1980	 * though it's valid.  If we don't, we lose any ref/chg bit changes
1981	 * contained in the TLB entry.
1982	 */
1983	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;
1984
1985	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
1986	pmap_pte_set(pt, &source_pvo->pvo_pte);
1987
1988	PVO_PTEGIDX_CLR(victim_pvo);
1989	PVO_PTEGIDX_SET(source_pvo, i);
1990	pmap_pte_replacements++;
1991
1992	PMAP_PVO_CHECK(victim_pvo);
1993	PMAP_PVO_CHECK(source_pvo);
1994
1995	return (1);
1996}
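
/*
 * A hedged sketch of the intended caller (the real hook lives in the
 * trap path, not in this file): on an ISI/DSI fault, give the spill
 * handler a chance to reload an evicted PTE before treating it as a
 * real page fault.  pmap_fault_reload_example() and fault_ea are
 * illustrative names only.
 */
static __inline int
pmap_fault_reload_example(vm_offset_t fault_ea)
{

	if (pmap_pte_spill(fault_ea))
		return (0);	/* reloaded; retry the faulting access */
	return (EFAULT);	/* genuine fault; hand it to the VM */
}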
1997
1998static int
1999pmap_pte_insert(u_int ptegidx, struct pte *pvo_pt)
2000{
2001	struct	pte *pt;
2002	int	i;
2003
2004	/*
2005	 * First try primary hash.
2006	 */
2007	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
2008		if ((pt->pte_hi & PTE_VALID) == 0) {
2009			pvo_pt->pte_hi &= ~PTE_HID;
2010			pmap_pte_set(pt, pvo_pt);
2011			return (i);
2012		}
2013	}
2014
2015	/*
2016	 * Now try secondary hash.
2017	 */
2018	ptegidx ^= pmap_pteg_mask;
2020	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
2021		if ((pt->pte_hi & PTE_VALID) == 0) {
2022			pvo_pt->pte_hi |= PTE_HID;
2023			pmap_pte_set(pt, pvo_pt);
2024			return (i);
2025		}
2026	}
2027
2028	panic("pmap_pte_insert: overflow");
2029	return (-1);
2030}
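
/*
 * Illustrative example (mask value assumed): with pmap_pteg_mask ==
 * 0x3ff, primary group 0x0c5 hashes to secondary group
 * 0x0c5 ^ 0x3ff == 0x33a and back again.  Because the XOR is its own
 * inverse, pmap_pvo_pte_index() can recover the slot of a PTE_HID
 * entry by flipping the same bits.
 */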
2031
2032static boolean_t
2033pmap_query_bit(vm_page_t m, int ptebit)
2034{
2035	struct	pvo_entry *pvo;
2036	struct	pte *pt;
2037
2038	if (pmap_attr_fetch(m) & ptebit)
2039		return (TRUE);
2040
2041	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2042		PMAP_PVO_CHECK(pvo);	/* sanity check */
2043
2044		/*
2045		 * See if we saved the bit off.  If so, cache it and return
2046		 * success.
2047		 */
2048		if (pvo->pvo_pte.pte_lo & ptebit) {
2049			pmap_attr_save(m, ptebit);
2050			PMAP_PVO_CHECK(pvo);	/* sanity check */
2051			return (TRUE);
2052		}
2053	}
2054
2055	/*
2056	 * No luck, now go through the hard part of looking at the PTEs
2057	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
2058	 * the PTEs.
2059	 */
2060	SYNC();
2061	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2062		PMAP_PVO_CHECK(pvo);	/* sanity check */
2063
2064		/*
2065		 * See if this pvo has a valid PTE.  If so, fetch the
2066		 * REF/CHG bits from the valid PTE.  If the appropriate
2067		 * ptebit is set, cache it and return success.
2068		 */
2069		pt = pmap_pvo_to_pte(pvo, -1);
2070		if (pt != NULL) {
2071			pmap_pte_synch(pt, &pvo->pvo_pte);
2072			if (pvo->pvo_pte.pte_lo & ptebit) {
2073				pmap_attr_save(m, ptebit);
2074				PMAP_PVO_CHECK(pvo);	/* sanity check */
2075				return (TRUE);
2076			}
2077		}
2078	}
2079
2080	return (FALSE);
2081}
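
/*
 * A minimal usage sketch (hypothetical wrapper; the corresponding
 * pmap entry points live elsewhere): the REF/CHG queries reduce to
 * single calls of the helper above.
 */
static __inline boolean_t
pmap_page_is_modified_example(vm_page_t m)
{

	return (pmap_query_bit(m, PTE_CHG));
}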
2082
2083static boolean_t
2084pmap_clear_bit(vm_page_t m, int ptebit)
2085{
2086	struct	pvo_entry *pvo;
2087	struct	pte *pt;
2088	int	rv;
2089
2090	/*
2091	 * Clear the cached value.
2092	 */
2093	rv = pmap_attr_fetch(m);
2094	pmap_attr_clear(m, ptebit);
2095
2096	/*
2097	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
2098	 * we can reset the right ones).  Note that since the pvo entries and
2099	 * list heads are accessed via BAT0 and are never placed in the page
2100	 * table, we don't have to worry about further accesses setting the
2101	 * REF/CHG bits.
2102	 */
2103	SYNC();
2104
2105	/*
2106	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
2107	 * valid pte, clear the ptebit from the valid pte.
2108	 */
2109	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2110		PMAP_PVO_CHECK(pvo);	/* sanity check */
2111		pt = pmap_pvo_to_pte(pvo, -1);
2112		if (pt != NULL) {
2113			pmap_pte_synch(pt, &pvo->pvo_pte);
2114			if (pvo->pvo_pte.pte_lo & ptebit)
2115				pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
2116		}
2117		rv |= pvo->pvo_pte.pte_lo;
2118		pvo->pvo_pte.pte_lo &= ~ptebit;
2119		PMAP_PVO_CHECK(pvo);	/* sanity check */
2120	}
2121
2122	return ((rv & ptebit) != 0);
2123}
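
/*
 * A hedged usage sketch to match pmap_query_bit() above: clearing a
 * page's modify or reference state reduces to
 *
 *	(void)pmap_clear_bit(m, PTE_CHG);	(clear modify)
 *	(void)pmap_clear_bit(m, PTE_REF);	(clear reference)
 *
 * where the boolean return reports whether the bit had been set.
 */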
2124