mmu_oea.c revision 110172
1/*
2 * Copyright (c) 2001 The NetBSD Foundation, Inc.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to The NetBSD Foundation
6 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *        This product includes software developed by the NetBSD
19 *        Foundation, Inc. and its contributors.
20 * 4. Neither the name of The NetBSD Foundation nor the names of its
21 *    contributors may be used to endorse or promote products derived
22 *    from this software without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36/*
37 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
38 * Copyright (C) 1995, 1996 TooLs GmbH.
39 * All rights reserved.
40 *
41 * Redistribution and use in source and binary forms, with or without
42 * modification, are permitted provided that the following conditions
43 * are met:
44 * 1. Redistributions of source code must retain the above copyright
45 *    notice, this list of conditions and the following disclaimer.
46 * 2. Redistributions in binary form must reproduce the above copyright
47 *    notice, this list of conditions and the following disclaimer in the
48 *    documentation and/or other materials provided with the distribution.
49 * 3. All advertising materials mentioning features or use of this software
50 *    must display the following acknowledgement:
51 *	This product includes software developed by TooLs GmbH.
52 * 4. The name of TooLs GmbH may not be used to endorse or promote products
53 *    derived from this software without specific prior written permission.
54 *
55 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
60 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
61 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
62 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
63 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
64 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 *
66 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
67 */
68/*
69 * Copyright (C) 2001 Benno Rice.
70 * All rights reserved.
71 *
72 * Redistribution and use in source and binary forms, with or without
73 * modification, are permitted provided that the following conditions
74 * are met:
75 * 1. Redistributions of source code must retain the above copyright
76 *    notice, this list of conditions and the following disclaimer.
77 * 2. Redistributions in binary form must reproduce the above copyright
78 *    notice, this list of conditions and the following disclaimer in the
79 *    documentation and/or other materials provided with the distribution.
80 *
81 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
82 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
83 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
84 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
85 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
86 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
87 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
88 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
89 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
90 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91 */
92
93#ifndef lint
94static const char rcsid[] =
95   "$FreeBSD: head/sys/powerpc/aim/mmu_oea.c 110172 2003-02-01 02:56:48Z grehan $";
96#endif /* not lint */
97
98/*
99 * Manages physical address maps.
100 *
101 * In addition to hardware address maps, this module is called upon to
102 * provide software-use-only maps which may or may not be stored in the
103 * same form as hardware maps.  These pseudo-maps are used to store
104 * intermediate results from copy operations to and from address spaces.
105 *
106 * Since the information managed by this module is also stored by the
107 * logical address mapping module, this module may throw away valid virtual
108 * to physical mappings at almost any time.  However, invalidations of
109 * mappings must be done as requested.
110 *
111 * In order to cope with hardware architectures which make virtual to
112 * physical map invalidates expensive, this module may delay invalidate
113 * or reduced protection operations until such time as they are actually
114 * necessary.  This module is given full information as to which processors
115 * are currently using which maps, and to when physical maps must be made
116 * correct.
117 */
118
119#include <sys/param.h>
120#include <sys/kernel.h>
121#include <sys/ktr.h>
122#include <sys/lock.h>
123#include <sys/msgbuf.h>
124#include <sys/mutex.h>
125#include <sys/proc.h>
126#include <sys/sysctl.h>
127#include <sys/systm.h>
128#include <sys/vmmeter.h>
129
130#include <dev/ofw/openfirm.h>
131
132#include <vm/vm.h>
133#include <vm/vm_param.h>
134#include <vm/vm_kern.h>
135#include <vm/vm_page.h>
136#include <vm/vm_map.h>
137#include <vm/vm_object.h>
138#include <vm/vm_extern.h>
139#include <vm/vm_pageout.h>
140#include <vm/vm_pager.h>
141#include <vm/uma.h>
142
143#include <machine/powerpc.h>
144#include <machine/bat.h>
145#include <machine/frame.h>
146#include <machine/md_var.h>
147#include <machine/psl.h>
148#include <machine/pte.h>
149#include <machine/sr.h>
150
151#define	PMAP_DEBUG
152
153#define TODO	panic("%s: not implemented", __func__);
154
155#define	PMAP_LOCK(pm)
156#define	PMAP_UNLOCK(pm)
157
158#define	TLBIE(va)	__asm __volatile("tlbie %0" :: "r"(va))
159#define	TLBSYNC()	__asm __volatile("tlbsync");
160#define	SYNC()		__asm __volatile("sync");
161#define	EIEIO()		__asm __volatile("eieio");
162
163#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
164#define	VSID_TO_SR(vsid)	((vsid) & 0xf)
165#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
166
167#define	PVO_PTEGIDX_MASK	0x0007		/* which PTEG slot */
168#define	PVO_PTEGIDX_VALID	0x0008		/* slot is valid */
169#define	PVO_WIRED		0x0010		/* PVO entry is wired */
170#define	PVO_MANAGED		0x0020		/* PVO entry is managed */
171#define	PVO_EXECUTABLE		0x0040		/* PVO entry is executable */
172#define	PVO_BOOTSTRAP		0x0080		/* PVO entry allocated during
173						   bootstrap */
174#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
175#define	PVO_ISEXECUTABLE(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
176#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
177#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
178#define	PVO_PTEGIDX_CLR(pvo)	\
179	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
180#define	PVO_PTEGIDX_SET(pvo, i)	\
181	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
182
183#define	PMAP_PVO_CHECK(pvo)
184
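/*
 * Illustrative sketch, not part of the original file: how VSID_MAKE above
 * packs a segment register number and a 20-bit hash, and how VSID_TO_SR and
 * VSID_TO_HASH recover them.  The numeric values are made up.
 */
#if 0
	u_int	vsid;

	vsid = VSID_MAKE(0x3, 0x12345);		/* sr 3, hash 0x12345 */
	/* vsid == 0x3 | (0x12345 << 4) == 0x123453 */
	/* VSID_TO_SR(vsid)   == 0x3		(low 4 bits) */
	/* VSID_TO_HASH(vsid) == 0x12345	(next 20 bits) */
#endif
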
185struct ofw_map {
186	vm_offset_t	om_va;
187	vm_size_t	om_len;
188	vm_offset_t	om_pa;
189	u_int		om_mode;
190};
191
192int	pmap_bootstrapped = 0;
193
194/*
195 * Virtual and physical address of message buffer.
196 */
197struct		msgbuf *msgbufp;
198vm_offset_t	msgbuf_phys;
199
200/*
201 * Physical addresses of first and last available physical page.
202 */
203vm_offset_t avail_start;
204vm_offset_t avail_end;
205
206int pmap_pagedaemon_waken;
207
208/*
209 * Map of physical memory regions.
210 */
211vm_offset_t	phys_avail[128];
212u_int		phys_avail_count;
213static struct	mem_region *regions;
214static struct	mem_region *pregions;
215int		regions_sz, pregions_sz;
216static struct	ofw_map *translations;
217
218/*
219 * First and last available kernel virtual addresses.
220 */
221vm_offset_t virtual_avail;
222vm_offset_t virtual_end;
223vm_offset_t kernel_vm_end;
224
225/*
226 * Kernel pmap.
227 */
228struct pmap kernel_pmap_store;
229extern struct pmap ofw_pmap;
230
231/*
232 * PTEG data.
233 */
234static struct	pteg *pmap_pteg_table;
235u_int		pmap_pteg_count;
236u_int		pmap_pteg_mask;
237
238/*
239 * PVO data.
240 */
241struct	pvo_head *pmap_pvo_table;		/* pvo entries by pteg index */
242struct	pvo_head pmap_pvo_kunmanaged =
243    LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged);	/* list of unmanaged pages */
244struct	pvo_head pmap_pvo_unmanaged =
245    LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);	/* list of unmanaged pages */
246
247uma_zone_t	pmap_upvo_zone;	/* zone for pvo entries for unmanaged pages */
248uma_zone_t	pmap_mpvo_zone;	/* zone for pvo entries for managed pages */
249struct		vm_object pmap_upvo_zone_obj;
250struct		vm_object pmap_mpvo_zone_obj;
251static vm_object_t	pmap_pvo_obj;
252static u_int		pmap_pvo_count;
253
254#define	BPVO_POOL_SIZE	32768
255static struct	pvo_entry *pmap_bpvo_pool;
256static int	pmap_bpvo_pool_index = 0;
257
258#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
259static u_int	pmap_vsid_bitmap[NPMAPS / VSID_NBPW];
260
261static boolean_t pmap_initialized = FALSE;
262
263/*
264 * Statistics.
265 */
266u_int	pmap_pte_valid = 0;
267u_int	pmap_pte_overflow = 0;
268u_int	pmap_pte_replacements = 0;
269u_int	pmap_pvo_entries = 0;
270u_int	pmap_pvo_enter_calls = 0;
271u_int	pmap_pvo_remove_calls = 0;
272u_int	pmap_pte_spills = 0;
273SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_valid, CTLFLAG_RD, &pmap_pte_valid,
274    0, "");
275SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_overflow, CTLFLAG_RD,
276    &pmap_pte_overflow, 0, "");
277SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_replacements, CTLFLAG_RD,
278    &pmap_pte_replacements, 0, "");
279SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_entries, CTLFLAG_RD, &pmap_pvo_entries,
280    0, "");
281SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_enter_calls, CTLFLAG_RD,
282    &pmap_pvo_enter_calls, 0, "");
283SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_remove_calls, CTLFLAG_RD,
284    &pmap_pvo_remove_calls, 0, "");
285SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_spills, CTLFLAG_RD,
286    &pmap_pte_spills, 0, "");
287
288struct	pvo_entry *pmap_pvo_zeropage;
289
290vm_offset_t	pmap_rkva_start = VM_MIN_KERNEL_ADDRESS;
291u_int		pmap_rkva_count = 4;
292
293/*
294 * Allocate physical memory for use in pmap_bootstrap.
295 */
296static vm_offset_t	pmap_bootstrap_alloc(vm_size_t, u_int);
297
298/*
299 * PTE calls.
300 */
301static int		pmap_pte_insert(u_int, struct pte *);
302
303/*
304 * PVO calls.
305 */
306static int	pmap_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
307		    vm_offset_t, vm_offset_t, u_int, int);
308static void	pmap_pvo_remove(struct pvo_entry *, int);
309static struct	pvo_entry *pmap_pvo_find_va(pmap_t, vm_offset_t, int *);
310static struct	pte *pmap_pvo_to_pte(const struct pvo_entry *, int);
311
312/*
313 * Utility routines.
314 */
315static void *		pmap_pvo_allocf(uma_zone_t, int, u_int8_t *, int);
316static struct		pvo_entry *pmap_rkva_alloc(void);
317static void		pmap_pa_map(struct pvo_entry *, vm_offset_t,
318			    struct pte *, int *);
319static void		pmap_pa_unmap(struct pvo_entry *, struct pte *, int *);
320static void		pmap_syncicache(vm_offset_t, vm_size_t);
321static boolean_t	pmap_query_bit(vm_page_t, int);
322static u_int		pmap_clear_bit(vm_page_t, int, int *);
323static void		tlbia(void);
324
325static __inline int
326va_to_sr(u_int *sr, vm_offset_t va)
327{
328	return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
329}
330
331static __inline u_int
332va_to_pteg(u_int sr, vm_offset_t addr)
333{
334	u_int hash;
335
336	hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
337	    ADDR_PIDX_SHFT);
338	return (hash & pmap_pteg_mask);
339}
340
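/*
 * Illustrative sketch, not part of the original file: the primary PTEG index
 * computed by va_to_pteg() is the 24-bit VSID xor'd with the page index of
 * the effective address, folded into the table by pmap_pteg_mask.  The
 * values below are made up and assume ADDR_PIDX/ADDR_PIDX_SHFT extract the
 * 16-bit page index (va >> 12, below the segment bits).
 */
#if 0
	u_int		sr, hash, ptegidx;
	vm_offset_t	va;

	sr = 0x00123456;		/* VSID taken from the segment register */
	va = 0x30004000;		/* EA: segment 3, page index 0x4 */
	hash = (sr & SR_VSID_MASK) ^ (((u_int)va & ADDR_PIDX) >> ADDR_PIDX_SHFT);
	/* hash == 0x00123456 ^ 0x4 == 0x00123452 */
	ptegidx = hash & pmap_pteg_mask;	/* e.g. & 0x3ff for 1024 PTEGs */
#endif
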
341static __inline struct pvo_head *
342pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p)
343{
344	struct	vm_page *pg;
345
346	pg = PHYS_TO_VM_PAGE(pa);
347
348	if (pg_p != NULL)
349		*pg_p = pg;
350
351	if (pg == NULL)
352		return (&pmap_pvo_unmanaged);
353
354	return (&pg->md.mdpg_pvoh);
355}
356
357static __inline struct pvo_head *
358vm_page_to_pvoh(vm_page_t m)
359{
360
361	return (&m->md.mdpg_pvoh);
362}
363
364static __inline void
365pmap_attr_clear(vm_page_t m, int ptebit)
366{
367
368	m->md.mdpg_attrs &= ~ptebit;
369}
370
371static __inline int
372pmap_attr_fetch(vm_page_t m)
373{
374
375	return (m->md.mdpg_attrs);
376}
377
378static __inline void
379pmap_attr_save(vm_page_t m, int ptebit)
380{
381
382	m->md.mdpg_attrs |= ptebit;
383}
384
385static __inline int
386pmap_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
387{
388	if (pt->pte_hi == pvo_pt->pte_hi)
389		return (1);
390
391	return (0);
392}
393
394static __inline int
395pmap_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
396{
397	return (pt->pte_hi & ~PTE_VALID) ==
398	    (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
399	    ((va >> ADDR_API_SHFT) & PTE_API) | which);
400}
401
402static __inline void
403pmap_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
404{
405	/*
406	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
407	 * set when the real pte is set in memory.
408	 *
409	 * Note: Don't set the valid bit for correct operation of tlb update.
410	 */
411	pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
412	    (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
413	pt->pte_lo = pte_lo;
414}
415
416static __inline void
417pmap_pte_synch(struct pte *pt, struct pte *pvo_pt)
418{
419
420	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
421}
422
423static __inline void
424pmap_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
425{
426
427	/*
428	 * As shown in Section 7.6.3.2.3
429	 */
430	pt->pte_lo &= ~ptebit;
431	TLBIE(va);
432	EIEIO();
433	TLBSYNC();
434	SYNC();
435}
436
437static __inline void
438pmap_pte_set(struct pte *pt, struct pte *pvo_pt)
439{
440
441	pvo_pt->pte_hi |= PTE_VALID;
442
443	/*
444	 * Update the PTE as defined in section 7.6.3.1.
445	 * Note that the REF/CHG bits are from pvo_pt and thus should have
446	 * been saved so this routine can restore them (if desired).
447	 */
448	pt->pte_lo = pvo_pt->pte_lo;
449	EIEIO();
450	pt->pte_hi = pvo_pt->pte_hi;
451	SYNC();
452	pmap_pte_valid++;
453}
454
455static __inline void
456pmap_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
457{
458
459	pvo_pt->pte_hi &= ~PTE_VALID;
460
461	/*
462	 * Force the ref & chg bits back into the PTEs.
463	 */
464	SYNC();
465
466	/*
467	 * Invalidate the pte.
468	 */
469	pt->pte_hi &= ~PTE_VALID;
470
471	SYNC();
472	TLBIE(va);
473	EIEIO();
474	TLBSYNC();
475	SYNC();
476
477	/*
478	 * Save the ref & chg bits.
479	 */
480	pmap_pte_synch(pt, pvo_pt);
481	pmap_pte_valid--;
482}
483
484static __inline void
485pmap_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
486{
487
488	/*
489	 * Invalidate the PTE
490	 */
491	pmap_pte_unset(pt, pvo_pt, va);
492	pmap_pte_set(pt, pvo_pt);
493}
494
495/*
496 * Quick sort callout for comparing memory regions.
497 */
498static int	mr_cmp(const void *a, const void *b);
499static int	om_cmp(const void *a, const void *b);
500
501static int
502mr_cmp(const void *a, const void *b)
503{
504	const struct	mem_region *regiona;
505	const struct	mem_region *regionb;
506
507	regiona = a;
508	regionb = b;
509	if (regiona->mr_start < regionb->mr_start)
510		return (-1);
511	else if (regiona->mr_start > regionb->mr_start)
512		return (1);
513	else
514		return (0);
515}
516
517static int
518om_cmp(const void *a, const void *b)
519{
520	const struct	ofw_map *mapa;
521	const struct	ofw_map *mapb;
522
523	mapa = a;
524	mapb = b;
525	if (mapa->om_pa < mapb->om_pa)
526		return (-1);
527	else if (mapa->om_pa > mapb->om_pa)
528		return (1);
529	else
530		return (0);
531}
532
533void
534pmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
535{
536	ihandle_t	mmui;
537	phandle_t	chosen, mmu;
538	int		sz;
539	int		i, j;
540	int		ofw_mappings;
541	vm_size_t	size, physsz;
542	vm_offset_t	pa, va, off;
543	u_int		batl, batu;
544
545        /*
546         * Set up BAT0 to map the lowest 256 MB area
547         */
548        battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
549        battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
550
551        /*
552         * Map PCI memory space.
553         */
554        battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
555        battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
556
557        battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
558        battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);
559
560        battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW);
561        battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs);
562
563        battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW);
564        battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs);
565
566        /*
567         * Map obio devices.
568         */
569        battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW);
570        battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs);
571
572	/*
573	 * Use an IBAT and a DBAT to map the bottom segment of memory
574	 * where we are.
575	 */
576	batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
577	batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
578	__asm ("mtibatu 0,%0; mtibatl 0,%1; mtdbatu 0,%0; mtdbatl 0,%1"
579	    :: "r"(batu), "r"(batl));
580
581#if 0
582	/* map frame buffer */
583	batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);
584	batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
585	__asm ("mtdbatu 1,%0; mtdbatl 1,%1"
586	    :: "r"(batu), "r"(batl));
587#endif
588
589#if 1
590	/* map pci space */
591	batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
592	batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
593	__asm ("mtdbatu 1,%0; mtdbatl 1,%1"
594	    :: "r"(batu), "r"(batl));
595#endif
596
597	/*
598	 * Set the start and end of kva.
599	 */
600	virtual_avail = VM_MIN_KERNEL_ADDRESS;
601	virtual_end = VM_MAX_KERNEL_ADDRESS;
602
603	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
604	CTR0(KTR_PMAP, "pmap_bootstrap: physical memory");
605
606	qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp);
607	for (i = 0; i < pregions_sz; i++) {
608		vm_offset_t pa;
609		vm_offset_t end;
610
611		CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)",
612			pregions[i].mr_start,
613			pregions[i].mr_start + pregions[i].mr_size,
614			pregions[i].mr_size);
615		/*
616		 * Install entries into the BAT table to allow all
617		 * of physmem to be covered by on-demand BAT entries.
618		 * The loop will sometimes set the same battable element
619		 * twice, but that's fine since they won't be used for
620		 * a while yet.
621		 */
622		pa = pregions[i].mr_start & 0xf0000000;
623		end = pregions[i].mr_start + pregions[i].mr_size;
624		do {
625                        u_int n = pa >> ADDR_SR_SHFT;
626
627			battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW);
628			battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs);
629			pa += SEGMENT_LENGTH;
630		} while (pa < end);
631	}
632
633	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
634		panic("pmap_bootstrap: phys_avail too small");
635	qsort(regions, regions_sz, sizeof(*regions), mr_cmp);
636	phys_avail_count = 0;
637	physsz = 0;
638	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
639		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
640		    regions[i].mr_start + regions[i].mr_size,
641		    regions[i].mr_size);
642		phys_avail[j] = regions[i].mr_start;
643		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
644		phys_avail_count++;
645		physsz += regions[i].mr_size;
646	}
647	physmem = btoc(physsz);
648
649	/*
650	 * Allocate PTEG table.
651	 */
652#ifdef PTEGCOUNT
653	pmap_pteg_count = PTEGCOUNT;
654#else
655	pmap_pteg_count = 0x1000;
656
657	while (pmap_pteg_count < physmem)
658		pmap_pteg_count <<= 1;
659
660	pmap_pteg_count >>= 1;
661#endif /* PTEGCOUNT */
662
663	size = pmap_pteg_count * sizeof(struct pteg);
664	CTR2(KTR_PMAP, "pmap_bootstrap: %d PTEGs, %d bytes", pmap_pteg_count,
665	    size);
666	pmap_pteg_table = (struct pteg *)pmap_bootstrap_alloc(size, size);
667	CTR1(KTR_PMAP, "pmap_bootstrap: PTEG table at %p", pmap_pteg_table);
668	bzero((void *)pmap_pteg_table, pmap_pteg_count * sizeof(struct pteg));
669	pmap_pteg_mask = pmap_pteg_count - 1;
670
671	/*
672	 * Allocate pv/overflow lists.
673	 */
674	size = sizeof(struct pvo_head) * pmap_pteg_count;
675	pmap_pvo_table = (struct pvo_head *)pmap_bootstrap_alloc(size,
676	    PAGE_SIZE);
677	CTR1(KTR_PMAP, "pmap_bootstrap: PVO table at %p", pmap_pvo_table);
678	for (i = 0; i < pmap_pteg_count; i++)
679		LIST_INIT(&pmap_pvo_table[i]);
680
681	/*
682	 * Allocate the message buffer.
683	 */
684	msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE, 0);
685
686	/*
687	 * Initialise the unmanaged pvo pool.
688	 */
689	pmap_bpvo_pool = (struct pvo_entry *)pmap_bootstrap_alloc(
690		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
691	pmap_bpvo_pool_index = 0;
692
693	/*
694	 * Make sure kernel vsid is allocated as well as VSID 0.
695	 */
696	pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
697		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
698	pmap_vsid_bitmap[0] |= 1;
699
700	/*
701	 * Set up the OpenFirmware pmap and add its mappings.
702	 */
703	pmap_pinit(&ofw_pmap);
704	ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
705	if ((chosen = OF_finddevice("/chosen")) == -1)
706		panic("pmap_bootstrap: can't find /chosen");
707	OF_getprop(chosen, "mmu", &mmui, 4);
708	if ((mmu = OF_instance_to_package(mmui)) == -1)
709		panic("pmap_bootstrap: can't get mmu package");
710	if ((sz = OF_getproplen(mmu, "translations")) == -1)
711		panic("pmap_bootstrap: can't get ofw translation count");
712	translations = NULL;
713	for (i = 0; phys_avail[i + 2] != 0; i += 2) {
714		if (phys_avail[i + 1] >= sz)
715			translations = (struct ofw_map *)phys_avail[i];
716	}
717	if (translations == NULL)
718		panic("pmap_bootstrap: no space to copy translations");
719	bzero(translations, sz);
720	if (OF_getprop(mmu, "translations", translations, sz) == -1)
721		panic("pmap_bootstrap: can't get ofw translations");
722	CTR0(KTR_PMAP, "pmap_bootstrap: translations");
723	sz /= sizeof(*translations);
724	qsort(translations, sz, sizeof (*translations), om_cmp);
725	for (i = 0, ofw_mappings = 0; i < sz; i++) {
726		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
727		    translations[i].om_pa, translations[i].om_va,
728		    translations[i].om_len);
729
730		/*
731		 * If the mapping is 1:1, let the RAM and device on-demand
732		 * BAT tables take care of the translation.
733		 */
734		if (translations[i].om_va == translations[i].om_pa)
735			continue;
736
737		/* Enter the pages */
738		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
739			struct	vm_page m;
740
741			m.phys_addr = translations[i].om_pa + off;
742			pmap_enter(&ofw_pmap, translations[i].om_va + off, &m,
743				   VM_PROT_ALL, 1);
744			ofw_mappings++;
745		}
746	}
747#ifdef SMP
748	TLBSYNC();
749#endif
750
751	/*
752	 * Initialize the kernel pmap (which is statically allocated).
753	 */
754	for (i = 0; i < 16; i++) {
755		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
756	}
757	kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
758	kernel_pmap->pm_active = ~0;
759
760	/*
761	 * Allocate a kernel stack with a guard page for thread0 and map it
762	 * into the kernel page map.
763	 */
764	pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0);
765	kstack0_phys = pa;
766	kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE);
767	CTR2(KTR_PMAP, "pmap_bootstrap: kstack0 at %#x (%#x)", kstack0_phys,
768	    kstack0);
769	virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE;
770	for (i = 0; i < KSTACK_PAGES; i++) {
771		pa = kstack0_phys + i * PAGE_SIZE;
772		va = kstack0 + i * PAGE_SIZE;
773		pmap_kenter(va, pa);
774		TLBIE(va);
775	}
776
777	/*
778	 * Calculate the first and last available physical addresses.
779	 */
780	avail_start = phys_avail[0];
781	for (i = 0; phys_avail[i + 2] != 0; i += 2)
782		;
783	avail_end = phys_avail[i + 1];
784	Maxmem = powerpc_btop(avail_end);
785
786	/*
787	 * Allocate virtual address space for the message buffer.
788	 */
789	msgbufp = (struct msgbuf *)virtual_avail;
790	virtual_avail += round_page(MSGBUF_SIZE);
791
792	/*
793	 * Initialize hardware.
794	 */
795	for (i = 0; i < 16; i++) {
796		mtsrin(i << ADDR_SR_SHFT, EMPTY_SEGMENT);
797	}
798	__asm __volatile ("mtsr %0,%1"
799	    :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
800	__asm __volatile ("sync; mtsdr1 %0; isync"
801	    :: "r"((u_int)pmap_pteg_table | (pmap_pteg_mask >> 10)));
802	tlbia();
803
804	pmap_bootstrapped++;
805}
806
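/*
 * Illustrative sketch, not part of the original file: how the PTEG count
 * chosen above relates to the SDR1 value programmed at the end of
 * pmap_bootstrap().  The numbers assume 128 MB of RAM (physmem == 0x8000
 * pages) and a made-up, size-aligned table address of 0x00400000.
 */
#if 0
	u_int	sdr1;

	/*
	 * The sizing loop doubles 0x1000 until it reaches physmem
	 * (0x1000 -> 0x2000 -> 0x4000 -> 0x8000) and then halves once,
	 * giving 0x4000 PTEGs: a 1 MB table at 64 bytes per PTEG.
	 */
	pmap_pteg_count = 0x4000;
	pmap_pteg_mask = pmap_pteg_count - 1;		/* 0x3fff */

	/*
	 * SDR1 is the table origin or'd with HTABMASK, the mask for the
	 * hash bits above the low 10 that every table consumes:
	 * 0x00400000 | (0x3fff >> 10) == 0x0040000f.
	 */
	sdr1 = 0x00400000 | (pmap_pteg_mask >> 10);
#endif
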
807/*
808 * Activate a user pmap.  The pmap must be activated before its address
809 * space can be accessed in any way.
810 */
811void
812pmap_activate(struct thread *td)
813{
814	pmap_t	pm, pmr;
815
816	/*
817	 * Load all the data we need up front to encourage the compiler to
818	 * not issue any loads while we have interrupts disabled below.
819	 */
820	pm = &td->td_proc->p_vmspace->vm_pmap;
821
822	if ((pmr = (pmap_t)pmap_kextract((vm_offset_t)pm)) == NULL)
823		pmr = pm;
824
825	pm->pm_active |= PCPU_GET(cpumask);
826	PCPU_SET(curpmap, pmr);
827}
828
829void
830pmap_deactivate(struct thread *td)
831{
832	pmap_t	pm;
833
834	pm = &td->td_proc->p_vmspace->vm_pmap;
835	pm->pm_active &= ~(PCPU_GET(cpumask));
836	PCPU_SET(curpmap, NULL);
837}
838
839vm_offset_t
840pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size)
841{
842
843	return (va);
844}
845
846void
847pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired)
848{
849	struct	pvo_entry *pvo;
850
851	pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
852
853	if (pvo != NULL) {
854		if (wired) {
855			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
856				pm->pm_stats.wired_count++;
857			pvo->pvo_vaddr |= PVO_WIRED;
858		} else {
859			if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
860				pm->pm_stats.wired_count--;
861			pvo->pvo_vaddr &= ~PVO_WIRED;
862		}
863	}
864}
865
866void
867pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
868	  vm_size_t len, vm_offset_t src_addr)
869{
870
871	/*
872	 * This is not needed as it's mainly an optimisation.
873	 * It may be worth implementing later, though.
874	 */
875}
876
877void
878pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
879{
880	vm_offset_t	dst;
881	vm_offset_t	src;
882
883	dst = VM_PAGE_TO_PHYS(mdst);
884	src = VM_PAGE_TO_PHYS(msrc);
885
886	kcopy((void *)src, (void *)dst, PAGE_SIZE);
887}
888
889/*
890 * Zero a page of physical memory by temporarily mapping it into the tlb.
891 */
892void
893pmap_zero_page(vm_page_t m)
894{
895	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
896	caddr_t va;
897
898	if (pa < SEGMENT_LENGTH) {
899		va = (caddr_t) pa;
900	} else if (pmap_initialized) {
901		if (pmap_pvo_zeropage == NULL)
902			pmap_pvo_zeropage = pmap_rkva_alloc();
903		pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL);
904		va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage);
905	} else {
906		panic("pmap_zero_page: can't zero pa %#x", pa);
907	}
908
909	bzero(va, PAGE_SIZE);
910
911	if (pa >= SEGMENT_LENGTH)
912		pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL);
913}
914
915void
916pmap_zero_page_area(vm_page_t m, int off, int size)
917{
918	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
919	caddr_t va;
920
921	if (pa < SEGMENT_LENGTH) {
922		va = (caddr_t) pa;
923	} else if (pmap_initialized) {
924		if (pmap_pvo_zeropage == NULL)
925			pmap_pvo_zeropage = pmap_rkva_alloc();
926		pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL);
927		va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage);
928	} else {
929		panic("pmap_zero_page_area: can't zero pa %#x", pa);
930	}
931
932	bzero(va + off, size);
933
934	if (pa >= SEGMENT_LENGTH)
935		pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL);
936}
937
938void
939pmap_zero_page_idle(vm_page_t m)
940{
941
942	/* XXX this is called outside of Giant, is pmap_zero_page safe? */
943	/* XXX maybe have a dedicated mapping for this to avoid the problem? */
944	mtx_lock(&Giant);
945	pmap_zero_page(m);
946	mtx_unlock(&Giant);
947}
948
949/*
950 * Map the given physical page at the specified virtual address in the
951 * target pmap with the protection requested.  If specified the page
952 * will be wired down.
953 */
954void
955pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
956	   boolean_t wired)
957{
958	struct		pvo_head *pvo_head;
959	uma_zone_t	zone;
960	vm_page_t	pg;
961	u_int		pte_lo, pvo_flags, was_exec, i;
962	int		error;
963
964	if (!pmap_initialized) {
965		pvo_head = &pmap_pvo_kunmanaged;
966		zone = pmap_upvo_zone;
967		pvo_flags = 0;
968		pg = NULL;
969		was_exec = PTE_EXEC;
970	} else {
971		pvo_head = vm_page_to_pvoh(m);
972		pg = m;
973		zone = pmap_mpvo_zone;
974		pvo_flags = PVO_MANAGED;
975		was_exec = 0;
976	}
977
978	/*
979	 * If this is a managed page, and it's the first reference to the page,
980	 * clear the page's exec attribute.  Otherwise fetch its current value.
981	 */
982	if (pg != NULL) {
983		if (LIST_EMPTY(pvo_head)) {
984			pmap_attr_clear(pg, PTE_EXEC);
985		} else {
986			was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
987		}
988	}
989
990
991	/*
992	 * Assume the page is cache inhibited and access is guarded unless
993	 * it's in our available memory array.
994	 */
995	pte_lo = PTE_I | PTE_G;
996	for (i = 0; i < pregions_sz; i++) {
997		if ((VM_PAGE_TO_PHYS(m) >= pregions[i].mr_start) &&
998		    (VM_PAGE_TO_PHYS(m) <
999			(pregions[i].mr_start + pregions[i].mr_size))) {
1000			pte_lo &= ~(PTE_I | PTE_G);
1001			break;
1002		}
1003	}
1004
1005	if (prot & VM_PROT_WRITE)
1006		pte_lo |= PTE_BW;
1007	else
1008		pte_lo |= PTE_BR;
1009
1010	pvo_flags |= (prot & VM_PROT_EXECUTE);
1011
1012	if (wired)
1013		pvo_flags |= PVO_WIRED;
1014
1015	error = pmap_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
1016	    pte_lo, pvo_flags);
1017
1018	/*
1019	 * Flush the real page from the instruction cache if this page is
1020	 * mapped executable and cacheable and was not previously mapped (or
1021	 * was not mapped executable).
1022	 */
1023	if (error == 0 && (pvo_flags & PVO_EXECUTABLE) &&
1024	    (pte_lo & PTE_I) == 0 && was_exec == 0) {
1025		/*
1026		 * Flush the real memory from the cache.
1027		 */
1028		pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
1029		if (pg != NULL)
1030			pmap_attr_save(pg, PTE_EXEC);
1031	}
1032
1033	/* XXX syncicache always until problems are sorted */
1034	pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
1035}
1036
1037vm_offset_t
1038pmap_extract(pmap_t pm, vm_offset_t va)
1039{
1040	struct	pvo_entry *pvo;
1041
1042	pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
1043
1044	if (pvo != NULL) {
1045		return ((pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF));
1046	}
1047
1048	return (0);
1049}
1050
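/*
 * Illustrative sketch, not part of the original file: the physical address
 * pmap_extract() returns is the real page number cached in the PVO's PTE
 * combined with the page offset of the virtual address.  Values are made up
 * and assume PTE_RPGN masks the upper 20 bits and ADDR_POFF the low 12.
 */
#if 0
	vm_offset_t	va, pa;
	u_int		pte_lo;

	pte_lo = 0x01234043;		/* RPN 0x01234000 plus WIMG/PP bits */
	va = 0xd0000abc;		/* page offset 0xabc */
	pa = (pte_lo & PTE_RPGN) | (va & ADDR_POFF);
	/* pa == 0x01234000 | 0xabc == 0x01234abc */
#endif
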
1051/*
1052 * Grow the number of kernel page table entries.  Unneeded.
1053 */
1054void
1055pmap_growkernel(vm_offset_t addr)
1056{
1057}
1058
1059void
1060pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
1061{
1062
1063	CTR0(KTR_PMAP, "pmap_init");
1064
1065	pmap_pvo_obj = vm_object_allocate(OBJT_PHYS, 16);
1066	pmap_pvo_count = 0;
1067	pmap_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
1068	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
1069	uma_zone_set_allocf(pmap_upvo_zone, pmap_pvo_allocf);
1070	pmap_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
1071	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
1072	uma_zone_set_allocf(pmap_mpvo_zone, pmap_pvo_allocf);
1073	pmap_initialized = TRUE;
1074}
1075
1076void
1077pmap_init2(void)
1078{
1079
1080	CTR0(KTR_PMAP, "pmap_init2");
1081}
1082
1083boolean_t
1084pmap_is_modified(vm_page_t m)
1085{
1086
1087	if ((m->flags & (PG_FICTITIOUS |PG_UNMANAGED)) != 0)
1088		return (FALSE);
1089
1090	return (pmap_query_bit(m, PTE_CHG));
1091}
1092
1093void
1094pmap_clear_reference(vm_page_t m)
1095{
1096
1097	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1098		return;
1099	pmap_clear_bit(m, PTE_REF, NULL);
1100}
1101
1102void
1103pmap_clear_modify(vm_page_t m)
1104{
1105
1106	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1107		return;
1108	pmap_clear_bit(m, PTE_CHG, NULL);
1109}
1110
1111/*
1112 *	pmap_ts_referenced:
1113 *
1114 *	Return a count of reference bits for a page, clearing those bits.
1115 *	It is not necessary for every reference bit to be cleared, but it
1116 *	is necessary that 0 only be returned when there are truly no
1117 *	reference bits set.
1118 *
1119 *	XXX: The exact number of bits to check and clear is a matter that
1120 *	should be tested and standardized at some point in the future for
1121 *	optimal aging of shared pages.
1122 */
1123int
1124pmap_ts_referenced(vm_page_t m)
1125{
1126	int count;
1127
1128	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1129		return (0);
1130
1131	count = pmap_clear_bit(m, PTE_REF, NULL);
1132
1133	return (count);
1134}
1135
1136/*
1137 * Map a wired page into kernel virtual address space.
1138 */
1139void
1140pmap_kenter(vm_offset_t va, vm_offset_t pa)
1141{
1142	u_int		pte_lo;
1143	int		error;
1144	int		i;
1145
1146#if 0
1147	if (va < VM_MIN_KERNEL_ADDRESS)
1148		panic("pmap_kenter: attempt to enter non-kernel address %#x",
1149		    va);
1150#endif
1151
1152	pte_lo = PTE_I | PTE_G;
1153	for (i = 0; i < pregions_sz; i++) {
1154		if ((pa >= pregions[i].mr_start) &&
1155		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
1156			pte_lo &= ~(PTE_I | PTE_G);
1157			break;
1158		}
1159	}
1160
1161	error = pmap_pvo_enter(kernel_pmap, pmap_upvo_zone,
1162	    &pmap_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);
1163
1164	if (error != 0 && error != ENOENT)
1165		panic("pmap_kenter: failed to enter va %#x pa %#x: %d", va,
1166		    pa, error);
1167
1168	/*
1169	 * Flush the real memory from the instruction cache.
1170	 */
1171	if ((pte_lo & (PTE_I | PTE_G)) == 0) {
1172		pmap_syncicache(pa, PAGE_SIZE);
1173	}
1174}
1175
1176/*
1177 * Extract the physical page address associated with the given kernel virtual
1178 * address.
1179 */
1180vm_offset_t
1181pmap_kextract(vm_offset_t va)
1182{
1183	struct		pvo_entry *pvo;
1184
1185	pvo = pmap_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
1186	if (pvo == NULL) {
1187		return (0);
1188	}
1189
1190	return ((pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF));
1191}
1192
1193/*
1194 * Remove a wired page from kernel virtual address space.
1195 */
1196void
1197pmap_kremove(vm_offset_t va)
1198{
1199
1200	pmap_remove(kernel_pmap, va, va + PAGE_SIZE);
1201}
1202
1203/*
1204 * Map a range of physical addresses into kernel virtual address space.
1205 *
1206 * The value passed in *virt is a suggested virtual address for the mapping.
1207 * Architectures which can support a direct-mapped physical to virtual region
1208 * can return the appropriate address within that region, leaving '*virt'
1209 * unchanged.  We cannot and therefore do not; *virt is updated with the
1210 * first usable address after the mapped region.
1211 */
1212vm_offset_t
1213pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot)
1214{
1215	vm_offset_t	sva, va;
1216
1217	sva = *virt;
1218	va = sva;
1219	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
1220		pmap_kenter(va, pa_start);
1221	*virt = va;
1222	return (sva);
1223}
1224
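/*
 * Illustrative usage sketch, not part of the original file: mapping a
 * three-page physical range with pmap_map().  The addresses are made up;
 * note how *virt is advanced past the mapping while the original start is
 * returned.
 */
#if 0
	vm_offset_t	va, sva;

	va = virtual_avail;			/* say 0xe0000000 */
	sva = pmap_map(&va, 0x00100000, 0x00103000,
	    VM_PROT_READ | VM_PROT_WRITE);
	/* sva == 0xe0000000 (start of the mapping), va == 0xe0003000 */
	virtual_avail = va;
#endif
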
1225int
1226pmap_mincore(pmap_t pmap, vm_offset_t addr)
1227{
1228	TODO;
1229	return (0);
1230}
1231
1232void
1233pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
1234		    vm_pindex_t pindex, vm_size_t size, int limit)
1235{
1236
1237	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1238	    ("pmap_object_init_pt: non current pmap"));
1239	/* XXX */
1240}
1241
1242/*
1243 * Lower the permission for all mappings to a given page.
1244 */
1245void
1246pmap_page_protect(vm_page_t m, vm_prot_t prot)
1247{
1248	struct	pvo_head *pvo_head;
1249	struct	pvo_entry *pvo, *next_pvo;
1250	struct	pte *pt;
1251
1252	/*
1253	 * Since the routine only downgrades protection, if the
1254	 * maximal protection is desired, there isn't any change
1255	 * to be made.
1256	 */
1257	if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) ==
1258	    (VM_PROT_READ|VM_PROT_WRITE))
1259		return;
1260
1261	pvo_head = vm_page_to_pvoh(m);
1262	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
1263		next_pvo = LIST_NEXT(pvo, pvo_vlink);
1264		PMAP_PVO_CHECK(pvo);	/* sanity check */
1265
1266		/*
1267		 * Downgrading to no mapping at all, we just remove the entry.
1268		 */
1269		if ((prot & VM_PROT_READ) == 0) {
1270			pmap_pvo_remove(pvo, -1);
1271			continue;
1272		}
1273
1274		/*
1275		 * If EXEC permission is being revoked, just clear the flag
1276		 * in the PVO.
1277		 */
1278		if ((prot & VM_PROT_EXECUTE) == 0)
1279			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
1280
1281		/*
1282		 * If this entry is already RO, don't diddle with the page
1283		 * table.
1284		 */
1285		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
1286			PMAP_PVO_CHECK(pvo);
1287			continue;
1288		}
1289
1290		/*
1291		 * Grab the PTE before we diddle the bits so pvo_to_pte can
1292		 * verify the pte contents are as expected.
1293		 */
1294		pt = pmap_pvo_to_pte(pvo, -1);
1295		pvo->pvo_pte.pte_lo &= ~PTE_PP;
1296		pvo->pvo_pte.pte_lo |= PTE_BR;
1297		if (pt != NULL)
1298			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1299		PMAP_PVO_CHECK(pvo);	/* sanity check */
1300	}
1301}
1302
1303/*
1304 * Returns true if the pmap's pv is one of the first
1305 * 16 pvs linked to from this page.  This count may
1306 * be changed upwards or downwards in the future; it
1307 * is only necessary that true be returned for a small
1308 * subset of pmaps for proper page aging.
1309 */
1310boolean_t
1311pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
1312{
1313        int loops;
1314	struct pvo_entry *pvo;
1315
1316        if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
1317                return FALSE;
1318
1319	loops = 0;
1320	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1321		if (pvo->pvo_pmap == pmap)
1322			return (TRUE);
1323		if (++loops >= 16)
1324			break;
1325	}
1326
1327	return (FALSE);
1328}
1329
1330static u_int	pmap_vsidcontext;
1331
1332void
1333pmap_pinit(pmap_t pmap)
1334{
1335	int	i, mask;
1336	u_int	entropy;
1337
1338	entropy = 0;
1339	__asm __volatile("mftb %0" : "=r"(entropy));
1340
1341	/*
1342	 * Allocate some segment registers for this pmap.
1343	 */
1344	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
1345		u_int	hash, n;
1346
1347		/*
1348		 * Create a new value by multiplying by a prime and adding in
1349		 * entropy from the timebase register.  This is to make the
1350		 * VSID more random so that the PT hash function collides
1351		 * less often.  (Note that the prime causes gcc to do shifts
1352		 * instead of a multiply.)
1353		 */
1354		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
1355		hash = pmap_vsidcontext & (NPMAPS - 1);
1356		if (hash == 0)		/* 0 is special, avoid it */
1357			continue;
1358		n = hash >> 5;
1359		mask = 1 << (hash & (VSID_NBPW - 1));
1360		hash = (pmap_vsidcontext & 0xfffff);
1361		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
1362			/* anything free in this bucket? */
1363			if (pmap_vsid_bitmap[n] == 0xffffffff) {
1364				entropy = (pmap_vsidcontext >> 20);
1365				continue;
1366			}
1367			i = ffs(~pmap_vsid_bitmap[n]) - 1;
1368			mask = 1 << i;
1369			hash &= 0xfffff & ~(VSID_NBPW - 1);
1370			hash |= i;
1371		}
1372		pmap_vsid_bitmap[n] |= mask;
1373		for (i = 0; i < 16; i++)
1374			pmap->pm_sr[i] = VSID_MAKE(i, hash);
1375		return;
1376	}
1377
1378	panic("pmap_pinit: out of segments");
1379}
1380
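/*
 * Illustrative sketch, not part of the original file: how pmap_pinit()
 * resolves a collision in pmap_vsid_bitmap[] by claiming the first free
 * slot in the bucket with ffs().  The bucket index and contents are made
 * up; the claimed bit replaces the low bits of the VSID hash.
 */
#if 0
	u_int	n, mask;
	int	i;

	n = 5;					/* bitmap word picked by the hash */
	pmap_vsid_bitmap[n] = 0x0000000f;	/* slots 0-3 already taken */
	i = ffs(~pmap_vsid_bitmap[n]) - 1;	/* first clear bit: 4 */
	mask = 1 << i;				/* 0x00000010 */
	pmap_vsid_bitmap[n] |= mask;		/* claim slot 4 of word 5 */
#endif
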
1381/*
1382 * Initialize the pmap associated with process 0.
1383 */
1384void
1385pmap_pinit0(pmap_t pm)
1386{
1387
1388	pmap_pinit(pm);
1389	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
1390}
1391
1392void
1393pmap_pinit2(pmap_t pmap)
1394{
1395	/* XXX: Remove this stub when no longer called */
1396}
1397
1398void
1399pmap_prefault(pmap_t pm, vm_offset_t va, vm_map_entry_t entry)
1400{
1401	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1402	    ("pmap_prefault: non current pmap"));
1403	/* XXX */
1404}
1405
1406/*
1407 * Set the physical protection on the specified range of this map as requested.
1408 */
1409void
1410pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
1411{
1412	struct	pvo_entry *pvo;
1413	struct	pte *pt;
1414	int	pteidx;
1415
1416	CTR4(KTR_PMAP, "pmap_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva,
1417	    eva, prot);
1418
1419
1420	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1421	    ("pmap_protect: non current pmap"));
1422
1423	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1424		pmap_remove(pm, sva, eva);
1425		return;
1426	}
1427
1428	for (; sva < eva; sva += PAGE_SIZE) {
1429		pvo = pmap_pvo_find_va(pm, sva, &pteidx);
1430		if (pvo == NULL)
1431			continue;
1432
1433		if ((prot & VM_PROT_EXECUTE) == 0)
1434			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
1435
1436		/*
1437		 * Grab the PTE pointer before we diddle with the cached PTE
1438		 * copy.
1439		 */
1440		pt = pmap_pvo_to_pte(pvo, pteidx);
1441		/*
1442		 * Change the protection of the page.
1443		 */
1444		pvo->pvo_pte.pte_lo &= ~PTE_PP;
1445		pvo->pvo_pte.pte_lo |= PTE_BR;
1446
1447		/*
1448		 * If the PVO is in the page table, update that pte as well.
1449		 */
1450		if (pt != NULL)
1451			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1452	}
1453}
1454
1455vm_offset_t
1456pmap_phys_address(int ppn)
1457{
1458	TODO;
1459	return (0);
1460}
1461
1462/*
1463 * Map a list of wired pages into kernel virtual address space.  This is
1464 * intended for temporary mappings which do not need page modification or
1465 * references recorded.  Existing mappings in the region are overwritten.
1466 */
1467void
1468pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
1469{
1470	vm_offset_t va;
1471
1472	va = sva;
1473	while (count-- > 0) {
1474		pmap_kenter(va, VM_PAGE_TO_PHYS(*m));
1475		va += PAGE_SIZE;
1476		m++;
1477	}
1478}
1479
1480/*
1481 * Remove page mappings from kernel virtual address space.  Intended for
1482 * temporary mappings entered by pmap_qenter.
1483 */
1484void
1485pmap_qremove(vm_offset_t sva, int count)
1486{
1487	vm_offset_t va;
1488
1489	va = sva;
1490	while (count-- > 0) {
1491		pmap_kremove(va);
1492		va += PAGE_SIZE;
1493	}
1494}
1495
1496void
1497pmap_release(pmap_t pmap)
1498{
1499        int idx, mask;
1500
1501	/*
1502	 * Free segment register's VSID
1503	 */
1504        if (pmap->pm_sr[0] == 0)
1505                panic("pmap_release");
1506
1507        idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
1508        mask = 1 << (idx % VSID_NBPW);
1509        idx /= VSID_NBPW;
1510        pmap_vsid_bitmap[idx] &= ~mask;
1511}
1512
1513/*
1514 * Remove the given range of addresses from the specified map.
1515 */
1516void
1517pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
1518{
1519	struct	pvo_entry *pvo;
1520	int	pteidx;
1521
1522	for (; sva < eva; sva += PAGE_SIZE) {
1523		pvo = pmap_pvo_find_va(pm, sva, &pteidx);
1524		if (pvo != NULL) {
1525			pmap_pvo_remove(pvo, pteidx);
1526		}
1527	}
1528}
1529
1530/*
1531 * Remove physical page from all pmaps in which it resides. pmap_pvo_remove()
1532 * will reflect changes in pte's back to the vm_page.
1533 */
1534void
1535pmap_remove_all(vm_page_t m)
1536{
1537	struct  pvo_head *pvo_head;
1538	struct	pvo_entry *pvo, *next_pvo;
1539
1540	KASSERT((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0,
1541	    ("pmap_remove_all: illegal for unmanaged page %#x",
1542	    VM_PAGE_TO_PHYS(m)));
1543
1544	pvo_head = vm_page_to_pvoh(m);
1545	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
1546		next_pvo = LIST_NEXT(pvo, pvo_vlink);
1547
1548		PMAP_PVO_CHECK(pvo);	/* sanity check */
1549		pmap_pvo_remove(pvo, -1);
1550	}
1551	vm_page_flag_clear(m, PG_WRITEABLE);
1552}
1553
1554/*
1555 * Remove all pages from the specified address space; this aids process exit
1556 * speed.  This is much faster than pmap_remove in the case of running down
1557 * an entire address space.  Only works for the current pmap.
1558 */
1559void
1560pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
1561{
1562
1563	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1564	    ("pmap_remove_pages: non current pmap"));
1565	pmap_remove(pm, sva, eva);
1566}
1567
1568#ifndef KSTACK_MAX_PAGES
1569#define KSTACK_MAX_PAGES 32
1570#endif
1571
1572/*
1573 * Create the kernel stack and pcb for a new thread.
1574 * This routine directly affects fork performance for a process and
1575 * thread-creation performance.
1576 */
1577void
1578pmap_new_thread(struct thread *td, int pages)
1579{
1580	vm_page_t	ma[KSTACK_MAX_PAGES];
1581	vm_object_t	ksobj;
1582	vm_offset_t	ks;
1583	vm_page_t	m;
1584	u_int		i;
1585
1586	/* Bounds check */
1587	if (pages <= 1)
1588		pages = KSTACK_PAGES;
1589	else if (pages > KSTACK_MAX_PAGES)
1590		pages = KSTACK_MAX_PAGES;
1591
1592	/*
1593	 * Allocate object for the kstack.
1594	 */
1595	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
1596	td->td_kstack_obj = ksobj;
1597
1598	/*
1599	 * Get a kernel virtual address for the kstack for this thread.
1600	 */
1601	ks = kmem_alloc_nofault(kernel_map,
1602	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
1603	if (ks == 0)
1604		panic("pmap_new_thread: kstack allocation failed");
1605	TLBIE(ks);
1606	ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
1607	td->td_kstack = ks;
1608
1609	/*
1610	 * Knowing the number of pages allocated is useful when you
1611	 * want to deallocate them.
1612	 */
1613	td->td_kstack_pages = pages;
1614
1615	for (i = 0; i < pages; i++) {
1616		/*
1617		 * Get a kernel stack page.
1618		 */
1619		m = vm_page_grab(ksobj, i,
1620		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
1621		ma[i] = m;
1622
1623                vm_page_lock_queues();
1624		vm_page_wakeup(m);
1625		vm_page_flag_clear(m, PG_ZERO);
1626		m->valid = VM_PAGE_BITS_ALL;
1627		vm_page_unlock_queues();
1628	}
1629
1630	/*
1631	 * Enter the pages into the kernel address space
1632	 */
1633	pmap_qenter(ks, ma, pages);
1634}
1635
1636void
1637pmap_dispose_thread(struct thread *td)
1638{
1639	vm_object_t ksobj;
1640	vm_offset_t ks;
1641	vm_page_t m;
1642	int i;
1643	int pages;
1644
1645	pages = td->td_kstack_pages;
1646	ksobj = td->td_kstack_obj;
1647	ks = td->td_kstack;
1648	for (i = 0; i < pages ; i++) {
1649		m = vm_page_lookup(ksobj, i);
1650		if (m == NULL)
1651			panic("pmap_dispose_thread: kstack already missing?");
1652		vm_page_lock_queues();
1653		vm_page_busy(m);
1654		vm_page_unwire(m, 0);
1655		vm_page_free(m);
1656		vm_page_unlock_queues();
1657	}
1658	pmap_qremove(ks, pages);
1659	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
1660	   (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
1661	vm_object_deallocate(ksobj);
1662}
1663
1664void
1665pmap_new_altkstack(struct thread *td, int pages)
1666{
1667	/* shuffle the original stack */
1668	td->td_altkstack_obj = td->td_kstack_obj;
1669	td->td_altkstack = td->td_kstack;
1670	td->td_altkstack_pages = td->td_kstack_pages;
1671
1672	pmap_new_thread(td, pages);
1673}
1674
1675void
1676pmap_dispose_altkstack(struct thread *td)
1677{
1678	pmap_dispose_thread(td);
1679
1680	/* restore the original kstack */
1681	td->td_kstack = td->td_altkstack;
1682	td->td_kstack_obj = td->td_altkstack_obj;
1683	td->td_kstack_pages = td->td_altkstack_pages;
1684	td->td_altkstack = 0;
1685	td->td_altkstack_obj = NULL;
1686	td->td_altkstack_pages = 0;
1687}
1688
1689void
1690pmap_swapin_thread(struct thread *td)
1691{
1692	vm_page_t ma[KSTACK_MAX_PAGES];
1693	vm_object_t ksobj;
1694	vm_offset_t ks;
1695	vm_page_t m;
1696	int rv;
1697	int i;
1698	int pages;
1699
1700	pages = td->td_kstack_pages;
1701	ksobj = td->td_kstack_obj;
1702	ks = td->td_kstack;
1703	for (i = 0; i < pages; i++) {
1704		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
1705		if (m->valid != VM_PAGE_BITS_ALL) {
1706			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
1707			if (rv != VM_PAGER_OK)
1708				panic("pmap_swapin_thread: cannot get kstack");
1709			m = vm_page_lookup(ksobj, i);
1710			m->valid = VM_PAGE_BITS_ALL;
1711		}
1712		ma[i] = m;
1713		vm_page_lock_queues();
1714		vm_page_wire(m);
1715		vm_page_wakeup(m);
1716		vm_page_unlock_queues();
1717	}
1718	pmap_qenter(ks, ma, pages);
1719}
1720
1721
1722void
1723pmap_swapout_thread(struct thread *td)
1724{
1725	vm_object_t ksobj;
1726	vm_offset_t ks;
1727	vm_page_t m;
1728	int i;
1729	int pages;
1730
1731	pages = td->td_kstack_pages;
1732	ksobj = td->td_kstack_obj;
1733	ks = (vm_offset_t)td->td_kstack;
1734	for (i = 0; i < pages; i++) {
1735		m = vm_page_lookup(ksobj, i);
1736		if (m == NULL)
1737			panic("pmap_swapout_thread: kstack already missing?");
1738		vm_page_lock_queues();
1739		vm_page_dirty(m);
1740		vm_page_unwire(m, 0);
1741		vm_page_unlock_queues();
1742	}
1743	pmap_qremove(ks, pages);
1744}
1745
1746/*
1747 * Allocate a physical page of memory directly from the phys_avail map.
1748 * Can only be called from pmap_bootstrap before avail start and end are
1749 * calculated.
1750 */
1751static vm_offset_t
1752pmap_bootstrap_alloc(vm_size_t size, u_int align)
1753{
1754	vm_offset_t	s, e;
1755	int		i, j;
1756
1757	size = round_page(size);
1758	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
1759		if (align != 0)
1760			s = (phys_avail[i] + align - 1) & ~(align - 1);
1761		else
1762			s = phys_avail[i];
1763		e = s + size;
1764
1765		if (s < phys_avail[i] || e > phys_avail[i + 1])
1766			continue;
1767
1768		if (s == phys_avail[i]) {
1769			phys_avail[i] += size;
1770		} else if (e == phys_avail[i + 1]) {
1771			phys_avail[i + 1] -= size;
1772		} else {
1773			for (j = phys_avail_count * 2; j > i; j -= 2) {
1774				phys_avail[j] = phys_avail[j - 2];
1775				phys_avail[j + 1] = phys_avail[j - 1];
1776			}
1777
1778			phys_avail[i + 3] = phys_avail[i + 1];
1779			phys_avail[i + 1] = s;
1780			phys_avail[i + 2] = e;
1781			phys_avail_count++;
1782		}
1783
1784		return (s);
1785	}
1786	panic("pmap_bootstrap_alloc: could not allocate memory");
1787}
1788
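/*
 * Illustrative sketch, not part of the original file: how an allocation
 * carves memory out of a phys_avail[] start/end pair.  The single region
 * below is made up; an allocation from its start just advances the start,
 * while one from the middle would split the pair in two and bump
 * phys_avail_count.
 */
#if 0
	/* Before: one free region, 0x00100000 - 0x00500000. */
	phys_avail[0] = 0x00100000;
	phys_avail[1] = 0x00500000;
	phys_avail_count = 1;

	/*
	 * 0x10000 bytes aligned to 0x10000: the aligned start equals
	 * phys_avail[0], so afterwards phys_avail[0] == 0x00110000.
	 */
	(void)pmap_bootstrap_alloc(0x10000, 0x10000);
#endif
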
1789/*
1790 * Return an unmapped pvo for a kernel virtual address.
1791 * Used by pmap functions that operate on physical pages.
1792 */
1793static struct pvo_entry *
1794pmap_rkva_alloc(void)
1795{
1796	struct		pvo_entry *pvo;
1797	struct		pte *pt;
1798	vm_offset_t	kva;
1799	int		pteidx;
1800
1801	if (pmap_rkva_count == 0)
1802		panic("pmap_rkva_alloc: no more reserved KVAs");
1803
1804	kva = pmap_rkva_start + (PAGE_SIZE * --pmap_rkva_count);
1805	pmap_kenter(kva, 0);
1806
1807	pvo = pmap_pvo_find_va(kernel_pmap, kva, &pteidx);
1808
1809	if (pvo == NULL)
1810		panic("pmap_rkva_alloc: pmap_pvo_find_va failed");
1811
1812	pt = pmap_pvo_to_pte(pvo, pteidx);
1813
1814	if (pt == NULL)
1815		panic("pmap_rkva_alloc: pmap_pvo_to_pte failed");
1816
1817	pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1818	PVO_PTEGIDX_CLR(pvo);
1819
1820	pmap_pte_overflow++;
1821
1822	return (pvo);
1823}
1824
1825static void
1826pmap_pa_map(struct pvo_entry *pvo, vm_offset_t pa, struct pte *saved_pt,
1827    int *depth_p)
1828{
1829	struct	pte *pt;
1830
1831	/*
1832	 * If this pvo already has a valid pte, we need to save it so it can
1833	 * be restored later.  We then just reload the new PTE over the old
1834	 * slot.
1835	 */
1836	if (saved_pt != NULL) {
1837		pt = pmap_pvo_to_pte(pvo, -1);
1838
1839		if (pt != NULL) {
1840			pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1841			PVO_PTEGIDX_CLR(pvo);
1842			pmap_pte_overflow++;
1843		}
1844
1845		*saved_pt = pvo->pvo_pte;
1846
1847		pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
1848	}
1849
1850	pvo->pvo_pte.pte_lo |= pa;
1851
1852	if (!pmap_pte_spill(pvo->pvo_vaddr))
1853		panic("pmap_pa_map: could not spill pvo %p", pvo);
1854
1855	if (depth_p != NULL)
1856		(*depth_p)++;
1857}
1858
1859static void
1860pmap_pa_unmap(struct pvo_entry *pvo, struct pte *saved_pt, int *depth_p)
1861{
1862	struct	pte *pt;
1863
1864	pt = pmap_pvo_to_pte(pvo, -1);
1865
1866	if (pt != NULL) {
1867		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1868		PVO_PTEGIDX_CLR(pvo);
1869		pmap_pte_overflow++;
1870	}
1871
1872	pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
1873
1874	/*
1875	 * If there is a saved PTE and it's valid, restore it and return.
1876	 */
1877	if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) {
1878		if (depth_p != NULL && --(*depth_p) == 0)
1879			panic("pmap_pa_unmap: restoring but depth == 0");
1880
1881		pvo->pvo_pte = *saved_pt;
1882
1883		if (!pmap_pte_spill(pvo->pvo_vaddr))
1884			panic("pmap_pa_unmap: could not spill pvo %p", pvo);
1885	}
1886}
1887
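/*
 * Illustrative usage sketch, not part of the original file: the
 * pmap_pa_map()/pmap_pa_unmap() pair is how pmap_zero_page() borrows the
 * reserved kernel VA held in pmap_pvo_zeropage for a page that lies above
 * the 1:1 BAT-mapped segment.  The physical address is made up.
 */
#if 0
	vm_offset_t	pa;

	pa = 0x12345000;
	pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL);
	bzero((caddr_t)PVO_VADDR(pmap_pvo_zeropage), PAGE_SIZE);
	pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL);
#endif
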
1888static void
1889pmap_syncicache(vm_offset_t pa, vm_size_t len)
1890{
1891	__syncicache((void *)pa, len);
1892}
1893
1894static void
1895tlbia(void)
1896{
1897	caddr_t	i;
1898
1899	SYNC();
1900	for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) {
1901		TLBIE(i);
1902		EIEIO();
1903	}
1904	TLBSYNC();
1905	SYNC();
1906}
1907
1908static int
1909pmap_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
1910    vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags)
1911{
1912	struct	pvo_entry *pvo;
1913	u_int	sr;
1914	int	first;
1915	u_int	ptegidx;
1916	int	i;
1917	int     bootstrap;
1918
1919	pmap_pvo_enter_calls++;
1920	first = 0;
1921
1922	bootstrap = 0;
1923
1924	/*
1925	 * Compute the PTE Group index.
1926	 */
1927	va &= ~ADDR_POFF;
1928	sr = va_to_sr(pm->pm_sr, va);
1929	ptegidx = va_to_pteg(sr, va);
1930
1931	/*
1932	 * Remove any existing mapping for this page.  Reuse the pvo entry if
1933	 * there is a mapping.
1934	 */
1935	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1936		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1937			if ((pvo->pvo_pte.pte_lo & PTE_RPGN) == pa &&
1938			    (pvo->pvo_pte.pte_lo & PTE_PP) ==
1939			    (pte_lo & PTE_PP)) {
1940				return (0);
1941			}
1942			pmap_pvo_remove(pvo, -1);
1943			break;
1944		}
1945	}
1946
1947	/*
1948	 * If we aren't overwriting a mapping, try to allocate.
1949	 */
1950	if (pmap_initialized) {
1951		pvo = uma_zalloc(zone, M_NOWAIT);
1952	} else {
1953		if (pmap_bpvo_pool_index >= BPVO_POOL_SIZE) {
1954			panic("pmap_pvo_enter: bpvo pool exhausted, %d, %d, %d",
1955			      pmap_bpvo_pool_index, BPVO_POOL_SIZE,
1956			      BPVO_POOL_SIZE * sizeof(struct pvo_entry));
1957		}
1958		pvo = &pmap_bpvo_pool[pmap_bpvo_pool_index];
1959		pmap_bpvo_pool_index++;
1960		bootstrap = 1;
1961	}
1962
1963	if (pvo == NULL) {
1964		return (ENOMEM);
1965	}
1966
1967	pmap_pvo_entries++;
1968	pvo->pvo_vaddr = va;
1969	pvo->pvo_pmap = pm;
1970	LIST_INSERT_HEAD(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
1971	pvo->pvo_vaddr &= ~ADDR_POFF;
1972	if (flags & VM_PROT_EXECUTE)
1973		pvo->pvo_vaddr |= PVO_EXECUTABLE;
1974	if (flags & PVO_WIRED)
1975		pvo->pvo_vaddr |= PVO_WIRED;
1976	if (pvo_head != &pmap_pvo_kunmanaged)
1977		pvo->pvo_vaddr |= PVO_MANAGED;
1978	if (bootstrap)
1979		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
1980	pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo);
1981
1982	/*
1983	 * Remember if the list was empty; if so, this entry will be the first
1984	 * item on it.
1985	 */
1986	if (LIST_FIRST(pvo_head) == NULL)
1987		first = 1;
1988
1989	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
1990	if (pvo->pvo_vaddr & PVO_WIRED)
1991		pvo->pvo_pmap->pm_stats.wired_count++;
1992	pvo->pvo_pmap->pm_stats.resident_count++;
1993
1994	/*
1995	 * We hope this succeeds but it isn't required.
1996	 */
1997	i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
1998	if (i >= 0) {
1999		PVO_PTEGIDX_SET(pvo, i);
2000	} else {
2001		panic("pmap_pvo_enter: overflow");
2002		pmap_pte_overflow++;
2003	}
2004
2005	return (first ? ENOENT : 0);
2006}
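/*
 * Sketch of a hypothetical caller (not part of this file): pmap_pvo_enter()
 * returns ENOMEM when no pvo_entry could be allocated, ENOENT when this is
 * the first mapping on the page's pvo list, and 0 otherwise.  A caller that
 * wants to act only on "first mapping of this page" might look like:
 */
#if 0
	error = pmap_pvo_enter(pm, pmap_mpvo_zone, pvo_head, va, pa,
	    pte_lo, flags);
	if (error != 0 && error != ENOENT)
		return (error);			/* allocation failure */
	if (error == ENOENT)
		pmap_syncicache(pa, PAGE_SIZE);	/* e.g. sync the icache once */
#endif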
2007
2008static void
2009pmap_pvo_remove(struct pvo_entry *pvo, int pteidx)
2010{
2011	struct	pte *pt;
2012
2013	/*
2014	 * If there is an active pte entry, we need to deactivate it (and
2015	 * save the ref & cfg bits).
2016	 */
2017	pt = pmap_pvo_to_pte(pvo, pteidx);
2018	if (pt != NULL) {
2019		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
2020		PVO_PTEGIDX_CLR(pvo);
2021	} else {
2022		pmap_pte_overflow--;
2023	}
2024
2025	/*
2026	 * Update our statistics.
2027	 */
2028	pvo->pvo_pmap->pm_stats.resident_count--;
2029	if (pvo->pvo_vaddr & PVO_WIRED)
2030		pvo->pvo_pmap->pm_stats.wired_count--;
2031
2032	/*
2033	 * Save the REF/CHG bits into their cache if the page is managed.
2034	 */
2035	if (pvo->pvo_vaddr & PVO_MANAGED) {
2036		struct	vm_page *pg;
2037
2038		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN);
2039		if (pg != NULL) {
2040			pmap_attr_save(pg, pvo->pvo_pte.pte_lo &
2041			    (PTE_REF | PTE_CHG));
2042		}
2043	}
2044
2045	/*
2046	 * Remove this PVO from the PV list.
2047	 */
2048	LIST_REMOVE(pvo, pvo_vlink);
2049
2050	/*
2051	 * Remove this from the overflow list and return it to the pool
2052	 * if we aren't going to reuse it.
2053	 */
2054	LIST_REMOVE(pvo, pvo_olink);
2055	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
2056		uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? pmap_mpvo_zone :
2057		    pmap_upvo_zone, pvo);
2058	pmap_pvo_entries--;
2059	pmap_pvo_remove_calls++;
2060}
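/*
 * Note on the flag tests above (bit layout assumed from usage; see the
 * PVO_* assignments in pmap_pvo_enter()): pvo_vaddr is always page
 * aligned, so its low bits carry per-mapping state and the PTEG slot
 * index rather than address bits.  Roughly:
 */
#if 0
	va         = PVO_VADDR(pvo);			/* pvo_vaddr & ~ADDR_POFF */
	slot       = PVO_PTEGIDX_GET(pvo);		/* slot 0..7 within the PTEG */
	wired      = (pvo->pvo_vaddr & PVO_WIRED) != 0;
	managed    = (pvo->pvo_vaddr & PVO_MANAGED) != 0;
	bootstrap  = (pvo->pvo_vaddr & PVO_BOOTSTRAP) != 0;
	executable = (pvo->pvo_vaddr & PVO_EXECUTABLE) != 0;
#endif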
2061
2062static __inline int
2063pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
2064{
2065	int	pteidx;
2066
2067	/*
2068	 * We can find the actual pte entry without searching by grabbing
2069	 * the PTEG slot index from 3 unused low bits of pvo_vaddr and by
2070	 * noticing the HID bit.
2071	 */
2072	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
2073	if (pvo->pvo_pte.pte_hi & PTE_HID)
2074		pteidx ^= pmap_pteg_mask * 8;
2075
2076	return (pteidx);
2077}
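/*
 * Worked example of the index arithmetic above (pmap_pteg_mask value
 * assumed for illustration): with 1024 PTEGs, pmap_pteg_mask == 0x3ff.
 * A pvo in PTEG 5, slot 3, placed via the primary hash gives
 *
 *	pteidx = 5 * 8 + 3 = 43
 *
 * If the same mapping had been placed via the secondary hash (PTE_HID
 * set), the group is the complement of the primary one:
 *
 *	pteidx = 43 ^ (0x3ff * 8) = 1018 * 8 + 3 = 8147
 *
 * i.e. PTEG 0x3fa (= 5 ^ 0x3ff), same slot 3.
 */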
2078
2079static struct pvo_entry *
2080pmap_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
2081{
2082	struct	pvo_entry *pvo;
2083	int	ptegidx;
2084	u_int	sr;
2085
2086	va &= ~ADDR_POFF;
2087	sr = va_to_sr(pm->pm_sr, va);
2088	ptegidx = va_to_pteg(sr, va);
2089
2090	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
2091		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
2092			if (pteidx_p)
2093				*pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
2094			return (pvo);
2095		}
2096	}
2097
2098	return (NULL);
2099}
2100
2101static struct pte *
2102pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
2103{
2104	struct	pte *pt;
2105
2106	/*
2107	 * If we haven't been supplied the ptegidx, calculate it.
2108	 */
2109	if (pteidx == -1) {
2110		int	ptegidx;
2111		u_int	sr;
2112
2113		sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
2114		ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
2115		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
2116	}
2117
2118	pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];
2119
2120	if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
2121		panic("pmap_pvo_to_pte: pvo %p has valid pte in pvo but no "
2122		    "valid pte index", pvo);
2123	}
2124
2125	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
2126		panic("pmap_pvo_to_pte: pvo %p has valid pte index in pvo "
2127		    "pvo but no valid pte", pvo);
2128	}
2129
2130	if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
2131		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
2132			panic("pmap_pvo_to_pte: pvo %p has valid pte in "
2133			    "pmap_pteg_table %p but invalid in pvo", pvo, pt);
2134		}
2135
2136		if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF))
2137		    != 0) {
2138			panic("pmap_pvo_to_pte: pvo %p pte does not match "
2139			    "pte %p in pmap_pteg_table", pvo, pt);
2140		}
2141
2142		return (pt);
2143	}
2144
2145	if (pvo->pvo_pte.pte_hi & PTE_VALID) {
2146		panic("pmap_pvo_to_pte: pvo %p has invalid pte %p in "
2147		    "pmap_pteg_table but valid in pvo", pvo, pt);
2148	}
2149
2150	return (NULL);
2151}
2152
2153static void *
2154pmap_pvo_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
2155{
2156	vm_page_t	m;
2157
2158	if (bytes != PAGE_SIZE)
2159		panic("pmap_pvo_allocf: benno was shortsighted.  hit him.");
2160
2161	*flags = UMA_SLAB_PRIV;
2162	m = vm_page_alloc(pmap_pvo_obj, pmap_pvo_count, VM_ALLOC_SYSTEM);
2163	if (m == NULL)
2164		return (NULL);
2165	pmap_pvo_count++;
2166	return ((void *)VM_PAGE_TO_PHYS(m));	/* directly usable via the 1:1 BAT mapping */
2167}
2168
2169/*
2170 * XXX: THIS STUFF SHOULD BE IN pte.c?
2171 */
2172int
2173pmap_pte_spill(vm_offset_t addr)
2174{
2175	struct	pvo_entry *source_pvo, *victim_pvo;
2176	struct	pvo_entry *pvo;
2177	int	ptegidx, i, j;
2178	u_int	sr;
2179	struct	pteg *pteg;
2180	struct	pte *pt;
2181
2182	pmap_pte_spills++;
2183
2184	sr = mfsrin(addr);
2185	ptegidx = va_to_pteg(sr, addr);
2186
2187	/*
2188	 * Have to substitute some entry.  Use the primary hash for this.
2189	 * Use low bits of timebase as random generator.
2190	 */
2191	pteg = &pmap_pteg_table[ptegidx];
2192	__asm __volatile("mftb %0" : "=r"(i));
2193	i &= 7;
2194	pt = &pteg->pt[i];
2195
2196	source_pvo = NULL;
2197	victim_pvo = NULL;
2198	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
2199		/*
2200		 * We need to find a pvo entry for this address.
2201		 */
2202		PMAP_PVO_CHECK(pvo);
2203		if (source_pvo == NULL &&
2204		    pmap_pte_match(&pvo->pvo_pte, sr, addr,
2205		    pvo->pvo_pte.pte_hi & PTE_HID)) {
2206			/*
2207			 * Now found an entry to be spilled into the pteg.
2208			 * The PTE is now valid, so we know it's active.
2209			 */
2210			j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
2211
2212			if (j >= 0) {
2213				PVO_PTEGIDX_SET(pvo, j);
2214				pmap_pte_overflow--;
2215				PMAP_PVO_CHECK(pvo);
2216				return (1);
2217			}
2218
2219			source_pvo = pvo;
2220
2221			if (victim_pvo != NULL)
2222				break;
2223		}
2224
2225		/*
2226		 * We also need the pvo entry of the victim we are replacing
2227		 * so save the R & C bits of the PTE.
2228		 */
2229		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
2230		    pmap_pte_compare(pt, &pvo->pvo_pte)) {
2231			victim_pvo = pvo;
2232			if (source_pvo != NULL)
2233				break;
2234		}
2235	}
2236
2237	if (source_pvo == NULL)
2238		return (0);
2239
2240	if (victim_pvo == NULL) {
2241		if ((pt->pte_hi & PTE_HID) == 0)
2242			panic("pmap_pte_spill: victim p-pte (%p) has no pvo "
2243			    "entry", pt);
2244
2245		/*
2246		 * If this is a secondary PTE, we need to search its primary
2247		 * pvo bucket for the matching PVO.
2248		 */
2249		LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx ^ pmap_pteg_mask],
2250		    pvo_olink) {
2251			PMAP_PVO_CHECK(pvo);
2252			/*
2253			 * We also need the pvo entry of the victim we are
2254			 * replacing so save the R & C bits of the PTE.
2255			 */
2256			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
2257				victim_pvo = pvo;
2258				break;
2259			}
2260		}
2261
2262		if (victim_pvo == NULL)
2263			panic("pmap_pte_spill: victim s-pte (%p) has no pvo "
2264			    "entry", pt);
2265	}
2266
2267	/*
2268	 * We are invalidating the TLB entry for the EA we are replacing even
2269	 * though it's valid.  If we don't, we lose any ref/chg bit changes
2270	 * contained in the TLB entry.
2271	 */
2272	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;
2273
2274	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
2275	pmap_pte_set(pt, &source_pvo->pvo_pte);
2276
2277	PVO_PTEGIDX_CLR(victim_pvo);
2278	PVO_PTEGIDX_SET(source_pvo, i);
2279	pmap_pte_replacements++;
2280
2281	PMAP_PVO_CHECK(victim_pvo);
2282	PMAP_PVO_CHECK(source_pvo);
2283
2284	return (1);
2285}
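/*
 * Sketch of a hypothetical caller (not part of this file): the spill is
 * intended for the DSI/ISI fault path.  If the hardware missed in the
 * page table but a pvo_entry still describes the faulting address, the
 * spill re-inserts that PTE (evicting a pseudo-random victim chosen from
 * the timebase) and the access can simply be retried:
 */
#if 0
	/* eva is the faulting effective address from the trap frame */
	if (pmap_pte_spill(eva))
		return;		/* PTE re-inserted; retry the access */
	/* otherwise fall through to vm_fault() */
#endif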
2286
2287static int
2288pmap_pte_insert(u_int ptegidx, struct pte *pvo_pt)
2289{
2290	struct	pte *pt;
2291	int	i;
2292
2293	/*
2294	 * First try primary hash.
2295	 */
2296	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
2297		if ((pt->pte_hi & PTE_VALID) == 0) {
2298			pvo_pt->pte_hi &= ~PTE_HID;
2299			pmap_pte_set(pt, pvo_pt);
2300			return (i);
2301		}
2302	}
2303
2304	/*
2305	 * Now try secondary hash.
2306	 */
2307	/* the secondary PTEG index is the complement of the primary one */
2308	ptegidx ^= pmap_pteg_mask;
2309	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
2310		if ((pt->pte_hi & PTE_VALID) == 0) {
2311			pvo_pt->pte_hi |= PTE_HID;
2312			pmap_pte_set(pt, pvo_pt);
2313			return (i);
2314		}
2315	}
2316
2317	panic("pmap_pte_insert: overflow");
2318	return (-1);
2319}
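/*
 * Note on the secondary hash above: the OEA MMU derives the secondary
 * PTEG from the one's complement of the primary hash, so within a table
 * of pmap_pteg_count groups the transform is simply an XOR with
 * pmap_pteg_mask.  For example, with pmap_pteg_mask == 0x3ff (assumed
 * for illustration), primary PTEG 0x005 has secondary PTEG 0x3fa, and
 * applying the XOR again recovers 0x005.  PTE_HID records which hash a
 * PTE was placed under so that pmap_pvo_pte_index() can undo the
 * transform when locating it later.
 */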
2320
2321static boolean_t
2322pmap_query_bit(vm_page_t m, int ptebit)
2323{
2324	struct	pvo_entry *pvo;
2325	struct	pte *pt;
2326
2327	if (pmap_attr_fetch(m) & ptebit)
2328		return (TRUE);
2329
2330	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2331		PMAP_PVO_CHECK(pvo);	/* sanity check */
2332
2333		/*
2334		 * See if we saved the bit off.  If so, cache it and return
2335		 * success.
2336		 */
2337		if (pvo->pvo_pte.pte_lo & ptebit) {
2338			pmap_attr_save(m, ptebit);
2339			PMAP_PVO_CHECK(pvo);	/* sanity check */
2340			return (TRUE);
2341		}
2342	}
2343
2344	/*
2345	 * No luck, now go through the hard part of looking at the PTEs
2346	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
2347	 * the PTEs.
2348	 */
2349	SYNC();
2350	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2351		PMAP_PVO_CHECK(pvo);	/* sanity check */
2352
2353		/*
2354		 * See if this pvo has a valid PTE.  if so, fetch the
2355		 * REF/CHG bits from the valid PTE.  If the appropriate
2356		 * ptebit is set, cache it and return success.
2357		 */
2358		pt = pmap_pvo_to_pte(pvo, -1);
2359		if (pt != NULL) {
2360			pmap_pte_synch(pt, &pvo->pvo_pte);
2361			if (pvo->pvo_pte.pte_lo & ptebit) {
2362				pmap_attr_save(m, ptebit);
2363				PMAP_PVO_CHECK(pvo);	/* sanity check */
2364				return (TRUE);
2365			}
2366		}
2367	}
2368
2369	return (FALSE);
2370}
2371
2372static u_int
2373pmap_clear_bit(vm_page_t m, int ptebit, int *origbit)
2374{
2375	u_int	count;
2376	struct	pvo_entry *pvo;
2377	struct	pte *pt;
2378	int	rv;
2379
2380	/*
2381	 * Clear the cached value.
2382	 */
2383	rv = pmap_attr_fetch(m);
2384	pmap_attr_clear(m, ptebit);
2385
2386	/*
2387	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
2388	 * we can reset the right ones).  note that since the pvo entries and
2389	 * list heads are accessed via BAT0 and are never placed in the page
2390	 * table, we don't have to worry about further accesses setting the
2391	 * REF/CHG bits.
2392	 */
2393	SYNC();
2394
2395	/*
2396	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
2397	 * valid pte clear the ptebit from the valid pte.
2398	 */
2399	count = 0;
2400	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2401		PMAP_PVO_CHECK(pvo);	/* sanity check */
2402		pt = pmap_pvo_to_pte(pvo, -1);
2403		if (pt != NULL) {
2404			pmap_pte_synch(pt, &pvo->pvo_pte);
2405			if (pvo->pvo_pte.pte_lo & ptebit) {
2406				count++;
2407				pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
2408			}
2409		}
2410		rv |= pvo->pvo_pte.pte_lo;
2411		pvo->pvo_pte.pte_lo &= ~ptebit;
2412		PMAP_PVO_CHECK(pvo);	/* sanity check */
2413	}
2414
2415	if (origbit != NULL) {
2416		*origbit = rv;
2417	}
2418
2419	return (count);
2420}
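/*
 * Hypothetical snippets (not from this file) showing how the MI pmap
 * interface can be layered on the two helpers above:
 */
#if 0
	/* pmap_is_modified(): has any mapping dirtied the page? */
	modified = pmap_query_bit(m, PTE_CHG);

	/* pmap_clear_modify(): forget the change bit everywhere */
	(void)pmap_clear_bit(m, PTE_CHG, NULL);

	/* pmap_ts_referenced(): count and clear references to the page */
	refs = pmap_clear_bit(m, PTE_REF, NULL);
#endif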
2421
2422/*
2423 * Return 0 if the physical range is encompassed by battable[idx], else errno
2424 */
2425static int
2426pmap_bat_mapped(int idx, vm_offset_t pa, vm_size_t size)
2427{
2428	u_int prot;
2429	u_int32_t start;
2430	u_int32_t end;
2431	u_int32_t bat_ble;
2432
2433	/*
2434	 * Return immediately if not a valid mapping
2435	 */
2436	if ((battable[idx].batu & BAT_Vs) == 0)
2437		return (EINVAL);
2438
2439	/*
2440	 * The BAT entry must be cache-inhibited, guarded, and r/w
2441	 * so it can function as an i/o page
2442	 */
2443	prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW);
2444	if (prot != (BAT_I|BAT_G|BAT_PP_RW))
2445		return (EPERM);
2446
2447	/*
2448	 * The address should be within the BAT range. Assume that the
2449	 * start address in the BAT has the correct alignment (thus
2450	 * not requiring masking)
2451	 */
2452	start = battable[idx].batl & BAT_PBS;
2453	bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03;
2454	end = start | (bat_ble << 15) | 0x7fff;
2455
2456	if ((pa < start) || ((pa + size) > end))
2457		return (ERANGE);
2458
2459	return (0);
2460}
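/*
 * Worked example of the range arithmetic above (assuming BAT_EBS masks
 * the effective block start and BL is the 11-bit block-length field at
 * bits 2..12 of the upper BAT register): for a 256MB I/O BAT, BL == 0x7ff,
 * so
 *
 *	bat_ble = (0x7ff << 2) | 0x03 = 0x1fff
 *	end     = start | (0x1fff << 15) | 0x7fff = start | 0x0fffffff
 *
 * i.e. end is the last byte of the 256MB block.  For the minimum 128KB
 * block (BL == 0) the same arithmetic yields start | 0x1ffff.
 */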
2461
2462
2463/*
2464 * Map a set of physical memory pages into the kernel virtual
2465 * address space. Return a pointer to where it is mapped. This
2466 * routine is intended to be used for mapping device memory,
2467 * NOT real memory.
2468 */
2469void *
2470pmap_mapdev(vm_offset_t pa, vm_size_t size)
2471{
2472	vm_offset_t va, tmpva, ppa, offset;
2473	int i;
2474
2475	ppa = trunc_page(pa);
2476	offset = pa & PAGE_MASK;
2477	size = roundup(offset + size, PAGE_SIZE);
2478
2479	GIANT_REQUIRED;
2480
2481	/*
2482	 * If the physical address lies within a valid BAT table entry,
2483	 * return the 1:1 mapping. This currently doesn't work
2484	 * for regions that overlap 256M BAT segments.
2485	 */
2486	for (i = 0; i < 16; i++) {
2487		if (pmap_bat_mapped(i, pa, size) == 0)
2488			return ((void *) pa);
2489	}
2490
2491	va = kmem_alloc_pageable(kernel_map, size);
2492	if (!va)
2493		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
2494
2495	for (tmpva = va; size > 0;) {
2496		pmap_kenter(tmpva, ppa);
2497		TLBIE(tmpva); /* XXX or should it be invalidate-all ? */
2498		size -= PAGE_SIZE;
2499		tmpva += PAGE_SIZE;
2500		ppa += PAGE_SIZE;
2501	}
2502
2503	return ((void *)(va + offset));
2504}
2505
2506void
2507pmap_unmapdev(vm_offset_t va, vm_size_t size)
2508{
2509	vm_offset_t base, offset;
2510
2511	/*
2512	 * If this is outside kernel virtual space, then it's a
2513	 * battable entry and doesn't require unmapping
2514	 */
2515	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
2516		base = trunc_page(va);
2517		offset = va & PAGE_MASK;
2518		size = roundup(offset + size, PAGE_SIZE);
2519		kmem_free(kernel_map, base, size);
2520	}
2521}
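/*
 * Sketch of a hypothetical pmap_mapdev()/pmap_unmapdev() consumer (names
 * and addresses are illustrative only): when the physical address falls
 * inside one of the cache-inhibited I/O BATs the returned pointer is
 * simply the 1:1 physical address, and pmap_unmapdev() sees that it lies
 * outside the kernel VA range and does nothing; otherwise the registers
 * are mapped page by page with pmap_kenter() and later freed.
 */
#if 0
	volatile u_int32_t *regs;
	u_int32_t val;

	regs = pmap_mapdev(dev_pa, PAGE_SIZE);	/* dev_pa: hypothetical registers */
	val = regs[0];				/* ... talk to the device ... */
	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
#endif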
2522