pmap-v4.c revision 147417
/* From: $NetBSD: pmap.c,v 1.148 2004/04/03 04:35:48 bsh Exp $ */
/*-
 * Copyright 2004 Olivier Houchard.
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2002-2003 Wasabi Systems, Inc.
 * Copyright (c) 2001 Richard Earnshaw
 * Copyright (c) 2001-2002 Christopher Gilbert
 * All rights reserved.
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Mark Brinicombe.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * pmap.c
 *
 * Machine dependent vm stuff
 *
 * Created      : 20/09/94
 */

/*
 * Special compilation symbols
 * PMAP_DEBUG           - Build in pmap_debug_level code
 */
/* Include header files */

#include "opt_vm.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/pmap.c 147417 2005-06-16 13:23:39Z cognet $");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sched.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/md_var.h>
#include <machine/vmparam.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/pcb.h>

#ifdef PMAP_DEBUG
#define PDEBUG(_lev_,_stat_) \
        if (pmap_debug_level >= (_lev_)) \
                ((_stat_))
#define dprintf printf

int pmap_debug_level = 0;
#define PMAP_INLINE
#else   /* PMAP_DEBUG */
#define PDEBUG(_lev_,_stat_) /* Nothing */
#define dprintf(x, arg...)
#define PMAP_INLINE __inline
#endif  /* PMAP_DEBUG */

extern struct pv_addr systempage;
/*
 * Internal function prototypes
 */
static void pmap_free_pv_entry (pv_entry_t);
static pv_entry_t pmap_get_pv_entry(void);

static void		pmap_vac_me_harder(struct vm_page *, pmap_t,
    vm_offset_t);
static void		pmap_vac_me_kpmap(struct vm_page *, pmap_t,
    vm_offset_t);
static void		pmap_vac_me_user(struct vm_page *, pmap_t, vm_offset_t);
static void		pmap_alloc_l1(pmap_t);
static void		pmap_free_l1(pmap_t);
static void		pmap_use_l1(pmap_t);

static int		pmap_clearbit(struct vm_page *, u_int);

static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vm_offset_t);
static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vm_offset_t);
static void		pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int);
static vm_offset_t	kernel_pt_lookup(vm_paddr_t);

static MALLOC_DEFINE(M_VMPMAP, "pmap", "PMAP L1");

vm_offset_t avail_end;		/* PA of last available physical page */
vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
vm_offset_t pmap_curmaxkvaddr;

extern void *end;
vm_offset_t kernel_vm_end = 0;

struct pmap kernel_pmap_store;
pmap_t kernel_pmap;

static pt_entry_t *csrc_pte, *cdst_pte;
static vm_offset_t csrcp, cdstp;
static void		pmap_init_l1(struct l1_ttable *, pd_entry_t *);
/*
 * These routines are called when the CPU type is identified to set up
 * the PTE prototypes, cache modes, etc.
 *
 * The variables are always here, just in case LKMs need to reference
 * them (though, they shouldn't).
 */

pt_entry_t	pte_l1_s_cache_mode;
pt_entry_t	pte_l1_s_cache_mode_pt;
pt_entry_t	pte_l1_s_cache_mask;

pt_entry_t	pte_l2_l_cache_mode;
pt_entry_t	pte_l2_l_cache_mode_pt;
pt_entry_t	pte_l2_l_cache_mask;

pt_entry_t	pte_l2_s_cache_mode;
pt_entry_t	pte_l2_s_cache_mode_pt;
pt_entry_t	pte_l2_s_cache_mask;

pt_entry_t	pte_l2_s_prot_u;
pt_entry_t	pte_l2_s_prot_w;
pt_entry_t	pte_l2_s_prot_mask;

pt_entry_t	pte_l1_s_proto;
pt_entry_t	pte_l1_c_proto;
pt_entry_t	pte_l2_s_proto;

void		(*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
void		(*pmap_zero_page_func)(vm_paddr_t, int, int);
/*
 * Which pmap is currently 'live' in the cache
 *
 * XXXSCW: Fix for SMP ...
 */
union pmap_cache_state *pmap_cache_state;

LIST_HEAD(pmaplist, pmap);
struct pmaplist allpmaps;

/* static pt_entry_t *msgbufmap;*/
struct msgbuf *msgbufp = 0;

extern void bcopy_page(vm_offset_t, vm_offset_t);
extern void bzero_page(vm_offset_t);

char *_tmppt;

/*
 * Metadata for L1 translation tables.
 */
struct l1_ttable {
	/* Entry on the L1 Table list */
	SLIST_ENTRY(l1_ttable) l1_link;

	/* Entry on the L1 Least Recently Used list */
	TAILQ_ENTRY(l1_ttable) l1_lru;

	/* Track how many domains are allocated from this L1 */
	volatile u_int l1_domain_use_count;

	/*
	 * A free-list of domain numbers for this L1.
	 * We avoid using ffs() and a bitmap to track domains since ffs()
	 * is slow on ARM.
	 */
	u_int8_t l1_domain_first;
	u_int8_t l1_domain_free[PMAP_DOMAINS];

	/* Physical address of this L1 page table */
	vm_paddr_t l1_physaddr;

	/* KVA of this L1 page table */
	pd_entry_t *l1_kva;
};

/*
 * Convert a virtual address into its L1 table index. That is, the
 * index used to locate the L2 descriptor table pointer in an L1 table.
 * This is basically used to index l1->l1_kva[].
 *
 * Each L2 descriptor table represents 1MB of VA space.
 */
#define	L1_IDX(va)		(((vm_offset_t)(va)) >> L1_S_SHIFT)
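
/*
 * Worked example (assuming the usual 1MB section size, i.e. L1_S_SHIFT
 * of 20): a VA of 0xc1234567 gives L1_IDX == 0xc12, so the 0xc12'th of
 * the 4096 L1 descriptors covers the 1MB region 0xc1200000-0xc12fffff.
 */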

/*
 * L1 Page Tables are tracked using a Least Recently Used list.
 *  - New L1s are allocated from the HEAD.
 *  - Freed L1s are added to the TAIL.
 *  - Recently accessed L1s (where an 'access' is some change to one of
 *    the userland pmaps which owns this L1) are moved to the TAIL.
 */
static TAILQ_HEAD(, l1_ttable) l1_lru_list;
/*
 * A list of all L1 tables
 */
static SLIST_HEAD(, l1_ttable) l1_list;
static struct mtx l1_lru_lock;

/*
 * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots.
 *
 * This is normally 16MB worth of L2 page descriptors for any given pmap.
 * Reference counts are maintained for L2 descriptors so they can be
 * freed when empty.
 */
struct l2_dtable {
	/* The number of L2 page descriptors allocated to this l2_dtable */
	u_int l2_occupancy;

	/* List of L2 page descriptors */
	struct l2_bucket {
		pt_entry_t *l2b_kva;	/* KVA of L2 Descriptor Table */
		vm_paddr_t l2b_phys;	/* Physical address of same */
		u_short l2b_l1idx;	/* This L2 table's L1 index */
		u_short l2b_occupancy;	/* How many active descriptors */
	} l2_bucket[L2_BUCKET_SIZE];
};

/* pmap_kenter_internal flags */
#define KENTER_CACHE	0x1
#define KENTER_USER	0x2

/*
 * Given an L1 table index, calculate the corresponding l2_dtable index
 * and bucket index within the l2_dtable.
 */
#define	L2_IDX(l1idx)		(((l1idx) >> L2_BUCKET_LOG2) & \
				 (L2_SIZE - 1))
#define	L2_BUCKET(l1idx)	((l1idx) & (L2_BUCKET_SIZE - 1))

/*
 * Given a virtual address, this macro returns the
 * virtual address required to drop into the next L2 bucket.
 */
#define	L2_NEXT_BUCKET(va)	(((va) & L1_S_FRAME) + L1_S_SIZE)
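
/*
 * Worked example (assuming L2_BUCKET_LOG2 == 4, i.e. each l2_dtable
 * covers 16 x 1MB of VA as described above): for va == 0xc1234567,
 * L1_IDX(va) == 0xc12, L2_IDX(0xc12) == 0xc1 (modulo L2_SIZE) selects
 * the l2_dtable for 0xc1000000-0xc1ffffff, L2_BUCKET(0xc12) == 0x2
 * picks the bucket for the 1MB slot at 0xc1200000, and
 * L2_NEXT_BUCKET(va) == 0xc1300000.
 */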

/*
 * L2 allocation.
 */
#define	pmap_alloc_l2_dtable()		\
		(void*)uma_zalloc(l2table_zone, M_NOWAIT)
#define	pmap_free_l2_dtable(l2)		\
		uma_zfree(l2table_zone, l2)

/*
 * We try to map the page tables write-through, if possible.  However, not
 * all CPUs have a write-through cache mode, so on those we have to sync
 * the cache when we frob page tables.
 *
 * We try to evaluate this at compile time, if possible.  However, it's
 * not always possible to do that, hence this run-time var.
 */
int	pmap_needs_pte_sync;

/*
 * Macro to determine if a mapping might be resident in the
 * instruction cache and/or TLB
 */
#define	PV_BEEN_EXECD(f)  (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC))

/*
 * Macro to determine if a mapping might be resident in the
 * data cache and/or TLB
 */
#define	PV_BEEN_REFD(f)   (((f) & PVF_REF) != 0)

/*
 * Data for the pv entry allocation mechanism
 */
#define MINPV	2048

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

#define pmap_is_current(pm)	((pm) == pmap_kernel() || \
            curproc->p_vmspace->vm_map.pmap == (pm))
static uma_zone_t pvzone;
uma_zone_t l2zone;
static uma_zone_t l2table_zone;
static vm_offset_t pmap_kernel_l2dtable_kva;
static vm_offset_t pmap_kernel_l2ptp_kva;
static vm_paddr_t pmap_kernel_l2ptp_phys;
static struct vm_object pvzone_obj;
static struct vm_object l2zone_obj;
static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
int pmap_pagedaemon_waken = 0;

/*
 * This list exists for the benefit of pmap_map_chunk().  It keeps track
 * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
 * find them as necessary.
 *
 * Note that the data on this list MUST remain valid after initarm() returns,
 * as pmap_bootstrap() uses it to construct L2 table metadata.
 */
SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);

static void
pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt)
{
	int i;

	l1->l1_kva = l1pt;
	l1->l1_domain_use_count = 0;
	l1->l1_domain_first = 0;

	for (i = 0; i < PMAP_DOMAINS; i++)
		l1->l1_domain_free[i] = i + 1;

	/*
	 * Copy the kernel's L1 entries to each new L1.
	 */
	if (l1pt != pmap_kernel()->pm_l1->l1_kva)
		memcpy(l1pt, pmap_kernel()->pm_l1->l1_kva, L1_TABLE_SIZE);

	if ((l1->l1_physaddr = pmap_extract(pmap_kernel(), (vm_offset_t)l1pt)) == 0)
		panic("pmap_init_l1: can't get PA of L1 at %p", l1pt);
	SLIST_INSERT_HEAD(&l1_list, l1, l1_link);
	TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
}

static vm_offset_t
kernel_pt_lookup(vm_paddr_t pa)
{
	struct pv_addr *pv;

	SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
#ifndef ARM32_NEW_VM_LAYOUT
		if (pv->pv_pa == (pa & ~PAGE_MASK)) {
			return (pv->pv_va | (pa & PAGE_MASK));
		}
#else
		if (pv->pv_pa == pa)
			return (pv->pv_va);
#endif
	}
	return (0);
}

#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
void
pmap_pte_init_generic(void)
{

	pte_l1_s_cache_mode = L1_S_B|L1_S_C;
	pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic;

	pte_l2_l_cache_mode = L2_B|L2_C;
	pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic;

	pte_l2_s_cache_mode = L2_B|L2_C;
	pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;

	/*
	 * If we have a write-through cache, set B and C.  If
	 * we have a write-back cache, then we assume setting
	 * only C will make those pages write-through.
	 */
	if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop) {
		pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
		pte_l2_l_cache_mode_pt = L2_B|L2_C;
		pte_l2_s_cache_mode_pt = L2_B|L2_C;
	} else {
		pte_l1_s_cache_mode_pt = L1_S_C;
		pte_l2_l_cache_mode_pt = L2_C;
		pte_l2_s_cache_mode_pt = L2_C;
	}

	pte_l2_s_prot_u = L2_S_PROT_U_generic;
	pte_l2_s_prot_w = L2_S_PROT_W_generic;
	pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;

	pte_l1_s_proto = L1_S_PROTO_generic;
	pte_l1_c_proto = L1_C_PROTO_generic;
	pte_l2_s_proto = L2_S_PROTO_generic;

	pmap_copy_page_func = pmap_copy_page_generic;
	pmap_zero_page_func = pmap_zero_page_generic;
}

#if defined(CPU_ARM8)
void
pmap_pte_init_arm8(void)
{

	/*
	 * ARM8 is compatible with generic, but we need to use
	 * the page tables uncached.
	 */
	pmap_pte_init_generic();

	pte_l1_s_cache_mode_pt = 0;
	pte_l2_l_cache_mode_pt = 0;
	pte_l2_s_cache_mode_pt = 0;
}
#endif /* CPU_ARM8 */

#if defined(CPU_ARM9) && defined(ARM9_CACHE_WRITE_THROUGH)
void
pmap_pte_init_arm9(void)
{

	/*
	 * ARM9 is compatible with generic, but we want to use
	 * write-through caching for now.
	 */
	pmap_pte_init_generic();

	pte_l1_s_cache_mode = L1_S_C;
	pte_l2_l_cache_mode = L2_C;
	pte_l2_s_cache_mode = L2_C;

	pte_l1_s_cache_mode_pt = L1_S_C;
	pte_l2_l_cache_mode_pt = L2_C;
	pte_l2_s_cache_mode_pt = L2_C;
}
#endif /* CPU_ARM9 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */

#if defined(CPU_ARM10)
void
pmap_pte_init_arm10(void)
{

	/*
	 * ARM10 is compatible with generic, but we want to use
	 * write-through caching for now.
	 */
	pmap_pte_init_generic();

	pte_l1_s_cache_mode = L1_S_B | L1_S_C;
	pte_l2_l_cache_mode = L2_B | L2_C;
	pte_l2_s_cache_mode = L2_B | L2_C;

	pte_l1_s_cache_mode_pt = L1_S_C;
	pte_l2_l_cache_mode_pt = L2_C;
	pte_l2_s_cache_mode_pt = L2_C;

}
#endif /* CPU_ARM10 */

#if  ARM_MMU_SA1 == 1
void
pmap_pte_init_sa1(void)
{

	/*
	 * The StrongARM SA-1 cache does not have a write-through
	 * mode.  So, do the generic initialization, then reset
	 * the page table cache mode to B=1,C=1, and note that
	 * the PTEs need to be sync'd.
	 */
	pmap_pte_init_generic();

	pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
	pte_l2_l_cache_mode_pt = L2_B|L2_C;
	pte_l2_s_cache_mode_pt = L2_B|L2_C;

	pmap_needs_pte_sync = 1;
}
#endif /* ARM_MMU_SA1 == 1*/

#if ARM_MMU_XSCALE == 1
#if (ARM_NMMUS > 1)
static u_int xscale_use_minidata;
#endif

void
pmap_pte_init_xscale(void)
{
	uint32_t auxctl;
	int write_through = 0;

	pte_l1_s_cache_mode = L1_S_B|L1_S_C|L1_S_XSCALE_P;
	pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale;

	pte_l2_l_cache_mode = L2_B|L2_C;
	pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale;

	pte_l2_s_cache_mode = L2_B|L2_C;
	pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;

	pte_l1_s_cache_mode_pt = L1_S_C;
	pte_l2_l_cache_mode_pt = L2_C;
	pte_l2_s_cache_mode_pt = L2_C;
#ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE
	/*
	 * The XScale core has an enhanced mode where writes that
	 * miss the cache cause a cache line to be allocated.  This
	 * is significantly faster than the traditional, write-through
	 * behavior of this case.
	 */
	pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_X);
	pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_X);
	pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_X);
#endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */
#ifdef XSCALE_CACHE_WRITE_THROUGH
	/*
	 * Some versions of the XScale core have various bugs in
	 * their cache units, the work-around for which is to run
	 * the cache in write-through mode.  Unfortunately, this
	 * has a major (negative) impact on performance.  So, we
	 * go ahead and run fast-and-loose, in the hopes that we
	 * don't line up the planets in a way that will trip the
	 * bugs.
	 *
	 * However, we give you the option to be slow-but-correct.
	 */
	write_through = 1;
#elif defined(XSCALE_CACHE_WRITE_BACK)
	/* force write back cache mode */
	write_through = 0;
#elif defined(CPU_XSCALE_PXA2X0)
	/*
	 * Intel PXA2[15]0 processors are known to have a bug in
	 * write-back cache on revision 4 and earlier (stepping
	 * A[01] and B[012]).  Fixed for C0 and later.
	 */
	{
		uint32_t id, type;

		id = cpufunc_id();
		type = id & ~(CPU_ID_XSCALE_COREREV_MASK|CPU_ID_REVISION_MASK);

		if (type == CPU_ID_PXA250 || type == CPU_ID_PXA210) {
			if ((id & CPU_ID_REVISION_MASK) < 5) {
				/* write through for stepping A0-1 and B0-2 */
				write_through = 1;
			}
		}
	}
#endif /* XSCALE_CACHE_WRITE_THROUGH */

	if (write_through) {
		pte_l1_s_cache_mode = L1_S_C;
		pte_l2_l_cache_mode = L2_C;
		pte_l2_s_cache_mode = L2_C;
	}

#if (ARM_NMMUS > 1)
	xscale_use_minidata = 1;
#endif

	pte_l2_s_prot_u = L2_S_PROT_U_xscale;
	pte_l2_s_prot_w = L2_S_PROT_W_xscale;
	pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;

	pte_l1_s_proto = L1_S_PROTO_xscale;
	pte_l1_c_proto = L1_C_PROTO_xscale;
	pte_l2_s_proto = L2_S_PROTO_xscale;

	pmap_copy_page_func = pmap_copy_page_xscale;
	pmap_zero_page_func = pmap_zero_page_xscale;

	/*
	 * Disable ECC protection of page table access, for now.
	 */
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
	auxctl &= ~XSCALE_AUXCTL_P;
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
}

/*
 * xscale_setup_minidata:
 *
 *	Set up the mini-data cache clean area.  We require the
 *	caller to allocate the right amount of physically and
 *	virtually contiguous space.
 */
extern vm_offset_t xscale_minidata_clean_addr;
extern vm_size_t xscale_minidata_clean_size; /* already initialized */
void
xscale_setup_minidata(vm_offset_t l1pt, vm_offset_t va, vm_paddr_t pa)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	pt_entry_t *pte;
	vm_size_t size;
	uint32_t auxctl;

	xscale_minidata_clean_addr = va;

	/* Round it to page size. */
	size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME;

	for (; size != 0;
	     va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) {
#ifndef ARM32_NEW_VM_LAYOUT
		pte = (pt_entry_t *)
		    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
#else
		pte = (pt_entry_t *) kernel_pt_lookup(
		    pde[L1_IDX(va)] & L1_C_ADDR_MASK);
#endif
		if (pte == NULL)
			panic("xscale_setup_minidata: can't find L2 table for "
			    "VA 0x%08x", (u_int32_t) va);
#ifndef ARM32_NEW_VM_LAYOUT
		pte[(va >> PAGE_SHIFT) & 0x3ff] =
#else
		pte[l2pte_index(va)] =
#endif
		    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
		    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
	}

	/*
	 * Configure the mini-data cache for write-back with
	 * read/write-allocate.
	 *
	 * NOTE: In order to reconfigure the mini-data cache, we must
	 * make sure it contains no valid data!  In order to do that,
	 * we must issue a global data cache invalidate command!
	 *
	 * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED!
	 * THIS IS VERY IMPORTANT!
	 */

	/* Invalidate data and mini-data. */
	__asm __volatile("mcr p15, 0, %0, c7, c6, 0" : : "r" (0));
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
	auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA;
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
}
#endif

/*
 * Allocate an L1 translation table for the specified pmap.
 * This is called at pmap creation time.
 */
static void
pmap_alloc_l1(pmap_t pm)
{
	struct l1_ttable *l1;
	u_int8_t domain;

	/*
	 * Remove the L1 at the head of the LRU list
	 */
	mtx_lock(&l1_lru_lock);
	l1 = TAILQ_FIRST(&l1_lru_list);
	TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);

	/*
	 * Pick the first available domain number, and update
	 * the link to the next number.
	 */
	domain = l1->l1_domain_first;
	l1->l1_domain_first = l1->l1_domain_free[domain];

	/*
	 * If there are still free domain numbers in this L1,
	 * put it back on the TAIL of the LRU list.
	 */
	if (++l1->l1_domain_use_count < PMAP_DOMAINS)
		TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);

	mtx_unlock(&l1_lru_lock);

	/*
	 * Fix up the relevant bits in the pmap structure
	 */
	pm->pm_l1 = l1;
	pm->pm_domain = domain;
}
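
/*
 * The domain allocator above is a simple array-based free list: after
 * pmap_init_l1(), l1_domain_first == 0 and l1_domain_free[i] == i + 1,
 * so successive allocations hand out domains 0, 1, 2, ... by following
 * the chain, and pmap_free_l1() below pushes a released domain back
 * onto the head of that chain.
 */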

/*
 * Free an L1 translation table.
 * This is called at pmap destruction time.
 */
static void
pmap_free_l1(pmap_t pm)
{
	struct l1_ttable *l1 = pm->pm_l1;

	mtx_lock(&l1_lru_lock);

	/*
	 * If this L1 is currently on the LRU list, remove it.
	 */
	if (l1->l1_domain_use_count < PMAP_DOMAINS)
		TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);

	/*
	 * Free up the domain number which was allocated to the pmap
	 */
	l1->l1_domain_free[pm->pm_domain] = l1->l1_domain_first;
	l1->l1_domain_first = pm->pm_domain;
	l1->l1_domain_use_count--;

	/*
	 * The L1 now must have at least 1 free domain, so add
	 * it back to the LRU list. If the use count is zero,
	 * put it at the head of the list, otherwise it goes
	 * to the tail.
	 */
	if (l1->l1_domain_use_count == 0) {
		TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru);
	} else
		TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);

	mtx_unlock(&l1_lru_lock);
}

static PMAP_INLINE void
pmap_use_l1(pmap_t pm)
{
	struct l1_ttable *l1;

	/*
843	/*
	 * Do nothing for the kernel pmap: access to an L1 by the
	 * kernel pmap must not affect the LRU list.
	 */
	if (pm == pmap_kernel())
		return;

	l1 = pm->pm_l1;

	/*
	 * If the L1 is not currently on the LRU list, just return
	 */
	if (l1->l1_domain_use_count == PMAP_DOMAINS)
		return;

	mtx_lock(&l1_lru_lock);

	/*
	 * Check the use count again, now that we've acquired the lock
	 */
	if (l1->l1_domain_use_count == PMAP_DOMAINS) {
		mtx_unlock(&l1_lru_lock);
		return;
	}

	/*
	 * Move the L1 to the back of the LRU list
	 */
	TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
	TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);

	mtx_unlock(&l1_lru_lock);
}


/*
 * Returns a pointer to the L2 bucket associated with the specified pmap
 * and VA, or NULL if no L2 bucket exists for the address.
 */
static PMAP_INLINE struct l2_bucket *
pmap_get_l2_bucket(pmap_t pm, vm_offset_t va)
{
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	u_short l1idx;

	l1idx = L1_IDX(va);

	if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL ||
	    (l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL)
		return (NULL);

	return (l2b);
}

/*
 * Returns a pointer to the L2 bucket associated with the specified pmap
 * and VA.
 *
 * If no L2 bucket exists, perform the necessary allocations to put an L2
 * bucket/page table in place.
 *
 * Note that if a new L2 bucket/page was allocated, the caller *must*
 * increment the bucket occupancy counter appropriately *before*
 * releasing the pmap's lock to ensure no other thread or cpu deallocates
 * the bucket/page in the meantime.
 */
static struct l2_bucket *
pmap_alloc_l2_bucket(pmap_t pm, vm_offset_t va)
{
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	u_short l1idx;

	l1idx = L1_IDX(va);

	if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
		/*
		 * No mapping at this address, as there is
		 * no entry in the L1 table.
		 * Need to allocate a new l2_dtable.
		 */
		if ((l2 = pmap_alloc_l2_dtable()) == NULL) {
			return (NULL);
		}
		bzero(l2, sizeof(*l2));
		/*
		 * Link it into the parent pmap
		 */
		pm->pm_l2[L2_IDX(l1idx)] = l2;
	}

	l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];

	/*
	 * Fetch pointer to the L2 page table associated with the address.
	 */
	if (l2b->l2b_kva == NULL) {
		pt_entry_t *ptep;

		/*
		 * No L2 page table has been allocated. Chances are, this
		 * is because we just allocated the l2_dtable, above.
		 */
		ptep = (void*)uma_zalloc(l2zone, M_NOWAIT);
		if (ptep == NULL) {
			/*
			 * Oops, no more L2 page tables available at this
			 * time. We may need to deallocate the l2_dtable
			 * if we allocated a new one above.
			 */
			if (l2->l2_occupancy == 0) {
				pm->pm_l2[L2_IDX(l1idx)] = NULL;
				pmap_free_l2_dtable(l2);
			}
			return (NULL);
		}

		l2->l2_occupancy++;
		l2b->l2b_kva = ptep;
		l2b->l2b_phys = vtophys(ptep);
		l2b->l2b_l1idx = l1idx;
	}

	return (l2b);
}

static PMAP_INLINE void
#ifndef PMAP_INCLUDE_PTE_SYNC
pmap_free_l2_ptp(pt_entry_t *l2)
#else
pmap_free_l2_ptp(boolean_t need_sync, pt_entry_t *l2)
#endif
{
#ifdef PMAP_INCLUDE_PTE_SYNC
	/*
	 * Note: With a write-back cache, we may need to sync this
	 * L2 table before re-using it.
	 * This is because it may have belonged to a non-current
	 * pmap, in which case the cache syncs would have been
	 * skipped when the pages were being unmapped. If the
	 * L2 table were then to be immediately re-allocated to
	 * the *current* pmap, it may well contain stale mappings
	 * which have not yet been cleared by a cache write-back
	 * and so would still be visible to the mmu.
	 */
	if (need_sync)
		PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
#endif
	uma_zfree(l2zone, l2);
}
/*
 * One or more mappings in the specified L2 descriptor table have just been
 * invalidated.
 *
 * Garbage collect the metadata and descriptor table itself if necessary.
 *
 * The pmap lock must be acquired when this is called (not necessary
 * for the kernel pmap).
 */
static void
pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count)
{
	struct l2_dtable *l2;
	pd_entry_t *pl1pd, l1pd;
	pt_entry_t *ptep;
	u_short l1idx;


	/*
	 * Update the bucket's reference count according to how many
	 * PTEs the caller has just invalidated.
	 */
	l2b->l2b_occupancy -= count;

	/*
	 * Note:
	 *
	 * Level 2 page tables allocated to the kernel pmap are never freed
	 * as that would require checking all Level 1 page tables and
	 * removing any references to the Level 2 page table. See also the
	 * comment elsewhere about never freeing bootstrap L2 descriptors.
	 *
	 * We make do with just invalidating the mapping in the L2 table.
	 *
	 * This isn't really a big deal in practice and, in fact, leads
	 * to a performance win over time as we don't need to continually
	 * alloc/free.
	 */
	if (l2b->l2b_occupancy > 0 || pm == pmap_kernel())
		return;

	/*
	 * There are no more valid mappings in this level 2 page table.
	 * Go ahead and NULL-out the pointer in the bucket, then
	 * free the page table.
	 */
	l1idx = l2b->l2b_l1idx;
	ptep = l2b->l2b_kva;
	l2b->l2b_kva = NULL;

	pl1pd = &pm->pm_l1->l1_kva[l1idx];

	/*
	 * If the L1 slot matches the pmap's domain
	 * number, then invalidate it.
	 */
	l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK);
	if (l1pd == (L1_C_DOM(pm->pm_domain) | L1_TYPE_C)) {
		*pl1pd = 0;
		PTE_SYNC(pl1pd);
	}

	/*
	 * Release the L2 descriptor table back to the pool cache.
	 */
#ifndef PMAP_INCLUDE_PTE_SYNC
	pmap_free_l2_ptp(ptep);
#else
	pmap_free_l2_ptp(!pmap_is_current(pm), ptep);
#endif

	/*
	 * Update the reference count in the associated l2_dtable
	 */
	l2 = pm->pm_l2[L2_IDX(l1idx)];
	if (--l2->l2_occupancy > 0)
		return;

	/*
	 * There are no more valid mappings in any of the Level 1
	 * slots managed by this l2_dtable. Go ahead and NULL-out
	 * the pointer in the parent pmap and free the l2_dtable.
	 */
	pm->pm_l2[L2_IDX(l1idx)] = NULL;
	pmap_free_l2_dtable(l2);
}

/*
 * Pool cache constructors for L2 descriptor tables, metadata and pmap
 * structures.
 */
static int
pmap_l2ptp_ctor(void *mem, int size, void *arg, int flags)
{
#ifndef PMAP_INCLUDE_PTE_SYNC
	struct l2_bucket *l2b;
	pt_entry_t *ptep, pte;
#ifdef ARM_USE_SMALL_ALLOC
	pd_entry_t *pde;
#endif
	vm_offset_t va = (vm_offset_t)mem & ~PAGE_MASK;

	/*
	 * The mappings for these page tables were initially made using
	 * pmap_kenter() by the pool subsystem. Therefore, the cache-
	 * mode will not be right for page table mappings. To avoid
	 * polluting the pmap_kenter() code with a special case for
	 * page tables, we simply fix up the cache-mode here if it's not
	 * correct.
	 */
#ifdef ARM_USE_SMALL_ALLOC
	pde = &kernel_pmap->pm_l1->l1_kva[L1_IDX(va)];
	if (!l1pte_section_p(*pde)) {
#endif
		l2b = pmap_get_l2_bucket(pmap_kernel(), va);
		ptep = &l2b->l2b_kva[l2pte_index(va)];
		pte = *ptep;

		if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
			/*
			 * Page tables must have the cache-mode set to
			 * Write-Thru.
			 */
			*ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
			PTE_SYNC(ptep);
			cpu_tlb_flushD_SE(va);
			cpu_cpwait();
		}

#ifdef ARM_USE_SMALL_ALLOC
	}
#endif
#endif
	memset(mem, 0, L2_TABLE_SIZE_REAL);
	PTE_SYNC_RANGE(mem, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
	return (0);
}

/*
 * A bunch of routines to conditionally flush the caches/TLB depending
 * on whether the specified pmap actually needs to be flushed at any
 * given time.
 */
static PMAP_INLINE void
pmap_tlb_flushID_SE(pmap_t pm, vm_offset_t va)
{

	if (pmap_is_current(pm))
		cpu_tlb_flushID_SE(va);
}

static PMAP_INLINE void
pmap_tlb_flushD_SE(pmap_t pm, vm_offset_t va)
{

	if (pmap_is_current(pm))
		cpu_tlb_flushD_SE(va);
}

static PMAP_INLINE void
pmap_tlb_flushID(pmap_t pm)
{

	if (pmap_is_current(pm))
		cpu_tlb_flushID();
}
static PMAP_INLINE void
pmap_tlb_flushD(pmap_t pm)
{

	if (pmap_is_current(pm))
		cpu_tlb_flushD();
}

static PMAP_INLINE void
pmap_idcache_wbinv_range(pmap_t pm, vm_offset_t va, vm_size_t len)
{

	if (pmap_is_current(pm))
		cpu_idcache_wbinv_range(va, len);
}

static PMAP_INLINE void
pmap_dcache_wb_range(pmap_t pm, vm_offset_t va, vm_size_t len,
    boolean_t do_inv, boolean_t rd_only)
{

	if (pmap_is_current(pm)) {
		if (do_inv) {
			if (rd_only)
				cpu_dcache_inv_range(va, len);
			else
				cpu_dcache_wbinv_range(va, len);
		} else
		if (!rd_only)
			cpu_dcache_wb_range(va, len);
	}
}

static PMAP_INLINE void
pmap_idcache_wbinv_all(pmap_t pm)
{

	if (pmap_is_current(pm))
		cpu_idcache_wbinv_all();
}

static PMAP_INLINE void
pmap_dcache_wbinv_all(pmap_t pm)
{

	if (pmap_is_current(pm))
		cpu_dcache_wbinv_all();
}

/*
 * this routine defines the region(s) of memory that should
 * not be tested for the modified bit.
 */
static PMAP_INLINE int
pmap_track_modified(vm_offset_t va)
{
	if ((va < kmi.clean_sva) || (va >= kmi.clean_eva))
		return 1;
	else
		return 0;
}
/*
 * PTE_SYNC_CURRENT:
 *
 *     Make sure the pte is written out to RAM.
 *     We need to do this if any of the following is true:
 *       - We're dealing with the kernel pmap
 *       - There is no pmap active in the cache/tlb.
 *       - The specified pmap is 'active' in the cache/tlb.
 */
#ifdef PMAP_INCLUDE_PTE_SYNC
#define	PTE_SYNC_CURRENT(pm, ptep)	\
do {					\
	if (PMAP_NEEDS_PTE_SYNC &&	\
	    pmap_is_current(pm))	\
		PTE_SYNC(ptep);		\
} while (/*CONSTCOND*/0)
#else
#define	PTE_SYNC_CURRENT(pm, ptep)	/* nothing */
#endif
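
/*
 * Typical usage, as in pmap_vac_me_user() below: modify the descriptor
 * in place and then push it out to RAM, e.g.
 *
 *	*ptep = pte;
 *	PTE_SYNC_CURRENT(pm, ptep);
 *
 * On configurations built without PMAP_INCLUDE_PTE_SYNC the macro
 * compiles away entirely.
 */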

/*
 * Since we have a virtually indexed cache, we may need to inhibit caching if
 * there is more than one mapping and at least one of them is writable.
 * Since we purge the cache on every context switch, we only need to check for
 * other mappings within the same pmap, or kernel_pmap.
 * This function is also called when a page is unmapped, to possibly reenable
 * caching on any remaining mappings.
 *
 * The code implements the following logic, where:
 *
 * KW = # of kernel read/write pages
 * KR = # of kernel read only pages
 * UW = # of user read/write pages
 * UR = # of user read only pages
 *
 * KC = kernel mapping is cacheable
 * UC = user mapping is cacheable
 *
 *               KW=0,KR=0  KW=0,KR>0  KW=1,KR=0  KW>1,KR>=0
 *             +---------------------------------------------
 * UW=0,UR=0   | ---        KC=1       KC=1       KC=0
 * UW=0,UR>0   | UC=1       KC=1,UC=1  KC=0,UC=0  KC=0,UC=0
 * UW=1,UR=0   | UC=1       KC=0,UC=0  KC=0,UC=0  KC=0,UC=0
 * UW>1,UR>=0  | UC=0       KC=0,UC=0  KC=0,UC=0  KC=0,UC=0
 */

static const int pmap_vac_flags[4][4] = {
	{-1,		0,		0,		PVF_KNC},
	{0,		0,		PVF_NC,		PVF_NC},
	{0,		PVF_NC,		PVF_NC,		PVF_NC},
	{PVF_UNC,	PVF_NC,		PVF_NC,		PVF_NC}
};

static PMAP_INLINE int
pmap_get_vac_flags(const struct vm_page *pg)
{
	int kidx, uidx;

	kidx = 0;
	if (pg->md.kro_mappings || pg->md.krw_mappings > 1)
		kidx |= 1;
	if (pg->md.krw_mappings)
		kidx |= 2;

	uidx = 0;
	if (pg->md.uro_mappings || pg->md.urw_mappings > 1)
		uidx |= 1;
	if (pg->md.urw_mappings)
		uidx |= 2;

	return (pmap_vac_flags[uidx][kidx]);
}
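
/*
 * Example, matching the table above: with one kernel read/write mapping
 * (KW=1, KR=0) and one user read-only mapping (UW=0, UR=1), kidx == 2
 * and uidx == 1, so pmap_vac_flags[1][2] == PVF_NC and the mappings are
 * made non-cacheable by pmap_vac_me_harder() below.
 */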

static __inline void
pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vm_offset_t va)
{
	int nattr;

	nattr = pmap_get_vac_flags(pg);

	if (nattr < 0) {
		pg->md.pvh_attrs &= ~PVF_NC;
		return;
	}

	if (nattr == 0 && (pg->md.pvh_attrs & PVF_NC) == 0) {
		return;
	}

	if (pm == pmap_kernel())
		pmap_vac_me_kpmap(pg, pm, va);
	else
		pmap_vac_me_user(pg, pm, va);

	pg->md.pvh_attrs = (pg->md.pvh_attrs & ~PVF_NC) | nattr;
}

static void
pmap_vac_me_kpmap(struct vm_page *pg, pmap_t pm, vm_offset_t va)
{
	u_int u_cacheable, u_entries;
	struct pv_entry *pv;
	pmap_t last_pmap = pm;

	/*
	 * Pass one, see if there are both kernel and user pmaps for
	 * this page.  Calculate whether there are user-writable or
	 * kernel-writable pages.
	 */
	u_cacheable = 0;
	TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
		if (pv->pv_pmap != pm && (pv->pv_flags & PVF_NC) == 0)
			u_cacheable++;
	}

	u_entries = pg->md.urw_mappings + pg->md.uro_mappings;

	/*
	 * We know we have just been updating a kernel entry, so if
	 * all user pages are already cacheable, then there is nothing
	 * further to do.
	 */
	if (pg->md.k_mappings == 0 && u_cacheable == u_entries)
		return;

	if (u_entries) {
		/*
		 * Scan over the list again, for each entry, if it
		 * might not be set correctly, call pmap_vac_me_user
		 * to recalculate the settings.
		 */
		TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
			/*
			 * We know kernel mappings will get set
			 * correctly in other calls.  We also know
			 * that if the pmap is the same as last_pmap
			 * then we've just handled this entry.
			 */
			if (pv->pv_pmap == pm || pv->pv_pmap == last_pmap)
				continue;

			/*
			 * If there are kernel entries and this page
			 * is writable but non-cacheable, then we can
			 * skip this entry also.
			 */
			if (pg->md.k_mappings &&
			    (pv->pv_flags & (PVF_NC | PVF_WRITE)) ==
			    (PVF_NC | PVF_WRITE))
				continue;

			/*
			 * Similarly if there are no kernel-writable
			 * entries and the page is already
			 * read-only/cacheable.
			 */
			if (pg->md.krw_mappings == 0 &&
			    (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0)
				continue;

			/*
			 * For some of the remaining cases, we know
			 * that we must recalculate, but for others we
			 * can't tell if they are correct or not, so
			 * we recalculate anyway.
			 */
			pmap_vac_me_user(pg, (last_pmap = pv->pv_pmap), 0);
		}

		if (pg->md.k_mappings == 0)
			return;
	}

	pmap_vac_me_user(pg, pm, va);
}

static void
pmap_vac_me_user(struct vm_page *pg, pmap_t pm, vm_offset_t va)
{
	pmap_t kpmap = pmap_kernel();
	struct pv_entry *pv, *npv;
	struct l2_bucket *l2b;
	pt_entry_t *ptep, pte;
	u_int entries = 0;
	u_int writable = 0;
	u_int cacheable_entries = 0;
	u_int kern_cacheable = 0;
	u_int other_writable = 0;

	/*
	 * Count mappings and writable mappings in this pmap.
	 * Include kernel mappings as part of our own.
	 * Keep a pointer to the first one.
	 */
	npv = TAILQ_FIRST(&pg->md.pv_list);
	TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
		/* Count mappings in the same pmap */
		if (pm == pv->pv_pmap || kpmap == pv->pv_pmap) {
			if (entries++ == 0)
				npv = pv;

			/* Cacheable mappings */
			if ((pv->pv_flags & PVF_NC) == 0) {
				cacheable_entries++;
				if (kpmap == pv->pv_pmap)
					kern_cacheable++;
			}

			/* Writable mappings */
			if (pv->pv_flags & PVF_WRITE)
				++writable;
		} else
		if (pv->pv_flags & PVF_WRITE)
			other_writable = 1;
	}

	/*
	 * Enable or disable caching as necessary.
	 * Note: the first entry might be part of the kernel pmap,
	 * so we can't assume this is indicative of the state of the
	 * other (maybe non-kpmap) entries.
	 */
	if ((entries > 1 && writable) ||
	    (entries > 0 && pm == kpmap && other_writable)) {
		if (cacheable_entries == 0)
			return;

		for (pv = npv; pv; pv = TAILQ_NEXT(pv, pv_list)) {
			if ((pm != pv->pv_pmap && kpmap != pv->pv_pmap) ||
			    (pv->pv_flags & PVF_NC))
				continue;

			pv->pv_flags |= PVF_NC;

			l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
			ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
			pte = *ptep & ~L2_S_CACHE_MASK;

			if ((va != pv->pv_va || pm != pv->pv_pmap) &&
			    l2pte_valid(pte)) {
				if (PV_BEEN_EXECD(pv->pv_flags)) {
					pmap_idcache_wbinv_range(pv->pv_pmap,
					    pv->pv_va, PAGE_SIZE);
					pmap_tlb_flushID_SE(pv->pv_pmap,
					    pv->pv_va);
				} else
				if (PV_BEEN_REFD(pv->pv_flags)) {
					pmap_dcache_wb_range(pv->pv_pmap,
					    pv->pv_va, PAGE_SIZE, TRUE,
					    (pv->pv_flags & PVF_WRITE) == 0);
					pmap_tlb_flushD_SE(pv->pv_pmap,
					    pv->pv_va);
				}
			}

			*ptep = pte;
			PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
		}
		cpu_cpwait();
	} else
	if (entries > cacheable_entries) {
		/*
		 * Turn caching back on for some pages.  If it is a kernel
		 * page, only do so if there are no other writable pages.
		 */
		for (pv = npv; pv; pv = TAILQ_NEXT(pv, pv_list)) {
			if (!(pv->pv_flags & PVF_NC) || (pm != pv->pv_pmap &&
			    (kpmap != pv->pv_pmap || other_writable)))
				continue;

			pv->pv_flags &= ~PVF_NC;

			l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
			ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
			pte = (*ptep & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode;

			if (l2pte_valid(pte)) {
				if (PV_BEEN_EXECD(pv->pv_flags)) {
					pmap_tlb_flushID_SE(pv->pv_pmap,
					    pv->pv_va);
				} else
				if (PV_BEEN_REFD(pv->pv_flags)) {
					pmap_tlb_flushD_SE(pv->pv_pmap,
					    pv->pv_va);
				}
			}

			*ptep = pte;
			PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
		}
	}
}

/*
 * Modify pte bits for all ptes corresponding to the given physical address.
 * We use `maskbits' rather than `clearbits' because we're always passing
 * constants and the latter would require an extra inversion at run-time.
 */
static int
pmap_clearbit(struct vm_page *pg, u_int maskbits)
{
	struct l2_bucket *l2b;
	struct pv_entry *pv;
	pt_entry_t *ptep, npte, opte;
	pmap_t pm;
	vm_offset_t va;
	u_int oflags;
	int count = 0;
#if 0
	PMAP_HEAD_TO_MAP_LOCK();
	simple_lock(&pg->mdpage.pvh_slock);
#endif

	/*
	 * Clear saved attributes (modify, reference)
	 */
	pg->md.pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF));

	if (TAILQ_EMPTY(&pg->md.pv_list)) {
#if 0
		simple_unlock(&pg->mdpage.pvh_slock);
		PMAP_HEAD_TO_MAP_UNLOCK();
#endif
		return (0);
	}

	/*
	 * Loop over all current mappings, setting/clearing as appropriate.
	 */
	TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
		va = pv->pv_va;
		pm = pv->pv_pmap;
		oflags = pv->pv_flags;
		pv->pv_flags &= ~maskbits;

#if 0
		pmap_acquire_pmap_lock(pm);
#endif

		l2b = pmap_get_l2_bucket(pm, va);

		ptep = &l2b->l2b_kva[l2pte_index(va)];
		npte = opte = *ptep;

		if (maskbits & (PVF_WRITE|PVF_MOD) &&
		    !pmap_track_modified(pv->pv_va)) {
			if ((pv->pv_flags & PVF_NC)) {
				/*
				 * Entry is not cacheable:
				 *
				 * Don't turn caching on again if this is a
				 * modified emulation. This would be
				 * inconsistent with the settings created by
				 * pmap_vac_me_harder(). Otherwise, it's safe
				 * to re-enable caching.
				 * There's no need to call pmap_vac_me_harder()
				 * here: all pages are losing their write
				 * permission.
				 */
				if (maskbits & PVF_WRITE) {
					npte |= pte_l2_s_cache_mode;
					pv->pv_flags &= ~PVF_NC;
				}
			} else
			if (opte & L2_S_PROT_W) {
				vm_page_dirty(pg);
				/*
				 * Entry is writable/cacheable: check if the
				 * pmap is current and, if so, flush the entry;
				 * otherwise it won't be in the cache.
				 */
				if (PV_BEEN_EXECD(oflags))
					pmap_idcache_wbinv_range(pm, pv->pv_va,
					    PAGE_SIZE);
				else
				if (PV_BEEN_REFD(oflags))
					pmap_dcache_wb_range(pm, pv->pv_va,
					    PAGE_SIZE,
					    (maskbits & PVF_REF) ? TRUE : FALSE,
					    FALSE);
			}

			/* make the pte read only */
			npte &= ~L2_S_PROT_W;

			if (maskbits & PVF_WRITE) {
				/*
				 * Keep alias accounting up to date
				 */
				if (pv->pv_pmap == pmap_kernel()) {
					if (oflags & PVF_WRITE) {
						pg->md.krw_mappings--;
						pg->md.kro_mappings++;
					}
				} else
				if (oflags & PVF_WRITE) {
					pg->md.urw_mappings--;
					pg->md.uro_mappings++;
				}
			}
		}

		if (maskbits & PVF_REF && !pmap_track_modified(pv->pv_va)) {
			if ((pv->pv_flags & PVF_NC) == 0 &&
			    (maskbits & (PVF_WRITE|PVF_MOD)) == 0) {
				/*
				 * Check npte here; we may have already
				 * done the wbinv above, and the validity
				 * of the PTE is the same for opte and
				 * npte.
				 */
				if (npte & L2_S_PROT_W) {
					if (PV_BEEN_EXECD(oflags))
						pmap_idcache_wbinv_range(pm,
						    pv->pv_va, PAGE_SIZE);
					else
					if (PV_BEEN_REFD(oflags))
						pmap_dcache_wb_range(pm,
						    pv->pv_va, PAGE_SIZE,
						    TRUE, FALSE);
				} else
				if ((npte & L2_TYPE_MASK) != L2_TYPE_INV) {
					/* XXXJRT need idcache_inv_range */
					if (PV_BEEN_EXECD(oflags))
						pmap_idcache_wbinv_range(pm,
						    pv->pv_va, PAGE_SIZE);
					else
					if (PV_BEEN_REFD(oflags))
						pmap_dcache_wb_range(pm,
						    pv->pv_va, PAGE_SIZE,
						    TRUE, TRUE);
				}
			}

			/*
			 * Make the PTE invalid so that we will take a
			 * page fault the next time the mapping is
			 * referenced.
			 */
			npte &= ~L2_TYPE_MASK;
			npte |= L2_TYPE_INV;
		}

		if (npte != opte) {
			count++;
			*ptep = npte;
			PTE_SYNC(ptep);
			/* Flush the TLB entry if a current pmap. */
			if (PV_BEEN_EXECD(oflags))
				pmap_tlb_flushID_SE(pm, pv->pv_va);
			else
			if (PV_BEEN_REFD(oflags))
				pmap_tlb_flushD_SE(pm, pv->pv_va);
		}

#if 0
		pmap_release_pmap_lock(pm);
#endif

	}

#if 0
	simple_unlock(&pg->mdpage.pvh_slock);
	PMAP_HEAD_TO_MAP_UNLOCK();
#endif
	if (maskbits & PVF_WRITE)
		vm_page_flag_clear(pg, PG_WRITEABLE);
	return (count);
}

/*
 * main pv_entry manipulation functions:
 *   pmap_enter_pv: enter a mapping onto a vm_page list
 *   pmap_remove_pv: remove a mapping from a vm_page list
 *
 * NOTE: pmap_enter_pv expects to lock the pvh itself
 *       pmap_remove_pv expects the caller to lock the pvh before calling
 */

/*
 * pmap_enter_pv: enter a mapping onto a vm_page list
 *
 * => caller should hold the proper lock on pmap_main_lock
 * => caller should have pmap locked
 * => we will gain the lock on the vm_page and allocate the new pv_entry
 * => caller should adjust ptp's wire_count before calling
 * => caller should not adjust pmap's wire_count
 */
static void
pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
    vm_offset_t va, u_int flags)
{

	pve->pv_pmap = pm;
	pve->pv_va = va;
	pve->pv_flags = flags;

#if 0
	mtx_lock(&pg->md.pvh_mtx);
#endif
	TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list);
	TAILQ_INSERT_HEAD(&pm->pm_pvlist, pve, pv_plist);
	pg->md.pvh_attrs |= flags & (PVF_REF | PVF_MOD);
	if (pm == pmap_kernel()) {
		if (flags & PVF_WRITE)
			pg->md.krw_mappings++;
		else
			pg->md.kro_mappings++;
	} else {
		if (flags & PVF_WRITE)
			pg->md.urw_mappings++;
		else
			pg->md.uro_mappings++;
	}
1737	pg->md.pv_list_count++;
1738#if 0
1739	mtx_unlock(&pg->md.pvh_mtx);
1740#endif
1741	if (pve->pv_flags & PVF_WIRED)
1742		++pm->pm_stats.wired_count;
1743	vm_page_flag_set(pg, PG_REFERENCED);
1744}
1745
1746/*
1747 *
1748 * pmap_find_pv: Find a pv entry
1749 *
1750 * => caller should hold lock on vm_page
1751 */
1752static PMAP_INLINE struct pv_entry *
1753pmap_find_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va)
1754{
1755	struct pv_entry *pv;
1756
1757	TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list)
1758	    if (pm == pv->pv_pmap && va == pv->pv_va)
1759		    break;
1760	return (pv);
1761}
1762
1763/*
1764 * vector_page_setprot:
1765 *
1766 *	Manipulate the protection of the vector page.
1767 */
1768void
1769vector_page_setprot(int prot)
1770{
1771	struct l2_bucket *l2b;
1772	pt_entry_t *ptep;
1773
1774	l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page);
1775
1776	ptep = &l2b->l2b_kva[l2pte_index(vector_page)];
1777
1778	*ptep = (*ptep & ~L1_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot);
1779	PTE_SYNC(ptep);
1780	cpu_tlb_flushD_SE(vector_page);
1781	cpu_cpwait();
1782}
1783
1784/*
1785 * pmap_remove_pv: try to remove a mapping from a pv_list
1786 *
1787 * => caller should hold proper lock on pmap_main_lock
1788 * => pmap should be locked
1789 * => caller should hold lock on vm_page [so that attrs can be adjusted]
1790 * => caller should adjust ptp's wire_count and free PTP if needed
1791 * => caller should NOT adjust pmap's wire_count
1792 * => we return the removed pve
1793 */
1794
1795static void
1796pmap_nuke_pv(struct vm_page *pg, pmap_t pm, struct pv_entry *pve)
1797{
1798
1799	TAILQ_REMOVE(&pg->md.pv_list, pve, pv_list);
1800	TAILQ_REMOVE(&pm->pm_pvlist, pve, pv_plist);
1801	if (pve->pv_flags & PVF_WIRED)
1802		--pm->pm_stats.wired_count;
1803	pg->md.pv_list_count--;
1804	if (pg->md.pvh_attrs & PVF_MOD)
1805		vm_page_dirty(pg);
1806	if (pm == pmap_kernel()) {
1807		if (pve->pv_flags & PVF_WRITE)
1808			pg->md.krw_mappings--;
1809		else
1810			pg->md.kro_mappings--;
1811	} else
1812		if (pve->pv_flags & PVF_WRITE)
1813			pg->md.urw_mappings--;
1814		else
1815			pg->md.uro_mappings--;
1816	if (TAILQ_FIRST(&pg->md.pv_list) == NULL ||
1817	    (pg->md.krw_mappings == 0 && pg->md.urw_mappings == 0)) {
1818		pg->md.pvh_attrs &= ~PVF_MOD;
1819		if (TAILQ_FIRST(&pg->md.pv_list) == NULL)
1820			pg->md.pvh_attrs &= ~PVF_REF;
1821		vm_page_flag_clear(pg, PG_WRITEABLE);
1822	}
1823	if (TAILQ_FIRST(&pg->md.pv_list))
1824		vm_page_flag_set(pg, PG_REFERENCED);
1825	if (pve->pv_flags & PVF_WRITE)
1826		pmap_vac_me_harder(pg, pm, 0);
1827}
1828
1829static struct pv_entry *
1830pmap_remove_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va)
1831{
1832	struct pv_entry *pve;
1833
1834	pve = TAILQ_FIRST(&pg->md.pv_list);
1835
1836	while (pve) {
1837		if (pve->pv_pmap == pm && pve->pv_va == va) {	/* match? */
1838			pmap_nuke_pv(pg, pm, pve);
1839			break;
1840		}
1841		pve = TAILQ_NEXT(pve, pv_list);
1842	}
1843
1844	return(pve);				/* return removed pve */
1845}
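
/*
 * Note for callers: the pv_entry returned by pmap_remove_pv() is only
 * unlinked from the lists, not freed.  The caller either hands it back
 * to the zone with pmap_free_pv_entry() (as pmap_remove() does below)
 * or recycles it for a replacement mapping (as pmap_enter() does).
 */
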
1846/*
1847 *
1848 * pmap_modify_pv: Update pv flags
1849 *
1850 * => caller should hold lock on vm_page [so that attrs can be adjusted]
1851 * => caller should NOT adjust pmap's wire_count
1852 * => caller must call pmap_vac_me_harder() if writable status of a page
1853 *    may have changed.
1854 * => we return the old flags
1855 *
1856 * Modify a physical-virtual mapping in the pv table
1857 */
1858static u_int
1859pmap_modify_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va,
1860    u_int clr_mask, u_int set_mask)
1861{
1862	struct pv_entry *npv;
1863	u_int flags, oflags;
1864
1865	if ((npv = pmap_find_pv(pg, pm, va)) == NULL)
1866		return (0);
1867
1868	/*
1869	 * There is at least one VA mapping this page.
1870	 */
1871
1872	if (clr_mask & (PVF_REF | PVF_MOD))
1873		pg->md.pvh_attrs |= set_mask & (PVF_REF | PVF_MOD);
1874
1875	oflags = npv->pv_flags;
1876	npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask;
1877
1878	if ((flags ^ oflags) & PVF_WIRED) {
1879		if (flags & PVF_WIRED)
1880			++pm->pm_stats.wired_count;
1881		else
1882			--pm->pm_stats.wired_count;
1883	}
1884
1885	if ((flags ^ oflags) & PVF_WRITE) {
1886		if (pm == pmap_kernel()) {
1887			if (flags & PVF_WRITE) {
1888				pg->md.krw_mappings++;
1889				pg->md.kro_mappings--;
1890			} else {
1891				pg->md.kro_mappings++;
1892				pg->md.krw_mappings--;
1893			}
1894		} else
1895		if (flags & PVF_WRITE) {
1896			pg->md.urw_mappings++;
1897			pg->md.uro_mappings--;
1898		} else {
1899			pg->md.uro_mappings++;
1900			pg->md.urw_mappings--;
1901		}
1902		if (pg->md.krw_mappings == 0 && pg->md.urw_mappings == 0) {
1903			pg->md.pvh_attrs &= ~PVF_MOD;
1904			vm_page_flag_clear(pg, PG_WRITEABLE);
1905		}
1906		pmap_vac_me_harder(pg, pm, 0);
1907	}
1908
1909	return (oflags);
1910}
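
/*
 * A minimal usage sketch (mirroring pmap_change_wiring() later in this
 * file): toggle only the wired status of an existing mapping, leaving
 * the other pv flags alone.  'pg', 'pm' and 'va' are assumed to
 * describe a managed mapping that is already on the pv list:
 *
 *	oflags = pmap_modify_pv(pg, pm, va, PVF_WIRED,
 *	    wired ? PVF_WIRED : 0);
 *
 * Since PVF_WRITE is not touched here, no pmap_vac_me_harder() call is
 * needed afterwards.
 */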
1911
1912/* Function to set the debug level of the pmap code */
1913#ifdef PMAP_DEBUG
1914void
1915pmap_debug(int level)
1916{
1917	pmap_debug_level = level;
1918	dprintf("pmap_debug: level=%d\n", pmap_debug_level);
1919}
1920#endif  /* PMAP_DEBUG */
1921
1922void
1923pmap_pinit0(struct pmap *pmap)
1924{
1925	PDEBUG(1, printf("pmap_pinit0: pmap = %08x\n", (u_int32_t) pmap));
1926
1927	dprintf("pmap_pinit0: pmap = %08x, pm_pdir = %08x\n",
1928		(u_int32_t) pmap, (u_int32_t) pmap->pm_pdir);
1929	bcopy(kernel_pmap, pmap, sizeof(*pmap));
1930}
1931
1932/*
1933 *	Initialize a vm_page's machine-dependent fields.
1934 */
1935void
1936pmap_page_init(vm_page_t m)
1937{
1938
1939	TAILQ_INIT(&m->md.pv_list);
1940	m->md.pv_list_count = 0;
1941}
1942
1943/*
1944 *      Initialize the pmap module.
1945 *      Called by vm_init, to initialize any structures that the pmap
1946 *      system needs to map virtual memory.
1947 */
1948void
1949pmap_init(void)
1950{
1951
	PDEBUG(1, printf("pmap_init:\n"));
1953
1954	/*
1955	 * init the pv free list
1956	 */
1957	pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
1958	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
1959	uma_prealloc(pvzone, MINPV);
1960	/*
1961	 * Now it is safe to enable pv_table recording.
1962	 */
1963	PDEBUG(1, printf("pmap_init: done!\n"));
1964
1965}
1966
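/*
 * pmap_fault_fixup:
 *
 *	Called from the abort handlers to perform referenced/modified
 *	emulation.  Mappings are initially entered either invalid
 *	(referenced emulation) or read-only (modified emulation); the
 *	resulting fault is resolved here by upgrading the PTE and
 *	recording PVF_REF/PVF_MOD in the pv entry and page attributes.
 *	Returns non-zero if the fault was handled, zero if it must be
 *	passed on to the VM system.
 */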
1967int
1968pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
1969{
1970	struct l2_dtable *l2;
1971	struct l2_bucket *l2b;
1972	pd_entry_t *pl1pd, l1pd;
1973	pt_entry_t *ptep, pte;
1974	vm_paddr_t pa;
1975	u_int l1idx;
1976	int rv = 0;
1977
1978#if 0
1979	PMAP_MAP_TO_HEAD_LOCK();
1980	pmap_acquire_pmap_lock(pm);
1981#endif
1982	l1idx = L1_IDX(va);
1983
1984	/*
1985	 * If there is no l2_dtable for this address, then the process
1986	 * has no business accessing it.
1987	 *
1988	 * Note: This will catch userland processes trying to access
1989	 * kernel addresses.
1990	 */
1991	l2 = pm->pm_l2[L2_IDX(l1idx)];
1992	if (l2 == NULL)
1993		goto out;
1994
1995	/*
1996	 * Likewise if there is no L2 descriptor table
1997	 */
1998	l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
1999	if (l2b->l2b_kva == NULL)
2000		goto out;
2001
2002	/*
2003	 * Check the PTE itself.
2004	 */
2005	ptep = &l2b->l2b_kva[l2pte_index(va)];
2006	pte = *ptep;
2007	if (pte == 0)
2008		goto out;
2009
2010	/*
2011	 * Catch a userland access to the vector page mapped at 0x0
2012	 */
2013	if (user && (pte & L2_S_PROT_U) == 0)
2014		goto out;
2015
2016	pa = l2pte_pa(pte);
2017
2018	if ((ftype & VM_PROT_WRITE) && (pte & L2_S_PROT_W) == 0) {
2019		/*
2020		 * This looks like a good candidate for "page modified"
2021		 * emulation...
2022		 */
2023		struct pv_entry *pv;
2024		struct vm_page *pg;
2025
2026		/* Extract the physical address of the page */
2027		if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) {
2028			goto out;
2029		}
2030		/* Get the current flags for this page. */
2031
2032		pv = pmap_find_pv(pg, pm, va);
2033		if (pv == NULL) {
2034			goto out;
2035		}
2036
2037		/*
2038		 * Do the flags say this page is writable? If not then it
2039		 * is a genuine write fault. If yes then the write fault is
2040		 * our fault as we did not reflect the write access in the
2041		 * PTE. Now we know a write has occurred we can correct this
2042		 * and also set the modified bit
2043		 */
2044		if ((pv->pv_flags & PVF_WRITE) == 0) {
2045			goto out;
2046		}
2047
2048		if (pmap_track_modified(pv->pv_va)) {
2049			pg->md.pvh_attrs |= PVF_REF | PVF_MOD;
2050			vm_page_dirty(pg);
2051		}
2052		pv->pv_flags |= PVF_REF | PVF_MOD;
2053
2054		/*
2055		 * Re-enable write permissions for the page.  No need to call
2056		 * pmap_vac_me_harder(), since this is just a
2057		 * modified-emulation fault, and the PVF_WRITE bit isn't
2058		 * changing. We've already set the cacheable bits based on
2059		 * the assumption that we can write to this page.
2060		 */
2061		*ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W;
2062		PTE_SYNC(ptep);
2063		rv = 1;
2064	} else
2065	if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) {
2066		/*
2067		 * This looks like a good candidate for "page referenced"
2068		 * emulation.
2069		 */
2070		struct pv_entry *pv;
2071		struct vm_page *pg;
2072
2073		/* Extract the physical address of the page */
2074		vm_page_lock_queues();
2075		if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) {
2076			vm_page_unlock_queues();
2077			goto out;
2078		}
2079		/* Get the current flags for this page. */
2080
2081		pv = pmap_find_pv(pg, pm, va);
2082		if (pv == NULL) {
2083			vm_page_unlock_queues();
2084			goto out;
2085		}
2086
2087		pg->md.pvh_attrs |= PVF_REF;
2088		pv->pv_flags |= PVF_REF;
2089
2090
2091		*ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO;
2092		PTE_SYNC(ptep);
2093		rv = 1;
2094		vm_page_unlock_queues();
2095	}
2096
2097	/*
2098	 * We know there is a valid mapping here, so simply
2099	 * fix up the L1 if necessary.
2100	 */
2101	pl1pd = &pm->pm_l1->l1_kva[l1idx];
2102	l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO;
2103	if (*pl1pd != l1pd) {
2104		*pl1pd = l1pd;
2105		PTE_SYNC(pl1pd);
2106		rv = 1;
2107	}
2108
2109#ifdef CPU_SA110
2110	/*
2111	 * There are bugs in the rev K SA110.  This is a check for one
2112	 * of them.
2113	 */
2114	if (rv == 0 && curcpu()->ci_arm_cputype == CPU_ID_SA110 &&
2115	    curcpu()->ci_arm_cpurev < 3) {
2116		/* Always current pmap */
2117		if (l2pte_valid(pte)) {
2118			extern int kernel_debug;
2119			if (kernel_debug & 1) {
2120				struct proc *p = curlwp->l_proc;
2121				printf("prefetch_abort: page is already "
2122				    "mapped - pte=%p *pte=%08x\n", ptep, pte);
2123				printf("prefetch_abort: pc=%08lx proc=%p "
2124				    "process=%s\n", va, p, p->p_comm);
2125				printf("prefetch_abort: far=%08x fs=%x\n",
2126				    cpu_faultaddress(), cpu_faultstatus());
2127			}
2128#ifdef DDB
2129			if (kernel_debug & 2)
2130				Debugger();
2131#endif
2132			rv = 1;
2133		}
2134	}
2135#endif /* CPU_SA110 */
2136
2137#ifdef DEBUG
2138	/*
2139	 * If 'rv == 0' at this point, it generally indicates that there is a
2140	 * stale TLB entry for the faulting address. This happens when two or
2141	 * more processes are sharing an L1. Since we don't flush the TLB on
2142	 * a context switch between such processes, we can take domain faults
2143	 * for mappings which exist at the same VA in both processes. EVEN IF
2144	 * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for
2145	 * example.
2146	 *
2147	 * This is extremely likely to happen if pmap_enter() updated the L1
2148	 * entry for a recently entered mapping. In this case, the TLB is
2149	 * flushed for the new mapping, but there may still be TLB entries for
2150	 * other mappings belonging to other processes in the 1MB range
2151	 * covered by the L1 entry.
2152	 *
2153	 * Since 'rv == 0', we know that the L1 already contains the correct
2154	 * value, so the fault must be due to a stale TLB entry.
2155	 *
2156	 * Since we always need to flush the TLB anyway in the case where we
2157	 * fixed up the L1, or frobbed the L2 PTE, we effectively deal with
2158	 * stale TLB entries dynamically.
2159	 *
2160	 * However, the above condition can ONLY happen if the current L1 is
2161	 * being shared. If it happens when the L1 is unshared, it indicates
2162	 * that other parts of the pmap are not doing their job WRT managing
2163	 * the TLB.
2164	 */
2165	if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) {
2166		extern int last_fault_code;
2167		printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n",
2168		    pm, va, ftype);
2169		printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n",
2170		    l2, l2b, ptep, pl1pd);
2171		printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n",
2172		    pte, l1pd, last_fault_code);
2173#ifdef DDB
2174		Debugger();
2175#endif
2176	}
2177#endif
2178
2179	cpu_tlb_flushID_SE(va);
2180	cpu_cpwait();
2181
2182	rv = 1;
2183
2184out:
2185#if 0
2186	pmap_release_pmap_lock(pm);
2187	PMAP_MAP_TO_HEAD_UNLOCK();
2188#endif
2189	return (rv);
2190}
2191
2192/*
 * Initialize the address space (zones) for the pv_entries and L2
 * tables.  Set a high water mark so that the system can recover from
 * excessive numbers of pv entries.  Also preallocate the remaining
 * static L1 page tables and switch their mappings to the page-table
 * cache mode.
2196 */
2197void
2198pmap_init2()
2199{
2200	int shpgperproc = PMAP_SHPGPERPROC;
2201	struct l2_bucket *l2b;
2202	struct l1_ttable *l1;
2203	pd_entry_t *pl1pt;
2204	pt_entry_t *ptep, pte;
2205	vm_offset_t va, eva;
2206	u_int loop, needed;
2207
2208	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
2209
2210	pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
2211	pv_entry_high_water = 9 * (pv_entry_max / 10);
2212	l2zone = uma_zcreate("L2 Table", L2_TABLE_SIZE_REAL, pmap_l2ptp_ctor,
2213	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
2214	uma_prealloc(l2zone, 4096);
2215	l2table_zone = uma_zcreate("L2 Table", sizeof(struct l2_dtable),
2216	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
2217	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
2218	uma_prealloc(l2table_zone, 1024);
2219
2220	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
2221	uma_zone_set_obj(l2zone, &l2zone_obj, pv_entry_max);
2222
2223	needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 1 : 0);
2224	needed -= 1;
2225	l1 = malloc(sizeof(*l1) * needed, M_VMPMAP, M_WAITOK);
2226
2227	for (loop = 0; loop < needed; loop++, l1++) {
2228		/* Allocate a L1 page table */
2229		va = (vm_offset_t)contigmalloc(L1_TABLE_SIZE, M_VMPMAP, 0, 0x0,
2230		    0xffffffff, L1_TABLE_SIZE, 0);
2231
2232		if (va == 0)
2233			panic("Cannot allocate L1 KVM");
2234
2235		eva = va + L1_TABLE_SIZE;
2236		pl1pt = (pd_entry_t *)va;
2237
2238		while (va < eva) {
			l2b = pmap_get_l2_bucket(pmap_kernel(), va);
			ptep = &l2b->l2b_kva[l2pte_index(va)];
			pte = *ptep;
			pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
			*ptep = pte;
			PTE_SYNC(ptep);
			cpu_tlb_flushD_SE(va);

			va += PAGE_SIZE;
2248		}
2249		pmap_init_l1(l1, pl1pt);
2250	}
2251
2252
2253#ifdef DEBUG
	printf("pmap_init2: Allocated %d static L1 descriptor tables\n",
2255	    needed);
2256#endif
2257}
2258
2259/*
2260 * This is used to stuff certain critical values into the PCB where they
2261 * can be accessed quickly from cpu_switch() et al.
2262 */
2263void
2264pmap_set_pcb_pagedir(pmap_t pm, struct pcb *pcb)
2265{
2266	struct l2_bucket *l2b;
2267
2268	pcb->pcb_pagedir = pm->pm_l1->l1_physaddr;
2269	pcb->pcb_dacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
2270	    (DOMAIN_CLIENT << (pm->pm_domain * 2));
2271
2272	if (vector_page < KERNBASE) {
2273		pcb->pcb_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)];
2274		l2b = pmap_get_l2_bucket(pm, vector_page);
2275		pcb->pcb_l1vec = l2b->l2b_phys | L1_C_PROTO |
2276	 	    L1_C_DOM(pm->pm_domain) | L1_C_DOM(PMAP_DOMAIN_KERNEL);
2277	} else
2278		pcb->pcb_pl1vec = NULL;
2279}
2280
2281void
2282pmap_activate(struct thread *td)
2283{
2284	pmap_t pm;
2285	struct pcb *pcb;
2287
2288	pm = vmspace_pmap(td->td_proc->p_vmspace);
2289	pcb = td->td_pcb;
2290
2291	critical_enter();
2292	pmap_set_pcb_pagedir(pm, pcb);
2293
2294	if (td == curthread) {
2295		u_int cur_dacr, cur_ttb;
2296
2297		__asm __volatile("mrc p15, 0, %0, c2, c0, 0" : "=r"(cur_ttb));
2298		__asm __volatile("mrc p15, 0, %0, c3, c0, 0" : "=r"(cur_dacr));
2299
2300		cur_ttb &= ~(L1_TABLE_SIZE - 1);
2301
2302		if (cur_ttb == (u_int)pcb->pcb_pagedir &&
2303		    cur_dacr == pcb->pcb_dacr) {
2304			/*
2305			 * No need to switch address spaces.
2306			 */
2307			critical_exit();
2308			return;
2309		}
2310
2311
2312		/*
2313		 * We MUST, I repeat, MUST fix up the L1 entry corresponding
2314		 * to 'vector_page' in the incoming L1 table before switching
2315		 * to it otherwise subsequent interrupts/exceptions (including
2316		 * domain faults!) will jump into hyperspace.
2317		 */
2318		if (pcb->pcb_pl1vec) {
2319
2320			*pcb->pcb_pl1vec = pcb->pcb_l1vec;
2321			/*
2322			 * Don't need to PTE_SYNC() at this point since
2323			 * cpu_setttb() is about to flush both the cache
2324			 * and the TLB.
2325			 */
2326		}
2327
2328		cpu_domains(pcb->pcb_dacr);
2329		cpu_setttb(pcb->pcb_pagedir);
2332	}
2333	critical_exit();
2334}
2335
2336static int
2337pmap_set_pt_cache_mode(pd_entry_t *kl1, vm_offset_t va)
2338{
2339	pd_entry_t *pdep, pde;
2340	pt_entry_t *ptep, pte;
2341	vm_offset_t pa;
2342	int rv = 0;
2343
2344	/*
2345	 * Make sure the descriptor itself has the correct cache mode
2346	 */
2347	pdep = &kl1[L1_IDX(va)];
2348	pde = *pdep;
2349
2350	if (l1pte_section_p(pde)) {
2351		if ((pde & L1_S_CACHE_MASK) != pte_l1_s_cache_mode_pt) {
2352			*pdep = (pde & ~L1_S_CACHE_MASK) |
2353			    pte_l1_s_cache_mode_pt;
2354			PTE_SYNC(pdep);
2355			cpu_dcache_wbinv_range((vm_offset_t)pdep,
2356			    sizeof(*pdep));
2357			rv = 1;
2358		}
2359	} else {
2360		pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK);
2361		ptep = (pt_entry_t *)kernel_pt_lookup(pa);
2362		if (ptep == NULL)
2363			panic("pmap_bootstrap: No L2 for L2 @ va %p\n", ptep);
2364
2365		ptep = &ptep[l2pte_index(va)];
2366		pte = *ptep;
2367		if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
2368			*ptep = (pte & ~L2_S_CACHE_MASK) |
2369			    pte_l2_s_cache_mode_pt;
2370			PTE_SYNC(ptep);
2371			cpu_dcache_wbinv_range((vm_offset_t)ptep,
2372			    sizeof(*ptep));
2373			rv = 1;
2374		}
2375	}
2376
2377	return (rv);
2378}
2379
2380static void
2381pmap_alloc_specials(vm_offset_t *availp, int pages, vm_offset_t *vap,
2382    pt_entry_t **ptep)
2383{
2384	vm_offset_t va = *availp;
2385	struct l2_bucket *l2b;
2386
2387	if (ptep) {
2388		l2b = pmap_get_l2_bucket(pmap_kernel(), va);
2389		if (l2b == NULL)
2390			panic("pmap_alloc_specials: no l2b for 0x%x", va);
2391
2392		*ptep = &l2b->l2b_kva[l2pte_index(va)];
2393	}
2394
2395	*vap = va;
2396	*availp = va + (PAGE_SIZE * pages);
2397}
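
/*
 * Example of the intended use (see pmap_bootstrap() below): reserve a
 * single page of KVA and record the address of its PTE slot so the
 * page can later be mapped on demand:
 *
 *	pmap_alloc_specials(&virtual_avail, 1, &csrcp, &csrc_pte);
 */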
2398
2399/*
2400 *	Bootstrap the system enough to run with virtual memory.
2401 *
2402 *	On the arm this is called after mapping has already been enabled
2403 *	and just syncs the pmap module with what has already been done.
2404 *	[We can't call it easily with mapping off since the kernel is not
2405 *	mapped with PA == VA, hence we would have to relocate every address
2406 *	from the linked base (virtual) address "KERNBASE" to the actual
2407 *	(physical) address starting relative to 0]
2408 */
2409#define PMAP_STATIC_L2_SIZE 16
2410#ifdef ARM_USE_SMALL_ALLOC
2411extern struct mtx smallalloc_mtx;
2412extern vm_offset_t alloc_curaddr;
2413#endif
2414
2415void
2416pmap_bootstrap(vm_offset_t firstaddr, vm_offset_t lastaddr, struct pv_addr *l1pt)
2417{
2418	static struct l1_ttable static_l1;
2419	static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE];
2420	struct l1_ttable *l1 = &static_l1;
2421	struct l2_dtable *l2;
2422	struct l2_bucket *l2b;
2423	pd_entry_t pde;
2424	pd_entry_t *kernel_l1pt = (pd_entry_t *)l1pt->pv_va;
2425	pt_entry_t *ptep;
2426	vm_paddr_t pa;
2427	vm_offset_t va;
2428	vm_size_t size;
2429	int l1idx, l2idx, l2next = 0;
2430
	PDEBUG(1, printf("firstaddr = %08x, lastaddr = %08x\n",
	    firstaddr, lastaddr));
2433
2434	virtual_avail = firstaddr;
2435	kernel_pmap = &kernel_pmap_store;
2436	kernel_pmap->pm_l1 = l1;
2437
2438	/*
2439	 * Scan the L1 translation table created by initarm() and create
2440	 * the required metadata for all valid mappings found in it.
2441	 */
2442	for (l1idx = 0; l1idx < (L1_TABLE_SIZE / sizeof(pd_entry_t)); l1idx++) {
2443		pde = kernel_l1pt[l1idx];
2444
2445		/*
2446		 * We're only interested in Coarse mappings.
2447		 * pmap_extract() can deal with section mappings without
2448		 * recourse to checking L2 metadata.
2449		 */
2450		if ((pde & L1_TYPE_MASK) != L1_TYPE_C)
2451			continue;
2452
2453		/*
2454		 * Lookup the KVA of this L2 descriptor table
2455		 */
2456		pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK);
2457		ptep = (pt_entry_t *)kernel_pt_lookup(pa);
2458
2459		if (ptep == NULL) {
2460			panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx",
2461			    (u_int)l1idx << L1_S_SHIFT, (long unsigned int)pa);
2462		}
2463
2464		/*
2465		 * Fetch the associated L2 metadata structure.
2466		 * Allocate a new one if necessary.
2467		 */
2468		if ((l2 = kernel_pmap->pm_l2[L2_IDX(l1idx)]) == NULL) {
2469			if (l2next == PMAP_STATIC_L2_SIZE)
2470				panic("pmap_bootstrap: out of static L2s");
2471			kernel_pmap->pm_l2[L2_IDX(l1idx)] = l2 =
2472			    &static_l2[l2next++];
2473		}
2474
2475		/*
2476		 * One more L1 slot tracked...
2477		 */
2478		l2->l2_occupancy++;
2479
2480		/*
2481		 * Fill in the details of the L2 descriptor in the
2482		 * appropriate bucket.
2483		 */
2484		l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
2485		l2b->l2b_kva = ptep;
2486		l2b->l2b_phys = pa;
2487		l2b->l2b_l1idx = l1idx;
2488
2489		/*
2490		 * Establish an initial occupancy count for this descriptor
2491		 */
2492		for (l2idx = 0;
2493		    l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
2494		    l2idx++) {
2495			if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) {
2496				l2b->l2b_occupancy++;
2497			}
2498		}
2499
2500		/*
2501		 * Make sure the descriptor itself has the correct cache mode.
2502		 * If not, fix it, but whine about the problem. Port-meisters
2503		 * should consider this a clue to fix up their initarm()
2504		 * function. :)
2505		 */
2506		if (pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)ptep)) {
2507			printf("pmap_bootstrap: WARNING! wrong cache mode for "
2508			    "L2 pte @ %p\n", ptep);
2509		}
2510	}
2511
2512
2513	/*
2514	 * Ensure the primary (kernel) L1 has the correct cache mode for
2515	 * a page table. Bitch if it is not correctly set.
2516	 */
2517	for (va = (vm_offset_t)kernel_l1pt;
2518	    va < ((vm_offset_t)kernel_l1pt + L1_TABLE_SIZE); va += PAGE_SIZE) {
2519		if (pmap_set_pt_cache_mode(kernel_l1pt, va))
2520			printf("pmap_bootstrap: WARNING! wrong cache mode for "
2521			    "primary L1 @ 0x%x\n", va);
2522	}
2523
2524	cpu_dcache_wbinv_all();
2525	cpu_tlb_flushID();
2526	cpu_cpwait();
2527
2528	kernel_pmap->pm_active = -1;
2529	kernel_pmap->pm_domain = PMAP_DOMAIN_KERNEL;
2530	LIST_INIT(&allpmaps);
2531	TAILQ_INIT(&kernel_pmap->pm_pvlist);
2532	LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
2533
2534	/*
2535	 * Reserve some special page table entries/VA space for temporary
2536	 * mapping of pages.
2537	 */
2538#define SYSMAP(c, p, v, n)						\
2539    v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
2540
2541	pmap_alloc_specials(&virtual_avail, 1, &csrcp, &csrc_pte);
2542	pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)csrc_pte);
2543	pmap_alloc_specials(&virtual_avail, 1, &cdstp, &cdst_pte);
2544	pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)cdst_pte);
2545	size = ((lastaddr - pmap_curmaxkvaddr) + L1_S_OFFSET) / L1_S_SIZE;
2546	pmap_alloc_specials(&virtual_avail,
2547	    round_page(size * L2_TABLE_SIZE_REAL) / PAGE_SIZE,
2548	    &pmap_kernel_l2ptp_kva, NULL);
2549
2550	size = (size + (L2_BUCKET_SIZE - 1)) / L2_BUCKET_SIZE;
2551	pmap_alloc_specials(&virtual_avail,
2552	    round_page(size * sizeof(struct l2_dtable)) / PAGE_SIZE,
2553	    &pmap_kernel_l2dtable_kva, NULL);
2554
2555	pmap_alloc_specials(&virtual_avail,
2556	    1, (vm_offset_t*)&_tmppt, NULL);
2557	SLIST_INIT(&l1_list);
2558	TAILQ_INIT(&l1_lru_list);
2559	mtx_init(&l1_lru_lock, "l1 list lock", NULL, MTX_DEF);
2560	pmap_init_l1(l1, kernel_l1pt);
2561	cpu_dcache_wbinv_all();
2562
2563	virtual_avail = round_page(virtual_avail);
2564	virtual_end = lastaddr;
2565	kernel_vm_end = pmap_curmaxkvaddr;
2566#ifdef ARM_USE_SMALL_ALLOC
2567	mtx_init(&smallalloc_mtx, "Small alloc page list", NULL, MTX_DEF);
2568	alloc_curaddr = lastaddr;
2569#endif
2570}
2571
2572/***************************************************
2573 * Pmap allocation/deallocation routines.
2574 ***************************************************/
2575
2576/*
2577 * Release any resources held by the given physical map.
2578 * Called when a pmap initialized by pmap_pinit is being released.
2579 * Should only be called if the map contains no valid mappings.
2580 */
2581void
2582pmap_release(pmap_t pmap)
2583{
2584	struct pcb *pcb;
2585
2586	pmap_idcache_wbinv_all(pmap);
2587	pmap_tlb_flushID(pmap);
2588	cpu_cpwait();
2589	LIST_REMOVE(pmap, pm_list);
2590	if (vector_page < KERNBASE) {
2591		struct pcb *curpcb = PCPU_GET(curpcb);
2592		pcb = thread0.td_pcb;
2593		if (pmap_is_current(pmap)) {
2594			/*
2595 			 * Frob the L1 entry corresponding to the vector
2596			 * page so that it contains the kernel pmap's domain
2597			 * number. This will ensure pmap_remove() does not
2598			 * pull the current vector page out from under us.
2599			 */
2600			critical_enter();
2601			*pcb->pcb_pl1vec = pcb->pcb_l1vec;
2602			cpu_domains(pcb->pcb_dacr);
2603			cpu_setttb(pcb->pcb_pagedir);
2604			critical_exit();
2605		}
2606		pmap_remove(pmap, vector_page, vector_page + PAGE_SIZE);
2607		/*
2608		 * Make sure cpu_switch(), et al, DTRT. This is safe to do
2609		 * since this process has no remaining mappings of its own.
2610		 */
2611		curpcb->pcb_pl1vec = pcb->pcb_pl1vec;
2612		curpcb->pcb_l1vec = pcb->pcb_l1vec;
2613		curpcb->pcb_dacr = pcb->pcb_dacr;
2614		curpcb->pcb_pagedir = pcb->pcb_pagedir;
2615
2616	}
2617	pmap_free_l1(pmap);
2618
2619	dprintf("pmap_release()\n");
2620}
2621
2622
2623
2624/*
2625 * Helper function for pmap_grow_l2_bucket()
2626 */
2627static __inline int
2628pmap_grow_map(vm_offset_t va, pt_entry_t cache_mode, vm_paddr_t *pap)
2629{
2630	struct l2_bucket *l2b;
2631	pt_entry_t *ptep;
2632	vm_paddr_t pa;
2633	struct vm_page *pg;
2634
2635	pg = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO |
2636	    VM_ALLOC_WIRED);
2637	if (pg == NULL)
2638		return (1);
2639	pa = VM_PAGE_TO_PHYS(pg);
2640
2641	if (pap)
2642		*pap = pa;
2643
2644	l2b = pmap_get_l2_bucket(pmap_kernel(), va);
2645
2646	ptep = &l2b->l2b_kva[l2pte_index(va)];
2647	*ptep = L2_S_PROTO | pa | cache_mode |
2648	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE);
2649	PTE_SYNC(ptep);
2650	return (0);
2651}
2652
2653/*
2654 * This is the same as pmap_alloc_l2_bucket(), except that it is only
2655 * used by pmap_growkernel().
2656 */
2657static __inline struct l2_bucket *
2658pmap_grow_l2_bucket(pmap_t pm, vm_offset_t va)
2659{
2660	struct l2_dtable *l2;
2661	struct l2_bucket *l2b;
2662	struct l1_ttable *l1;
2663	pd_entry_t *pl1pd;
2664	u_short l1idx;
2665	vm_offset_t nva;
2666
2667	l1idx = L1_IDX(va);
2668
2669	if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
2670		/*
2671		 * No mapping at this address, as there is
2672		 * no entry in the L1 table.
2673		 * Need to allocate a new l2_dtable.
2674		 */
2675		nva = pmap_kernel_l2dtable_kva;
2676		if ((nva & PAGE_MASK) == 0) {
2677			/*
2678			 * Need to allocate a backing page
2679			 */
2680			if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL))
2681				return (NULL);
2682		}
2683
2684		l2 = (struct l2_dtable *)nva;
2685		nva += sizeof(struct l2_dtable);
2686
2687		if ((nva & PAGE_MASK) < (pmap_kernel_l2dtable_kva &
2688		    PAGE_MASK)) {
2689			/*
2690			 * The new l2_dtable straddles a page boundary.
2691			 * Map in another page to cover it.
2692			 */
2693			if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL))
2694				return (NULL);
2695		}
2696
2697		pmap_kernel_l2dtable_kva = nva;
2698
2699		/*
2700		 * Link it into the parent pmap
2701		 */
2702		pm->pm_l2[L2_IDX(l1idx)] = l2;
2703	}
2704
2705	l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
2706
2707	/*
2708	 * Fetch pointer to the L2 page table associated with the address.
2709	 */
2710	if (l2b->l2b_kva == NULL) {
2711		pt_entry_t *ptep;
2712
2713		/*
2714		 * No L2 page table has been allocated. Chances are, this
2715		 * is because we just allocated the l2_dtable, above.
2716		 */
2717		nva = pmap_kernel_l2ptp_kva;
2718		ptep = (pt_entry_t *)nva;
2719		if ((nva & PAGE_MASK) == 0) {
2720			/*
2721			 * Need to allocate a backing page
2722			 */
2723			if (pmap_grow_map(nva, pte_l2_s_cache_mode_pt,
2724			    &pmap_kernel_l2ptp_phys))
2725				return (NULL);
2726			PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t));
2727		}
2728
2729		l2->l2_occupancy++;
2730		l2b->l2b_kva = ptep;
2731		l2b->l2b_l1idx = l1idx;
2732		l2b->l2b_phys = pmap_kernel_l2ptp_phys;
2733
2734		pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL;
2735		pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL;
2736	}
2737
2738	/* Distribute new L1 entry to all other L1s */
2739	SLIST_FOREACH(l1, &l1_list, l1_link) {
		pl1pd = &l1->l1_kva[L1_IDX(va)];
		*pl1pd = l2b->l2b_phys | L1_C_DOM(PMAP_DOMAIN_KERNEL) |
		    L1_C_PROTO;
		PTE_SYNC(pl1pd);
2744	}
2745
2746	return (l2b);
2747}
2748
2749
2750/*
2751 * grow the number of kernel page table entries, if needed
2752 */
2753void
2754pmap_growkernel(vm_offset_t addr)
2755{
2756	pmap_t kpm = pmap_kernel();
2757	int s;
2758
2759	if (addr <= pmap_curmaxkvaddr)
2760		return;		/* we are OK */
2761
2762	/*
2763	 * whoops!   we need to add kernel PTPs
2764	 */
2765
2766	s = splhigh();	/* to be safe */
2767
2768	/* Map 1MB at a time */
2769	for (; pmap_curmaxkvaddr < addr; pmap_curmaxkvaddr += L1_S_SIZE)
2770		pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr);
2771
2772	/*
2773	 * flush out the cache, expensive but growkernel will happen so
2774	 * rarely
2775	 */
2776	cpu_dcache_wbinv_all();
2777	cpu_tlb_flushD();
2778	cpu_cpwait();
2779	kernel_vm_end = pmap_curmaxkvaddr;
	splx(s);
2781}
2782
2783
2784/*
2785 *      pmap_page_protect:
2786 *
2787 *      Lower the permission for all mappings to a given page.
2788 */
2789void
2790pmap_page_protect(vm_page_t m, vm_prot_t prot)
2791{
2792	switch(prot) {
2793	case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
2794	case VM_PROT_READ|VM_PROT_WRITE:
2795		return;
2796
2797	case VM_PROT_READ:
2798	case VM_PROT_READ|VM_PROT_EXECUTE:
2799		pmap_clearbit(m, PVF_WRITE);
2800		break;
2801
2802	default:
2803		pmap_remove_all(m);
2804		break;
2805	}
2806
2807}
2808
2809
2810/*
 * Remove all pages from the specified address space;
 * this aids process exit speed.  Also, this code is
 * special-cased for the current process only, but
 * can have the more generic (and slightly slower)
 * mode enabled.  This is much faster than pmap_remove
 * in the case of running down an entire address space.
2817 */
2818void
2819pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2820{
2821	struct pv_entry *pv, *npv;
2822	struct l2_bucket *l2b = NULL;
2823	vm_page_t m;
2824	pt_entry_t *pt;
2825
2826	vm_page_lock_queues();
2827	for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
2828		if (pv->pv_va >= eva || pv->pv_va < sva) {
2829			npv = TAILQ_NEXT(pv, pv_plist);
2830			continue;
2831		}
2832		if (pv->pv_flags & PVF_WIRED) {
2833			/* The page is wired, cannot remove it now. */
2834			npv = TAILQ_NEXT(pv, pv_plist);
2835			continue;
2836		}
2837		pmap->pm_stats.resident_count--;
2838		l2b = pmap_get_l2_bucket(pmap, pv->pv_va);
2839		KASSERT(l2b != NULL, ("No L2 bucket in pmap_remove_pages"));
2840		pt = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
2841		m = PHYS_TO_VM_PAGE(*pt & L2_ADDR_MASK);
2842		*pt = 0;
2843		PTE_SYNC(pt);
2844		npv = TAILQ_NEXT(pv, pv_plist);
2845		pmap_nuke_pv(m, pmap, pv);
2846		pmap_free_pv_entry(pv);
2847	}
2848	vm_page_unlock_queues();
2849	cpu_idcache_wbinv_all();
2850	cpu_tlb_flushID();
2851	cpu_cpwait();
2852}
2853
2854
2855/***************************************************
2856 * Low level mapping routines.....
2857 ***************************************************/
2858
2859/* Map a section into the KVA. */
2860
2861void
2862pmap_kenter_section(vm_offset_t va, vm_offset_t pa, int flags)
2863{
2864	pd_entry_t pd = L1_S_PROTO | pa | L1_S_PROT(PTE_KERNEL,
2865	    VM_PROT_READ|VM_PROT_WRITE) | L1_S_DOM(PMAP_DOMAIN_KERNEL);
2866	struct l1_ttable *l1;
2867
2868	KASSERT(((va | pa) & L1_S_OFFSET) == 0,
2869	    ("Not a valid section mapping"));
2870	if (flags & SECTION_CACHE)
2871		pd |= pte_l1_s_cache_mode;
2872	else if (flags & SECTION_PT)
2873		pd |= pte_l1_s_cache_mode_pt;
2874	SLIST_FOREACH(l1, &l1_list, l1_link) {
2875		l1->l1_kva[L1_IDX(va)] = pd;
2876		PTE_SYNC(&l1->l1_kva[L1_IDX(va)]);
2877	}
2878}
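
/*
 * Expected calling convention (the addresses here are hypothetical):
 * both 'va' and 'pa' must be section (1MB) aligned, e.g.
 *
 *	pmap_kenter_section(va, pa, SECTION_CACHE);
 *
 * Use SECTION_PT instead when the memory will hold page tables, so the
 * mapping is established with the page-table cache mode.
 */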
2879
2880/*
 * Add a wired page to the kva.
 * Note that in order for the mapping to take effect -- you
 * should do an invltlb after doing the pmap_kenter...
2884 */
2885static PMAP_INLINE void
2886pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags)
2887{
2888	struct l2_bucket *l2b;
2889	pt_entry_t *pte;
2890	pt_entry_t opte;
2891	PDEBUG(1, printf("pmap_kenter: va = %08x, pa = %08x\n",
2892	    (uint32_t) va, (uint32_t) pa));
2893
2894
2895	l2b = pmap_get_l2_bucket(pmap_kernel(), va);
2896	if (l2b == NULL)
2897		l2b = pmap_grow_l2_bucket(pmap_kernel(), va);
2898	KASSERT(l2b != NULL, ("No L2 Bucket"));
2899	pte = &l2b->l2b_kva[l2pte_index(va)];
2900	opte = *pte;
2901	PDEBUG(1, printf("pmap_kenter: pte = %08x, opte = %08x, npte = %08x\n",
2902	    (uint32_t) pte, opte, *pte));
2903	if (l2pte_valid(opte)) {
2904		cpu_dcache_wbinv_range(va, PAGE_SIZE);
2905		cpu_tlb_flushD_SE(va);
2906		cpu_cpwait();
2907	} else {
2908		if (opte == 0)
2909			l2b->l2b_occupancy++;
2910	}
2911	*pte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL,
2912	    VM_PROT_READ | VM_PROT_WRITE);
2913	if (flags & KENTER_CACHE)
2914		*pte |= pte_l2_s_cache_mode;
2915	if (flags & KENTER_USER)
2916		*pte |= L2_S_PROT_U;
2917	PTE_SYNC(pte);
2918}
2919
2920void
2921pmap_kenter(vm_offset_t va, vm_paddr_t pa)
2922{
2923	pmap_kenter_internal(va, pa, KENTER_CACHE);
2924}
2925
2926void
2927pmap_kenter_user(vm_offset_t va, vm_paddr_t pa)
2928{
2929
2930	pmap_kenter_internal(va, pa, KENTER_CACHE|KENTER_USER);
2931	/*
2932	 * Call pmap_fault_fixup now, to make sure we'll have no exception
2933	 * at the first use of the new address, or bad things will happen,
2934	 * as we use one of these addresses in the exception handlers.
2935	 */
2936	pmap_fault_fixup(pmap_kernel(), va, VM_PROT_READ|VM_PROT_WRITE, 1);
2937}
2938
2939/*
 * remove a page from the kernel pagetables
2941 */
2942PMAP_INLINE void
2943pmap_kremove(vm_offset_t va)
2944{
2945	struct l2_bucket *l2b;
2946	pt_entry_t *pte, opte;
2947
2948	l2b = pmap_get_l2_bucket(pmap_kernel(), va);
2949	if (!l2b)
2950		return;
2951	KASSERT(l2b != NULL, ("No L2 Bucket"));
2952	pte = &l2b->l2b_kva[l2pte_index(va)];
2953	opte = *pte;
2954	if (l2pte_valid(opte)) {
2955		cpu_dcache_wbinv_range(va, PAGE_SIZE);
2956		cpu_tlb_flushD_SE(va);
2957		cpu_cpwait();
2958		*pte = 0;
2959	}
2960}
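
/*
 * A minimal usage sketch (addresses are hypothetical): a wired kernel
 * mapping created with pmap_kenter() is expected to be torn down again
 * with pmap_kremove() on the same VA once the caller is finished:
 *
 *	pmap_kenter(va, pa);
 *	... access the page through 'va' ...
 *	pmap_kremove(va);
 */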
2961
2962
2963/*
2964 *	Used to map a range of physical addresses into kernel
2965 *	virtual address space.
2966 *
2967 *	The value passed in '*virt' is a suggested virtual address for
2968 *	the mapping. Architectures which can support a direct-mapped
2969 *	physical to virtual region can return the appropriate address
2970 *	within that region, leaving '*virt' unchanged. Other
2971 *	architectures should map the pages starting at '*virt' and
2972 *	update '*virt' with the first usable address after the mapped
2973 *	region.
2974 */
2975vm_offset_t
2976pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
2977{
2978	vm_offset_t sva = *virt;
2979	vm_offset_t va = sva;
2980
2981	PDEBUG(1, printf("pmap_map: virt = %08x, start = %08x, end = %08x, "
2982	    "prot = %d\n", (uint32_t) *virt, (uint32_t) start, (uint32_t) end,
2983	    prot));
2984
2985	while (start < end) {
2986		pmap_kenter(va, start);
2987		va += PAGE_SIZE;
2988		start += PAGE_SIZE;
2989	}
2990	*virt = va;
2991	return (sva);
2992}
2993
2994static void
2995pmap_wb_page(vm_page_t m, boolean_t do_inv)
2996{
2997	struct pv_entry *pv;
2998
2999	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
3000	    pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE, do_inv,
3001		(pv->pv_flags & PVF_WRITE) == 0);
3002}
3003
3004/*
 * Add a list of wired pages to the kva.
 * This routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
3011 */
3012void
3013pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
3014{
3015	int i;
3016
3017	for (i = 0; i < count; i++) {
3018		pmap_wb_page(m[i], TRUE);
3019		pmap_kenter_internal(va, VM_PAGE_TO_PHYS(m[i]),
3020		    KENTER_CACHE);
3021		va += PAGE_SIZE;
3022	}
3023}
3024
3025
3026/*
3027 * this routine jerks page mappings from the
3028 * kernel -- it is meant only for temporary mappings.
3029 */
3030void
3031pmap_qremove(vm_offset_t va, int count)
3032{
3033	vm_paddr_t pa;
3034	int i;
3035
3036	for (i = 0; i < count; i++) {
3037		pa = vtophys(va);
3038		if (pa) {
3039			pmap_wb_page(PHYS_TO_VM_PAGE(pa), TRUE);
3040			pmap_kremove(va);
3041		}
3042		va += PAGE_SIZE;
3043	}
3044}
3045
3046
3047/*
3048 * pmap_object_init_pt preloads the ptes for a given object
3049 * into the specified pmap.  This eliminates the blast of soft
3050 * faults on process startup and immediately after an mmap.
3051 */
3052void
3053pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
3054    vm_pindex_t pindex, vm_size_t size)
3055{
3056	printf("pmap_object_init_pt()\n");
3057}
3058
3059
3060/*
3061 *	pmap_is_prefaultable:
3062 *
 *	Return whether or not the specified virtual address is eligible
3064 *	for prefault.
3065 */
3066boolean_t
3067pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
3068{
3069	pd_entry_t *pde;
3070	pt_entry_t *pte;
3071
3072	if (!pmap_get_pde_pte(pmap, addr, &pde, &pte))
3073		return (FALSE);
3074	if (*pte == 0)
3075		return (TRUE);
3076	return (FALSE);
3077}
3078
3079/*
3080 * Fetch pointers to the PDE/PTE for the given pmap/VA pair.
3081 * Returns TRUE if the mapping exists, else FALSE.
3082 *
3083 * NOTE: This function is only used by a couple of arm-specific modules.
3084 * It is not safe to take any pmap locks here, since we could be right
3085 * in the middle of debugging the pmap anyway...
3086 *
3087 * It is possible for this routine to return FALSE even though a valid
3088 * mapping does exist. This is because we don't lock, so the metadata
3089 * state may be inconsistent.
3090 *
3091 * NOTE: We can return a NULL *ptp in the case where the L1 pde is
3092 * a "section" mapping.
3093 */
3094boolean_t
3095pmap_get_pde_pte(pmap_t pm, vm_offset_t va, pd_entry_t **pdp, pt_entry_t **ptp)
3096{
3097	struct l2_dtable *l2;
3098	pd_entry_t *pl1pd, l1pd;
3099	pt_entry_t *ptep;
3100	u_short l1idx;
3101
3102	if (pm->pm_l1 == NULL)
3103		return (FALSE);
3104
3105	l1idx = L1_IDX(va);
3106	*pdp = pl1pd = &pm->pm_l1->l1_kva[l1idx];
3107	l1pd = *pl1pd;
3108
3109	if (l1pte_section_p(l1pd)) {
3110		*ptp = NULL;
3111		return (TRUE);
3112	}
3113
3114	if (pm->pm_l2 == NULL)
3115		return (FALSE);
3116
3117	l2 = pm->pm_l2[L2_IDX(l1idx)];
3118
3119	if (l2 == NULL ||
3120	    (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
3121		return (FALSE);
3122	}
3123
3124	*ptp = &ptep[l2pte_index(va)];
3125	return (TRUE);
3126}
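
/*
 * Sketch of the expected calling pattern: since *ptp may legitimately
 * come back NULL when 'va' is covered by a section mapping, a caller
 * that wants the PTE must check it before dereferencing:
 *
 *	if (pmap_get_pde_pte(pm, va, &pde, &pte) && pte != NULL)
 *		... examine *pte ...
 */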
3127
3128/*
3129 *      Routine:        pmap_remove_all
3130 *      Function:
3131 *              Removes this physical page from
3132 *              all physical maps in which it resides.
3133 *              Reflects back modify bits to the pager.
3134 *
3135 *      Notes:
3136 *              Original versions of this routine were very
3137 *              inefficient because they iteratively called
3138 *              pmap_remove (slow...)
3139 */
3140void
3141pmap_remove_all(vm_page_t m)
3142{
3143	pv_entry_t pv;
3144	pt_entry_t *ptep, pte;
3145	struct l2_bucket *l2b;
3146	boolean_t flush = FALSE;
3147	pmap_t curpm;
3148	int flags = 0;
3149
3150#if defined(PMAP_DEBUG)
3151	/*
3152	 * XXX this makes pmap_page_protect(NONE) illegal for non-managed
3153	 * pages!
3154	 */
3155	if (m->flags & PG_FICTITIOUS) {
3156		panic("pmap_page_protect: illegal for unmanaged page, va: 0x%x", VM_PAGE_TO_PHYS(m));
3157	}
3158#endif
3159
3160	if (TAILQ_EMPTY(&m->md.pv_list))
3161		return;
3162	curpm = vmspace_pmap(curproc->p_vmspace);
3163	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
3164		if (flush == FALSE && (pv->pv_pmap == curpm ||
3165		    pv->pv_pmap == pmap_kernel()))
3166			flush = TRUE;
3167		l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
3168		KASSERT(l2b != NULL, ("No l2 bucket"));
3169		ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
3170		pte = *ptep;
3171		*ptep = 0;
3172		PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
3173		pmap_free_l2_bucket(pv->pv_pmap, l2b, 1);
3174		if (pv->pv_flags & PVF_WIRED)
3175			pv->pv_pmap->pm_stats.wired_count--;
3176		pv->pv_pmap->pm_stats.resident_count--;
3177		flags |= pv->pv_flags;
3178		pmap_nuke_pv(m, pv->pv_pmap, pv);
3179		pmap_free_pv_entry(pv);
3180	}
3181
3182	if (flush) {
3183		if (PV_BEEN_EXECD(flags))
3184			pmap_tlb_flushID(curpm);
3185		else
3186			pmap_tlb_flushD(curpm);
3187	}
3188}
3189
3190
3191/*
3192 *	Set the physical protection on the
3193 *	specified range of this map as requested.
3194 */
3195void
3196pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
3197{
3198	struct l2_bucket *l2b;
3199	pt_entry_t *ptep, pte;
3200	vm_offset_t next_bucket;
3201	u_int flags;
3202	int flush;
3203
3204	if ((prot & VM_PROT_READ) == 0) {
3205		mtx_lock(&Giant);
3206		pmap_remove(pm, sva, eva);
3207		mtx_unlock(&Giant);
3208		return;
3209	}
3210
3211	if (prot & VM_PROT_WRITE) {
3212		/*
3213		 * If this is a read->write transition, just ignore it and let
3214		 * vm_fault() take care of it later.
3215		 */
3216		return;
3217	}
3218
3219	mtx_lock(&Giant);
3220
3221	/*
3222	 * OK, at this point, we know we're doing write-protect operation.
3223	 * If the pmap is active, write-back the range.
3224	 */
3225	pmap_dcache_wb_range(pm, sva, eva - sva, FALSE, FALSE);
3226
3227	flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 0 : -1;
3228	flags = 0;
3229
3230	vm_page_lock_queues();
3231	while (sva < eva) {
3232		next_bucket = L2_NEXT_BUCKET(sva);
3233		if (next_bucket > eva)
3234			next_bucket = eva;
3235
3236		l2b = pmap_get_l2_bucket(pm, sva);
3237		if (l2b == NULL) {
3238			sva = next_bucket;
3239			continue;
3240		}
3241
3242		ptep = &l2b->l2b_kva[l2pte_index(sva)];
3243
3244		while (sva < next_bucket) {
3245			if ((pte = *ptep) != 0 && (pte & L2_S_PROT_W) != 0) {
3246				struct vm_page *pg;
3247				u_int f;
3248
3249				pg = PHYS_TO_VM_PAGE(l2pte_pa(pte));
3250				pte &= ~L2_S_PROT_W;
3251				*ptep = pte;
3252				PTE_SYNC(ptep);
3253
3254				if (pg != NULL) {
3255					f = pmap_modify_pv(pg, pm, sva,
3256					    PVF_WRITE, 0);
3257					pmap_vac_me_harder(pg, pm, sva);
3258					if (pmap_track_modified(sva))
3259						vm_page_dirty(pg);
3260				} else
3261					f = PVF_REF | PVF_EXEC;
3262
3263				if (flush >= 0) {
3264					flush++;
3265					flags |= f;
3266				} else
3267				if (PV_BEEN_EXECD(f))
3268					pmap_tlb_flushID_SE(pm, sva);
3269				else
3270				if (PV_BEEN_REFD(f))
3271					pmap_tlb_flushD_SE(pm, sva);
3272			}
3273
3274			sva += PAGE_SIZE;
3275			ptep++;
3276		}
3277	}
3278
3279
3280	if (flush) {
3281		if (PV_BEEN_EXECD(flags))
3282			pmap_tlb_flushID(pm);
3283		else
3284		if (PV_BEEN_REFD(flags))
3285			pmap_tlb_flushD(pm);
3286	}
3287	vm_page_unlock_queues();
3288
3289	mtx_unlock(&Giant);
3290}
3291
3292
3293/*
3294 *	Insert the given physical page (p) at
3295 *	the specified virtual address (v) in the
3296 *	target physical map with the protection requested.
3297 *
3298 *	If specified, the page will be wired down, meaning
3299 *	that the related pte can not be reclaimed.
3300 *
3301 *	NB:  This is the only routine which MAY NOT lazy-evaluate
3302 *	or lose information.  That is, this routine must actually
3303 *	insert this page into the given map NOW.
3304 */
3305
3306void
3307pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
3308    boolean_t wired)
3309{
3310	struct l2_bucket *l2b = NULL;
3311	struct vm_page *opg;
3312	struct pv_entry *pve = NULL;
3313	pt_entry_t *ptep, npte, opte;
3314	u_int nflags;
3315	u_int oflags;
3316	vm_paddr_t pa;
3317
3318	vm_page_lock_queues();
3319	if (va == vector_page) {
3320		pa = systempage.pv_pa;
3321		m = NULL;
3322	} else
3323		pa = VM_PAGE_TO_PHYS(m);
3324	nflags = 0;
3325	if (prot & VM_PROT_WRITE)
3326		nflags |= PVF_WRITE;
3327	if (prot & VM_PROT_EXECUTE)
3328		nflags |= PVF_EXEC;
3329	if (wired)
3330		nflags |= PVF_WIRED;
3331	PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, prot = %x, "
3332	    "wired = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, wired));
3333
3334	if (pmap == pmap_kernel()) {
3335		l2b = pmap_get_l2_bucket(pmap, va);
3336		if (l2b == NULL)
3337			l2b = pmap_grow_l2_bucket(pmap, va);
3338	} else
3339		l2b = pmap_alloc_l2_bucket(pmap, va);
	KASSERT(l2b != NULL,
	    ("pmap_enter: failed to allocate l2 bucket"));
3342	ptep = &l2b->l2b_kva[l2pte_index(va)];
3343
3344	opte = *ptep;
3345	npte = pa;
3346	oflags = 0;
3347	if (opte) {
3348		/*
3349		 * There is already a mapping at this address.
3350		 * If the physical address is different, lookup the
3351		 * vm_page.
3352		 */
3353		if (l2pte_pa(opte) != pa)
3354			opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
3355		else
3356			opg = m;
3357	} else
3358		opg = NULL;
3359
3360	if ((prot & (VM_PROT_ALL)) ||
3361	    (!m || m->md.pvh_attrs & PVF_REF)) {
3362		/*
3363		 * - The access type indicates that we don't need
3364		 *   to do referenced emulation.
3365		 * OR
3366		 * - The physical page has already been referenced
3367		 *   so no need to re-do referenced emulation here.
3368		 */
3369		npte |= L2_S_PROTO;
3370
3371		nflags |= PVF_REF;
3372
3373		if (m && ((prot & VM_PROT_WRITE) != 0 ||
3374		    (m->md.pvh_attrs & PVF_MOD))) {
3375			/*
3376			 * This is a writable mapping, and the
3377			 * page's mod state indicates it has
3378			 * already been modified. Make it
3379			 * writable from the outset.
3380			 */
3381			nflags |= PVF_MOD;
3382			if (!(m->md.pvh_attrs & PVF_MOD) &&
3383			    pmap_track_modified(va))
3384				vm_page_dirty(m);
3385		}
3386		if (m && opte)
3387			vm_page_flag_set(m, PG_REFERENCED);
3388	} else {
3389		/*
3390		 * Need to do page referenced emulation.
3391		 */
3392		npte |= L2_TYPE_INV;
3393	}
3394
3395	if (prot & VM_PROT_WRITE)
3396		npte |= L2_S_PROT_W;
3397	npte |= pte_l2_s_cache_mode;
3398	if (m && m == opg) {
3399		/*
3400		 * We're changing the attrs of an existing mapping.
3401		 */
3402#if 0
3403		simple_lock(&pg->mdpage.pvh_slock);
3404#endif
3405		oflags = pmap_modify_pv(m, pmap, va,
3406		    PVF_WRITE | PVF_EXEC | PVF_WIRED |
3407		    PVF_MOD | PVF_REF, nflags);
3408#if 0
3409		simple_unlock(&pg->mdpage.pvh_slock);
3410#endif
3411
3412		/*
3413		 * We may need to flush the cache if we're
3414		 * doing rw-ro...
3415		 */
3416		if (pmap_is_current(pmap) &&
3417		    (oflags & PVF_NC) == 0 &&
3418			    (opte & L2_S_PROT_W) != 0 &&
3419			    (prot & VM_PROT_WRITE) == 0)
3420			cpu_dcache_wb_range(va, PAGE_SIZE);
3421	} else {
3422		/*
3423		 * New mapping, or changing the backing page
3424		 * of an existing mapping.
3425		 */
3426		if (opg) {
3427			/*
3428			 * Replacing an existing mapping with a new one.
3429			 * It is part of our managed memory so we
3430			 * must remove it from the PV list
3431			 */
3432#if 0
3433			simple_lock(&opg->mdpage.pvh_slock);
3434#endif
3435			pve = pmap_remove_pv(opg, pmap, va);
3436			if (m && (m->flags & (PG_UNMANAGED | PG_FICTITIOUS)) && pve)
3437				pmap_free_pv_entry(pve);
3438			else if (!pve)
3439				pve = pmap_get_pv_entry();
3440			KASSERT(pve != NULL, ("No pv"));
3441#if 0
3442			simple_unlock(&opg->mdpage.pvh_slock);
3443#endif
3444			oflags = pve->pv_flags;
3445
3446			/*
3447			 * If the old mapping was valid (ref/mod
3448			 * emulation creates 'invalid' mappings
3449			 * initially) then make sure to frob
3450			 * the cache.
3451			 */
3452			if ((oflags & PVF_NC) == 0 &&
3453			    l2pte_valid(opte)) {
3454				if (PV_BEEN_EXECD(oflags)) {
3455					pmap_idcache_wbinv_range(pmap, va,
3456					    PAGE_SIZE);
3457				} else
3458					if (PV_BEEN_REFD(oflags)) {
3459						pmap_dcache_wb_range(pmap, va,
3460						    PAGE_SIZE, TRUE,
3461						    (oflags & PVF_WRITE) == 0);
3462					}
3463			}
3464		} else if (m)
3465			if ((pve = pmap_get_pv_entry()) == NULL) {
3466				panic("pmap_enter: no pv entries");
3467			}
3468		if (m && !(m->flags & (PG_UNMANAGED | PG_FICTITIOUS)))
3469			pmap_enter_pv(m, pve, pmap, va, nflags);
3470	}
3471	/*
3472	 * Make sure userland mappings get the right permissions
3473	 */
3474	if (pmap != pmap_kernel() && va != vector_page) {
3475		npte |= L2_S_PROT_U;
3476	}
3477
3478	/*
3479	 * Keep the stats up to date
3480	 */
3481	if (opte == 0) {
3482		l2b->l2b_occupancy++;
3483		pmap->pm_stats.resident_count++;
3484	}
3485
3486
3487	/*
3488	 * If this is just a wiring change, the two PTEs will be
3489	 * identical, so there's no need to update the page table.
3490	 */
3491	if (npte != opte) {
3492		boolean_t is_cached = pmap_is_current(pmap);
3493
3494		*ptep = npte;
3495		if (is_cached) {
3496			/*
3497			 * We only need to frob the cache/tlb if this pmap
3498			 * is current
3499			 */
3500			PTE_SYNC(ptep);
3501			if (L1_IDX(va) != L1_IDX(vector_page) &&
3502			    l2pte_valid(npte)) {
3503				/*
3504				 * This mapping is likely to be accessed as
3505				 * soon as we return to userland. Fix up the
3506				 * L1 entry to avoid taking another
3507				 * page/domain fault.
3508				 */
3509				pd_entry_t *pl1pd, l1pd;
3510
3511				pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
3512				l1pd = l2b->l2b_phys | L1_C_DOM(pmap->pm_domain) |
3513				    L1_C_PROTO;
3514				if (*pl1pd != l1pd) {
3515					*pl1pd = l1pd;
3516					PTE_SYNC(pl1pd);
3517				}
3518			}
3519		}
3520
3521		if (PV_BEEN_EXECD(oflags))
3522			pmap_tlb_flushID_SE(pmap, va);
3523		else if (PV_BEEN_REFD(oflags))
3524			pmap_tlb_flushD_SE(pmap, va);
3525
3526
3527		pmap_vac_me_harder(m, pmap, va);
3528	}
3529	vm_page_unlock_queues();
3530}
3531
3532/*
3533 * this code makes some *MAJOR* assumptions:
3534 * 1. Current pmap & pmap exists.
3535 * 2. Not wired.
3536 * 3. Read access.
3537 * 4. No page table pages.
 * 5. Page IS managed.
3539 * but is *MUCH* faster than pmap_enter...
3540 */
3541
3542vm_page_t
3543pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
3544{
3545
3546	vm_page_busy(m);
3547	vm_page_unlock_queues();
3548	VM_OBJECT_UNLOCK(m->object);
3549	mtx_lock(&Giant);
3550	pmap_enter(pmap, va, m, VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
3551	pmap_idcache_wbinv_all(pmap);
3552	mtx_unlock(&Giant);
3553	VM_OBJECT_LOCK(m->object);
3554	vm_page_lock_queues();
3555	vm_page_wakeup(m);
3556	return (NULL);
3557}
3558
3559/*
3560 *	Routine:	pmap_change_wiring
3561 *	Function:	Change the wiring attribute for a map/virtual-address
3562 *			pair.
3563 *	In/out conditions:
3564 *			The mapping must already exist in the pmap.
3565 */
3566void
3567pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
3568{
3569	struct l2_bucket *l2b;
3570	pt_entry_t *ptep, pte;
3571	vm_page_t pg;
3572
3573	l2b = pmap_get_l2_bucket(pmap, va);
3574	KASSERT(l2b, ("No l2b bucket in pmap_change_wiring"));
3575	ptep = &l2b->l2b_kva[l2pte_index(va)];
3576	pte = *ptep;
3577	pg = PHYS_TO_VM_PAGE(l2pte_pa(pte));
3578	if (pg)
		pmap_modify_pv(pg, pmap, va, PVF_WIRED, wired ? PVF_WIRED : 0);
3580}
3581
3582
3583/*
3584 *	Copy the range specified by src_addr/len
3585 *	from the source map to the range dst_addr/len
3586 *	in the destination map.
3587 *
3588 *	This routine is only advisory and need not do anything.
3589 */
3590void
3591pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
3592    vm_size_t len, vm_offset_t src_addr)
3593{
3594}
3595
3596
3597/*
3598 *	Routine:	pmap_extract
3599 *	Function:
3600 *		Extract the physical page address associated
3601 *		with the given map/virtual_address pair.
3602 */
3603vm_paddr_t
3604pmap_extract(pmap_t pm, vm_offset_t va)
3605{
3606	struct l2_dtable *l2;
3607	pd_entry_t *pl1pd, l1pd;
3608	pt_entry_t *ptep, pte;
3609	vm_paddr_t pa;
3610	u_int l1idx;
3611	l1idx = L1_IDX(va);
3612	pl1pd = &pm->pm_l1->l1_kva[l1idx];
3613	l1pd = *pl1pd;
3614
3615	if (l1pte_section_p(l1pd)) {
3616		/*
3617		 * These should only happen for pmap_kernel()
3618		 */
3619		KASSERT(pm == pmap_kernel(), ("huh"));
3620		pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET);
3621	} else {
3622		/*
3623		 * Note that we can't rely on the validity of the L1
3624		 * descriptor as an indication that a mapping exists.
3625		 * We have to look it up in the L2 dtable.
3626		 */
3627		l2 = pm->pm_l2[L2_IDX(l1idx)];
3628
3629		if (l2 == NULL ||
3630		    (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
3631			return (0);
3632		}
3633
3634		ptep = &ptep[l2pte_index(va)];
3635		pte = *ptep;
3636
3637		if (pte == 0)
3638			return (0);
3639
3640		switch (pte & L2_TYPE_MASK) {
3641		case L2_TYPE_L:
3642			pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
3643			break;
3644
3645		default:
3646			pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
3647			break;
3648		}
3649	}
3650
3651	return (pa);
3652}
3653
3654/*
3655 * Atomically extract and hold the physical page with the given
3656 * pmap and virtual address pair if that mapping permits the given
3657 * protection.
3658 *
3659 */
3660vm_page_t
3661pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
3662{
3663	struct l2_dtable *l2;
3664	pd_entry_t *pl1pd, l1pd;
3665	pt_entry_t *ptep, pte;
3666	vm_paddr_t pa;
3667	vm_page_t m = NULL;
3668	u_int l1idx;
3669	l1idx = L1_IDX(va);
3670	pl1pd = &pmap->pm_l1->l1_kva[l1idx];
3671	l1pd = *pl1pd;
3672
3673	vm_page_lock_queues();
3674	if (l1pte_section_p(l1pd)) {
3675		/*
3676		 * These should only happen for pmap_kernel()
3677		 */
3678		KASSERT(pmap == pmap_kernel(), ("huh"));
3679		pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET);
3680		if (l1pd & L1_S_PROT_W || (prot & VM_PROT_WRITE) == 0) {
3681			m = PHYS_TO_VM_PAGE(pa);
3682			vm_page_hold(m);
3683		}
3684
3685	} else {
3686		/*
3687		 * Note that we can't rely on the validity of the L1
3688		 * descriptor as an indication that a mapping exists.
3689		 * We have to look it up in the L2 dtable.
3690		 */
3691		l2 = pmap->pm_l2[L2_IDX(l1idx)];
3692
		if (l2 == NULL ||
		    (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
			vm_page_unlock_queues();
			return (NULL);
		}

		ptep = &ptep[l2pte_index(va)];
		pte = *ptep;

		if (pte == 0) {
			vm_page_unlock_queues();
			return (NULL);
		}
3703
3704		if (pte & L2_S_PROT_W || (prot & VM_PROT_WRITE) == 0) {
3705			switch (pte & L2_TYPE_MASK) {
3706			case L2_TYPE_L:
3707				pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
3708				break;
3709
3710			default:
3711				pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
3712				break;
3713			}
3714			m = PHYS_TO_VM_PAGE(pa);
3715			vm_page_hold(m);
3716		}
3717	}
3718
3719	vm_page_unlock_queues();
3720	return (m);
3721}
3722
3723/*
3724 * Initialize a preallocated and zeroed pmap structure,
3725 * such as one in a vmspace structure.
3726 */
3727
3728void
3729pmap_pinit(pmap_t pmap)
3730{
3731	PDEBUG(1, printf("pmap_pinit: pmap = %08x\n", (uint32_t) pmap));
3732
3733	pmap_alloc_l1(pmap);
3734	bzero(pmap->pm_l2, sizeof(pmap->pm_l2));
3735
3736	LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
3737	pmap->pm_count = 1;
3738	pmap->pm_active = 0;
3739
3740	TAILQ_INIT(&pmap->pm_pvlist);
3741	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
3742	pmap->pm_stats.resident_count = 1;
3743	if (vector_page < KERNBASE) {
3744		pmap_enter(pmap, vector_page, PHYS_TO_VM_PAGE(systempage.pv_pa),
3745		    VM_PROT_READ, 1);
3746	}
3747}
3748
3749
3750/***************************************************
3751 * page management routines.
3752 ***************************************************/
3753
3754
3755static void
3756pmap_free_pv_entry(pv_entry_t pv)
3757{
3758	pv_entry_count--;
3759	uma_zfree(pvzone, pv);
3760}
3761
3762
3763/*
3764 * get a new pv_entry, allocating a block from the system
3765 * when needed.
3766 * the memory allocation is performed bypassing the malloc code
3767 * because of the possibility of allocations at interrupt time.
3768 */
3769static pv_entry_t
3770pmap_get_pv_entry(void)
3771{
3772	pv_entry_t ret_value;
3773
3774	pv_entry_count++;
3775	if (pv_entry_high_water &&
3776	    (pv_entry_count > pv_entry_high_water) &&
3777	    (pmap_pagedaemon_waken == 0)) {
3778	    	pmap_pagedaemon_waken = 1;
3779	    	wakeup (&vm_pages_needed);
3780	}
3781	ret_value = uma_zalloc(pvzone, M_NOWAIT);
3782	return ret_value;
3783}
3784
3785
3786/*
3787 *	Remove the given range of addresses from the specified map.
3788 *
3789 *	It is assumed that the start and end are properly
3790 *	rounded to the page size.
3791 */
3792#define  PMAP_REMOVE_CLEAN_LIST_SIZE     3
3793void
3794pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
3795{
3796	struct l2_bucket *l2b;
3797	vm_offset_t next_bucket;
3798	pt_entry_t *ptep;
3799	u_int cleanlist_idx, total, cnt;
3800	struct {
3801		vm_offset_t va;
3802		pt_entry_t *pte;
3803	} cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
3804	u_int mappings, is_exec, is_refd;
3805	int flushall = 0;
3806
3807
3808	/*
3809	 * we lock in the pmap => pv_head direction
3810	 */
3811#if 0
3812	PMAP_MAP_TO_HEAD_LOCK();
3813	pmap_acquire_pmap_lock(pm);
3814#endif
3815
3816	vm_page_lock_queues();
3817	if (!pmap_is_current(pm)) {
3818		cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1;
3819	} else
3820		cleanlist_idx = 0;
3821
3822	total = 0;
3823	while (sva < eva) {
3824		/*
3825		 * Do one L2 bucket's worth at a time.
3826		 */
3827		next_bucket = L2_NEXT_BUCKET(sva);
3828		if (next_bucket > eva)
3829			next_bucket = eva;
3830
3831		l2b = pmap_get_l2_bucket(pm, sva);
3832		if (l2b == NULL) {
3833			sva = next_bucket;
3834			continue;
3835		}
3836
3837		ptep = &l2b->l2b_kva[l2pte_index(sva)];
3838		mappings = 0;
3839
3840		while (sva < next_bucket) {
3841			struct vm_page *pg;
3842			pt_entry_t pte;
3843			vm_paddr_t pa;
3844
3845			pte = *ptep;
3846
3847			if (pte == 0) {
3848				/*
3849				 * Nothing here, move along
3850				 */
3851				sva += PAGE_SIZE;
3852				ptep++;
3853				continue;
3854			}
3855
3856			pm->pm_stats.resident_count--;
3857			pa = l2pte_pa(pte);
3858			is_exec = 0;
3859			is_refd = 1;
3860
3861			/*
3862			 * Update flags. In a number of circumstances,
3863			 * we could cluster these and process several
3864			 * sequential pages in one go.
3865			 */
3866			if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
3867				struct pv_entry *pve;
3868#if 0
3869				simple_lock(&pg->mdpage.pvh_slock);
3870#endif
3871				pve = pmap_remove_pv(pg, pm, sva);
3872				if (pve) {
3873#if 0
3874				simple_unlock(&pg->mdpage.pvh_slock);
3875#endif
3876				is_exec =
3877				    PV_BEEN_EXECD(pve->pv_flags);
3878				is_refd =
3879				    PV_BEEN_REFD(pve->pv_flags);
3880					pmap_free_pv_entry(pve);
3881				}
3882			}
3883
3884			if (!l2pte_valid(pte)) {
3885				*ptep = 0;
3886				PTE_SYNC_CURRENT(pm, ptep);
3887				sva += PAGE_SIZE;
3888				ptep++;
3889				mappings++;
3890				continue;
3891			}
3892
3893			if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
3894				/* Add to the clean list. */
3895				cleanlist[cleanlist_idx].pte = ptep;
3896				cleanlist[cleanlist_idx].va =
3897				    sva | (is_exec & 1);
3898				cleanlist_idx++;
3899			} else
3900			if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
3901				/* Nuke everything if needed. */
3902				pmap_idcache_wbinv_all(pm);
3903				pmap_tlb_flushID(pm);
3904
3905				/*
3906				 * Roll back the previous PTE list,
3907				 * and zero out the current PTE.
3908				 */
3909				for (cnt = 0;
3910				     cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
3911					*cleanlist[cnt].pte = 0;
					PTE_SYNC(cleanlist[cnt].pte);
3912				}
3913				*ptep = 0;
3914				PTE_SYNC(ptep);
3915				cleanlist_idx++;
3916				flushall = 1;
3917			} else {
3918				*ptep = 0;
3919				PTE_SYNC(ptep);
3920				if (is_exec)
3921					pmap_tlb_flushID_SE(pm, sva);
3922				else
3923				if (is_refd)
3924					pmap_tlb_flushD_SE(pm, sva);
3925			}
3926
3927			sva += PAGE_SIZE;
3928			ptep++;
3929			mappings++;
3930		}
3931
3932		/*
3933		 * Deal with any leftovers
3934		 */
3935		if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
3936			total += cleanlist_idx;
3937			for (cnt = 0; cnt < cleanlist_idx; cnt++) {
3938				vm_offset_t clva =
3939				    cleanlist[cnt].va & ~1;
3940				if (cleanlist[cnt].va & 1) {
3941					pmap_idcache_wbinv_range(pm,
3942					    clva, PAGE_SIZE);
3943					pmap_tlb_flushID_SE(pm, clva);
3944				} else {
3945					pmap_dcache_wb_range(pm,
3946					    clva, PAGE_SIZE, TRUE,
3947					    FALSE);
3948					pmap_tlb_flushD_SE(pm, clva);
3949				}
3950				*cleanlist[cnt].pte = 0;
3951				PTE_SYNC_CURRENT(pm, cleanlist[cnt].pte);
3952			}
3953
3954			if (total <= PMAP_REMOVE_CLEAN_LIST_SIZE)
3955				cleanlist_idx = 0;
3956			else {
3957				/*
3958				 * We are removing so many entries that it's just
3959				 * easier to flush the whole cache.
3960				 */
3961				cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1;
3962				pmap_idcache_wbinv_all(pm);
3963				flushall = 1;
3964			}
3965		}
3966
3967		pmap_free_l2_bucket(pm, l2b, mappings);
3968	}
3969
3970	vm_page_unlock_queues();
3971	if (flushall)
3972		cpu_tlb_flushID();
3973#if 0
3974	pmap_release_pmap_lock(pm);
3975	PMAP_MAP_TO_HEAD_UNLOCK();
3976#endif
3977}
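
/*
 * Illustrative sketch, not part of the pmap interface (kept under
 * "#if 0"): pmap_remove() assumes page-aligned bounds, so a caller
 * tearing down an arbitrary byte range would round them first, e.g.
 * with the standard trunc_page()/round_page() macros.  The wrapper
 * name below is hypothetical.
 */
#if 0
static void
example_remove_range(pmap_t pm, vm_offset_t start, vm_size_t len)
{
	/* Align the bounds the way pmap_remove() expects them. */
	pmap_remove(pm, trunc_page(start), round_page(start + len));
}
#endif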
3978
3979
3980
3981
3982/*
3983 * pmap_zero_page()
3984 *
3985 * Zero a given physical page by mapping it at a page hook point.
3986 * When doing the zero page op, the page being zeroed is mapped cacheable,
3987 * since on StrongARM accesses to non-cached pages are non-burst, which makes
3988 * writing _any_ bulk data very slow.
3989 */
3990#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
3991void
3992pmap_zero_page_generic(vm_paddr_t phys, int off, int size)
3993{
3994#ifdef DEBUG
3995	struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
3996
3997	if (pg->md.pvh_list != NULL)
3998		panic("pmap_zero_page: page has mappings");
3999#endif
4000
4001
4002	/*
4003	 * Hook in the page, zero it, and purge the cache for that
4004	 * zeroed page. Invalidate the TLB as needed.
4005	 */
4006	*cdst_pte = L2_S_PROTO | phys |
4007	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
4008	PTE_SYNC(cdst_pte);
4009	cpu_tlb_flushD_SE(cdstp);
4010	cpu_cpwait();
4011	if (off || size != PAGE_SIZE)
4012		bzero((void *)(cdstp + off), size);
4013	else
4014		bzero_page(cdstp);
4015	cpu_dcache_wbinv_range(cdstp, PAGE_SIZE);
4016}
4017#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
4018
4019#if ARM_MMU_XSCALE == 1
4020void
4021pmap_zero_page_xscale(vm_paddr_t phys, int off, int size)
4022{
4023	/*
4024	 * Hook in the page, zero it, and purge the cache for that
4025	 * zeroed page. Invalidate the TLB as needed.
4026	 */
4027	*cdst_pte = L2_S_PROTO | phys |
4028	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
4029	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
4030	PTE_SYNC(cdst_pte);
4031	cpu_tlb_flushD_SE(cdstp);
4032	cpu_cpwait();
4033	if (off || size != PAGE_SIZE)
4034		bzero((void *)(cdstp + off), size);
4035	else
4036		bzero_page(cdstp);
4037	xscale_cache_clean_minidata();
4038}
4039
4040/*
4041 * Change the PTEs for the specified kernel mappings such that they
4042 * will use the mini data cache instead of the main data cache.
4043 */
4044void
4045pmap_use_minicache(vm_offset_t va, vm_size_t size)
4046{
4047	struct l2_bucket *l2b;
4048	pt_entry_t *ptep, *sptep, pte;
4049	vm_offset_t next_bucket, eva;
4050
4051#if (ARM_NMMUS > 1)
4052	if (xscale_use_minidata == 0)
4053		return;
4054#endif
4055
4056	eva = va + size;
4057
4058	while (va < eva) {
4059		next_bucket = L2_NEXT_BUCKET(va);
4060		if (next_bucket > eva)
4061			next_bucket = eva;
4062
4063		l2b = pmap_get_l2_bucket(pmap_kernel(), va);
4064
4065		sptep = ptep = &l2b->l2b_kva[l2pte_index(va)];
4066
4067		while (va < next_bucket) {
4068			pte = *ptep;
4069			if (!l2pte_minidata(pte)) {
4070				cpu_dcache_wbinv_range(va, PAGE_SIZE);
4071				cpu_tlb_flushD_SE(va);
4072				*ptep = pte & ~L2_B;
4073			}
4074			ptep++;
4075			va += PAGE_SIZE;
4076		}
4077		PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep));
4078	}
4079	cpu_cpwait();
4080}
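
/*
 * Illustrative sketch, not part of the pmap interface (kept under
 * "#if 0"): a typical use of pmap_use_minicache() is to steer a small,
 * hot kernel buffer through the XScale mini data cache so it does not
 * keep evicting ordinary data.  The buffer below is hypothetical; it
 * must already be mapped in the kernel pmap.
 */
#if 0
static void
example_minicache_buffer(vm_offset_t buf_kva)
{
	/* Redirect one page of already-mapped KVA to the mini d-cache. */
	pmap_use_minicache(buf_kva, PAGE_SIZE);
}
#endif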
4081#endif /* ARM_MMU_XSCALE == 1 */
4082
4083/*
4084 *	pmap_zero_page zeros the specified hardware page by mapping
4085 *	the page into KVM and using bzero to clear its contents.
4086 */
4087void
4088pmap_zero_page(vm_page_t m)
4089{
4090	pmap_zero_page_func(VM_PAGE_TO_PHYS(m), 0, PAGE_SIZE);
4091}
4092
4093
4094/*
4095 *	pmap_zero_page_area zeros the specified hardware page by mapping
4096 *	the page into KVM and using bzero to clear its contents.
4097 *
4098 *	off and size may not cover an area beyond a single hardware page.
4099 */
4100void
4101pmap_zero_page_area(vm_page_t m, int off, int size)
4102{
4103
4104	pmap_zero_page_func(VM_PAGE_TO_PHYS(m), off, size);
4105}
4106
4107
4108/*
4109 *	pmap_zero_page_idle zeros the specified hardware page by mapping
4110 *	the page into KVM and using bzero to clear its contents.  This
4111 *	is intended to be called from the vm_pagezero process only and
4112 *	outside of Giant.
4113 */
4114void
4115pmap_zero_page_idle(vm_page_t m)
4116{
4117
4118	pmap_zero_page(m);
4119}
4120
4121/*
4122 * pmap_clean_page()
4123 *
4124 * This is a local function used to work out the best strategy to clean
4125 * a single page referenced by its entry in the PV table. It's used by
4126 * pmap_copy_page, pmap_zero_page and maybe some others later on.
4127 *
4128 * Its policy is effectively:
4129 *  o If there are no mappings, we don't bother doing anything with the cache.
4130 *  o If there is one mapping, we clean just that page.
4131 *  o If there are multiple mappings, we clean the entire cache.
4132 *
4133 * So that some functions can be further optimised, it returns 0 if it didn't
4134 * clean the entire cache, or 1 if it did.
4135 *
4136 * XXX One bug in this routine is that if the pv_entry has a single page
4137 * mapped at 0x00000000, a whole cache clean will be performed rather than
4138 * just that one page. This should not occur in everyday use, and if it
4139 * does it merely results in a less efficient clean for the page.
4140 */
4141static int
4142pmap_clean_page(struct pv_entry *pv, boolean_t is_src)
4143{
4144	pmap_t pm, pm_to_clean = NULL;
4145	struct pv_entry *npv;
4146	u_int cache_needs_cleaning = 0;
4147	u_int flags = 0;
4148	vm_offset_t page_to_clean = 0;
4149
4150	if (pv == NULL) {
4151		/* nothing mapped in so nothing to flush */
4152		return (0);
4153	}
4154
4155	/*
4156	 * Since we flush the cache each time we change to a different
4157	 * user vmspace, we only need to flush the page if it is in the
4158	 * current pmap.
4159	 */
4160	if (curthread)
4161		pm = vmspace_pmap(curproc->p_vmspace);
4162	else
4163		pm = pmap_kernel();
4164
4165	for (npv = pv; npv; npv = TAILQ_NEXT(npv, pv_list)) {
4166		if (npv->pv_pmap == pmap_kernel() || npv->pv_pmap == pm) {
4167			flags |= npv->pv_flags;
4168			/*
4169			 * The page is mapped non-cacheable in
4170			 * this map.  No need to flush the cache.
4171			 */
4172			if (npv->pv_flags & PVF_NC) {
4173#ifdef DIAGNOSTIC
4174				if (cache_needs_cleaning)
4175					panic("pmap_clean_page: "
4176					    "cache inconsistency");
4177#endif
4178				break;
4179			} else if (is_src && (npv->pv_flags & PVF_WRITE) == 0)
4180				continue;
4181			if (cache_needs_cleaning) {
4182				page_to_clean = 0;
4183				break;
4184			} else {
4185				page_to_clean = npv->pv_va;
4186				pm_to_clean = npv->pv_pmap;
4187			}
4188			cache_needs_cleaning = 1;
4189		}
4190	}
4191	if (page_to_clean) {
4192		if (PV_BEEN_EXECD(flags))
4193			pmap_idcache_wbinv_range(pm_to_clean, page_to_clean,
4194			    PAGE_SIZE);
4195		else
4196			pmap_dcache_wb_range(pm_to_clean, page_to_clean,
4197			    PAGE_SIZE, !is_src, (flags & PVF_WRITE) == 0);
4198	} else if (cache_needs_cleaning) {
4199		if (PV_BEEN_EXECD(flags))
4200			pmap_idcache_wbinv_all(pm);
4201		else
4202			pmap_dcache_wbinv_all(pm);
4203		return (1);
4204	}
4205	return (0);
4206}
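
/*
 * Illustrative sketch, not part of the pmap interface (kept under
 * "#if 0"): how a caller can use the return value of
 * pmap_clean_page().  A result of 1 means the whole cache was written
 * back, so any per-page clean the caller planned becomes redundant.
 * The helper name below is hypothetical.
 */
#if 0
static void
example_clean_before_dma(struct vm_page *pg)
{
	if (pmap_clean_page(TAILQ_FIRST(&pg->md.pv_list), TRUE) == 1)
		return;		/* entire cache already cleaned */
	/* Otherwise at most one mapping was cleaned individually. */
}
#endif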
4207
4208/*
4209 *	pmap_copy_page copies the specified (machine independent)
4210 *	page by mapping the page into virtual memory and using
4211 *	bcopy to copy the page, one machine dependent page at a
4212 *	time.
4213 */
4214
4215/*
4216 * pmap_copy_page()
4217 *
4218 * Copy one physical page into another, by mapping the pages into
4219 * hook points. The same comment regarding cacheability as in
4220 * pmap_zero_page also applies here.
4221 */
4222#if  (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
4223void
4224pmap_copy_page_generic(vm_paddr_t src, vm_paddr_t dst)
4225{
4226	struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
4227#ifdef DEBUG
4228	struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
4229
4230	if (dst_pg->md.pvh_list != NULL)
4231		panic("pmap_copy_page: dst page has mappings");
4232#endif
4233
4234
4235	/*
4236	 * Clean the source page.  Hold the source page's lock for
4237	 * the duration of the copy so that no other mappings can
4238	 * be created while we have a potentially aliased mapping.
4239	 */
4240#if 0
4241	mtx_lock(&src_pg->md.pvh_mtx);
4242#endif
4243	(void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE);
4244
4245	/*
4246	 * Map the pages into the page hook points, copy them, and purge
4247	 * the cache for the appropriate page. Invalidate the TLB
4248	 * as required.
4249	 */
4250	*csrc_pte = L2_S_PROTO | src |
4251	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
4252	PTE_SYNC(csrc_pte);
4253	*cdst_pte = L2_S_PROTO | dst |
4254	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
4255	PTE_SYNC(cdst_pte);
4256	cpu_tlb_flushD_SE(csrcp);
4257	cpu_tlb_flushD_SE(cdstp);
4258	cpu_cpwait();
4259	bcopy_page(csrcp, cdstp);
4260	cpu_dcache_inv_range(csrcp, PAGE_SIZE);
4261#if 0
4262	mtx_unlock(&src_pg->md.pvh_mtx);
4263#endif
4264	cpu_dcache_wbinv_range(cdstp, PAGE_SIZE);
4265}
4266#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
4267
4268#if ARM_MMU_XSCALE == 1
4269void
4270pmap_copy_page_xscale(vm_paddr_t src, vm_paddr_t dst)
4271{
4272	struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
4273#ifdef DEBUG
4274	struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
4275
4276	if (dst_pg->md.pvh_list != NULL)
4277		panic("pmap_copy_page: dst page has mappings");
4278#endif
4279
4280
4281	/*
4282	 * Clean the source page.  Hold the source page's lock for
4283	 * the duration of the copy so that no other mappings can
4284	 * be created while we have a potentially aliased mapping.
4285	 */
4286	(void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE);
4287
4288	/*
4289	 * Map the pages into the page hook points, copy them, and purge
4290	 * the cache for the appropriate page. Invalidate the TLB
4291	 * as required.
4292	 */
4293	*csrc_pte = L2_S_PROTO | src |
4294	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
4295	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
4296	PTE_SYNC(csrc_pte);
4297	*cdst_pte = L2_S_PROTO | dst |
4298	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
4299	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
4300	PTE_SYNC(cdst_pte);
4301	cpu_tlb_flushD_SE(csrcp);
4302	cpu_tlb_flushD_SE(cdstp);
4303	cpu_cpwait();
4304	bcopy_page(csrcp, cdstp);
4305	xscale_cache_clean_minidata();
4306}
4307#endif /* ARM_MMU_XSCALE == 1 */
4308
4309void
4310pmap_copy_page(vm_page_t src, vm_page_t dst)
4311{
4312	cpu_dcache_wbinv_all();
4313	pmap_copy_page_func(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
4314}
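
/*
 * Illustrative sketch, not part of the pmap interface (kept under
 * "#if 0"): pmap_copy_page() is the machine-dependent primitive used
 * when the VM system has to duplicate a physical page, for instance
 * when resolving a copy-on-write fault.  The helper name below is
 * hypothetical.
 */
#if 0
static void
example_duplicate_page(vm_page_t orig, vm_page_t copy)
{
	/* Both pages are named by their vm_page_t, not by any KVA. */
	pmap_copy_page(orig, copy);
}
#endif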
4315
4316
4317
4318
4319/*
4320 * this routine returns true if a physical page resides
4321 * in the given pmap.
4322 */
4323boolean_t
4324pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
4325{
4326	pv_entry_t pv;
4327	int loops = 0;
4328	int s;
4329
4330	if (m->flags & PG_FICTITIOUS)
4331		return (FALSE);
4332
4333	s = splvm();
4334
4335	/*
4336	 * Check the current mappings, returning immediately if found.
4337	 */
4338	for (pv = TAILQ_FIRST(&m->md.pv_list);
4339	    pv;
4340	    pv = TAILQ_NEXT(pv, pv_list)) {
4341		if (pv->pv_pmap == pmap) {
4342			splx(s);
4343			return (TRUE);
4344		}
4345		loops++;
4346		if (loops >= 16)
4347			break;
4348	}
4349	splx(s);
4350	return (FALSE);
4351}
4352
4353
4354/*
4355 *	pmap_ts_referenced:
4356 *
4357 *	Return the count of reference bits for a page, clearing all of them.
4358 */
4359int
4360pmap_ts_referenced(vm_page_t m)
4361{
4362	return (pmap_clearbit(m, PVF_REF));
4363}
4364
4365
4366boolean_t
4367pmap_is_modified(vm_page_t m)
4368{
4369
4370	if (m->md.pvh_attrs & PVF_MOD)
4371		return (TRUE);
4372
4373	return(FALSE);
4374}
4375
4376
4377/*
4378 *	Clear the modify bits on the specified physical page.
4379 */
4380void
4381pmap_clear_modify(vm_page_t m)
4382{
4383
4384	if (m->md.pvh_attrs & PVF_MOD)
4385		pmap_clearbit(m, PVF_MOD);
4386}
4387
4388
4389/*
4390 *	pmap_clear_reference:
4391 *
4392 *	Clear the reference bit on the specified physical page.
4393 */
4394void
4395pmap_clear_reference(vm_page_t m)
4396{
4397
4398	if (m->md.pvh_attrs & PVF_REF)
4399		pmap_clearbit(m, PVF_REF);
4400}
4401
4402
4403/*
4404 * perform the pmap work for mincore
4405 */
4406int
4407pmap_mincore(pmap_t pmap, vm_offset_t addr)
4408{
4409	printf("pmap_mincore()\n");
4410
4411	return (0);
4412}
4413
4414
4415vm_offset_t
4416pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
4417{
4418
4419	return(addr);
4420}
4421
4422
4423/*
4424 * Map a set of physical memory pages into the kernel virtual
4425 * address space. Return a pointer to where it is mapped. This
4426 * routine is intended to be used for mapping device memory,
4427 * NOT real memory.
4428 */
4429void *
4430pmap_mapdev(vm_offset_t pa, vm_size_t size)
4431{
4432	vm_offset_t va, tmpva, offset;
4433
4434	offset = pa & PAGE_MASK;
4435	offset = pa & PAGE_MASK;
	pa -= offset;
4436	size = roundup(size + offset, PAGE_SIZE);
4437	GIANT_REQUIRED;
4438
4439	va = kmem_alloc_nofault(kernel_map, size);
4440	if (!va)
4441		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
4442	for (tmpva = va; size > 0;) {
4443		pmap_kenter_internal(tmpva, pa, 0);
4444		size -= PAGE_SIZE;
4445		tmpva += PAGE_SIZE;
4446		pa += PAGE_SIZE;
4447	}
4448
4449	return ((void *)(va + offset));
4450}
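
/*
 * Illustrative sketch, not part of the pmap interface (kept under
 * "#if 0"): mapping a block of device registers with pmap_mapdev().
 * The physical address and size are made-up placeholders; a real
 * driver would take them from its bus resources.
 */
#if 0
static void *
example_map_device_regs(void)
{
	/* Hypothetical register window: 4KB at physical 0x80000000. */
	return (pmap_mapdev(0x80000000, 0x1000));
}
#endif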
4451
4452#define BOOTSTRAP_DEBUG
4453
4454/*
4455 * pmap_map_section:
4456 *
4457 *	Create a single section mapping.
4458 */
4459void
4460pmap_map_section(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
4461    int prot, int cache)
4462{
4463	pd_entry_t *pde = (pd_entry_t *) l1pt;
4464	pd_entry_t fl;
4465
4466	KASSERT(((va | pa) & L1_S_OFFSET) == 0, ("pmap_map_section: misaligned va/pa"));
4467
4468	switch (cache) {
4469	case PTE_NOCACHE:
4470	default:
4471		fl = 0;
4472		break;
4473
4474	case PTE_CACHE:
4475		fl = pte_l1_s_cache_mode;
4476		break;
4477
4478	case PTE_PAGETABLE:
4479		fl = pte_l1_s_cache_mode_pt;
4480		break;
4481	}
4482
4483	pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
4484	    L1_S_PROT(PTE_KERNEL, prot) | fl | L1_S_DOM(PMAP_DOMAIN_KERNEL);
4485	PTE_SYNC(&pde[va >> L1_S_SHIFT]);
4486
4487}
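
/*
 * Illustrative sketch, not part of the pmap interface (kept under
 * "#if 0"): creating one 1MB section mapping during bootstrap, before
 * the regular pmap is up.  Device registers would use PTE_NOCACHE,
 * normal memory PTE_CACHE.  The addresses are hypothetical and must
 * both be section aligned, as the KASSERT above requires.
 */
#if 0
static void
example_map_device_section(vm_offset_t l1pt)
{
	pmap_map_section(l1pt,
	    0xf0000000,		/* section-aligned VA (example) */
	    0x40000000,		/* section-aligned PA (example) */
	    VM_PROT_READ | VM_PROT_WRITE, PTE_NOCACHE);
}
#endif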
4488
4489/*
4490 * pmap_link_l2pt:
4491 *
4492 *	Link the L2 page table specified by "pa" into the L1
4493 *	page table at the slot for "va".
4494 */
4495void
4496pmap_link_l2pt(vm_offset_t l1pt, vm_offset_t va, struct pv_addr *l2pv)
4497{
4498	pd_entry_t *pde = (pd_entry_t *) l1pt, proto;
4499	u_int slot = va >> L1_S_SHIFT;
4500
4501#ifndef ARM32_NEW_VM_LAYOUT
4502	KASSERT((va & ((L1_S_SIZE * 4) - 1)) == 0, ("pmap_link_l2pt: misaligned va"));
4503	KASSERT((l2pv->pv_pa & PAGE_MASK) == 0, ("pmap_link_l2pt: misaligned pa"));
4504#endif
4505
4506	proto = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO;
4507
4508	pde[slot + 0] = proto | (l2pv->pv_pa + 0x000);
4509#ifdef ARM32_NEW_VM_LAYOUT
4510	PTE_SYNC(&pde[slot]);
4511#else
4512	pde[slot + 1] = proto | (l2pv->pv_pa + 0x400);
4513	pde[slot + 2] = proto | (l2pv->pv_pa + 0x800);
4514	pde[slot + 3] = proto | (l2pv->pv_pa + 0xc00);
4515	PTE_SYNC_RANGE(&pde[slot + 0], 4);
4516#endif
4517
4518	SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
4519
4520
4521}
4522
4523/*
4524 * pmap_map_entry
4525 *
4526 * 	Create a single page mapping.
4527 */
4528void
4529pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot,
4530    int cache)
4531{
4532	pd_entry_t *pde = (pd_entry_t *) l1pt;
4533	pt_entry_t fl;
4534	pt_entry_t *pte;
4535
4536	KASSERT(((va | pa) & PAGE_MASK) == 0, ("pmap_map_entry: misaligned va/pa"));
4537
4538	switch (cache) {
4539	case PTE_NOCACHE:
4540	default:
4541		fl = 0;
4542		break;
4543
4544	case PTE_CACHE:
4545		fl = pte_l2_s_cache_mode;
4546		break;
4547
4548	case PTE_PAGETABLE:
4549		fl = pte_l2_s_cache_mode_pt;
4550		break;
4551	}
4552
4553	if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
4554		panic("pmap_map_entry: no L2 table for VA 0x%08x", va);
4555
4556#ifndef ARM32_NEW_VM_LAYOUT
4557	pte = (pt_entry_t *)
4558	    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
4559#else
4560	pte = (pt_entry_t *) kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK);
4561#endif
4562
4563	if (pte == NULL)
4564		panic("pmap_map_entry: can't find L2 table for VA 0x%08x", va);
4565
4566#ifndef ARM32_NEW_VM_LAYOUT
4567	pte[(va >> PAGE_SHIFT) & 0x3ff] =
4568	    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | fl;
4569	PTE_SYNC(&pte[(va >> PAGE_SHIFT) & 0x3ff]);
4570#else
4571	pte[l2pte_index(va)] =
4572	    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | fl;
4573	PTE_SYNC(&pte[l2pte_index(va)]);
4574#endif
4575}
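
/*
 * Illustrative sketch, not part of the pmap interface (kept under
 * "#if 0"): pmap_map_entry() maps a single page and requires that an
 * L2 table already be linked into the L1 slot covering the VA (see
 * pmap_link_l2pt() above); otherwise it panics.  The addresses are
 * hypothetical and page aligned.
 */
#if 0
static void
example_map_one_page(vm_offset_t l1pt)
{
	pmap_map_entry(l1pt,
	    0xf1000000,		/* page-aligned VA (example) */
	    0x40100000,		/* page-aligned PA (example) */
	    VM_PROT_READ | VM_PROT_WRITE, PTE_NOCACHE);
}
#endif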
4576
4577/*
4578 * pmap_map_chunk:
4579 *
4580 *	Map a chunk of memory using the most efficient mappings
4581 *	possible (section, large page, small page) into the
4582 *	provided L1 and L2 tables at the specified virtual address.
4583 */
4584vm_size_t
4585pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
4586    vm_size_t size, int prot, int cache)
4587{
4588	pd_entry_t *pde = (pd_entry_t *) l1pt;
4589	pt_entry_t *pte, f1, f2s, f2l;
4590	vm_size_t resid;
4591	int i;
4592
4593	resid = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
4594
4595	if (l1pt == 0)
4596		panic("pmap_map_chunk: no L1 table provided");
4597
4598#ifdef VERBOSE_INIT_ARM
4599	printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
4600	    "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
4601#endif
4602
4603	switch (cache) {
4604	case PTE_NOCACHE:
4605	default:
4606		f1 = 0;
4607		f2l = 0;
4608		f2s = 0;
4609		break;
4610
4611	case PTE_CACHE:
4612		f1 = pte_l1_s_cache_mode;
4613		f2l = pte_l2_l_cache_mode;
4614		f2s = pte_l2_s_cache_mode;
4615		break;
4616
4617	case PTE_PAGETABLE:
4618		f1 = pte_l1_s_cache_mode_pt;
4619		f2l = pte_l2_l_cache_mode_pt;
4620		f2s = pte_l2_s_cache_mode_pt;
4621		break;
4622	}
4623
4624	size = resid;
4625
4626	while (resid > 0) {
4627		/* See if we can use a section mapping. */
4628		if (L1_S_MAPPABLE_P(va, pa, resid)) {
4629#ifdef VERBOSE_INIT_ARM
4630			printf("S");
4631#endif
4632			pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
4633			    L1_S_PROT(PTE_KERNEL, prot) | f1 |
4634			    L1_S_DOM(PMAP_DOMAIN_KERNEL);
4635			PTE_SYNC(&pde[va >> L1_S_SHIFT]);
4636			va += L1_S_SIZE;
4637			pa += L1_S_SIZE;
4638			resid -= L1_S_SIZE;
4639			continue;
4640		}
4641
4642		/*
4643		 * Ok, we're going to use an L2 table.  Make sure
4644		 * one is actually in the corresponding L1 slot
4645		 * for the current VA.
4646		 */
4647		if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
4648			panic("pmap_map_chunk: no L2 table for VA 0x%08x", va);
4649
4650#ifndef ARM32_NEW_VM_LAYOUT
4651		pte = (pt_entry_t *)
4652		    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
4653#else
4654		pte = (pt_entry_t *) kernel_pt_lookup(
4655		    pde[L1_IDX(va)] & L1_C_ADDR_MASK);
4656#endif
4657		if (pte == NULL)
4658			panic("pmap_map_chunk: can't find L2 table for VA "
4659			    "0x%08x", va);
4660		/* See if we can use a L2 large page mapping. */
4661		if (L2_L_MAPPABLE_P(va, pa, resid)) {
4662#ifdef VERBOSE_INIT_ARM
4663			printf("L");
4664#endif
4665			for (i = 0; i < 16; i++) {
4666#ifndef ARM32_NEW_VM_LAYOUT
4667				pte[((va >> PAGE_SHIFT) & 0x3f0) + i] =
4668				    L2_L_PROTO | pa |
4669				    L2_L_PROT(PTE_KERNEL, prot) | f2l;
4670				PTE_SYNC(&pte[((va >> PAGE_SHIFT) & 0x3f0) + i]);
4671#else
4672				pte[l2pte_index(va) + i] =
4673				    L2_L_PROTO | pa |
4674				    L2_L_PROT(PTE_KERNEL, prot) | f2l;
4675				PTE_SYNC(&pte[l2pte_index(va) + i]);
4676#endif
4677			}
4678			va += L2_L_SIZE;
4679			pa += L2_L_SIZE;
4680			resid -= L2_L_SIZE;
4681			continue;
4682		}
4683
4684		/* Use a small page mapping. */
4685#ifdef VERBOSE_INIT_ARM
4686		printf("P");
4687#endif
4688#ifndef ARM32_NEW_VM_LAYOUT
4689		pte[(va >> PAGE_SHIFT) & 0x3ff] =
4690		    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s;
4691		PTE_SYNC(&pte[(va >> PAGE_SHIFT) & 0x3ff]);
4692#else
4693		pte[l2pte_index(va)] =
4694		    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s;
4695		PTE_SYNC(&pte[l2pte_index(va)]);
4696#endif
4697		va += PAGE_SIZE;
4698		pa += PAGE_SIZE;
4699		resid -= PAGE_SIZE;
4700	}
4701#ifdef VERBOSE_INIT_ARM
4702	printf("\n");
4703#endif
4704	return (size);
4705
4706}
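
/*
 * Illustrative sketch, not part of the pmap interface (kept under
 * "#if 0"): pmap_map_chunk() chooses section, large-page or small-page
 * mappings by itself, so a bootstrap caller simply hands it the whole
 * region.  A section-aligned 2MB region, for example, becomes two 1MB
 * sections; anything unaligned or smaller falls back to 64KB and 4KB
 * pages.  The addresses below are hypothetical.
 */
#if 0
static void
example_map_kernel_ram(vm_offset_t l1pt)
{
	vm_size_t mapped;

	mapped = pmap_map_chunk(l1pt,
	    0xc0000000,		/* kernel VA (example) */
	    0xa0000000,		/* RAM PA (example) */
	    2 * 1024 * 1024,	/* 2MB */
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
	/* "mapped" is the size actually mapped, rounded up to whole pages. */
}
#endif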
4707
4708/********************** Static device map routines ***************************/
4709
4710static const struct pmap_devmap *pmap_devmap_table;
4711
4712/*
4713 * Register the devmap table.  This is provided in case early console
4714 * initialization needs to register mappings created by bootstrap code
4715 * before pmap_devmap_bootstrap() is called.
4716 */
4717void
4718pmap_devmap_register(const struct pmap_devmap *table)
4719{
4720
4721	pmap_devmap_table = table;
4722}
4723
4724/*
4725 * Map all of the static regions in the devmap table, and remember
4726 * the devmap table so other parts of the kernel can look up entries
4727 * later.
4728 */
4729void
4730pmap_devmap_bootstrap(vm_offset_t l1pt, const struct pmap_devmap *table)
4731{
4732	int i;
4733
4734	pmap_devmap_table = table;
4735
4736	for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
4737#ifdef VERBOSE_INIT_ARM
4738		printf("devmap: %08lx -> %08lx @ %08lx\n",
4739		    pmap_devmap_table[i].pd_pa,
4740		    pmap_devmap_table[i].pd_pa +
4741			pmap_devmap_table[i].pd_size - 1,
4742		    pmap_devmap_table[i].pd_va);
4743#endif
4744		pmap_map_chunk(l1pt, pmap_devmap_table[i].pd_va,
4745		    pmap_devmap_table[i].pd_pa,
4746		    pmap_devmap_table[i].pd_size,
4747		    pmap_devmap_table[i].pd_prot,
4748		    pmap_devmap_table[i].pd_cache);
4749	}
4750}
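
/*
 * Illustrative sketch, not part of the pmap interface (kept under
 * "#if 0"): a typical static devmap table as consumed by
 * pmap_devmap_bootstrap().  The array is terminated by an entry with a
 * zero pd_size, and each entry is passed to pmap_map_chunk() exactly
 * as above.  Addresses and sizes are hypothetical.
 */
#if 0
static const struct pmap_devmap example_devmap[] = {
	{
		.pd_va = 0xf0000000,		/* KVA of the mapping */
		.pd_pa = 0x40000000,		/* physical base */
		.pd_size = 0x00100000,		/* 1MB of registers */
		.pd_prot = VM_PROT_READ | VM_PROT_WRITE,
		.pd_cache = PTE_NOCACHE
	},
	{ .pd_size = 0 }			/* terminator */
};

static void
example_devmap_bootstrap(vm_offset_t l1pt)
{
	pmap_devmap_bootstrap(l1pt, example_devmap);
}
#endif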
4751
4752const struct pmap_devmap *
4753pmap_devmap_find_pa(vm_paddr_t pa, vm_size_t size)
4754{
4755	int i;
4756
4757	if (pmap_devmap_table == NULL)
4758		return (NULL);
4759
4760	for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
4761		if (pa >= pmap_devmap_table[i].pd_pa &&
4762		    pa + size <= pmap_devmap_table[i].pd_pa +
4763				 pmap_devmap_table[i].pd_size)
4764			return (&pmap_devmap_table[i]);
4765	}
4766
4767	return (NULL);
4768}
4769
4770const struct pmap_devmap *
4771pmap_devmap_find_va(vm_offset_t va, vm_size_t size)
4772{
4773	int i;
4774
4775	if (pmap_devmap_table == NULL)
4776		return (NULL);
4777
4778	for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
4779		if (va >= pmap_devmap_table[i].pd_va &&
4780		    va + size <= pmap_devmap_table[i].pd_va +
4781				 pmap_devmap_table[i].pd_size)
4782			return (&pmap_devmap_table[i]);
4783	}
4784
4785	return (NULL);
4786}
4787
4788