1/* From: $NetBSD: pmap.c,v 1.148 2004/04/03 04:35:48 bsh Exp $ */
2/*-
3 * Copyright 2004 Olivier Houchard.
4 * Copyright 2003 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Steve C. Woodford for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *      This product includes software developed for the NetBSD Project by
20 *      Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 *    or promote products derived from this software without specific prior
23 *    written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38/*-
39 * Copyright (c) 2002-2003 Wasabi Systems, Inc.
40 * Copyright (c) 2001 Richard Earnshaw
41 * Copyright (c) 2001-2002 Christopher Gilbert
42 * All rights reserved.
43 *
44 * 1. Redistributions of source code must retain the above copyright
45 *    notice, this list of conditions and the following disclaimer.
46 * 2. Redistributions in binary form must reproduce the above copyright
47 *    notice, this list of conditions and the following disclaimer in the
48 *    documentation and/or other materials provided with the distribution.
49 * 3. The name of the company nor the name of the author may be used to
50 *    endorse or promote products derived from this software without specific
51 *    prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
54 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
55 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
56 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
57 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
58 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
59 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
64 */
65/*-
66 * Copyright (c) 1999 The NetBSD Foundation, Inc.
67 * All rights reserved.
68 *
69 * This code is derived from software contributed to The NetBSD Foundation
70 * by Charles M. Hannum.
71 *
72 * Redistribution and use in source and binary forms, with or without
73 * modification, are permitted provided that the following conditions
74 * are met:
75 * 1. Redistributions of source code must retain the above copyright
76 *    notice, this list of conditions and the following disclaimer.
77 * 2. Redistributions in binary form must reproduce the above copyright
78 *    notice, this list of conditions and the following disclaimer in the
79 *    documentation and/or other materials provided with the distribution.
80 *
81 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
82 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
83 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
84 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
85 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
86 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
87 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
88 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
89 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
90 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
91 * POSSIBILITY OF SUCH DAMAGE.
92 */
93
94/*-
95 * Copyright (c) 1994-1998 Mark Brinicombe.
96 * Copyright (c) 1994 Brini.
97 * All rights reserved.
98 *
99 * This code is derived from software written for Brini by Mark Brinicombe
100 *
101 * Redistribution and use in source and binary forms, with or without
102 * modification, are permitted provided that the following conditions
103 * are met:
104 * 1. Redistributions of source code must retain the above copyright
105 *    notice, this list of conditions and the following disclaimer.
106 * 2. Redistributions in binary form must reproduce the above copyright
107 *    notice, this list of conditions and the following disclaimer in the
108 *    documentation and/or other materials provided with the distribution.
109 * 3. All advertising materials mentioning features or use of this software
110 *    must display the following acknowledgement:
111 *      This product includes software developed by Mark Brinicombe.
112 * 4. The name of the author may not be used to endorse or promote products
113 *    derived from this software without specific prior written permission.
114 *
115 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
116 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
117 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
118 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
119 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
120 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
121 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
122 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
125 * RiscBSD kernel project
126 *
127 * pmap.c
128 *
 * Machine dependent VM stuff
130 *
131 * Created      : 20/09/94
132 */
133
134/*
135 * Special compilation symbols
136 * PMAP_DEBUG           - Build in pmap_debug_level code
137 */
138/* Include header files */
139
140#include "opt_vm.h"
141
142#include <sys/cdefs.h>
143__FBSDID("$FreeBSD$");
144#include <sys/param.h>
145#include <sys/systm.h>
146#include <sys/kernel.h>
147#include <sys/ktr.h>
148#include <sys/proc.h>
149#include <sys/malloc.h>
150#include <sys/msgbuf.h>
151#include <sys/vmmeter.h>
152#include <sys/mman.h>
153#include <sys/smp.h>
154#include <sys/sched.h>
155
156#include <vm/vm.h>
157#include <vm/vm_param.h>
158#include <vm/uma.h>
159#include <vm/pmap.h>
160#include <vm/vm_kern.h>
161#include <vm/vm_object.h>
162#include <vm/vm_map.h>
163#include <vm/vm_page.h>
164#include <vm/vm_pageout.h>
165#include <vm/vm_extern.h>
166#include <sys/lock.h>
167#include <sys/mutex.h>
168#include <machine/md_var.h>
169#include <machine/cpu.h>
170#include <machine/cpufunc.h>
171#include <machine/pcb.h>
172
173#ifdef PMAP_DEBUG
174#define PDEBUG(_lev_,_stat_) \
175        if (pmap_debug_level >= (_lev_)) \
176                ((_stat_))
177#define dprintf printf
178
179int pmap_debug_level = 0;
180#define PMAP_INLINE
181#else   /* PMAP_DEBUG */
182#define PDEBUG(_lev_,_stat_) /* Nothing */
183#define dprintf(x, arg...)
184#define PMAP_INLINE __inline
185#endif  /* PMAP_DEBUG */
186
187extern struct pv_addr systempage;
188/*
189 * Internal function prototypes
190 */
191static void pmap_free_pv_entry (pv_entry_t);
192static pv_entry_t pmap_get_pv_entry(void);
193
194static void		pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
195    vm_prot_t, boolean_t, int);
196static void		pmap_fix_cache(struct vm_page *, pmap_t, vm_offset_t);
197static void		pmap_alloc_l1(pmap_t);
198static void		pmap_free_l1(pmap_t);
199
200static int		pmap_clearbit(struct vm_page *, u_int);
201
202static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vm_offset_t);
203static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vm_offset_t);
204static void		pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int);
205static vm_offset_t	kernel_pt_lookup(vm_paddr_t);
206
207static MALLOC_DEFINE(M_VMPMAP, "pmap", "PMAP L1");
208
209vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
210vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
211vm_offset_t pmap_curmaxkvaddr;
212vm_paddr_t kernel_l1pa;
213
214extern void *end;
215vm_offset_t kernel_vm_end = 0;
216
217struct pmap kernel_pmap_store;
218
219static pt_entry_t *csrc_pte, *cdst_pte;
220static vm_offset_t csrcp, cdstp;
221static struct mtx cmtx;
222
223static void		pmap_init_l1(struct l1_ttable *, pd_entry_t *);
224/*
225 * These routines are called when the CPU type is identified to set up
226 * the PTE prototypes, cache modes, etc.
227 *
228 * The variables are always here, just in case LKMs need to reference
229 * them (though, they shouldn't).
230 */
231
232pt_entry_t	pte_l1_s_cache_mode;
233pt_entry_t	pte_l1_s_cache_mode_pt;
234pt_entry_t	pte_l1_s_cache_mask;
235
236pt_entry_t	pte_l2_l_cache_mode;
237pt_entry_t	pte_l2_l_cache_mode_pt;
238pt_entry_t	pte_l2_l_cache_mask;
239
240pt_entry_t	pte_l2_s_cache_mode;
241pt_entry_t	pte_l2_s_cache_mode_pt;
242pt_entry_t	pte_l2_s_cache_mask;
243
244pt_entry_t	pte_l2_s_prot_u;
245pt_entry_t	pte_l2_s_prot_w;
246pt_entry_t	pte_l2_s_prot_mask;
247
248pt_entry_t	pte_l1_s_proto;
249pt_entry_t	pte_l1_c_proto;
250pt_entry_t	pte_l2_s_proto;
251
252void		(*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
253void		(*pmap_copy_page_offs_func)(vm_paddr_t a_phys,
254		    vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs,
255		    int cnt);
256void		(*pmap_zero_page_func)(vm_paddr_t, int, int);
257/*
258 * Which pmap is currently 'live' in the cache
259 *
260 * XXXSCW: Fix for SMP ...
261 */
262union pmap_cache_state *pmap_cache_state;
263
264struct msgbuf *msgbufp = 0;
265
266/*
267 * Crashdump maps.
268 */
269static caddr_t crashdumpmap;
270
271extern void bcopy_page(vm_offset_t, vm_offset_t);
272extern void bzero_page(vm_offset_t);
273
274extern vm_offset_t alloc_firstaddr;
275
276char *_tmppt;
277
278/*
279 * Metadata for L1 translation tables.
280 */
281struct l1_ttable {
282	/* Entry on the L1 Table list */
283	SLIST_ENTRY(l1_ttable) l1_link;
284
285	/* Entry on the L1 Least Recently Used list */
286	TAILQ_ENTRY(l1_ttable) l1_lru;
287
288	/* Track how many domains are allocated from this L1 */
289	volatile u_int l1_domain_use_count;
290
291	/*
292	 * A free-list of domain numbers for this L1.
293	 * We avoid using ffs() and a bitmap to track domains since ffs()
294	 * is slow on ARM.
295	 */
296	u_int8_t l1_domain_first;
297	u_int8_t l1_domain_free[PMAP_DOMAINS];
298
299	/* Physical address of this L1 page table */
300	vm_paddr_t l1_physaddr;
301
302	/* KVA of this L1 page table */
303	pd_entry_t *l1_kva;
304};
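
/*
 * A sketch of how the embedded domain free-list above behaves, based on
 * pmap_init_l1(), pmap_alloc_l1() and pmap_free_l1() below: l1_domain_first
 * and l1_domain_free[] form a free list linked by array index rather than
 * by pointer.  After pmap_init_l1(), l1_domain_first == 0 and
 * l1_domain_free[i] == i + 1, so the free domains read 0 -> 1 -> 2 -> ...;
 * pmap_alloc_l1() pops the head and pmap_free_l1() pushes the released
 * domain number back onto the front.
 */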
305
306/*
307 * Convert a virtual address into its L1 table index. That is, the
308 * index used to locate the L2 descriptor table pointer in an L1 table.
309 * This is basically used to index l1->l1_kva[].
310 *
311 * Each L2 descriptor table represents 1MB of VA space.
312 */
313#define	L1_IDX(va)		(((vm_offset_t)(va)) >> L1_S_SHIFT)
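
/*
 * Worked example, assuming L1_S_SHIFT is 20 (1MB sections):
 * L1_IDX(0x00345678) == 0x00345678 >> 20 == 3, so the L2 descriptor
 * table pointer for that VA lives in l1->l1_kva[3].
 */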
314
315/*
316 * L1 Page Tables are tracked using a Least Recently Used list.
317 *  - New L1s are allocated from the HEAD.
 *  - Freed L1s are added to the TAIL.
319 *  - Recently accessed L1s (where an 'access' is some change to one of
320 *    the userland pmaps which owns this L1) are moved to the TAIL.
321 */
322static TAILQ_HEAD(, l1_ttable) l1_lru_list;
323/*
324 * A list of all L1 tables
325 */
326static SLIST_HEAD(, l1_ttable) l1_list;
327static struct mtx l1_lru_lock;
328
329/*
330 * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots.
331 *
 * This is normally 16MB worth of L2 page descriptors for any given pmap.
333 * Reference counts are maintained for L2 descriptors so they can be
334 * freed when empty.
335 */
336struct l2_dtable {
337	/* The number of L2 page descriptors allocated to this l2_dtable */
338	u_int l2_occupancy;
339
340	/* List of L2 page descriptors */
341	struct l2_bucket {
342		pt_entry_t *l2b_kva;	/* KVA of L2 Descriptor Table */
343		vm_paddr_t l2b_phys;	/* Physical address of same */
344		u_short l2b_l1idx;	/* This L2 table's L1 index */
345		u_short l2b_occupancy;	/* How many active descriptors */
346	} l2_bucket[L2_BUCKET_SIZE];
347};
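
/*
 * Sizing sketch, assuming L2_BUCKET_SIZE is 16 and 4KB small pages: each
 * l2_bucket covers one 1MB L1 slot via a 256-entry L2 page table, so a
 * single l2_dtable spans 16 x 1MB == 16MB of VA, as noted above.
 */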
348
349/* pmap_kenter_internal flags */
350#define KENTER_CACHE	0x1
351#define KENTER_USER	0x2
352
353/*
354 * Given an L1 table index, calculate the corresponding l2_dtable index
355 * and bucket index within the l2_dtable.
356 */
357#define	L2_IDX(l1idx)		(((l1idx) >> L2_BUCKET_LOG2) & \
358				 (L2_SIZE - 1))
359#define	L2_BUCKET(l1idx)	((l1idx) & (L2_BUCKET_SIZE - 1))
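
/*
 * Worked example, assuming L2_BUCKET_LOG2 is 4 (16 buckets per l2_dtable):
 * for l1idx 0x123, L2_IDX(0x123) == 0x12 and L2_BUCKET(0x123) == 0x3,
 * i.e. pm->pm_l2[0x12]->l2_bucket[0x3] describes that 1MB slot.
 */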
360
361/*
362 * Given a virtual address, this macro returns the
363 * virtual address required to drop into the next L2 bucket.
364 */
365#define	L2_NEXT_BUCKET(va)	(((va) & L1_S_FRAME) + L1_S_SIZE)
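
/*
 * Worked example, again assuming 1MB sections (L1_S_SIZE == 0x00100000):
 * L2_NEXT_BUCKET(0x00345678) == (0x00345678 & 0xfff00000) + 0x00100000
 * == 0x00400000, the first VA served by the following bucket.
 */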
366
367/*
368 * L2 allocation.
369 */
370#define	pmap_alloc_l2_dtable()		\
371		(void*)uma_zalloc(l2table_zone, M_NOWAIT|M_USE_RESERVE)
372#define	pmap_free_l2_dtable(l2)		\
373		uma_zfree(l2table_zone, l2)
374
375/*
376 * We try to map the page tables write-through, if possible.  However, not
377 * all CPUs have a write-through cache mode, so on those we have to sync
378 * the cache when we frob page tables.
379 *
380 * We try to evaluate this at compile time, if possible.  However, it's
381 * not always possible to do that, hence this run-time var.
382 */
383int	pmap_needs_pte_sync;
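
/*
 * For example, pmap_pte_init_sa1() below sets pmap_needs_pte_sync because
 * the SA-1 cache has no write-through mode: page tables end up write-back
 * cached, so a modified PTE must be cleaned out of the data cache before
 * the MMU (which walks tables in physical memory) can see the update.
 */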
384
385/*
386 * Macro to determine if a mapping might be resident in the
387 * instruction cache and/or TLB
388 */
389#define	PV_BEEN_EXECD(f)  (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC))
390
391/*
392 * Macro to determine if a mapping might be resident in the
393 * data cache and/or TLB
394 */
395#define	PV_BEEN_REFD(f)   (((f) & PVF_REF) != 0)
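
/*
 * Example: PV_BEEN_EXECD(PVF_REF | PVF_EXEC) is true, whereas
 * PV_BEEN_EXECD(PVF_EXEC) alone is not (an unreferenced mapping cannot
 * have anything cached); PV_BEEN_REFD() only requires PVF_REF.
 */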
396
397#ifndef PMAP_SHPGPERPROC
398#define PMAP_SHPGPERPROC 200
399#endif
400
401#define pmap_is_current(pm)	((pm) == pmap_kernel() || \
402            curproc->p_vmspace->vm_map.pmap == (pm))
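
/*
 * pmap_is_current() is true for the kernel pmap and for the pmap backing
 * curproc's address space; the TLB/cache flush helpers further down use it
 * to skip flushes for pmaps that cannot have live entries in the caches
 * or TLB.
 */
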
403static uma_zone_t pvzone = NULL;
404uma_zone_t l2zone;
405static uma_zone_t l2table_zone;
406static vm_offset_t pmap_kernel_l2dtable_kva;
407static vm_offset_t pmap_kernel_l2ptp_kva;
408static vm_paddr_t pmap_kernel_l2ptp_phys;
409static struct vm_object pvzone_obj;
410static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
411
412void pmap_copy_page_offs_generic(vm_paddr_t a_phys, vm_offset_t a_offs,
413    vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
414#if ARM_MMU_XSCALE == 1
415void pmap_copy_page_offs_xscale(vm_paddr_t a_phys, vm_offset_t a_offs,
416    vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
417#endif
418
419/*
420 * This list exists for the benefit of pmap_map_chunk().  It keeps track
421 * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
422 * find them as necessary.
423 *
424 * Note that the data on this list MUST remain valid after initarm() returns,
 * as pmap_bootstrap() uses it to construct L2 table metadata.
426 */
427SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
428
429static void
430pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt)
431{
432	int i;
433
434	l1->l1_kva = l1pt;
435	l1->l1_domain_use_count = 0;
436	l1->l1_domain_first = 0;
437
438	for (i = 0; i < PMAP_DOMAINS; i++)
439		l1->l1_domain_free[i] = i + 1;
440
441	/*
442	 * Copy the kernel's L1 entries to each new L1.
443	 */
444	if (l1pt != pmap_kernel()->pm_l1->l1_kva)
445		memcpy(l1pt, pmap_kernel()->pm_l1->l1_kva, L1_TABLE_SIZE);
446
447	if ((l1->l1_physaddr = pmap_extract(pmap_kernel(), (vm_offset_t)l1pt)) == 0)
448		panic("pmap_init_l1: can't get PA of L1 at %p", l1pt);
449	SLIST_INSERT_HEAD(&l1_list, l1, l1_link);
450	TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
451}
452
453static vm_offset_t
454kernel_pt_lookup(vm_paddr_t pa)
455{
456	struct pv_addr *pv;
457
458	SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
459		if (pv->pv_pa == pa)
460			return (pv->pv_va);
461	}
462	return (0);
463}
464
465#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
466void
467pmap_pte_init_generic(void)
468{
469
470	pte_l1_s_cache_mode = L1_S_B|L1_S_C;
471	pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic;
472
473	pte_l2_l_cache_mode = L2_B|L2_C;
474	pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic;
475
476	pte_l2_s_cache_mode = L2_B|L2_C;
477	pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;
478
479	/*
480	 * If we have a write-through cache, set B and C.  If
481	 * we have a write-back cache, then we assume setting
482	 * only C will make those pages write-through.
483	 */
484	if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop) {
485		pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
486		pte_l2_l_cache_mode_pt = L2_B|L2_C;
487		pte_l2_s_cache_mode_pt = L2_B|L2_C;
488	} else {
489		pte_l1_s_cache_mode_pt = L1_S_C;
490		pte_l2_l_cache_mode_pt = L2_C;
491		pte_l2_s_cache_mode_pt = L2_C;
492	}
493
494	pte_l2_s_prot_u = L2_S_PROT_U_generic;
495	pte_l2_s_prot_w = L2_S_PROT_W_generic;
496	pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;
497
498	pte_l1_s_proto = L1_S_PROTO_generic;
499	pte_l1_c_proto = L1_C_PROTO_generic;
500	pte_l2_s_proto = L2_S_PROTO_generic;
501
502	pmap_copy_page_func = pmap_copy_page_generic;
503	pmap_copy_page_offs_func = pmap_copy_page_offs_generic;
504	pmap_zero_page_func = pmap_zero_page_generic;
505}
506
507#if defined(CPU_ARM8)
508void
509pmap_pte_init_arm8(void)
510{
511
512	/*
513	 * ARM8 is compatible with generic, but we need to use
514	 * the page tables uncached.
515	 */
516	pmap_pte_init_generic();
517
518	pte_l1_s_cache_mode_pt = 0;
519	pte_l2_l_cache_mode_pt = 0;
520	pte_l2_s_cache_mode_pt = 0;
521}
522#endif /* CPU_ARM8 */
523
524#if defined(CPU_ARM9) && defined(ARM9_CACHE_WRITE_THROUGH)
525void
526pmap_pte_init_arm9(void)
527{
528
529	/*
530	 * ARM9 is compatible with generic, but we want to use
531	 * write-through caching for now.
532	 */
533	pmap_pte_init_generic();
534
535	pte_l1_s_cache_mode = L1_S_C;
536	pte_l2_l_cache_mode = L2_C;
537	pte_l2_s_cache_mode = L2_C;
538
539	pte_l1_s_cache_mode_pt = L1_S_C;
540	pte_l2_l_cache_mode_pt = L2_C;
541	pte_l2_s_cache_mode_pt = L2_C;
542}
543#endif /* CPU_ARM9 */
544#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
545
546#if defined(CPU_ARM10)
547void
548pmap_pte_init_arm10(void)
549{
550
551	/*
552	 * ARM10 is compatible with generic, but we want to use
553	 * write-through caching for now.
554	 */
555	pmap_pte_init_generic();
556
557	pte_l1_s_cache_mode = L1_S_B | L1_S_C;
558	pte_l2_l_cache_mode = L2_B | L2_C;
559	pte_l2_s_cache_mode = L2_B | L2_C;
560
561	pte_l1_s_cache_mode_pt = L1_S_C;
562	pte_l2_l_cache_mode_pt = L2_C;
563	pte_l2_s_cache_mode_pt = L2_C;
564
565}
566#endif /* CPU_ARM10 */
567
568#if  ARM_MMU_SA1 == 1
569void
570pmap_pte_init_sa1(void)
571{
572
573	/*
574	 * The StrongARM SA-1 cache does not have a write-through
575	 * mode.  So, do the generic initialization, then reset
576	 * the page table cache mode to B=1,C=1, and note that
577	 * the PTEs need to be sync'd.
578	 */
579	pmap_pte_init_generic();
580
581	pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
582	pte_l2_l_cache_mode_pt = L2_B|L2_C;
583	pte_l2_s_cache_mode_pt = L2_B|L2_C;
584
585	pmap_needs_pte_sync = 1;
586}
587#endif /* ARM_MMU_SA1 == 1*/
588
589#if ARM_MMU_XSCALE == 1
590#if (ARM_NMMUS > 1) || defined (CPU_XSCALE_CORE3)
591static u_int xscale_use_minidata;
592#endif
593
594void
595pmap_pte_init_xscale(void)
596{
597	uint32_t auxctl;
598	int write_through = 0;
599
600	pte_l1_s_cache_mode = L1_S_B|L1_S_C|L1_S_XSCALE_P;
601	pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale;
602
603	pte_l2_l_cache_mode = L2_B|L2_C;
604	pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale;
605
606	pte_l2_s_cache_mode = L2_B|L2_C;
607	pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;
608
609	pte_l1_s_cache_mode_pt = L1_S_C;
610	pte_l2_l_cache_mode_pt = L2_C;
611	pte_l2_s_cache_mode_pt = L2_C;
612#ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE
613	/*
614	 * The XScale core has an enhanced mode where writes that
615	 * miss the cache cause a cache line to be allocated.  This
 * is significantly faster than the traditional write-through
 * behavior in this case.
618	 */
619	pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_X);
620	pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_X);
621	pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_X);
622#endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */
623#ifdef XSCALE_CACHE_WRITE_THROUGH
624	/*
625	 * Some versions of the XScale core have various bugs in
626	 * their cache units, the work-around for which is to run
627	 * the cache in write-through mode.  Unfortunately, this
628	 * has a major (negative) impact on performance.  So, we
629	 * go ahead and run fast-and-loose, in the hopes that we
630	 * don't line up the planets in a way that will trip the
631	 * bugs.
632	 *
633	 * However, we give you the option to be slow-but-correct.
634	 */
635	write_through = 1;
636#elif defined(XSCALE_CACHE_WRITE_BACK)
637	/* force write back cache mode */
638	write_through = 0;
639#elif defined(CPU_XSCALE_PXA2X0)
640	/*
641	 * Intel PXA2[15]0 processors are known to have a bug in
642	 * write-back cache on revision 4 and earlier (stepping
643	 * A[01] and B[012]).  Fixed for C0 and later.
644	 */
645	{
646		uint32_t id, type;
647
648		id = cpufunc_id();
649		type = id & ~(CPU_ID_XSCALE_COREREV_MASK|CPU_ID_REVISION_MASK);
650
651		if (type == CPU_ID_PXA250 || type == CPU_ID_PXA210) {
652			if ((id & CPU_ID_REVISION_MASK) < 5) {
653				/* write through for stepping A0-1 and B0-2 */
654				write_through = 1;
655			}
656		}
657	}
658#endif /* XSCALE_CACHE_WRITE_THROUGH */
659
660	if (write_through) {
661		pte_l1_s_cache_mode = L1_S_C;
662		pte_l2_l_cache_mode = L2_C;
663		pte_l2_s_cache_mode = L2_C;
664	}
665
666#if (ARM_NMMUS > 1)
667	xscale_use_minidata = 1;
668#endif
669
670	pte_l2_s_prot_u = L2_S_PROT_U_xscale;
671	pte_l2_s_prot_w = L2_S_PROT_W_xscale;
672	pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;
673
674	pte_l1_s_proto = L1_S_PROTO_xscale;
675	pte_l1_c_proto = L1_C_PROTO_xscale;
676	pte_l2_s_proto = L2_S_PROTO_xscale;
677
678#ifdef CPU_XSCALE_CORE3
679	pmap_copy_page_func = pmap_copy_page_generic;
680	pmap_copy_page_offs_func = pmap_copy_page_offs_generic;
681	pmap_zero_page_func = pmap_zero_page_generic;
682	xscale_use_minidata = 0;
	/* Make sure it is L2-cacheable */
	pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_T);
	pte_l1_s_cache_mode_pt = pte_l1_s_cache_mode &~ L1_S_XSCALE_P;
	pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_T);
687	pte_l2_l_cache_mode_pt = pte_l1_s_cache_mode;
688	pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_T);
689	pte_l2_s_cache_mode_pt = pte_l2_s_cache_mode;
690
691#else
692	pmap_copy_page_func = pmap_copy_page_xscale;
693	pmap_copy_page_offs_func = pmap_copy_page_offs_xscale;
694	pmap_zero_page_func = pmap_zero_page_xscale;
695#endif
696
697	/*
698	 * Disable ECC protection of page table access, for now.
699	 */
700	__asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
701	auxctl &= ~XSCALE_AUXCTL_P;
702	__asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
703}
704
705/*
706 * xscale_setup_minidata:
707 *
708 *	Set up the mini-data cache clean area.  We require the
709 *	caller to allocate the right amount of physically and
710 *	virtually contiguous space.
711 */
712extern vm_offset_t xscale_minidata_clean_addr;
713extern vm_size_t xscale_minidata_clean_size; /* already initialized */
714void
715xscale_setup_minidata(vm_offset_t l1pt, vm_offset_t va, vm_paddr_t pa)
716{
717	pd_entry_t *pde = (pd_entry_t *) l1pt;
718	pt_entry_t *pte;
719	vm_size_t size;
720	uint32_t auxctl;
721
722	xscale_minidata_clean_addr = va;
723
724	/* Round it to page size. */
725	size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME;
726
727	for (; size != 0;
728	     va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) {
729		pte = (pt_entry_t *) kernel_pt_lookup(
730		    pde[L1_IDX(va)] & L1_C_ADDR_MASK);
731		if (pte == NULL)
732			panic("xscale_setup_minidata: can't find L2 table for "
733			    "VA 0x%08x", (u_int32_t) va);
734		pte[l2pte_index(va)] =
735		    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
736		    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
737	}
738
739	/*
740	 * Configure the mini-data cache for write-back with
741	 * read/write-allocate.
742	 *
743	 * NOTE: In order to reconfigure the mini-data cache, we must
744	 * make sure it contains no valid data!  In order to do that,
745	 * we must issue a global data cache invalidate command!
746	 *
747	 * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED!
748	 * THIS IS VERY IMPORTANT!
749	 */
750
751	/* Invalidate data and mini-data. */
752	__asm __volatile("mcr p15, 0, %0, c7, c6, 0" : : "r" (0));
753	__asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
754	auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA;
755	__asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
756}
757#endif
758
759/*
760 * Allocate an L1 translation table for the specified pmap.
761 * This is called at pmap creation time.
762 */
763static void
764pmap_alloc_l1(pmap_t pm)
765{
766	struct l1_ttable *l1;
767	u_int8_t domain;
768
769	/*
770	 * Remove the L1 at the head of the LRU list
771	 */
772	mtx_lock(&l1_lru_lock);
773	l1 = TAILQ_FIRST(&l1_lru_list);
774	TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
775
776	/*
777	 * Pick the first available domain number, and update
778	 * the link to the next number.
779	 */
780	domain = l1->l1_domain_first;
781	l1->l1_domain_first = l1->l1_domain_free[domain];
782
783	/*
784	 * If there are still free domain numbers in this L1,
785	 * put it back on the TAIL of the LRU list.
786	 */
787	if (++l1->l1_domain_use_count < PMAP_DOMAINS)
788		TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
789
790	mtx_unlock(&l1_lru_lock);
791
792	/*
793	 * Fix up the relevant bits in the pmap structure
794	 */
795	pm->pm_l1 = l1;
796	pm->pm_domain = domain + 1;
797}
798
799/*
800 * Free an L1 translation table.
801 * This is called at pmap destruction time.
802 */
803static void
804pmap_free_l1(pmap_t pm)
805{
806	struct l1_ttable *l1 = pm->pm_l1;
807
808	mtx_lock(&l1_lru_lock);
809
810	/*
811	 * If this L1 is currently on the LRU list, remove it.
812	 */
813	if (l1->l1_domain_use_count < PMAP_DOMAINS)
814		TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
815
816	/*
817	 * Free up the domain number which was allocated to the pmap
818	 */
819	l1->l1_domain_free[pm->pm_domain - 1] = l1->l1_domain_first;
820	l1->l1_domain_first = pm->pm_domain - 1;
821	l1->l1_domain_use_count--;
822
823	/*
824	 * The L1 now must have at least 1 free domain, so add
825	 * it back to the LRU list. If the use count is zero,
826	 * put it at the head of the list, otherwise it goes
827	 * to the tail.
828	 */
829	if (l1->l1_domain_use_count == 0) {
830		TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru);
	} else
832		TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
833
834	mtx_unlock(&l1_lru_lock);
835}
836
837/*
838 * Returns a pointer to the L2 bucket associated with the specified pmap
839 * and VA, or NULL if no L2 bucket exists for the address.
840 */
841static PMAP_INLINE struct l2_bucket *
842pmap_get_l2_bucket(pmap_t pm, vm_offset_t va)
843{
844	struct l2_dtable *l2;
845	struct l2_bucket *l2b;
846	u_short l1idx;
847
848	l1idx = L1_IDX(va);
849
850	if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL ||
851	    (l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL)
852		return (NULL);
853
854	return (l2b);
855}
856
857/*
858 * Returns a pointer to the L2 bucket associated with the specified pmap
859 * and VA.
860 *
861 * If no L2 bucket exists, perform the necessary allocations to put an L2
862 * bucket/page table in place.
863 *
864 * Note that if a new L2 bucket/page was allocated, the caller *must*
865 * increment the bucket occupancy counter appropriately *before*
866 * releasing the pmap's lock to ensure no other thread or cpu deallocates
867 * the bucket/page in the meantime.
868 */
869static struct l2_bucket *
870pmap_alloc_l2_bucket(pmap_t pm, vm_offset_t va)
871{
872	struct l2_dtable *l2;
873	struct l2_bucket *l2b;
874	u_short l1idx;
875
876	l1idx = L1_IDX(va);
877
878	PMAP_ASSERT_LOCKED(pm);
879	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
880	if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
881		/*
882		 * No mapping at this address, as there is
883		 * no entry in the L1 table.
884		 * Need to allocate a new l2_dtable.
885		 */
886again_l2table:
887		PMAP_UNLOCK(pm);
888		vm_page_unlock_queues();
889		if ((l2 = pmap_alloc_l2_dtable()) == NULL) {
890			vm_page_lock_queues();
891			PMAP_LOCK(pm);
892			return (NULL);
893		}
894		vm_page_lock_queues();
895		PMAP_LOCK(pm);
896		if (pm->pm_l2[L2_IDX(l1idx)] != NULL) {
897			PMAP_UNLOCK(pm);
898			vm_page_unlock_queues();
899			uma_zfree(l2table_zone, l2);
900			vm_page_lock_queues();
901			PMAP_LOCK(pm);
902			l2 = pm->pm_l2[L2_IDX(l1idx)];
903			if (l2 == NULL)
904				goto again_l2table;
905			/*
906			 * Someone already allocated the l2_dtable while
907			 * we were doing the same.
908			 */
909		} else {
910			bzero(l2, sizeof(*l2));
911			/*
912			 * Link it into the parent pmap
913			 */
914			pm->pm_l2[L2_IDX(l1idx)] = l2;
915		}
916	}
917
918	l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
919
920	/*
921	 * Fetch pointer to the L2 page table associated with the address.
922	 */
923	if (l2b->l2b_kva == NULL) {
924		pt_entry_t *ptep;
925
926		/*
927		 * No L2 page table has been allocated. Chances are, this
928		 * is because we just allocated the l2_dtable, above.
929		 */
930again_ptep:
931		PMAP_UNLOCK(pm);
932		vm_page_unlock_queues();
933		ptep = (void*)uma_zalloc(l2zone, M_NOWAIT|M_USE_RESERVE);
934		vm_page_lock_queues();
935		PMAP_LOCK(pm);
936		if (l2b->l2b_kva != 0) {
937			/* We lost the race. */
938			PMAP_UNLOCK(pm);
939			vm_page_unlock_queues();
940			uma_zfree(l2zone, ptep);
941			vm_page_lock_queues();
942			PMAP_LOCK(pm);
943			if (l2b->l2b_kva == 0)
944				goto again_ptep;
945			return (l2b);
946		}
		if (ptep == NULL) {
			/*
			 * Oops, no more L2 page tables available at this
			 * time. We may need to deallocate the l2_dtable
			 * if we allocated a new one above.
			 */
			if (l2->l2_occupancy == 0) {
				pm->pm_l2[L2_IDX(l1idx)] = NULL;
				pmap_free_l2_dtable(l2);
			}
			return (NULL);
		}
		l2b->l2b_phys = vtophys(ptep);
960
961		l2->l2_occupancy++;
962		l2b->l2b_kva = ptep;
963		l2b->l2b_l1idx = l1idx;
964	}
965
966	return (l2b);
967}
968
969static PMAP_INLINE void
970#ifndef PMAP_INCLUDE_PTE_SYNC
971pmap_free_l2_ptp(pt_entry_t *l2)
972#else
973pmap_free_l2_ptp(boolean_t need_sync, pt_entry_t *l2)
974#endif
975{
976#ifdef PMAP_INCLUDE_PTE_SYNC
977	/*
978	 * Note: With a write-back cache, we may need to sync this
979	 * L2 table before re-using it.
980	 * This is because it may have belonged to a non-current
981	 * pmap, in which case the cache syncs would have been
982	 * skipped when the pages were being unmapped. If the
983	 * L2 table were then to be immediately re-allocated to
984	 * the *current* pmap, it may well contain stale mappings
985	 * which have not yet been cleared by a cache write-back
986	 * and so would still be visible to the mmu.
987	 */
988	if (need_sync)
989		PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
990#endif
991	uma_zfree(l2zone, l2);
}

993/*
994 * One or more mappings in the specified L2 descriptor table have just been
995 * invalidated.
996 *
997 * Garbage collect the metadata and descriptor table itself if necessary.
998 *
999 * The pmap lock must be acquired when this is called (not necessary
1000 * for the kernel pmap).
1001 */
1002static void
1003pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count)
1004{
1005	struct l2_dtable *l2;
1006	pd_entry_t *pl1pd, l1pd;
1007	pt_entry_t *ptep;
1008	u_short l1idx;
1009
1010
1011	/*
1012	 * Update the bucket's reference count according to how many
1013	 * PTEs the caller has just invalidated.
1014	 */
1015	l2b->l2b_occupancy -= count;
1016
1017	/*
1018	 * Note:
1019	 *
1020	 * Level 2 page tables allocated to the kernel pmap are never freed
1021	 * as that would require checking all Level 1 page tables and
1022	 * removing any references to the Level 2 page table. See also the
1023	 * comment elsewhere about never freeing bootstrap L2 descriptors.
1024	 *
1025	 * We make do with just invalidating the mapping in the L2 table.
1026	 *
1027	 * This isn't really a big deal in practice and, in fact, leads
1028	 * to a performance win over time as we don't need to continually
1029	 * alloc/free.
1030	 */
1031	if (l2b->l2b_occupancy > 0 || pm == pmap_kernel())
1032		return;
1033
1034	/*
1035	 * There are no more valid mappings in this level 2 page table.
1036	 * Go ahead and NULL-out the pointer in the bucket, then
1037	 * free the page table.
1038	 */
1039	l1idx = l2b->l2b_l1idx;
1040	ptep = l2b->l2b_kva;
1041	l2b->l2b_kva = NULL;
1042
1043	pl1pd = &pm->pm_l1->l1_kva[l1idx];
1044
1045	/*
1046	 * If the L1 slot matches the pmap's domain
1047	 * number, then invalidate it.
1048	 */
1049	l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK);
1050	if (l1pd == (L1_C_DOM(pm->pm_domain) | L1_TYPE_C)) {
1051		*pl1pd = 0;
1052		PTE_SYNC(pl1pd);
1053	}
1054
1055	/*
1056	 * Release the L2 descriptor table back to the pool cache.
1057	 */
1058#ifndef PMAP_INCLUDE_PTE_SYNC
1059	pmap_free_l2_ptp(ptep);
1060#else
1061	pmap_free_l2_ptp(!pmap_is_current(pm), ptep);
1062#endif
1063
1064	/*
1065	 * Update the reference count in the associated l2_dtable
1066	 */
1067	l2 = pm->pm_l2[L2_IDX(l1idx)];
1068	if (--l2->l2_occupancy > 0)
1069		return;
1070
1071	/*
1072	 * There are no more valid mappings in any of the Level 1
1073	 * slots managed by this l2_dtable. Go ahead and NULL-out
1074	 * the pointer in the parent pmap and free the l2_dtable.
1075	 */
1076	pm->pm_l2[L2_IDX(l1idx)] = NULL;
1077	pmap_free_l2_dtable(l2);
1078}
1079
1080/*
1081 * Pool cache constructors for L2 descriptor tables, metadata and pmap
1082 * structures.
1083 */
1084static int
1085pmap_l2ptp_ctor(void *mem, int size, void *arg, int flags)
1086{
1087#ifndef PMAP_INCLUDE_PTE_SYNC
1088	struct l2_bucket *l2b;
1089	pt_entry_t *ptep, pte;
1090#ifdef ARM_USE_SMALL_ALLOC
1091	pd_entry_t *pde;
1092#endif
1093	vm_offset_t va = (vm_offset_t)mem & ~PAGE_MASK;
1094
1095	/*
1096	 * The mappings for these page tables were initially made using
1097	 * pmap_kenter() by the pool subsystem. Therefore, the cache-
1098	 * mode will not be right for page table mappings. To avoid
1099	 * polluting the pmap_kenter() code with a special case for
1100	 * page tables, we simply fix up the cache-mode here if it's not
1101	 * correct.
1102	 */
1103#ifdef ARM_USE_SMALL_ALLOC
1104	pde = &kernel_pmap->pm_l1->l1_kva[L1_IDX(va)];
1105	if (!l1pte_section_p(*pde)) {
1106#endif
1107		l2b = pmap_get_l2_bucket(pmap_kernel(), va);
1108		ptep = &l2b->l2b_kva[l2pte_index(va)];
1109		pte = *ptep;
1110
1111		if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
1112			/*
1113			 * Page tables must have the cache-mode set to
1114			 * Write-Thru.
1115			 */
1116			*ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
1117			PTE_SYNC(ptep);
1118			cpu_tlb_flushD_SE(va);
1119			cpu_cpwait();
1120		}
1121#ifdef ARM_USE_SMALL_ALLOC
1122	}
1123#endif
1124#endif
1125	memset(mem, 0, L2_TABLE_SIZE_REAL);
1126	PTE_SYNC_RANGE(mem, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
1127	return (0);
1128}
1129
1130/*
1131 * A bunch of routines to conditionally flush the caches/TLB depending
1132 * on whether the specified pmap actually needs to be flushed at any
1133 * given time.
1134 */
1135static PMAP_INLINE void
1136pmap_tlb_flushID_SE(pmap_t pm, vm_offset_t va)
1137{
1138
1139	if (pmap_is_current(pm))
1140		cpu_tlb_flushID_SE(va);
1141}
1142
1143static PMAP_INLINE void
1144pmap_tlb_flushD_SE(pmap_t pm, vm_offset_t va)
1145{
1146
1147	if (pmap_is_current(pm))
1148		cpu_tlb_flushD_SE(va);
1149}
1150
1151static PMAP_INLINE void
1152pmap_tlb_flushID(pmap_t pm)
1153{
1154
1155	if (pmap_is_current(pm))
1156		cpu_tlb_flushID();
}

1158static PMAP_INLINE void
1159pmap_tlb_flushD(pmap_t pm)
1160{
1161
1162	if (pmap_is_current(pm))
1163		cpu_tlb_flushD();
1164}
1165
1166static int
1167pmap_has_valid_mapping(pmap_t pm, vm_offset_t va)
1168{
1169	pd_entry_t *pde;
1170	pt_entry_t *ptep;
1171
1172	if (pmap_get_pde_pte(pm, va, &pde, &ptep) &&
1173	    ptep && ((*ptep & L2_TYPE_MASK) != L2_TYPE_INV))
1174		return (1);
1175
1176	return (0);
1177}
1178
1179static PMAP_INLINE void
1180pmap_idcache_wbinv_range(pmap_t pm, vm_offset_t va, vm_size_t len)
1181{
1182	vm_size_t rest;
1183
	CTR4(KTR_PMAP, "pmap_idcache_wbinv_range: pmap %p is_kernel %d va 0x%08x"
1185	    " len 0x%x ", pm, pm == pmap_kernel(), va, len);
1186
1187	if (pmap_is_current(pm) || pm == pmap_kernel()) {
1188		rest = MIN(PAGE_SIZE - (va & PAGE_MASK), len);
1189		while (len > 0) {
1190			if (pmap_has_valid_mapping(pm, va)) {
1191				cpu_idcache_wbinv_range(va, rest);
1192				cpu_l2cache_wbinv_range(va, rest);
1193			}
1194			len -= rest;
1195			va += rest;
1196			rest = MIN(PAGE_SIZE, len);
1197		}
1198	}
1199}
1200
1201static PMAP_INLINE void
1202pmap_dcache_wb_range(pmap_t pm, vm_offset_t va, vm_size_t len, boolean_t do_inv,
1203    boolean_t rd_only)
1204{
1205	vm_size_t rest;
1206
1207	CTR4(KTR_PMAP, "pmap_dcache_wb_range: pmap %p is_kernel %d va 0x%08x "
1208	    "len 0x%x ", pm, pm == pmap_kernel(), va, len);
1209	CTR2(KTR_PMAP, " do_inv %d rd_only %d", do_inv, rd_only);
1210
1211	if (pmap_is_current(pm)) {
1212		rest = MIN(PAGE_SIZE - (va & PAGE_MASK), len);
1213		while (len > 0) {
1214			if (pmap_has_valid_mapping(pm, va)) {
1215				if (do_inv && rd_only) {
1216					cpu_dcache_inv_range(va, rest);
1217					cpu_l2cache_inv_range(va, rest);
1218				} else if (do_inv) {
1219					cpu_dcache_wbinv_range(va, rest);
1220					cpu_l2cache_wbinv_range(va, rest);
1221				} else if (!rd_only) {
1222					cpu_dcache_wb_range(va, rest);
1223					cpu_l2cache_wb_range(va, rest);
1224				}
1225			}
1226			len -= rest;
1227			va += rest;
1228
1229			rest = MIN(PAGE_SIZE, len);
1230		}
1231	}
1232}
1233
1234static PMAP_INLINE void
1235pmap_idcache_wbinv_all(pmap_t pm)
1236{
1237
1238	if (pmap_is_current(pm)) {
1239		cpu_idcache_wbinv_all();
1240		cpu_l2cache_wbinv_all();
1241	}
1242}
1243
1244#ifdef notyet
1245static PMAP_INLINE void
1246pmap_dcache_wbinv_all(pmap_t pm)
1247{
1248
1249	if (pmap_is_current(pm)) {
1250		cpu_dcache_wbinv_all();
1251		cpu_l2cache_wbinv_all();
1252	}
1253}
1254#endif
1255
1256/*
1257 * PTE_SYNC_CURRENT:
1258 *
1259 *     Make sure the pte is written out to RAM.
 *     We need to do this in any of the following cases:
1261 *       - We're dealing with the kernel pmap
1262 *       - There is no pmap active in the cache/tlb.
1263 *       - The specified pmap is 'active' in the cache/tlb.
1264 */
1265#ifdef PMAP_INCLUDE_PTE_SYNC
1266#define	PTE_SYNC_CURRENT(pm, ptep)	\
1267do {					\
1268	if (PMAP_NEEDS_PTE_SYNC && 	\
1269	    pmap_is_current(pm))	\
1270		PTE_SYNC(ptep);		\
1271} while (/*CONSTCOND*/0)
1272#else
1273#define	PTE_SYNC_CURRENT(pm, ptep)	/* nothing */
1274#endif
1275
1276/*
 * cacheable == -1 means the entry must be made uncacheable; cacheable == 1
 * means it must be made cacheable.
1279 */
1280static __inline void
1281pmap_set_cache_entry(pv_entry_t pv, pmap_t pm, vm_offset_t va, int cacheable)
1282{
1283	struct l2_bucket *l2b;
1284	pt_entry_t *ptep, pte;
1285
1286	l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
1287	ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
1288
1289	if (cacheable == 1) {
1290		pte = (*ptep & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode;
1291		if (l2pte_valid(pte)) {
1292			if (PV_BEEN_EXECD(pv->pv_flags)) {
1293				pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va);
1294			} else if (PV_BEEN_REFD(pv->pv_flags)) {
1295				pmap_tlb_flushD_SE(pv->pv_pmap, pv->pv_va);
1296			}
1297		}
1298	} else {
1299		pte = *ptep &~ L2_S_CACHE_MASK;
1300		if ((va != pv->pv_va || pm != pv->pv_pmap) &&
1301			    l2pte_valid(pte)) {
1302			if (PV_BEEN_EXECD(pv->pv_flags)) {
1303				pmap_idcache_wbinv_range(pv->pv_pmap,
1304					    pv->pv_va, PAGE_SIZE);
1305				pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va);
1306			} else if (PV_BEEN_REFD(pv->pv_flags)) {
1307				pmap_dcache_wb_range(pv->pv_pmap,
1308					    pv->pv_va, PAGE_SIZE, TRUE,
1309					    (pv->pv_flags & PVF_WRITE) == 0);
1310				pmap_tlb_flushD_SE(pv->pv_pmap,
1311					    pv->pv_va);
1312			}
1313		}
1314	}
1315	*ptep = pte;
1316	PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
1317}
1318
1319static void
1320pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va)
1321{
1322	int pmwc = 0;
1323	int writable = 0, kwritable = 0, uwritable = 0;
1324	int entries = 0, kentries = 0, uentries = 0;
1325	struct pv_entry *pv;
1326
1327	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1328
	/* The cache gets written back/invalidated on context switch.
	 * Therefore, if a user page shares an entry in the same page or
	 * with the kernel map and at least one is writable, then the
	 * cache entry must be set write-through.
1333	 */
1334
1335	TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
1336			/* generate a count of the pv_entry uses */
1337		if (pv->pv_flags & PVF_WRITE) {
1338			if (pv->pv_pmap == pmap_kernel())
1339				kwritable++;
1340			else if (pv->pv_pmap == pm)
1341				uwritable++;
1342			writable++;
1343		}
1344		if (pv->pv_pmap == pmap_kernel())
1345			kentries++;
1346		else {
1347			if (pv->pv_pmap == pm)
1348				uentries++;
1349			entries++;
1350		}
1351	}
1352		/*
1353		 * check if the user duplicate mapping has
1354		 * been removed.
1355		 */
1356	if ((pm != pmap_kernel()) && (((uentries > 1) && uwritable) ||
1357	    (uwritable > 1)))
1358			pmwc = 1;
1359
1360	TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
		/* check for user uncacheable conditions - order is important */
1362		if (pm != pmap_kernel() &&
1363		    (pv->pv_pmap == pm || pv->pv_pmap == pmap_kernel())) {
1364
1365			if ((uentries > 1 && uwritable) || uwritable > 1) {
1366
1367				/* user duplicate mapping */
1368				if (pv->pv_pmap != pmap_kernel())
1369					pv->pv_flags |= PVF_MWC;
1370
1371				if (!(pv->pv_flags & PVF_NC)) {
1372					pv->pv_flags |= PVF_NC;
1373					pmap_set_cache_entry(pv, pm, va, -1);
1374				}
1375				continue;
1376			} else	/* no longer a duplicate user */
1377				pv->pv_flags &= ~PVF_MWC;
1378		}
1379
1380		/*
		 * check for kernel uncacheable conditions
1382		 * kernel writable or kernel readable with writable user entry
1383		 */
1384		if ((kwritable && (entries || kentries > 1)) ||
1385		    (kwritable > 1) ||
1386		    ((kwritable != writable) && kentries &&
1387		     (pv->pv_pmap == pmap_kernel() ||
1388		      (pv->pv_flags & PVF_WRITE) ||
1389		      (pv->pv_flags & PVF_MWC)))) {
1390
1391			if (!(pv->pv_flags & PVF_NC)) {
1392				pv->pv_flags |= PVF_NC;
1393				pmap_set_cache_entry(pv, pm, va, -1);
1394			}
1395			continue;
1396		}
1397
			/* kernel and user are cacheable */
1399		if ((pm == pmap_kernel()) && !(pv->pv_flags & PVF_MWC) &&
1400		    (pv->pv_flags & PVF_NC)) {
1401
1402			pv->pv_flags &= ~PVF_NC;
1403			if (!(pg->md.pv_memattr & VM_MEMATTR_UNCACHEABLE))
1404				pmap_set_cache_entry(pv, pm, va, 1);
1405			continue;
1406		}
			/* user is no longer shareable and writable */
1408		if (pm != pmap_kernel() &&
1409		    (pv->pv_pmap == pm || pv->pv_pmap == pmap_kernel()) &&
1410		    !pmwc && (pv->pv_flags & PVF_NC)) {
1411
1412			pv->pv_flags &= ~(PVF_NC | PVF_MWC);
1413			if (!(pg->md.pv_memattr & VM_MEMATTR_UNCACHEABLE))
1414				pmap_set_cache_entry(pv, pm, va, 1);
1415		}
1416	}
1417
1418	if ((kwritable == 0) && (writable == 0)) {
1419		pg->md.pvh_attrs &= ~PVF_MOD;
1420		vm_page_aflag_clear(pg, PGA_WRITEABLE);
1421		return;
1422	}
1423}
1424
1425/*
1426 * Modify pte bits for all ptes corresponding to the given physical address.
1427 * We use `maskbits' rather than `clearbits' because we're always passing
1428 * constants and the latter would require an extra inversion at run-time.
1429 */
1430static int
1431pmap_clearbit(struct vm_page *pg, u_int maskbits)
1432{
1433	struct l2_bucket *l2b;
1434	struct pv_entry *pv;
1435	pt_entry_t *ptep, npte, opte;
1436	pmap_t pm;
1437	vm_offset_t va;
1438	u_int oflags;
1439	int count = 0;
1440
1441	vm_page_lock_queues();
1442
1443	if (maskbits & PVF_WRITE)
1444		maskbits |= PVF_MOD;
1445	/*
1446	 * Clear saved attributes (modify, reference)
1447	 */
1448	pg->md.pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF));
1449
1450	if (TAILQ_EMPTY(&pg->md.pv_list)) {
1451		vm_page_unlock_queues();
1452		return (0);
1453	}
1454
1455	/*
	 * Loop over all current mappings, setting/clearing as appropriate
1457	 */
1458	TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
1459		va = pv->pv_va;
1460		pm = pv->pv_pmap;
1461		oflags = pv->pv_flags;
1462
1463		if (!(oflags & maskbits)) {
1464			if ((maskbits & PVF_WRITE) && (pv->pv_flags & PVF_NC)) {
1465				if (!(pg->md.pv_memattr &
1466				    VM_MEMATTR_UNCACHEABLE)) {
1467					PMAP_LOCK(pm);
1468					l2b = pmap_get_l2_bucket(pm, va);
1469					ptep = &l2b->l2b_kva[l2pte_index(va)];
1470					*ptep |= pte_l2_s_cache_mode;
1471					PTE_SYNC(ptep);
1472					PMAP_UNLOCK(pm);
1473				}
1474				pv->pv_flags &= ~(PVF_NC | PVF_MWC);
1475			}
1476			continue;
1477		}
1478		pv->pv_flags &= ~maskbits;
1479
1480		PMAP_LOCK(pm);
1481
1482		l2b = pmap_get_l2_bucket(pm, va);
1483
1484		ptep = &l2b->l2b_kva[l2pte_index(va)];
1485		npte = opte = *ptep;
1486
1487		if (maskbits & (PVF_WRITE|PVF_MOD)) {
1488			if ((pv->pv_flags & PVF_NC)) {
1489				/*
1490				 * Entry is not cacheable:
1491				 *
1492				 * Don't turn caching on again if this is a
1493				 * modified emulation. This would be
				 * inconsistent with the settings created by
				 * pmap_fix_cache(). Otherwise, it's safe
				 * to re-enable caching.
1497				 *
1498				 * There's no need to call pmap_fix_cache()
1499				 * here: all pages are losing their write
1500				 * permission.
1501				 */
1502				if (maskbits & PVF_WRITE) {
1503					if (!(pg->md.pv_memattr &
1504					    VM_MEMATTR_UNCACHEABLE))
1505						npte |= pte_l2_s_cache_mode;
1506					pv->pv_flags &= ~(PVF_NC | PVF_MWC);
1507				}
1508			} else
1509			if (opte & L2_S_PROT_W) {
1510				vm_page_dirty(pg);
1511				/*
				 * Entry is writable/cacheable: check if the
				 * pmap is current; if it is, flush the entry,
				 * otherwise it won't be in the cache.
1515				 */
1516				if (PV_BEEN_EXECD(oflags))
1517					pmap_idcache_wbinv_range(pm, pv->pv_va,
1518					    PAGE_SIZE);
1519				else
1520				if (PV_BEEN_REFD(oflags))
1521					pmap_dcache_wb_range(pm, pv->pv_va,
1522					    PAGE_SIZE,
1523					    (maskbits & PVF_REF) ? TRUE : FALSE,
1524					    FALSE);
1525			}
1526
1527			/* make the pte read only */
1528			npte &= ~L2_S_PROT_W;
1529		}
1530
1531		if (maskbits & PVF_REF) {
1532			if ((pv->pv_flags & PVF_NC) == 0 &&
1533			    (maskbits & (PVF_WRITE|PVF_MOD)) == 0) {
1534				/*
1535				 * Check npte here; we may have already
1536				 * done the wbinv above, and the validity
1537				 * of the PTE is the same for opte and
1538				 * npte.
1539				 */
1540				if (npte & L2_S_PROT_W) {
1541					if (PV_BEEN_EXECD(oflags))
1542						pmap_idcache_wbinv_range(pm,
1543						    pv->pv_va, PAGE_SIZE);
1544					else
1545					if (PV_BEEN_REFD(oflags))
1546						pmap_dcache_wb_range(pm,
1547						    pv->pv_va, PAGE_SIZE,
1548						    TRUE, FALSE);
1549				} else
1550				if ((npte & L2_TYPE_MASK) != L2_TYPE_INV) {
1551					/* XXXJRT need idcache_inv_range */
1552					if (PV_BEEN_EXECD(oflags))
1553						pmap_idcache_wbinv_range(pm,
1554						    pv->pv_va, PAGE_SIZE);
1555					else
1556					if (PV_BEEN_REFD(oflags))
1557						pmap_dcache_wb_range(pm,
1558						    pv->pv_va, PAGE_SIZE,
1559						    TRUE, TRUE);
1560				}
1561			}
1562
1563			/*
1564			 * Make the PTE invalid so that we will take a
1565			 * page fault the next time the mapping is
1566			 * referenced.
1567			 */
1568			npte &= ~L2_TYPE_MASK;
1569			npte |= L2_TYPE_INV;
1570		}
1571
1572		if (npte != opte) {
1573			count++;
1574			*ptep = npte;
1575			PTE_SYNC(ptep);
1576			/* Flush the TLB entry if a current pmap. */
1577			if (PV_BEEN_EXECD(oflags))
1578				pmap_tlb_flushID_SE(pm, pv->pv_va);
1579			else
1580			if (PV_BEEN_REFD(oflags))
1581				pmap_tlb_flushD_SE(pm, pv->pv_va);
1582		}
1583
1584		PMAP_UNLOCK(pm);
1585
1586	}
1587
1588	if (maskbits & PVF_WRITE)
1589		vm_page_aflag_clear(pg, PGA_WRITEABLE);
1590	vm_page_unlock_queues();
1591	return (count);
1592}
1593
1594/*
1595 * main pv_entry manipulation functions:
1596 *   pmap_enter_pv: enter a mapping onto a vm_page list
 *   pmap_remove_pv: remove a mapping from a vm_page list
 *
 * NOTE: pmap_enter_pv expects to lock the pvh itself
 *       pmap_remove_pv expects the caller to lock the pvh before calling
1601 */
1602
1603/*
 * pmap_enter_pv: enter a mapping onto a vm_page list
1605 *
1606 * => caller should hold the proper lock on pmap_main_lock
1607 * => caller should have pmap locked
1608 * => we will gain the lock on the vm_page and allocate the new pv_entry
1609 * => caller should adjust ptp's wire_count before calling
1610 * => caller should not adjust pmap's wire_count
1611 */
1612static void
1613pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
1614    vm_offset_t va, u_int flags)
1615{
1616
1617	int km;
1618
1619	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1620
1621	if (pg->md.pv_kva) {
1622		/* PMAP_ASSERT_LOCKED(pmap_kernel()); */
1623		pve->pv_pmap = pmap_kernel();
1624		pve->pv_va = pg->md.pv_kva;
1625		pve->pv_flags = PVF_WRITE | PVF_UNMAN;
1626		pg->md.pv_kva = 0;
1627
1628		if (!(km = PMAP_OWNED(pmap_kernel())))
1629			PMAP_LOCK(pmap_kernel());
1630		TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list);
1631		TAILQ_INSERT_HEAD(&pve->pv_pmap->pm_pvlist, pve, pv_plist);
1632		PMAP_UNLOCK(pmap_kernel());
1633		vm_page_unlock_queues();
1634		if ((pve = pmap_get_pv_entry()) == NULL)
1635			panic("pmap_kenter_internal: no pv entries");
1636		vm_page_lock_queues();
1637		if (km)
1638			PMAP_LOCK(pmap_kernel());
1639	}
1640
1641	PMAP_ASSERT_LOCKED(pm);
1642	pve->pv_pmap = pm;
1643	pve->pv_va = va;
1644	pve->pv_flags = flags;
1645
1646	TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list);
1647	TAILQ_INSERT_HEAD(&pm->pm_pvlist, pve, pv_plist);
1648	pg->md.pvh_attrs |= flags & (PVF_REF | PVF_MOD);
1649	if (pve->pv_flags & PVF_WIRED)
1650		++pm->pm_stats.wired_count;
1651	vm_page_aflag_set(pg, PGA_REFERENCED);
1652}
1653
1654/*
1655 *
1656 * pmap_find_pv: Find a pv entry
1657 *
1658 * => caller should hold lock on vm_page
1659 */
1660static PMAP_INLINE struct pv_entry *
1661pmap_find_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va)
1662{
1663	struct pv_entry *pv;
1664
1665	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1666	TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list)
1667	    if (pm == pv->pv_pmap && va == pv->pv_va)
1668		    break;
1669	return (pv);
1670}
1671
1672/*
1673 * vector_page_setprot:
1674 *
1675 *	Manipulate the protection of the vector page.
1676 */
1677void
1678vector_page_setprot(int prot)
1679{
1680	struct l2_bucket *l2b;
1681	pt_entry_t *ptep;
1682
1683	l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page);
1684
1685	ptep = &l2b->l2b_kva[l2pte_index(vector_page)];
1686
1687	*ptep = (*ptep & ~L1_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot);
1688	PTE_SYNC(ptep);
1689	cpu_tlb_flushD_SE(vector_page);
1690	cpu_cpwait();
1691}
1692
1693/*
1694 * pmap_remove_pv: try to remove a mapping from a pv_list
1695 *
1696 * => caller should hold proper lock on pmap_main_lock
1697 * => pmap should be locked
1698 * => caller should hold lock on vm_page [so that attrs can be adjusted]
1699 * => caller should adjust ptp's wire_count and free PTP if needed
1700 * => caller should NOT adjust pmap's wire_count
1701 * => we return the removed pve
1702 */
1703
1704static void
1705pmap_nuke_pv(struct vm_page *pg, pmap_t pm, struct pv_entry *pve)
1706{
1707
1708	struct pv_entry *pv;
1709	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1710	PMAP_ASSERT_LOCKED(pm);
1711	TAILQ_REMOVE(&pg->md.pv_list, pve, pv_list);
1712	TAILQ_REMOVE(&pm->pm_pvlist, pve, pv_plist);
1713	if (pve->pv_flags & PVF_WIRED)
1714		--pm->pm_stats.wired_count;
1715	if (pg->md.pvh_attrs & PVF_MOD)
1716		vm_page_dirty(pg);
1717	if (TAILQ_FIRST(&pg->md.pv_list) == NULL)
1718		pg->md.pvh_attrs &= ~PVF_REF;
	else
1720		vm_page_aflag_set(pg, PGA_REFERENCED);
1721	if ((pve->pv_flags & PVF_NC) && ((pm == pmap_kernel()) ||
1722	     (pve->pv_flags & PVF_WRITE) || !(pve->pv_flags & PVF_MWC)))
1723		pmap_fix_cache(pg, pm, 0);
1724	else if (pve->pv_flags & PVF_WRITE) {
1725		TAILQ_FOREACH(pve, &pg->md.pv_list, pv_list)
1726		    if (pve->pv_flags & PVF_WRITE)
1727			    break;
1728		if (!pve) {
1729			pg->md.pvh_attrs &= ~PVF_MOD;
1730			vm_page_aflag_clear(pg, PGA_WRITEABLE);
1731		}
1732	}
1733	pv = TAILQ_FIRST(&pg->md.pv_list);
1734	if (pv != NULL && (pv->pv_flags & PVF_UNMAN) &&
1735	    TAILQ_NEXT(pv, pv_list) == NULL) {
1736		pm = kernel_pmap;
1737		pg->md.pv_kva = pv->pv_va;
1738			/* a recursive pmap_nuke_pv */
1739		TAILQ_REMOVE(&pg->md.pv_list, pv, pv_list);
1740		TAILQ_REMOVE(&pm->pm_pvlist, pv, pv_plist);
1741		if (pv->pv_flags & PVF_WIRED)
1742			--pm->pm_stats.wired_count;
1743		pg->md.pvh_attrs &= ~PVF_REF;
1744		pg->md.pvh_attrs &= ~PVF_MOD;
1745		vm_page_aflag_clear(pg, PGA_WRITEABLE);
1746		pmap_free_pv_entry(pv);
1747	}
1748}
1749
1750static struct pv_entry *
1751pmap_remove_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va)
1752{
1753	struct pv_entry *pve;
1754
1755	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1756	pve = TAILQ_FIRST(&pg->md.pv_list);
1757
1758	while (pve) {
1759		if (pve->pv_pmap == pm && pve->pv_va == va) {	/* match? */
1760			pmap_nuke_pv(pg, pm, pve);
1761			break;
1762		}
1763		pve = TAILQ_NEXT(pve, pv_list);
1764	}
1765
1766	if (pve == NULL && pg->md.pv_kva == va)
1767		pg->md.pv_kva = 0;
1768
1769	return(pve);				/* return removed pve */
1770}
1771/*
1772 *
1773 * pmap_modify_pv: Update pv flags
1774 *
1775 * => caller should hold lock on vm_page [so that attrs can be adjusted]
1776 * => caller should NOT adjust pmap's wire_count
1777 * => we return the old flags
1778 *
1779 * Modify a physical-virtual mapping in the pv table
1780 */
1781static u_int
1782pmap_modify_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va,
1783    u_int clr_mask, u_int set_mask)
1784{
1785	struct pv_entry *npv;
1786	u_int flags, oflags;
1787
1788	PMAP_ASSERT_LOCKED(pm);
1789	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1790	if ((npv = pmap_find_pv(pg, pm, va)) == NULL)
1791		return (0);
1792
1793	/*
1794	 * There is at least one VA mapping this page.
1795	 */
1796
1797	if (clr_mask & (PVF_REF | PVF_MOD))
1798		pg->md.pvh_attrs |= set_mask & (PVF_REF | PVF_MOD);
1799
1800	oflags = npv->pv_flags;
1801	npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask;
1802
1803	if ((flags ^ oflags) & PVF_WIRED) {
1804		if (flags & PVF_WIRED)
1805			++pm->pm_stats.wired_count;
1806		else
1807			--pm->pm_stats.wired_count;
1808	}
1809
1810	if ((flags ^ oflags) & PVF_WRITE)
1811		pmap_fix_cache(pg, pm, 0);
1812
1813	return (oflags);
1814}
1815
1816/* Function to set the debug level of the pmap code */
1817#ifdef PMAP_DEBUG
1818void
1819pmap_debug(int level)
1820{
1821	pmap_debug_level = level;
1822	dprintf("pmap_debug: level=%d\n", pmap_debug_level);
1823}
1824#endif  /* PMAP_DEBUG */
1825
1826void
1827pmap_pinit0(struct pmap *pmap)
1828{
1829	PDEBUG(1, printf("pmap_pinit0: pmap = %08x\n", (u_int32_t) pmap));
1830
1831	dprintf("pmap_pinit0: pmap = %08x, pm_pdir = %08x\n",
1832		(u_int32_t) pmap, (u_int32_t) pmap->pm_pdir);
1833	bcopy(kernel_pmap, pmap, sizeof(*pmap));
1834	bzero(&pmap->pm_mtx, sizeof(pmap->pm_mtx));
1835	PMAP_LOCK_INIT(pmap);
1836}
1837
1838/*
1839 *	Initialize a vm_page's machine-dependent fields.
1840 */
1841void
1842pmap_page_init(vm_page_t m)
1843{
1844
1845	TAILQ_INIT(&m->md.pv_list);
1846	m->md.pv_memattr = VM_MEMATTR_DEFAULT;
1847}
1848
1849/*
1850 *      Initialize the pmap module.
1851 *      Called by vm_init, to initialize any structures that the pmap
1852 *      system needs to map virtual memory.
1853 */
1854void
1855pmap_init(void)
1856{
1857	int shpgperproc = PMAP_SHPGPERPROC;
1858
1859	PDEBUG(1, printf("pmap_init: phys_start = %08x\n", PHYSADDR));
1860
1861	/*
1862	 * init the pv free list
1863	 */
1864	pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
1865	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
1866	/*
1867	 * Now it is safe to enable pv_table recording.
1868	 */
1869	PDEBUG(1, printf("pmap_init: done!\n"));
1870
1871	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
1872
1873	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
1874	pv_entry_high_water = 9 * (pv_entry_max / 10);
1875	l2zone = uma_zcreate("L2 Table", L2_TABLE_SIZE_REAL, pmap_l2ptp_ctor,
1876	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
1877	l2table_zone = uma_zcreate("L2 Table", sizeof(struct l2_dtable),
1878	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1879	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
1880
1881	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
1882
1883}
1884
1885int
1886pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
1887{
1888	struct l2_dtable *l2;
1889	struct l2_bucket *l2b;
1890	pd_entry_t *pl1pd, l1pd;
1891	pt_entry_t *ptep, pte;
1892	vm_paddr_t pa;
1893	u_int l1idx;
1894	int rv = 0;
1895
1896	l1idx = L1_IDX(va);
1897	vm_page_lock_queues();
1898	PMAP_LOCK(pm);
1899
1900	/*
1901	 * If there is no l2_dtable for this address, then the process
1902	 * has no business accessing it.
1903	 *
1904	 * Note: This will catch userland processes trying to access
1905	 * kernel addresses.
1906	 */
1907	l2 = pm->pm_l2[L2_IDX(l1idx)];
1908	if (l2 == NULL)
1909		goto out;
1910
1911	/*
1912	 * Likewise if there is no L2 descriptor table
1913	 */
1914	l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
1915	if (l2b->l2b_kva == NULL)
1916		goto out;
1917
1918	/*
1919	 * Check the PTE itself.
1920	 */
1921	ptep = &l2b->l2b_kva[l2pte_index(va)];
1922	pte = *ptep;
1923	if (pte == 0)
1924		goto out;
1925
1926	/*
1927	 * Catch a userland access to the vector page mapped at 0x0
1928	 */
1929	if (user && (pte & L2_S_PROT_U) == 0)
1930		goto out;
1931	if (va == vector_page)
1932		goto out;
1933
1934	pa = l2pte_pa(pte);
1935
1936	if ((ftype & VM_PROT_WRITE) && (pte & L2_S_PROT_W) == 0) {
1937		/*
1938		 * This looks like a good candidate for "page modified"
1939		 * emulation...
1940		 */
1941		struct pv_entry *pv;
1942		struct vm_page *pg;
1943
1944		/* Extract the physical address of the page */
1945		if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) {
1946			goto out;
1947		}
1948		/* Get the current flags for this page. */
1949
1950		pv = pmap_find_pv(pg, pm, va);
1951		if (pv == NULL) {
1952			goto out;
1953		}
1954
1955		/*
1956		 * Do the flags say this page is writable? If not then it
1957		 * is a genuine write fault. If yes then the write fault is
1958		 * our fault as we did not reflect the write access in the
		 * PTE. Now that we know a write has occurred, we can correct
		 * this and also set the modified bit.
1961		 */
1962		if ((pv->pv_flags & PVF_WRITE) == 0) {
1963			goto out;
1964		}
1965
1966		pg->md.pvh_attrs |= PVF_REF | PVF_MOD;
1967		vm_page_dirty(pg);
1968		pv->pv_flags |= PVF_REF | PVF_MOD;
1969
1970		/*
1971		 * Re-enable write permissions for the page.  No need to call
1972		 * pmap_fix_cache(), since this is just a
1973		 * modified-emulation fault, and the PVF_WRITE bit isn't
1974		 * changing. We've already set the cacheable bits based on
1975		 * the assumption that we can write to this page.
1976		 */
1977		*ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W;
1978		PTE_SYNC(ptep);
1979		rv = 1;
1980	} else
1981	if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) {
1982		/*
1983		 * This looks like a good candidate for "page referenced"
1984		 * emulation.
1985		 */
1986		struct pv_entry *pv;
1987		struct vm_page *pg;
1988
1989		/* Extract the physical address of the page */
1990		if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
1991			goto out;
1992		/* Get the current flags for this page. */
1993
1994		pv = pmap_find_pv(pg, pm, va);
1995		if (pv == NULL)
1996			goto out;
1997
1998		pg->md.pvh_attrs |= PVF_REF;
1999		pv->pv_flags |= PVF_REF;
2000
2001
2002		*ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO;
2003		PTE_SYNC(ptep);
2004		rv = 1;
2005	}
2006
2007	/*
2008	 * We know there is a valid mapping here, so simply
2009	 * fix up the L1 if necessary.
2010	 */
2011	pl1pd = &pm->pm_l1->l1_kva[l1idx];
2012	l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO;
2013	if (*pl1pd != l1pd) {
2014		*pl1pd = l1pd;
2015		PTE_SYNC(pl1pd);
2016		rv = 1;
2017	}
2018
2019#ifdef CPU_SA110
2020	/*
2021	 * There are bugs in the rev K SA110.  This is a check for one
2022	 * of them.
2023	 */
2024	if (rv == 0 && curcpu()->ci_arm_cputype == CPU_ID_SA110 &&
2025	    curcpu()->ci_arm_cpurev < 3) {
2026		/* Always current pmap */
2027		if (l2pte_valid(pte)) {
2028			extern int kernel_debug;
2029			if (kernel_debug & 1) {
2030				struct proc *p = curlwp->l_proc;
2031				printf("prefetch_abort: page is already "
2032				    "mapped - pte=%p *pte=%08x\n", ptep, pte);
2033				printf("prefetch_abort: pc=%08lx proc=%p "
2034				    "process=%s\n", va, p, p->p_comm);
2035				printf("prefetch_abort: far=%08x fs=%x\n",
2036				    cpu_faultaddress(), cpu_faultstatus());
2037			}
2038#ifdef DDB
2039			if (kernel_debug & 2)
2040				Debugger();
2041#endif
2042			rv = 1;
2043		}
2044	}
2045#endif /* CPU_SA110 */
2046
2047#ifdef DEBUG
2048	/*
2049	 * If 'rv == 0' at this point, it generally indicates that there is a
2050	 * stale TLB entry for the faulting address. This happens when two or
2051	 * more processes are sharing an L1. Since we don't flush the TLB on
2052	 * a context switch between such processes, we can take domain faults
	 * for mappings which exist at the same VA in both processes, even
	 * if we've recently fixed up the corresponding L1 in pmap_enter(),
	 * for example.
2056	 *
2057	 * This is extremely likely to happen if pmap_enter() updated the L1
2058	 * entry for a recently entered mapping. In this case, the TLB is
2059	 * flushed for the new mapping, but there may still be TLB entries for
2060	 * other mappings belonging to other processes in the 1MB range
2061	 * covered by the L1 entry.
2062	 *
2063	 * Since 'rv == 0', we know that the L1 already contains the correct
2064	 * value, so the fault must be due to a stale TLB entry.
2065	 *
2066	 * Since we always need to flush the TLB anyway in the case where we
2067	 * fixed up the L1, or frobbed the L2 PTE, we effectively deal with
2068	 * stale TLB entries dynamically.
2069	 *
2070	 * However, the above condition can ONLY happen if the current L1 is
2071	 * being shared. If it happens when the L1 is unshared, it indicates
2072	 * that other parts of the pmap are not doing their job WRT managing
2073	 * the TLB.
2074	 */
2075	if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) {
2076		extern int last_fault_code;
2077		printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n",
2078		    pm, va, ftype);
2079		printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n",
2080		    l2, l2b, ptep, pl1pd);
2081		printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n",
2082		    pte, l1pd, last_fault_code);
2083#ifdef DDB
2084		Debugger();
2085#endif
2086	}
2087#endif
2088
2089	cpu_tlb_flushID_SE(va);
2090	cpu_cpwait();
2091
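	/*
	 * A valid mapping exists at this address and any stale TLB entry
	 * for it has now been flushed, so report the fault as handled.
	 */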
2092	rv = 1;
2093
2094out:
2095	vm_page_unlock_queues();
2096	PMAP_UNLOCK(pm);
2097	return (rv);
2098}
2099
2100void
2101pmap_postinit(void)
2102{
2103	struct l2_bucket *l2b;
2104	struct l1_ttable *l1;
2105	pd_entry_t *pl1pt;
2106	pt_entry_t *ptep, pte;
2107	vm_offset_t va, eva;
2108	u_int loop, needed;
2109
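	/*
	 * Each L1 table can be shared by up to PMAP_DOMAINS pmaps, so work
	 * out how many additional L1s are needed to cover maxproc
	 * processes; the statically allocated kernel L1 presumably accounts
	 * for the one subtracted here.
	 */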
2110	needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 1 : 0);
2111	needed -= 1;
2112	l1 = malloc(sizeof(*l1) * needed, M_VMPMAP, M_WAITOK);
2113
2114	for (loop = 0; loop < needed; loop++, l1++) {
2115		/* Allocate a L1 page table */
2116		va = (vm_offset_t)contigmalloc(L1_TABLE_SIZE, M_VMPMAP, 0, 0x0,
2117		    0xffffffff, L1_TABLE_SIZE, 0);
2118
2119		if (va == 0)
2120			panic("Cannot allocate L1 KVM");
2121
2122		eva = va + L1_TABLE_SIZE;
2123		pl1pt = (pd_entry_t *)va;
2124
2125		while (va < eva) {
			l2b = pmap_get_l2_bucket(pmap_kernel(), va);
			ptep = &l2b->l2b_kva[l2pte_index(va)];
			pte = *ptep;
			pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
			*ptep = pte;
			PTE_SYNC(ptep);
			cpu_tlb_flushD_SE(va);

			va += PAGE_SIZE;
2135		}
2136		pmap_init_l1(l1, pl1pt);
2137	}
2138
2139
2140#ifdef DEBUG
2141	printf("pmap_postinit: Allocated %d static L1 descriptor tables\n",
2142	    needed);
2143#endif
2144}
2145
2146/*
2147 * This is used to stuff certain critical values into the PCB where they
2148 * can be accessed quickly from cpu_switch() et al.
2149 */
2150void
2151pmap_set_pcb_pagedir(pmap_t pm, struct pcb *pcb)
2152{
2153	struct l2_bucket *l2b;
2154
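	/*
	 * Record the pmap's L1 physical address and build a DACR value
	 * granting client access to both the kernel domain and this pmap's
	 * own domain.
	 */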
2155	pcb->pcb_pagedir = pm->pm_l1->l1_physaddr;
2156	pcb->pcb_dacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
2157	    (DOMAIN_CLIENT << (pm->pm_domain * 2));
2158
2159	if (vector_page < KERNBASE) {
2160		pcb->pcb_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)];
2161		l2b = pmap_get_l2_bucket(pm, vector_page);
2162		pcb->pcb_l1vec = l2b->l2b_phys | L1_C_PROTO |
2163	 	    L1_C_DOM(pm->pm_domain) | L1_C_DOM(PMAP_DOMAIN_KERNEL);
2164	} else
2165		pcb->pcb_pl1vec = NULL;
2166}
2167
2168void
2169pmap_activate(struct thread *td)
2170{
2171	pmap_t pm;
2172	struct pcb *pcb;
2173
2174	pm = vmspace_pmap(td->td_proc->p_vmspace);
2175	pcb = td->td_pcb;
2176
2177	critical_enter();
2178	pmap_set_pcb_pagedir(pm, pcb);
2179
2180	if (td == curthread) {
2181		u_int cur_dacr, cur_ttb;
2182
2183		__asm __volatile("mrc p15, 0, %0, c2, c0, 0" : "=r"(cur_ttb));
2184		__asm __volatile("mrc p15, 0, %0, c3, c0, 0" : "=r"(cur_dacr));
2185
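		/*
		 * The low-order bits of the TTB register hold attribute
		 * flags; mask them off so only the L1 table base address
		 * is compared below.
		 */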
2186		cur_ttb &= ~(L1_TABLE_SIZE - 1);
2187
2188		if (cur_ttb == (u_int)pcb->pcb_pagedir &&
2189		    cur_dacr == pcb->pcb_dacr) {
2190			/*
2191			 * No need to switch address spaces.
2192			 */
2193			critical_exit();
2194			return;
2195		}
2196
2197
2198		/*
2199		 * We MUST, I repeat, MUST fix up the L1 entry corresponding
2200		 * to 'vector_page' in the incoming L1 table before switching
		 * to it; otherwise subsequent interrupts/exceptions (including
2202		 * domain faults!) will jump into hyperspace.
2203		 */
2204		if (pcb->pcb_pl1vec) {
2205
2206			*pcb->pcb_pl1vec = pcb->pcb_l1vec;
2207			/*
2208			 * Don't need to PTE_SYNC() at this point since
2209			 * cpu_setttb() is about to flush both the cache
2210			 * and the TLB.
2211			 */
2212		}
2213
2214		cpu_domains(pcb->pcb_dacr);
2215		cpu_setttb(pcb->pcb_pagedir);
2216	}
2217	critical_exit();
2218}
2219
2220static int
2221pmap_set_pt_cache_mode(pd_entry_t *kl1, vm_offset_t va)
2222{
2223	pd_entry_t *pdep, pde;
2224	pt_entry_t *ptep, pte;
2225	vm_offset_t pa;
2226	int rv = 0;
2227
2228	/*
2229	 * Make sure the descriptor itself has the correct cache mode
2230	 */
2231	pdep = &kl1[L1_IDX(va)];
2232	pde = *pdep;
2233
2234	if (l1pte_section_p(pde)) {
2235		if ((pde & L1_S_CACHE_MASK) != pte_l1_s_cache_mode_pt) {
2236			*pdep = (pde & ~L1_S_CACHE_MASK) |
2237			    pte_l1_s_cache_mode_pt;
2238			PTE_SYNC(pdep);
2239			cpu_dcache_wbinv_range((vm_offset_t)pdep,
2240			    sizeof(*pdep));
2241			cpu_l2cache_wbinv_range((vm_offset_t)pdep,
2242			    sizeof(*pdep));
2243			rv = 1;
2244		}
2245	} else {
2246		pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK);
2247		ptep = (pt_entry_t *)kernel_pt_lookup(pa);
2248		if (ptep == NULL)
			panic("pmap_set_pt_cache_mode: no L2 table for "
			    "va 0x%x", va);
2250
2251		ptep = &ptep[l2pte_index(va)];
2252		pte = *ptep;
2253		if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
2254			*ptep = (pte & ~L2_S_CACHE_MASK) |
2255			    pte_l2_s_cache_mode_pt;
2256			PTE_SYNC(ptep);
2257			cpu_dcache_wbinv_range((vm_offset_t)ptep,
2258			    sizeof(*ptep));
2259			cpu_l2cache_wbinv_range((vm_offset_t)ptep,
2260			    sizeof(*ptep));
2261			rv = 1;
2262		}
2263	}
2264
2265	return (rv);
2266}
2267
2268static void
2269pmap_alloc_specials(vm_offset_t *availp, int pages, vm_offset_t *vap,
2270    pt_entry_t **ptep)
2271{
2272	vm_offset_t va = *availp;
2273	struct l2_bucket *l2b;
2274
2275	if (ptep) {
2276		l2b = pmap_get_l2_bucket(pmap_kernel(), va);
2277		if (l2b == NULL)
2278			panic("pmap_alloc_specials: no l2b for 0x%x", va);
2279
2280		*ptep = &l2b->l2b_kva[l2pte_index(va)];
2281	}
2282
2283	*vap = va;
2284	*availp = va + (PAGE_SIZE * pages);
2285}
2286
2287/*
2288 *	Bootstrap the system enough to run with virtual memory.
2289 *
2290 *	On the arm this is called after mapping has already been enabled
2291 *	and just syncs the pmap module with what has already been done.
2292 *	[We can't call it easily with mapping off since the kernel is not
2293 *	mapped with PA == VA, hence we would have to relocate every address
2294 *	from the linked base (virtual) address "KERNBASE" to the actual
2295 *	(physical) address starting relative to 0]
2296 */
2297#define PMAP_STATIC_L2_SIZE 16
2298#ifdef ARM_USE_SMALL_ALLOC
2299extern struct mtx smallalloc_mtx;
2300#endif
2301
2302void
2303pmap_bootstrap(vm_offset_t firstaddr, vm_offset_t lastaddr, struct pv_addr *l1pt)
2304{
2305	static struct l1_ttable static_l1;
2306	static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE];
2307	struct l1_ttable *l1 = &static_l1;
2308	struct l2_dtable *l2;
2309	struct l2_bucket *l2b;
2310	pd_entry_t pde;
2311	pd_entry_t *kernel_l1pt = (pd_entry_t *)l1pt->pv_va;
2312	pt_entry_t *ptep;
2313	vm_paddr_t pa;
2314	vm_offset_t va;
2315	vm_size_t size;
2316	int l1idx, l2idx, l2next = 0;
2317
2318	PDEBUG(1, printf("firstaddr = %08x, lastaddr = %08x\n",
2319	    firstaddr, lastaddr));
2320
2321	virtual_avail = firstaddr;
2322	kernel_pmap->pm_l1 = l1;
2323	kernel_l1pa = l1pt->pv_pa;
2324
2325	/*
2326	 * Scan the L1 translation table created by initarm() and create
2327	 * the required metadata for all valid mappings found in it.
2328	 */
2329	for (l1idx = 0; l1idx < (L1_TABLE_SIZE / sizeof(pd_entry_t)); l1idx++) {
2330		pde = kernel_l1pt[l1idx];
2331
2332		/*
2333		 * We're only interested in Coarse mappings.
2334		 * pmap_extract() can deal with section mappings without
2335		 * recourse to checking L2 metadata.
2336		 */
2337		if ((pde & L1_TYPE_MASK) != L1_TYPE_C)
2338			continue;
2339
2340		/*
2341		 * Lookup the KVA of this L2 descriptor table
2342		 */
2343		pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK);
2344		ptep = (pt_entry_t *)kernel_pt_lookup(pa);
2345
2346		if (ptep == NULL) {
2347			panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx",
2348			    (u_int)l1idx << L1_S_SHIFT, (long unsigned int)pa);
2349		}
2350
2351		/*
2352		 * Fetch the associated L2 metadata structure.
2353		 * Allocate a new one if necessary.
2354		 */
2355		if ((l2 = kernel_pmap->pm_l2[L2_IDX(l1idx)]) == NULL) {
2356			if (l2next == PMAP_STATIC_L2_SIZE)
2357				panic("pmap_bootstrap: out of static L2s");
2358			kernel_pmap->pm_l2[L2_IDX(l1idx)] = l2 =
2359			    &static_l2[l2next++];
2360		}
2361
2362		/*
2363		 * One more L1 slot tracked...
2364		 */
2365		l2->l2_occupancy++;
2366
2367		/*
2368		 * Fill in the details of the L2 descriptor in the
2369		 * appropriate bucket.
2370		 */
2371		l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
2372		l2b->l2b_kva = ptep;
2373		l2b->l2b_phys = pa;
2374		l2b->l2b_l1idx = l1idx;
2375
2376		/*
2377		 * Establish an initial occupancy count for this descriptor
2378		 */
2379		for (l2idx = 0;
2380		    l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
2381		    l2idx++) {
2382			if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) {
2383				l2b->l2b_occupancy++;
2384			}
2385		}
2386
2387		/*
2388		 * Make sure the descriptor itself has the correct cache mode.
2389		 * If not, fix it, but whine about the problem. Port-meisters
2390		 * should consider this a clue to fix up their initarm()
2391		 * function. :)
2392		 */
2393		if (pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)ptep)) {
2394			printf("pmap_bootstrap: WARNING! wrong cache mode for "
2395			    "L2 pte @ %p\n", ptep);
2396		}
2397	}
2398
2399
2400	/*
2401	 * Ensure the primary (kernel) L1 has the correct cache mode for
	 * a page table. Complain if it is not correctly set.
2403	 */
2404	for (va = (vm_offset_t)kernel_l1pt;
2405	    va < ((vm_offset_t)kernel_l1pt + L1_TABLE_SIZE); va += PAGE_SIZE) {
2406		if (pmap_set_pt_cache_mode(kernel_l1pt, va))
2407			printf("pmap_bootstrap: WARNING! wrong cache mode for "
2408			    "primary L1 @ 0x%x\n", va);
2409	}
2410
2411	cpu_dcache_wbinv_all();
2412	cpu_l2cache_wbinv_all();
2413	cpu_tlb_flushID();
2414	cpu_cpwait();
2415
2416	PMAP_LOCK_INIT(kernel_pmap);
2417	CPU_FILL(&kernel_pmap->pm_active);
2418	kernel_pmap->pm_domain = PMAP_DOMAIN_KERNEL;
2419	TAILQ_INIT(&kernel_pmap->pm_pvlist);
2420
2421	/*
2422	 * Reserve some special page table entries/VA space for temporary
2423	 * mapping of pages.
2424	 */
2425#define SYSMAP(c, p, v, n)						\
2426    v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
2427
2428	pmap_alloc_specials(&virtual_avail, 1, &csrcp, &csrc_pte);
2429	pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)csrc_pte);
2430	pmap_alloc_specials(&virtual_avail, 1, &cdstp, &cdst_pte);
2431	pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)cdst_pte);
2432	size = ((lastaddr - pmap_curmaxkvaddr) + L1_S_OFFSET) / L1_S_SIZE;
2433	pmap_alloc_specials(&virtual_avail,
2434	    round_page(size * L2_TABLE_SIZE_REAL) / PAGE_SIZE,
2435	    &pmap_kernel_l2ptp_kva, NULL);
2436
2437	size = (size + (L2_BUCKET_SIZE - 1)) / L2_BUCKET_SIZE;
2438	pmap_alloc_specials(&virtual_avail,
2439	    round_page(size * sizeof(struct l2_dtable)) / PAGE_SIZE,
2440	    &pmap_kernel_l2dtable_kva, NULL);
2441
2442	pmap_alloc_specials(&virtual_avail,
2443	    1, (vm_offset_t*)&_tmppt, NULL);
2444	pmap_alloc_specials(&virtual_avail,
2445	    MAXDUMPPGS, (vm_offset_t *)&crashdumpmap, NULL);
2446	SLIST_INIT(&l1_list);
2447	TAILQ_INIT(&l1_lru_list);
2448	mtx_init(&l1_lru_lock, "l1 list lock", NULL, MTX_DEF);
2449	pmap_init_l1(l1, kernel_l1pt);
2450	cpu_dcache_wbinv_all();
2451	cpu_l2cache_wbinv_all();
2452
2453	virtual_avail = round_page(virtual_avail);
2454	virtual_end = lastaddr;
2455	kernel_vm_end = pmap_curmaxkvaddr;
2456	arm_nocache_startaddr = lastaddr;
2457	mtx_init(&cmtx, "TMP mappings mtx", NULL, MTX_DEF);
2458
2459#ifdef ARM_USE_SMALL_ALLOC
2460	mtx_init(&smallalloc_mtx, "Small alloc page list", NULL, MTX_DEF);
2461	arm_init_smallalloc();
2462#endif
2463	pmap_set_pcb_pagedir(kernel_pmap, thread0.td_pcb);
2464}
2465
2466/***************************************************
2467 * Pmap allocation/deallocation routines.
2468 ***************************************************/
2469
2470/*
2471 * Release any resources held by the given physical map.
2472 * Called when a pmap initialized by pmap_pinit is being released.
2473 * Should only be called if the map contains no valid mappings.
2474 */
2475void
2476pmap_release(pmap_t pmap)
2477{
2478	struct pcb *pcb;
2479
2480	pmap_idcache_wbinv_all(pmap);
2481	cpu_l2cache_wbinv_all();
2482	pmap_tlb_flushID(pmap);
2483	cpu_cpwait();
2484	if (vector_page < KERNBASE) {
2485		struct pcb *curpcb = PCPU_GET(curpcb);
2486		pcb = thread0.td_pcb;
2487		if (pmap_is_current(pmap)) {
2488			/*
2489 			 * Frob the L1 entry corresponding to the vector
2490			 * page so that it contains the kernel pmap's domain
2491			 * number. This will ensure pmap_remove() does not
2492			 * pull the current vector page out from under us.
2493			 */
2494			critical_enter();
2495			*pcb->pcb_pl1vec = pcb->pcb_l1vec;
2496			cpu_domains(pcb->pcb_dacr);
2497			cpu_setttb(pcb->pcb_pagedir);
2498			critical_exit();
2499		}
2500		pmap_remove(pmap, vector_page, vector_page + PAGE_SIZE);
2501		/*
2502		 * Make sure cpu_switch(), et al, DTRT. This is safe to do
2503		 * since this process has no remaining mappings of its own.
2504		 */
2505		curpcb->pcb_pl1vec = pcb->pcb_pl1vec;
2506		curpcb->pcb_l1vec = pcb->pcb_l1vec;
2507		curpcb->pcb_dacr = pcb->pcb_dacr;
2508		curpcb->pcb_pagedir = pcb->pcb_pagedir;
2509
2510	}
2511	pmap_free_l1(pmap);
2512	PMAP_LOCK_DESTROY(pmap);
2513
2514	dprintf("pmap_release()\n");
2515}
2516
2517
2518
2519/*
2520 * Helper function for pmap_grow_l2_bucket()
2521 */
2522static __inline int
2523pmap_grow_map(vm_offset_t va, pt_entry_t cache_mode, vm_paddr_t *pap)
2524{
2525	struct l2_bucket *l2b;
2526	pt_entry_t *ptep;
2527	vm_paddr_t pa;
2528	struct vm_page *pg;
2529
2530	pg = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
2531	if (pg == NULL)
2532		return (1);
2533	pa = VM_PAGE_TO_PHYS(pg);
2534
2535	if (pap)
2536		*pap = pa;
2537
2538	l2b = pmap_get_l2_bucket(pmap_kernel(), va);
2539
2540	ptep = &l2b->l2b_kva[l2pte_index(va)];
2541	*ptep = L2_S_PROTO | pa | cache_mode |
2542	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE);
2543	PTE_SYNC(ptep);
2544	return (0);
2545}
2546
2547/*
2548 * This is the same as pmap_alloc_l2_bucket(), except that it is only
2549 * used by pmap_growkernel().
2550 */
2551static __inline struct l2_bucket *
2552pmap_grow_l2_bucket(pmap_t pm, vm_offset_t va)
2553{
2554	struct l2_dtable *l2;
2555	struct l2_bucket *l2b;
2556	struct l1_ttable *l1;
2557	pd_entry_t *pl1pd;
2558	u_short l1idx;
2559	vm_offset_t nva;
2560
2561	l1idx = L1_IDX(va);
2562
2563	if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
2564		/*
2565		 * No mapping at this address, as there is
2566		 * no entry in the L1 table.
2567		 * Need to allocate a new l2_dtable.
2568		 */
2569		nva = pmap_kernel_l2dtable_kva;
2570		if ((nva & PAGE_MASK) == 0) {
2571			/*
2572			 * Need to allocate a backing page
2573			 */
2574			if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL))
2575				return (NULL);
2576		}
2577
2578		l2 = (struct l2_dtable *)nva;
2579		nva += sizeof(struct l2_dtable);
2580
2581		if ((nva & PAGE_MASK) < (pmap_kernel_l2dtable_kva &
2582		    PAGE_MASK)) {
2583			/*
2584			 * The new l2_dtable straddles a page boundary.
2585			 * Map in another page to cover it.
2586			 */
2587			if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL))
2588				return (NULL);
2589		}
2590
2591		pmap_kernel_l2dtable_kva = nva;
2592
2593		/*
2594		 * Link it into the parent pmap
2595		 */
2596		pm->pm_l2[L2_IDX(l1idx)] = l2;
2597		memset(l2, 0, sizeof(*l2));
2598	}
2599
2600	l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
2601
2602	/*
2603	 * Fetch pointer to the L2 page table associated with the address.
2604	 */
2605	if (l2b->l2b_kva == NULL) {
2606		pt_entry_t *ptep;
2607
2608		/*
2609		 * No L2 page table has been allocated. Chances are, this
2610		 * is because we just allocated the l2_dtable, above.
2611		 */
2612		nva = pmap_kernel_l2ptp_kva;
2613		ptep = (pt_entry_t *)nva;
2614		if ((nva & PAGE_MASK) == 0) {
2615			/*
2616			 * Need to allocate a backing page
2617			 */
2618			if (pmap_grow_map(nva, pte_l2_s_cache_mode_pt,
2619			    &pmap_kernel_l2ptp_phys))
2620				return (NULL);
2621			PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t));
2622		}
2623		memset(ptep, 0, L2_TABLE_SIZE_REAL);
2624		l2->l2_occupancy++;
2625		l2b->l2b_kva = ptep;
2626		l2b->l2b_l1idx = l1idx;
2627		l2b->l2b_phys = pmap_kernel_l2ptp_phys;
2628
2629		pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL;
2630		pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL;
2631	}
2632
2633	/* Distribute new L1 entry to all other L1s */
2634	SLIST_FOREACH(l1, &l1_list, l1_link) {
		pl1pd = &l1->l1_kva[L1_IDX(va)];
		*pl1pd = l2b->l2b_phys | L1_C_DOM(PMAP_DOMAIN_KERNEL) |
		    L1_C_PROTO;
		PTE_SYNC(pl1pd);
2639	}
2640
2641	return (l2b);
2642}
2643
2644
2645/*
2646 * grow the number of kernel page table entries, if needed
2647 */
2648void
2649pmap_growkernel(vm_offset_t addr)
2650{
2651	pmap_t kpm = pmap_kernel();
2652
2653	if (addr <= pmap_curmaxkvaddr)
2654		return;		/* we are OK */
2655
2656	/*
2657	 * whoops!   we need to add kernel PTPs
2658	 */
2659
2660	/* Map 1MB at a time */
2661	for (; pmap_curmaxkvaddr < addr; pmap_curmaxkvaddr += L1_S_SIZE)
2662		pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr);
2663
2664	/*
2665	 * flush out the cache, expensive but growkernel will happen so
2666	 * rarely
2667	 */
2668	cpu_dcache_wbinv_all();
2669	cpu_l2cache_wbinv_all();
2670	cpu_tlb_flushD();
2671	cpu_cpwait();
2672	kernel_vm_end = pmap_curmaxkvaddr;
2673}
2674
2675
2676/*
 * Remove all pages from the specified address space;
 * this aids process exit speeds.  Also, this code
2679 * is special cased for current process only, but
2680 * can have the more generic (and slightly slower)
2681 * mode enabled.  This is much faster than pmap_remove
2682 * in the case of running down an entire address space.
2683 */
2684void
2685pmap_remove_pages(pmap_t pmap)
2686{
2687	struct pv_entry *pv, *npv;
2688	struct l2_bucket *l2b = NULL;
2689	vm_page_t m;
2690	pt_entry_t *pt;
2691
2692	vm_page_lock_queues();
2693	PMAP_LOCK(pmap);
2694	cpu_idcache_wbinv_all();
2695	cpu_l2cache_wbinv_all();
2696	for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
2697		if (pv->pv_flags & PVF_WIRED || pv->pv_flags & PVF_UNMAN) {
2698			/* Cannot remove wired or unmanaged pages now. */
2699			npv = TAILQ_NEXT(pv, pv_plist);
2700			continue;
2701		}
2702		pmap->pm_stats.resident_count--;
2703		l2b = pmap_get_l2_bucket(pmap, pv->pv_va);
2704		KASSERT(l2b != NULL, ("No L2 bucket in pmap_remove_pages"));
2705		pt = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
2706		m = PHYS_TO_VM_PAGE(*pt & L2_ADDR_MASK);
2707#ifdef ARM_USE_SMALL_ALLOC
2708		KASSERT((vm_offset_t)m >= alloc_firstaddr, ("Trying to access non-existent page va %x pte %x", pv->pv_va, *pt));
2709#else
2710		KASSERT((vm_offset_t)m >= KERNBASE, ("Trying to access non-existent page va %x pte %x", pv->pv_va, *pt));
2711#endif
2712		*pt = 0;
2713		PTE_SYNC(pt);
2714		npv = TAILQ_NEXT(pv, pv_plist);
2715		pmap_nuke_pv(m, pmap, pv);
2716		if (TAILQ_EMPTY(&m->md.pv_list))
2717			vm_page_aflag_clear(m, PGA_WRITEABLE);
2718		pmap_free_pv_entry(pv);
2719		pmap_free_l2_bucket(pmap, l2b, 1);
2720	}
2721	vm_page_unlock_queues();
2722	cpu_tlb_flushID();
2723	cpu_cpwait();
2724	PMAP_UNLOCK(pmap);
2725}
2726
2727
2728/***************************************************
2729 * Low level mapping routines.....
2730 ***************************************************/
2731
2732#ifdef ARM_HAVE_SUPERSECTIONS
2733/* Map a super section into the KVA. */
2734
2735void
2736pmap_kenter_supersection(vm_offset_t va, uint64_t pa, int flags)
2737{
2738	pd_entry_t pd = L1_S_PROTO | L1_S_SUPERSEC | (pa & L1_SUP_FRAME) |
2739	    (((pa >> 32) & 0xf) << 20) | L1_S_PROT(PTE_KERNEL,
2740	    VM_PROT_READ|VM_PROT_WRITE) | L1_S_DOM(PMAP_DOMAIN_KERNEL);
2741	struct l1_ttable *l1;
2742	vm_offset_t va0, va_end;
2743
2744	KASSERT(((va | pa) & L1_SUP_OFFSET) == 0,
2745	    ("Not a valid super section mapping"));
2746	if (flags & SECTION_CACHE)
2747		pd |= pte_l1_s_cache_mode;
2748	else if (flags & SECTION_PT)
2749		pd |= pte_l1_s_cache_mode_pt;
2750	va0 = va & L1_SUP_FRAME;
2751	va_end = va + L1_SUP_SIZE;
2752	SLIST_FOREACH(l1, &l1_list, l1_link) {
2753		va = va0;
2754		for (; va < va_end; va += L1_S_SIZE) {
2755			l1->l1_kva[L1_IDX(va)] = pd;
2756			PTE_SYNC(&l1->l1_kva[L1_IDX(va)]);
2757		}
2758	}
2759}
2760#endif
2761
2762/* Map a section into the KVA. */
2763
2764void
2765pmap_kenter_section(vm_offset_t va, vm_offset_t pa, int flags)
2766{
2767	pd_entry_t pd = L1_S_PROTO | pa | L1_S_PROT(PTE_KERNEL,
2768	    VM_PROT_READ|VM_PROT_WRITE) | L1_S_DOM(PMAP_DOMAIN_KERNEL);
2769	struct l1_ttable *l1;
2770
2771	KASSERT(((va | pa) & L1_S_OFFSET) == 0,
2772	    ("Not a valid section mapping"));
2773	if (flags & SECTION_CACHE)
2774		pd |= pte_l1_s_cache_mode;
2775	else if (flags & SECTION_PT)
2776		pd |= pte_l1_s_cache_mode_pt;
2777	SLIST_FOREACH(l1, &l1_list, l1_link) {
2778		l1->l1_kva[L1_IDX(va)] = pd;
2779		PTE_SYNC(&l1->l1_kva[L1_IDX(va)]);
2780	}
2781}
2782
2783/*
2784 * Make a temporary mapping for a physical address.  This is only intended
2785 * to be used for panic dumps.
2786 */
2787void *
2788pmap_kenter_temp(vm_paddr_t pa, int i)
2789{
2790	vm_offset_t va;
2791
2792	va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
2793	pmap_kenter(va, pa);
2794	return ((void *)crashdumpmap);
2795}
2796
2797/*
 * Add a wired page to the kva.
 * Note that in order for the mapping to take effect -- you
 * should do an invltlb after doing the pmap_kenter...
2801 */
2802static PMAP_INLINE void
2803pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags)
2804{
2805	struct l2_bucket *l2b;
2806	pt_entry_t *pte;
2807	pt_entry_t opte;
2808	struct pv_entry *pve;
2809	vm_page_t m;
2810
2811	PDEBUG(1, printf("pmap_kenter: va = %08x, pa = %08x\n",
2812	    (uint32_t) va, (uint32_t) pa));
2813
2814
2815	l2b = pmap_get_l2_bucket(pmap_kernel(), va);
2816	if (l2b == NULL)
2817		l2b = pmap_grow_l2_bucket(pmap_kernel(), va);
2818	KASSERT(l2b != NULL, ("No L2 Bucket"));
2819	pte = &l2b->l2b_kva[l2pte_index(va)];
2820	opte = *pte;
2821	PDEBUG(1, printf("pmap_kenter: pte = %08x, opte = %08x, npte = %08x\n",
2822	    (uint32_t) pte, opte, *pte));
2823	if (l2pte_valid(opte)) {
2824		pmap_kremove(va);
2825	} else {
2826		if (opte == 0)
2827			l2b->l2b_occupancy++;
2828	}
2829	*pte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL,
2830	    VM_PROT_READ | VM_PROT_WRITE);
2831	if (flags & KENTER_CACHE)
2832		*pte |= pte_l2_s_cache_mode;
2833	if (flags & KENTER_USER)
2834		*pte |= L2_S_PROT_U;
2835	PTE_SYNC(pte);
2836
	/*
	 * Kernel direct mappings can be shared, so use a pv_entry to
	 * ensure proper caching.
	 *
	 * The pvzone is used to delay the recording of kernel mappings
	 * until the VM is running.
	 *
	 * This expects the physical memory to have a vm_page_array entry.
	 */
2845	if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa))) {
2846		vm_page_lock_queues();
2847		if (!TAILQ_EMPTY(&m->md.pv_list) || m->md.pv_kva) {
2848			/* release vm_page lock for pv_entry UMA */
2849			vm_page_unlock_queues();
2850			if ((pve = pmap_get_pv_entry()) == NULL)
2851				panic("pmap_kenter_internal: no pv entries");
2852			vm_page_lock_queues();
2853			PMAP_LOCK(pmap_kernel());
2854			pmap_enter_pv(m, pve, pmap_kernel(), va,
2855			    PVF_WRITE | PVF_UNMAN);
2856			pmap_fix_cache(m, pmap_kernel(), va);
2857			PMAP_UNLOCK(pmap_kernel());
2858		} else {
2859			m->md.pv_kva = va;
2860		}
2861		vm_page_unlock_queues();
2862	}
2863}
2864
2865void
2866pmap_kenter(vm_offset_t va, vm_paddr_t pa)
2867{
2868	pmap_kenter_internal(va, pa, KENTER_CACHE);
2869}
2870
2871void
2872pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa)
2873{
2874
2875	pmap_kenter_internal(va, pa, 0);
2876}
2877
2878void
2879pmap_kenter_user(vm_offset_t va, vm_paddr_t pa)
2880{
2881
2882	pmap_kenter_internal(va, pa, KENTER_CACHE|KENTER_USER);
2883	/*
2884	 * Call pmap_fault_fixup now, to make sure we'll have no exception
2885	 * at the first use of the new address, or bad things will happen,
2886	 * as we use one of these addresses in the exception handlers.
2887	 */
2888	pmap_fault_fixup(pmap_kernel(), va, VM_PROT_READ|VM_PROT_WRITE, 1);
2889}
2890
2891/*
2892 * remove a page from the kernel pagetables
2893 */
2894void
2895pmap_kremove(vm_offset_t va)
2896{
2897	struct l2_bucket *l2b;
2898	pt_entry_t *pte, opte;
2899	struct pv_entry *pve;
2900	vm_page_t m;
2901	vm_offset_t pa;
2902
2903	l2b = pmap_get_l2_bucket(pmap_kernel(), va);
2904	if (!l2b)
2905		return;
2906	KASSERT(l2b != NULL, ("No L2 Bucket"));
2907	pte = &l2b->l2b_kva[l2pte_index(va)];
2908	opte = *pte;
2909	if (l2pte_valid(opte)) {
		/* pa = vtophys(va), taken from pmap_extract() */
2911		switch (opte & L2_TYPE_MASK) {
2912		case L2_TYPE_L:
2913			pa = (opte & L2_L_FRAME) | (va & L2_L_OFFSET);
2914			break;
2915		default:
2916			pa = (opte & L2_S_FRAME) | (va & L2_S_OFFSET);
2917			break;
2918		}
		/*
		 * Note: we should never have to remove an allocation
		 * before the pvzone is initialized.
		 */
2922		vm_page_lock_queues();
2923		PMAP_LOCK(pmap_kernel());
2924		if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa)) &&
2925		    (pve = pmap_remove_pv(m, pmap_kernel(), va)))
2926			pmap_free_pv_entry(pve);
2927		PMAP_UNLOCK(pmap_kernel());
2928		vm_page_unlock_queues();
2929		va = va & ~PAGE_MASK;
2930		cpu_dcache_wbinv_range(va, PAGE_SIZE);
2931		cpu_l2cache_wbinv_range(va, PAGE_SIZE);
2932		cpu_tlb_flushD_SE(va);
2933		cpu_cpwait();
2934		*pte = 0;
2935	}
2936}
2937
2938
2939/*
2940 *	Used to map a range of physical addresses into kernel
2941 *	virtual address space.
2942 *
2943 *	The value passed in '*virt' is a suggested virtual address for
2944 *	the mapping. Architectures which can support a direct-mapped
2945 *	physical to virtual region can return the appropriate address
2946 *	within that region, leaving '*virt' unchanged. Other
2947 *	architectures should map the pages starting at '*virt' and
2948 *	update '*virt' with the first usable address after the mapped
2949 *	region.
2950 */
2951vm_offset_t
2952pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
2953{
2954#ifdef ARM_USE_SMALL_ALLOC
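	/*
	 * With the small-alloc direct map, this physical range is already
	 * mapped; just translate the start address.
	 */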
2955	return (arm_ptovirt(start));
2956#else
2957	vm_offset_t sva = *virt;
2958	vm_offset_t va = sva;
2959
2960	PDEBUG(1, printf("pmap_map: virt = %08x, start = %08x, end = %08x, "
2961	    "prot = %d\n", (uint32_t) *virt, (uint32_t) start, (uint32_t) end,
2962	    prot));
2963
2964	while (start < end) {
2965		pmap_kenter(va, start);
2966		va += PAGE_SIZE;
2967		start += PAGE_SIZE;
2968	}
2969	*virt = va;
2970	return (sva);
2971#endif
2972}
2973
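/*
 * Write back cached data for the existing mappings of the given page.
 */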
2974static void
2975pmap_wb_page(vm_page_t m)
2976{
2977	struct pv_entry *pv;
2978
2979	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
2980	    pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE, FALSE,
2981		(pv->pv_flags & PVF_WRITE) == 0);
2982}
2983
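/*
 * Invalidate cached data for the existing mappings of the given page.
 */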
2984static void
2985pmap_inv_page(vm_page_t m)
2986{
2987	struct pv_entry *pv;
2988
2989	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
2990	    pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE, TRUE, TRUE);
2991}

/*
 * Add a list of wired pages to the kva.  This routine is only used for
 * temporary kernel mappings that do not need to have page modification
 * or references recorded.  Note that old mappings are simply written
 * over.  The page *must* be wired.
 */
3000void
3001pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
3002{
3003	int i;
3004
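		/*
		 * Write back any dirty cache lines for this page's existing
		 * mappings so the new kernel alias sees up-to-date data.
		 */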
3005	for (i = 0; i < count; i++) {
3006		pmap_wb_page(m[i]);
3007		pmap_kenter_internal(va, VM_PAGE_TO_PHYS(m[i]),
3008		    KENTER_CACHE);
3009		va += PAGE_SIZE;
3010	}
3011}
3012
3013
3014/*
3015 * this routine jerks page mappings from the
3016 * kernel -- it is meant only for temporary mappings.
3017 */
3018void
3019pmap_qremove(vm_offset_t va, int count)
3020{
3021	vm_paddr_t pa;
3022	int i;
3023
3024	for (i = 0; i < count; i++) {
3025		pa = vtophys(va);
3026		if (pa) {
3027			pmap_inv_page(PHYS_TO_VM_PAGE(pa));
3028			pmap_kremove(va);
3029		}
3030		va += PAGE_SIZE;
3031	}
3032}
3033
3034
3035/*
3036 * pmap_object_init_pt preloads the ptes for a given object
3037 * into the specified pmap.  This eliminates the blast of soft
3038 * faults on process startup and immediately after an mmap.
3039 */
3040void
3041pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
3042    vm_pindex_t pindex, vm_size_t size)
3043{
3044
3045	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
3046	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
3047	    ("pmap_object_init_pt: non-device object"));
3048}
3049
3050
3051/*
3052 *	pmap_is_prefaultable:
3053 *
 *	Return whether or not the specified virtual address is eligible
3055 *	for prefault.
3056 */
3057boolean_t
3058pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
3059{
3060	pd_entry_t *pde;
3061	pt_entry_t *pte;
3062
3063	if (!pmap_get_pde_pte(pmap, addr, &pde, &pte))
3064		return (FALSE);
3065	KASSERT(pte != NULL, ("Valid mapping but no pte ?"));
3066	if (*pte == 0)
3067		return (TRUE);
3068	return (FALSE);
3069}
3070
3071/*
3072 * Fetch pointers to the PDE/PTE for the given pmap/VA pair.
3073 * Returns TRUE if the mapping exists, else FALSE.
3074 *
3075 * NOTE: This function is only used by a couple of arm-specific modules.
3076 * It is not safe to take any pmap locks here, since we could be right
3077 * in the middle of debugging the pmap anyway...
3078 *
3079 * It is possible for this routine to return FALSE even though a valid
3080 * mapping does exist. This is because we don't lock, so the metadata
3081 * state may be inconsistent.
3082 *
3083 * NOTE: We can return a NULL *ptp in the case where the L1 pde is
3084 * a "section" mapping.
3085 */
3086boolean_t
3087pmap_get_pde_pte(pmap_t pm, vm_offset_t va, pd_entry_t **pdp, pt_entry_t **ptp)
3088{
3089	struct l2_dtable *l2;
3090	pd_entry_t *pl1pd, l1pd;
3091	pt_entry_t *ptep;
3092	u_short l1idx;
3093
3094	if (pm->pm_l1 == NULL)
3095		return (FALSE);
3096
3097	l1idx = L1_IDX(va);
3098	*pdp = pl1pd = &pm->pm_l1->l1_kva[l1idx];
3099	l1pd = *pl1pd;
3100
3101	if (l1pte_section_p(l1pd)) {
3102		*ptp = NULL;
3103		return (TRUE);
3104	}
3105
3106	if (pm->pm_l2 == NULL)
3107		return (FALSE);
3108
3109	l2 = pm->pm_l2[L2_IDX(l1idx)];
3110
3111	if (l2 == NULL ||
3112	    (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
3113		return (FALSE);
3114	}
3115
3116	*ptp = &ptep[l2pte_index(va)];
3117	return (TRUE);
3118}
3119
3120/*
3121 *      Routine:        pmap_remove_all
3122 *      Function:
3123 *              Removes this physical page from
3124 *              all physical maps in which it resides.
3125 *              Reflects back modify bits to the pager.
3126 *
3127 *      Notes:
3128 *              Original versions of this routine were very
3129 *              inefficient because they iteratively called
3130 *              pmap_remove (slow...)
3131 */
3132void
3133pmap_remove_all(vm_page_t m)
3134{
3135	pv_entry_t pv;
3136	pt_entry_t *ptep;
3137	struct l2_bucket *l2b;
3138	boolean_t flush = FALSE;
3139	pmap_t curpm;
3140	int flags = 0;
3141
3142	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3143	    ("pmap_remove_all: page %p is not managed", m));
3144	if (TAILQ_EMPTY(&m->md.pv_list))
3145		return;
3146	vm_page_lock_queues();
3147	pmap_remove_write(m);
3148	curpm = vmspace_pmap(curproc->p_vmspace);
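	/*
	 * A TLB flush is only worthwhile if the page was mapped in the
	 * current pmap or the kernel pmap; note whether one is needed.
	 */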
3149	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
3150		if (flush == FALSE && (pv->pv_pmap == curpm ||
3151		    pv->pv_pmap == pmap_kernel()))
3152			flush = TRUE;
3153
3154		PMAP_LOCK(pv->pv_pmap);
3155		/*
3156		 * Cached contents were written-back in pmap_remove_write(),
3157		 * but we still have to invalidate the cache entry to make
3158		 * sure stale data are not retrieved when another page will be
3159		 * mapped under this virtual address.
3160		 */
3161		if (pmap_is_current(pv->pv_pmap)) {
3162			cpu_dcache_inv_range(pv->pv_va, PAGE_SIZE);
3163			if (pmap_has_valid_mapping(pv->pv_pmap, pv->pv_va))
3164				cpu_l2cache_inv_range(pv->pv_va, PAGE_SIZE);
3165		}
3166
3167		if (pv->pv_flags & PVF_UNMAN) {
3168			/* remove the pv entry, but do not remove the mapping
3169			 * and remember this is a kernel mapped page
3170			 */
3171			m->md.pv_kva = pv->pv_va;
3172		} else {
3173			/* remove the mapping and pv entry */
3174			l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
3175			KASSERT(l2b != NULL, ("No l2 bucket"));
3176			ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
3177			*ptep = 0;
3178			PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
3179			pmap_free_l2_bucket(pv->pv_pmap, l2b, 1);
3180			pv->pv_pmap->pm_stats.resident_count--;
3181			flags |= pv->pv_flags;
3182		}
3183		pmap_nuke_pv(m, pv->pv_pmap, pv);
3184		PMAP_UNLOCK(pv->pv_pmap);
3185		pmap_free_pv_entry(pv);
3186	}
3187
3188	if (flush) {
3189		if (PV_BEEN_EXECD(flags))
3190			pmap_tlb_flushID(curpm);
3191		else
3192			pmap_tlb_flushD(curpm);
3193	}
3194	vm_page_aflag_clear(m, PGA_WRITEABLE);
3195	vm_page_unlock_queues();
3196}
3197
3198
3199/*
3200 *	Set the physical protection on the
3201 *	specified range of this map as requested.
3202 */
3203void
3204pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
3205{
3206	struct l2_bucket *l2b;
3207	pt_entry_t *ptep, pte;
3208	vm_offset_t next_bucket;
3209	u_int flags;
3210	int flush;
3211
3212	CTR4(KTR_PMAP, "pmap_protect: pmap %p sva 0x%08x eva 0x%08x prot %x",
3213	    pm, sva, eva, prot);
3214
3215	if ((prot & VM_PROT_READ) == 0) {
3216		pmap_remove(pm, sva, eva);
3217		return;
3218	}
3219
3220	if (prot & VM_PROT_WRITE) {
3221		/*
3222		 * If this is a read->write transition, just ignore it and let
3223		 * vm_fault() take care of it later.
3224		 */
3225		return;
3226	}
3227
3228	vm_page_lock_queues();
3229	PMAP_LOCK(pm);
3230
3231	/*
	 * OK, at this point, we know we're doing a write-protect operation.
	 * If the pmap is active, write back the range.
3234	 */
3235	pmap_dcache_wb_range(pm, sva, eva - sva, FALSE, FALSE);
3236
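	/*
	 * For ranges of at least four pages, count the modified PTEs and
	 * do a single TLB flush at the end; for smaller ranges (flush ==
	 * -1), flush each modified entry individually as we go.
	 */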
3237	flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 0 : -1;
3238	flags = 0;
3239
3240	while (sva < eva) {
3241		next_bucket = L2_NEXT_BUCKET(sva);
3242		if (next_bucket > eva)
3243			next_bucket = eva;
3244
3245		l2b = pmap_get_l2_bucket(pm, sva);
3246		if (l2b == NULL) {
3247			sva = next_bucket;
3248			continue;
3249		}
3250
3251		ptep = &l2b->l2b_kva[l2pte_index(sva)];
3252
3253		while (sva < next_bucket) {
3254			if ((pte = *ptep) != 0 && (pte & L2_S_PROT_W) != 0) {
3255				struct vm_page *pg;
3256				u_int f;
3257
3258				pg = PHYS_TO_VM_PAGE(l2pte_pa(pte));
3259				pte &= ~L2_S_PROT_W;
3260				*ptep = pte;
3261				PTE_SYNC(ptep);
3262
3263				if (pg != NULL) {
3264					if (!(pg->oflags & VPO_UNMANAGED)) {
3265						f = pmap_modify_pv(pg, pm, sva,
3266						    PVF_WRITE, 0);
3267						vm_page_dirty(pg);
3268					} else
3269						f = 0;
3270				} else
3271					f = PVF_REF | PVF_EXEC;
3272
3273				if (flush >= 0) {
3274					flush++;
3275					flags |= f;
3276				} else
3277				if (PV_BEEN_EXECD(f))
3278					pmap_tlb_flushID_SE(pm, sva);
3279				else
3280				if (PV_BEEN_REFD(f))
3281					pmap_tlb_flushD_SE(pm, sva);
3282			}
3283
3284			sva += PAGE_SIZE;
3285			ptep++;
3286		}
3287	}
3288
3289
3290	if (flush) {
3291		if (PV_BEEN_EXECD(flags))
3292			pmap_tlb_flushID(pm);
3293		else
3294		if (PV_BEEN_REFD(flags))
3295			pmap_tlb_flushD(pm);
3296	}
3297	vm_page_unlock_queues();
3298
3299 	PMAP_UNLOCK(pm);
3300}
3301
3302
3303/*
3304 *	Insert the given physical page (p) at
3305 *	the specified virtual address (v) in the
3306 *	target physical map with the protection requested.
3307 *
3308 *	If specified, the page will be wired down, meaning
3309 *	that the related pte can not be reclaimed.
3310 *
3311 *	NB:  This is the only routine which MAY NOT lazy-evaluate
3312 *	or lose information.  That is, this routine must actually
3313 *	insert this page into the given map NOW.
3314 */
3315
3316void
3317pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
3318    vm_prot_t prot, boolean_t wired)
3319{
3320
3321	vm_page_lock_queues();
3322	PMAP_LOCK(pmap);
3323	pmap_enter_locked(pmap, va, m, prot, wired, M_WAITOK);
3324	vm_page_unlock_queues();
3325 	PMAP_UNLOCK(pmap);
3326}
3327
3328/*
3329 *	The page queues and pmap must be locked.
3330 */
3331static void
3332pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
3333    boolean_t wired, int flags)
3334{
3335	struct l2_bucket *l2b = NULL;
3336	struct vm_page *opg;
3337	struct pv_entry *pve = NULL;
3338	pt_entry_t *ptep, npte, opte;
3339	u_int nflags;
3340	u_int oflags;
3341	vm_paddr_t pa;
3342
3343	PMAP_ASSERT_LOCKED(pmap);
3344	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
3345	if (va == vector_page) {
3346		pa = systempage.pv_pa;
3347		m = NULL;
3348	} else {
3349		KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
3350		    (flags & M_NOWAIT) != 0,
3351		    ("pmap_enter_locked: page %p is not busy", m));
3352		pa = VM_PAGE_TO_PHYS(m);
3353	}
3354	nflags = 0;
3355	if (prot & VM_PROT_WRITE)
3356		nflags |= PVF_WRITE;
3357	if (prot & VM_PROT_EXECUTE)
3358		nflags |= PVF_EXEC;
3359	if (wired)
3360		nflags |= PVF_WIRED;
3361	PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, prot = %x, "
3362	    "wired = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, wired));
3363
3364	if (pmap == pmap_kernel()) {
3365		l2b = pmap_get_l2_bucket(pmap, va);
3366		if (l2b == NULL)
3367			l2b = pmap_grow_l2_bucket(pmap, va);
3368	} else {
3369do_l2b_alloc:
3370		l2b = pmap_alloc_l2_bucket(pmap, va);
3371		if (l2b == NULL) {
3372			if (flags & M_WAITOK) {
3373				PMAP_UNLOCK(pmap);
3374				vm_page_unlock_queues();
3375				VM_WAIT;
3376				vm_page_lock_queues();
3377				PMAP_LOCK(pmap);
3378				goto do_l2b_alloc;
3379			}
3380			return;
3381		}
3382	}
3383
3384	ptep = &l2b->l2b_kva[l2pte_index(va)];
3385
3386	opte = *ptep;
3387	npte = pa;
3388	oflags = 0;
3389	if (opte) {
3390		/*
3391		 * There is already a mapping at this address.
3392		 * If the physical address is different, lookup the
3393		 * vm_page.
3394		 */
3395		if (l2pte_pa(opte) != pa)
3396			opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
3397		else
3398			opg = m;
3399	} else
3400		opg = NULL;
3401
3402	if ((prot & (VM_PROT_ALL)) ||
3403	    (!m || m->md.pvh_attrs & PVF_REF)) {
3404		/*
3405		 * - The access type indicates that we don't need
3406		 *   to do referenced emulation.
3407		 * OR
3408		 * - The physical page has already been referenced
3409		 *   so no need to re-do referenced emulation here.
3410		 */
3411		npte |= L2_S_PROTO;
3412
3413		nflags |= PVF_REF;
3414
3415		if (m && ((prot & VM_PROT_WRITE) != 0 ||
3416		    (m->md.pvh_attrs & PVF_MOD))) {
3417			/*
3418			 * This is a writable mapping, and the
3419			 * page's mod state indicates it has
3420			 * already been modified. Make it
3421			 * writable from the outset.
3422			 */
3423			nflags |= PVF_MOD;
3424			if (!(m->md.pvh_attrs & PVF_MOD))
3425				vm_page_dirty(m);
3426		}
3427		if (m && opte)
3428			vm_page_aflag_set(m, PGA_REFERENCED);
3429	} else {
3430		/*
3431		 * Need to do page referenced emulation.
3432		 */
3433		npte |= L2_TYPE_INV;
3434	}
3435
3436	if (prot & VM_PROT_WRITE) {
3437		npte |= L2_S_PROT_W;
3438		if (m != NULL &&
3439		    (m->oflags & VPO_UNMANAGED) == 0)
3440			vm_page_aflag_set(m, PGA_WRITEABLE);
3441	}
	/* m is NULL only for the vector page, which stays cacheable. */
	if (m == NULL || !(m->md.pv_memattr & VM_MEMATTR_UNCACHEABLE))
3443		npte |= pte_l2_s_cache_mode;
3444	if (m && m == opg) {
3445		/*
3446		 * We're changing the attrs of an existing mapping.
3447		 */
3448		oflags = pmap_modify_pv(m, pmap, va,
3449		    PVF_WRITE | PVF_EXEC | PVF_WIRED |
3450		    PVF_MOD | PVF_REF, nflags);
3451
3452		/*
3453		 * We may need to flush the cache if we're
3454		 * doing rw-ro...
3455		 */
3456		if (pmap_is_current(pmap) &&
3457		    (oflags & PVF_NC) == 0 &&
3458		    (opte & L2_S_PROT_W) != 0 &&
3459		    (prot & VM_PROT_WRITE) == 0 &&
3460		    (opte & L2_TYPE_MASK) != L2_TYPE_INV) {
3461			cpu_dcache_wb_range(va, PAGE_SIZE);
3462			cpu_l2cache_wb_range(va, PAGE_SIZE);
3463		}
3464	} else {
3465		/*
3466		 * New mapping, or changing the backing page
3467		 * of an existing mapping.
3468		 */
3469		if (opg) {
3470			/*
3471			 * Replacing an existing mapping with a new one.
3472			 * It is part of our managed memory so we
3473			 * must remove it from the PV list
3474			 */
3475			if ((pve = pmap_remove_pv(opg, pmap, va))) {
3476
			/*
			 * Note: the oflags read and cache invalidation are
			 * done up front because unmanaged (PG_FICTITIOUS)
			 * pages could free the pve below.
			 */
3480			    oflags = pve->pv_flags;
3481			/*
3482			 * If the old mapping was valid (ref/mod
3483			 * emulation creates 'invalid' mappings
3484			 * initially) then make sure to frob
3485			 * the cache.
3486			 */
3487			    if ((oflags & PVF_NC) == 0 && l2pte_valid(opte)) {
3488				if (PV_BEEN_EXECD(oflags)) {
3489					pmap_idcache_wbinv_range(pmap, va,
3490					    PAGE_SIZE);
3491				} else
3492					if (PV_BEEN_REFD(oflags)) {
3493						pmap_dcache_wb_range(pmap, va,
3494						    PAGE_SIZE, TRUE,
3495						    (oflags & PVF_WRITE) == 0);
3496					}
3497			    }
3498
			/*
			 * Free or allocate a pv_entry for UNMANAGED pages,
			 * depending on whether this physical page is already
			 * mapped elsewhere.
			 */
3502
3503			    if (m && (m->oflags & VPO_UNMANAGED) &&
3504				  !m->md.pv_kva &&
3505				 TAILQ_EMPTY(&m->md.pv_list)) {
3506				pmap_free_pv_entry(pve);
3507				pve = NULL;
3508			    }
3509			} else if (m &&
3510				 (!(m->oflags & VPO_UNMANAGED) || m->md.pv_kva ||
3511				  !TAILQ_EMPTY(&m->md.pv_list)))
3512				pve = pmap_get_pv_entry();
3513		} else if (m &&
3514			   (!(m->oflags & VPO_UNMANAGED) || m->md.pv_kva ||
3515			   !TAILQ_EMPTY(&m->md.pv_list)))
3516			pve = pmap_get_pv_entry();
3517
3518		if (m) {
3519			if ((m->oflags & VPO_UNMANAGED)) {
3520				if (!TAILQ_EMPTY(&m->md.pv_list) ||
3521				    m->md.pv_kva) {
3522					KASSERT(pve != NULL, ("No pv"));
3523					nflags |= PVF_UNMAN;
3524					pmap_enter_pv(m, pve, pmap, va, nflags);
3525				} else
3526					m->md.pv_kva = va;
3527			} else {
3528				KASSERT(va < kmi.clean_sva ||
3529				    va >= kmi.clean_eva,
3530		("pmap_enter: managed mapping within the clean submap"));
3531 				KASSERT(pve != NULL, ("No pv"));
3532 				pmap_enter_pv(m, pve, pmap, va, nflags);
3533			}
3534		}
3535	}
3536	/*
3537	 * Make sure userland mappings get the right permissions
3538	 */
3539	if (pmap != pmap_kernel() && va != vector_page) {
3540		npte |= L2_S_PROT_U;
3541	}
3542
3543	/*
3544	 * Keep the stats up to date
3545	 */
3546	if (opte == 0) {
3547		l2b->l2b_occupancy++;
3548		pmap->pm_stats.resident_count++;
3549	}
3550
3551	/*
3552	 * If this is just a wiring change, the two PTEs will be
3553	 * identical, so there's no need to update the page table.
3554	 */
3555	if (npte != opte) {
3556		boolean_t is_cached = pmap_is_current(pmap);
3557
3558		*ptep = npte;
3559		if (is_cached) {
3560			/*
3561			 * We only need to frob the cache/tlb if this pmap
3562			 * is current
3563			 */
3564			PTE_SYNC(ptep);
3565			if (L1_IDX(va) != L1_IDX(vector_page) &&
3566			    l2pte_valid(npte)) {
3567				/*
3568				 * This mapping is likely to be accessed as
3569				 * soon as we return to userland. Fix up the
3570				 * L1 entry to avoid taking another
3571				 * page/domain fault.
3572				 */
3573				pd_entry_t *pl1pd, l1pd;
3574
3575				pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
3576				l1pd = l2b->l2b_phys | L1_C_DOM(pmap->pm_domain) |
3577				    L1_C_PROTO;
3578				if (*pl1pd != l1pd) {
3579					*pl1pd = l1pd;
3580					PTE_SYNC(pl1pd);
3581				}
3582			}
3583		}
3584
3585		if (PV_BEEN_EXECD(oflags))
3586			pmap_tlb_flushID_SE(pmap, va);
3587		else if (PV_BEEN_REFD(oflags))
3588			pmap_tlb_flushD_SE(pmap, va);
3589
3590
3591		if (m)
3592			pmap_fix_cache(m, pmap, va);
3593	}
3594}
3595
3596/*
3597 * Maps a sequence of resident pages belonging to the same object.
3598 * The sequence begins with the given page m_start.  This page is
3599 * mapped at the given virtual address start.  Each subsequent page is
3600 * mapped at a virtual address that is offset from start by the same
3601 * amount as the page is offset from m_start within the object.  The
3602 * last page in the sequence is the page with the largest offset from
3603 * m_start that can be mapped at a virtual address less than the given
3604 * virtual address end.  Not every virtual page between start and end
3605 * is mapped; only those for which a resident page exists with the
3606 * corresponding offset from m_start are mapped.
3607 */
3608void
3609pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
3610    vm_page_t m_start, vm_prot_t prot)
3611{
3612	vm_page_t m;
3613	vm_pindex_t diff, psize;
3614
3615	psize = atop(end - start);
3616	m = m_start;
3617	vm_page_lock_queues();
3618	PMAP_LOCK(pmap);
3619	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
3620		pmap_enter_locked(pmap, start + ptoa(diff), m, prot &
3621		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE, M_NOWAIT);
3622		m = TAILQ_NEXT(m, listq);
3623	}
3624	vm_page_unlock_queues();
3625 	PMAP_UNLOCK(pmap);
3626}
3627
3628/*
3629 * this code makes some *MAJOR* assumptions:
3630 * 1. Current pmap & pmap exists.
3631 * 2. Not wired.
3632 * 3. Read access.
3633 * 4. No page table pages.
3634 * but is *MUCH* faster than pmap_enter...
3635 */
3636
3637void
3638pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
3639{
3640
3641	vm_page_lock_queues();
3642 	PMAP_LOCK(pmap);
3643	pmap_enter_locked(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
3644	    FALSE, M_NOWAIT);
3645	vm_page_unlock_queues();
3646 	PMAP_UNLOCK(pmap);
3647}
3648
3649/*
3650 *	Routine:	pmap_change_wiring
3651 *	Function:	Change the wiring attribute for a map/virtual-address
3652 *			pair.
3653 *	In/out conditions:
3654 *			The mapping must already exist in the pmap.
3655 */
3656void
3657pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
3658{
3659	struct l2_bucket *l2b;
3660	pt_entry_t *ptep, pte;
3661	vm_page_t pg;
3662
3663	vm_page_lock_queues();
3664 	PMAP_LOCK(pmap);
3665	l2b = pmap_get_l2_bucket(pmap, va);
3666	KASSERT(l2b, ("No l2b bucket in pmap_change_wiring"));
3667	ptep = &l2b->l2b_kva[l2pte_index(va)];
3668	pte = *ptep;
3669	pg = PHYS_TO_VM_PAGE(l2pte_pa(pte));
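	/*
	 * pmap_modify_pv() adjusts the pmap's wired count when the
	 * PVF_WIRED bit actually changes.
	 */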
3670	if (pg)
3671		pmap_modify_pv(pg, pmap, va, PVF_WIRED, wired ? PVF_WIRED : 0);
3672	vm_page_unlock_queues();
3673 	PMAP_UNLOCK(pmap);
3674}
3675
3676
3677/*
3678 *	Copy the range specified by src_addr/len
3679 *	from the source map to the range dst_addr/len
3680 *	in the destination map.
3681 *
3682 *	This routine is only advisory and need not do anything.
3683 */
3684void
3685pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
3686    vm_size_t len, vm_offset_t src_addr)
3687{
3688}
3689
3690
3691/*
3692 *	Routine:	pmap_extract
3693 *	Function:
3694 *		Extract the physical page address associated
3695 *		with the given map/virtual_address pair.
3696 */
3697vm_paddr_t
3698pmap_extract(pmap_t pm, vm_offset_t va)
3699{
3700	struct l2_dtable *l2;
3701	pd_entry_t l1pd;
3702	pt_entry_t *ptep, pte;
3703	vm_paddr_t pa;
3704	u_int l1idx;
3705	l1idx = L1_IDX(va);
3706
3707	PMAP_LOCK(pm);
3708	l1pd = pm->pm_l1->l1_kva[l1idx];
3709	if (l1pte_section_p(l1pd)) {
3710		/*
		 * Section mappings should only happen for pmap_kernel().
		 */
		KASSERT(pm == pmap_kernel(),
		    ("pmap_extract: section mapping in user pmap"));
3714		/* XXX: what to do about the bits > 32 ? */
3715		if (l1pd & L1_S_SUPERSEC)
3716			pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET);
3717		else
3718			pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET);
3719	} else {
3720		/*
3721		 * Note that we can't rely on the validity of the L1
3722		 * descriptor as an indication that a mapping exists.
3723		 * We have to look it up in the L2 dtable.
3724		 */
3725		l2 = pm->pm_l2[L2_IDX(l1idx)];
3726
3727		if (l2 == NULL ||
3728		    (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
3729			PMAP_UNLOCK(pm);
3730			return (0);
3731		}
3732
3733		ptep = &ptep[l2pte_index(va)];
3734		pte = *ptep;
3735
3736		if (pte == 0) {
3737			PMAP_UNLOCK(pm);
3738			return (0);
3739		}
3740
3741		switch (pte & L2_TYPE_MASK) {
3742		case L2_TYPE_L:
3743			pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
3744			break;
3745
3746		default:
3747			pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
3748			break;
3749		}
3750	}
3751
3752	PMAP_UNLOCK(pm);
3753	return (pa);
3754}
3755
3756/*
3757 * Atomically extract and hold the physical page with the given
3758 * pmap and virtual address pair if that mapping permits the given
3759 * protection.
3760 *
3761 */
3762vm_page_t
3763pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
3764{
3765	struct l2_dtable *l2;
3766	pd_entry_t l1pd;
3767	pt_entry_t *ptep, pte;
3768	vm_paddr_t pa, paddr;
3769	vm_page_t m = NULL;
3770	u_int l1idx;
3771	l1idx = L1_IDX(va);
3772	paddr = 0;
3773
3774 	PMAP_LOCK(pmap);
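	/*
	 * vm_page_pa_tryrelock() may have to drop the pmap lock to take
	 * the page lock; if it does, the mapping must be re-examined,
	 * hence the retry label.
	 */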
3775retry:
3776	l1pd = pmap->pm_l1->l1_kva[l1idx];
3777	if (l1pte_section_p(l1pd)) {
3778		/*
		 * Section mappings should only happen for pmap_kernel().
		 */
		KASSERT(pmap == pmap_kernel(),
		    ("pmap_extract_and_hold: section mapping in user pmap"));
3782		/* XXX: what to do about the bits > 32 ? */
3783		if (l1pd & L1_S_SUPERSEC)
3784			pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET);
3785		else
3786			pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET);
3787		if (vm_page_pa_tryrelock(pmap, pa & PG_FRAME, &paddr))
3788			goto retry;
3789		if (l1pd & L1_S_PROT_W || (prot & VM_PROT_WRITE) == 0) {
3790			m = PHYS_TO_VM_PAGE(pa);
3791			vm_page_hold(m);
3792		}
3793
3794	} else {
3795		/*
3796		 * Note that we can't rely on the validity of the L1
3797		 * descriptor as an indication that a mapping exists.
3798		 * We have to look it up in the L2 dtable.
3799		 */
3800		l2 = pmap->pm_l2[L2_IDX(l1idx)];
3801
3802		if (l2 == NULL ||
3803		    (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
3804			PMAP_UNLOCK(pmap);
3805			return (NULL);
3806		}
3807
3808		ptep = &ptep[l2pte_index(va)];
3809		pte = *ptep;
3810
3811		if (pte == 0) {
3812			PMAP_UNLOCK(pmap);
3813			return (NULL);
3814		}
3815		if (pte & L2_S_PROT_W || (prot & VM_PROT_WRITE) == 0) {
3816			switch (pte & L2_TYPE_MASK) {
3817			case L2_TYPE_L:
3818				pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
3819				break;
3820
3821			default:
3822				pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
3823				break;
3824			}
3825			if (vm_page_pa_tryrelock(pmap, pa & PG_FRAME, &paddr))
3826				goto retry;
3827			m = PHYS_TO_VM_PAGE(pa);
3828			vm_page_hold(m);
3829		}
3830	}
3831
3832	PMAP_UNLOCK(pmap);
3833	PA_UNLOCK_COND(paddr);
3834	return (m);
3835}
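
/*
 * Illustrative sketch (not original code): pmap_extract_and_hold() returns a
 * held page only when the mapping permits the requested access, so callers
 * must drop that hold when they are done with the page.  The helper name and
 * the use of vm_page_unhold() here are assumptions for illustration.
 */
#if 0
static void
example_inspect_mapped_page(pmap_t pm, vm_offset_t va)
{
	vm_page_t m;

	m = pmap_extract_and_hold(pm, va, VM_PROT_READ);
	if (m == NULL)
		return;		/* unmapped, or protection not permitted */
	/* ... the page cannot be freed while the hold is in place ... */
	vm_page_unhold(m);
}
#endif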
3836
3837/*
3838 * Initialize a preallocated and zeroed pmap structure,
3839 * such as one in a vmspace structure.
3840 */
3841
3842int
3843pmap_pinit(pmap_t pmap)
3844{
3845	PDEBUG(1, printf("pmap_pinit: pmap = %08x\n", (uint32_t) pmap));
3846
3847	PMAP_LOCK_INIT(pmap);
3848	pmap_alloc_l1(pmap);
3849	bzero(pmap->pm_l2, sizeof(pmap->pm_l2));
3850
3851	CPU_ZERO(&pmap->pm_active);
3852
3853	TAILQ_INIT(&pmap->pm_pvlist);
3854	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
3855	pmap->pm_stats.resident_count = 1;
3856	if (vector_page < KERNBASE) {
3857		pmap_enter(pmap, vector_page,
3858		    VM_PROT_READ, PHYS_TO_VM_PAGE(systempage.pv_pa),
3859		    VM_PROT_READ, 1);
3860	}
3861	return (1);
3862}
3863
3864
3865/***************************************************
3866 * page management routines.
3867 ***************************************************/
3868
3869
3870static void
3871pmap_free_pv_entry(pv_entry_t pv)
3872{
3873	pv_entry_count--;
3874	uma_zfree(pvzone, pv);
3875}
3876
3877
3878/*
3879 * Get a new pv_entry, allocating a block from the system
3880 * when needed.
3881 * The allocation bypasses the malloc code because allocations
3882 * may occur at interrupt time.
3883 */
3884static pv_entry_t
3885pmap_get_pv_entry(void)
3886{
3887	pv_entry_t ret_value;
3888
3889	pv_entry_count++;
3890	if (pv_entry_count > pv_entry_high_water)
3891		pagedaemon_wakeup();
3892	ret_value = uma_zalloc(pvzone, M_NOWAIT);
3893	return (ret_value);
3894}
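
/*
 * Illustrative sketch (not original code): because the PV zone is allocated
 * with M_NOWAIT, pmap_get_pv_entry() may return NULL, and every entry it
 * hands out must eventually be returned with pmap_free_pv_entry().  The
 * helper below is hypothetical.
 */
#if 0
static int
example_with_pv_entry(void)
{
	pv_entry_t pve;

	pve = pmap_get_pv_entry();
	if (pve == NULL)
		return (ENOMEM);	/* callers must tolerate failure */
	/* ... link pve into a page's PV list here ... */
	pmap_free_pv_entry(pve);
	return (0);
}
#endif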
3895
3896/*
3897 *	Remove the given range of addresses from the specified map.
3898 *
3899 *	It is assumed that the start and end are properly
3900 *	rounded to the page size.
3901 */
3902#define	PMAP_REMOVE_CLEAN_LIST_SIZE	3
3903void
3904pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
3905{
3906	struct l2_bucket *l2b;
3907	vm_offset_t next_bucket;
3908	pt_entry_t *ptep;
3909	u_int total;
3910	u_int mappings, is_exec, is_refd;
3911	int flushall = 0;
3912
3913
3914	/*
3915	 * we lock in the pmap => pv_head direction
3916	 */
3917
3918	vm_page_lock_queues();
3919	PMAP_LOCK(pm);
3920	total = 0;
3921	while (sva < eva) {
3922		/*
3923		 * Do one L2 bucket's worth at a time.
3924		 */
3925		next_bucket = L2_NEXT_BUCKET(sva);
3926		if (next_bucket > eva)
3927			next_bucket = eva;
3928
3929		l2b = pmap_get_l2_bucket(pm, sva);
3930		if (l2b == NULL) {
3931			sva = next_bucket;
3932			continue;
3933		}
3934
3935		ptep = &l2b->l2b_kva[l2pte_index(sva)];
3936		mappings = 0;
3937
3938		while (sva < next_bucket) {
3939			struct vm_page *pg;
3940			pt_entry_t pte;
3941			vm_paddr_t pa;
3942
3943			pte = *ptep;
3944
3945			if (pte == 0) {
3946				/*
3947				 * Nothing here, move along
3948				 */
3949				sva += PAGE_SIZE;
3950				ptep++;
3951				continue;
3952			}
3953
3954			pm->pm_stats.resident_count--;
3955			pa = l2pte_pa(pte);
3956			is_exec = 0;
3957			is_refd = 1;
3958
3959			/*
3960			 * Update flags. In a number of circumstances,
3961			 * we could cluster a lot of these and do a
3962			 * number of sequential pages in one go.
3963			 */
3964			if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
3965				struct pv_entry *pve;
3966
3967				pve = pmap_remove_pv(pg, pm, sva);
3968				if (pve) {
3969					is_exec = PV_BEEN_EXECD(pve->pv_flags);
3970					is_refd = PV_BEEN_REFD(pve->pv_flags);
3971					pmap_free_pv_entry(pve);
3972				}
3973			}
3974
3975			if (l2pte_valid(pte) && pmap_is_current(pm)) {
3976				if (total < PMAP_REMOVE_CLEAN_LIST_SIZE) {
3977					total++;
3978					if (is_exec) {
3979						cpu_idcache_wbinv_range(sva,
3980						    PAGE_SIZE);
3981						cpu_l2cache_wbinv_range(sva,
3982						    PAGE_SIZE);
3983						cpu_tlb_flushID_SE(sva);
3984					} else if (is_refd) {
3985						cpu_dcache_wbinv_range(sva,
3986						    PAGE_SIZE);
3987						cpu_l2cache_wbinv_range(sva,
3988						    PAGE_SIZE);
3989						cpu_tlb_flushD_SE(sva);
3990					}
3991				} else if (total == PMAP_REMOVE_CLEAN_LIST_SIZE) {
3992					/* flushall will only get set for
3993					 * a current pmap
3994					 */
3995					cpu_idcache_wbinv_all();
3996					cpu_l2cache_wbinv_all();
3997					flushall = 1;
3998					total++;
3999				}
4000			}
4001			*ptep = 0;
4002			PTE_SYNC(ptep);
4003
4004			sva += PAGE_SIZE;
4005			ptep++;
4006			mappings++;
4007		}
4008
4009		pmap_free_l2_bucket(pm, l2b, mappings);
4010	}
4011
4012	vm_page_unlock_queues();
4013	if (flushall)
4014		cpu_tlb_flushID();
4015	PMAP_UNLOCK(pm);
4016}
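
/*
 * Illustrative sketch (not original code): the clean-list heuristic above
 * cleans the cache page by page for the first PMAP_REMOVE_CLEAN_LIST_SIZE
 * live mappings of a current pmap, then falls back to one whole-cache clean
 * plus a single TLB flush at the end.  Callers simply pass a page-aligned
 * range; the helper name is hypothetical.
 */
#if 0
static void
example_unmap_range(pmap_t pm, vm_offset_t sva, vm_size_t len)
{

	/* sva and sva + len are assumed to be page aligned. */
	pmap_remove(pm, sva, sva + len);
}
#endif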
4017
4018/*
4019 * pmap_zero_page()
4020 *
4021 * Zero a given physical page by mapping it at a page hook point.
4022 * In doing the zero page op, the page we zero is mapped cacheable, since on
4023 * StrongARM accesses to non-cached pages are non-burst, making writes of
4024 * _any_ bulk data very slow.
4025 */
4026#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 || defined(CPU_XSCALE_CORE3)
4027void
4028pmap_zero_page_generic(vm_paddr_t phys, int off, int size)
4029{
4030#ifdef ARM_USE_SMALL_ALLOC
4031	char *dstpg;
4032#endif
4033
4034#ifdef DEBUG
4035	struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
4036
4037	if (pg->md.pvh_list != NULL)
4038		panic("pmap_zero_page: page has mappings");
4039#endif
4040
4041	if (_arm_bzero && size >= _min_bzero_size &&
4042	    _arm_bzero((void *)(phys + off), size, IS_PHYSICAL) == 0)
4043		return;
4044
4045#ifdef ARM_USE_SMALL_ALLOC
4046	dstpg = (char *)arm_ptovirt(phys);
4047	if (off || size != PAGE_SIZE) {
4048		bzero(dstpg + off, size);
4049		cpu_dcache_wbinv_range((vm_offset_t)(dstpg + off), size);
4050		cpu_l2cache_wbinv_range((vm_offset_t)(dstpg + off), size);
4051	} else {
4052		bzero_page((vm_offset_t)dstpg);
4053		cpu_dcache_wbinv_range((vm_offset_t)dstpg, PAGE_SIZE);
4054		cpu_l2cache_wbinv_range((vm_offset_t)dstpg, PAGE_SIZE);
4055	}
4056#else
4057
4058	mtx_lock(&cmtx);
4059	/*
4060	 * Hook in the page, zero it, invalidate the TLB as needed.
4061	 *
4062	 * Note the temporary zero-page mapping must be a non-cached page in
4063	 * order to work without corruption when write-allocate is enabled.
4064	 */
4065	*cdst_pte = L2_S_PROTO | phys | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE);
4066	PTE_SYNC(cdst_pte);
4067	cpu_tlb_flushD_SE(cdstp);
4068	cpu_cpwait();
4069	if (off || size != PAGE_SIZE)
4070		bzero((void *)(cdstp + off), size);
4071	else
4072		bzero_page(cdstp);
4073
4074	mtx_unlock(&cmtx);
4075#endif
4076}
4077#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
4078
4079#if ARM_MMU_XSCALE == 1
4080void
4081pmap_zero_page_xscale(vm_paddr_t phys, int off, int size)
4082{
4083#ifdef ARM_USE_SMALL_ALLOC
4084	char *dstpg;
4085#endif
4086
4087	if (_arm_bzero && size >= _min_bzero_size &&
4088	    _arm_bzero((void *)(phys + off), size, IS_PHYSICAL) == 0)
4089		return;
4090#ifdef ARM_USE_SMALL_ALLOC
4091	dstpg = (char *)arm_ptovirt(phys);
4092	if (off || size != PAGE_SIZE) {
4093		bzero(dstpg + off, size);
4094		cpu_dcache_wbinv_range((vm_offset_t)(dstpg + off), size);
4095	} else {
4096		bzero_page((vm_offset_t)dstpg);
4097		cpu_dcache_wbinv_range((vm_offset_t)dstpg, PAGE_SIZE);
4098	}
4099#else
4100	mtx_lock(&cmtx);
4101	/*
4102	 * Hook in the page, zero it, and purge the cache for that
4103	 * zeroed page. Invalidate the TLB as needed.
4104	 */
4105	*cdst_pte = L2_S_PROTO | phys |
4106	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
4107	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
4108	PTE_SYNC(cdst_pte);
4109	cpu_tlb_flushD_SE(cdstp);
4110	cpu_cpwait();
4111	if (off || size != PAGE_SIZE)
4112		bzero((void *)(cdstp + off), size);
4113	else
4114		bzero_page(cdstp);
4115	mtx_unlock(&cmtx);
4116	xscale_cache_clean_minidata();
4117#endif
4118}
4119
4120/*
4121 * Change the PTEs for the specified kernel mappings such that they
4122 * will use the mini data cache instead of the main data cache.
4123 */
4124void
4125pmap_use_minicache(vm_offset_t va, vm_size_t size)
4126{
4127	struct l2_bucket *l2b;
4128	pt_entry_t *ptep, *sptep, pte;
4129	vm_offset_t next_bucket, eva;
4130
4131#if (ARM_NMMUS > 1) || defined(CPU_XSCALE_CORE3)
4132	if (xscale_use_minidata == 0)
4133		return;
4134#endif
4135
4136	eva = va + size;
4137
4138	while (va < eva) {
4139		next_bucket = L2_NEXT_BUCKET(va);
4140		if (next_bucket > eva)
4141			next_bucket = eva;
4142
4143		l2b = pmap_get_l2_bucket(pmap_kernel(), va);
4144
4145		sptep = ptep = &l2b->l2b_kva[l2pte_index(va)];
4146
4147		while (va < next_bucket) {
4148			pte = *ptep;
4149			if (!l2pte_minidata(pte)) {
4150				cpu_dcache_wbinv_range(va, PAGE_SIZE);
4151				cpu_tlb_flushD_SE(va);
4152				*ptep = pte & ~L2_B;
4153			}
4154			ptep++;
4155			va += PAGE_SIZE;
4156		}
4157		PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep));
4158	}
4159	cpu_cpwait();
4160}
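
/*
 * Illustrative sketch (not original code): a driver that owns an
 * already-mapped, page-aligned kernel buffer can route it through the XScale
 * mini-data cache by calling pmap_use_minicache() on that range.  The helper
 * and its arguments are hypothetical.
 */
#if 0
static void
example_route_buffer_to_minicache(vm_offset_t buf_va, vm_size_t buf_size)
{

	/* buf_va/buf_size are assumed page aligned and already mapped. */
	pmap_use_minicache(buf_va, buf_size);
}
#endif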
4161#endif /* ARM_MMU_XSCALE == 1 */
4162
4163/*
4164 *	pmap_zero_page zeros the specified hardware page by mapping
4165 *	the page into KVM and using bzero to clear its contents.
4166 */
4167void
4168pmap_zero_page(vm_page_t m)
4169{
4170	pmap_zero_page_func(VM_PAGE_TO_PHYS(m), 0, PAGE_SIZE);
4171}
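
/*
 * Illustrative sketch (not original code): pmap_zero_page() is typically fed
 * a page fresh from the VM page allocator when zeroed memory is needed.  The
 * allocation flags below are an assumption about the caller, not something
 * this pmap requires.
 */
#if 0
static vm_page_t
example_alloc_zeroed_page(void)
{
	vm_page_t m;

	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
	if (m != NULL)
		pmap_zero_page(m);
	return (m);
}
#endif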
4172
4173
4174/*
4175 *	pmap_zero_page_area zeros the specified hardware page by mapping
4176 *	the page into KVM and using bzero to clear its contents.
4177 *
4178 *	off and size may not cover an area beyond a single hardware page.
4179 */
4180void
4181pmap_zero_page_area(vm_page_t m, int off, int size)
4182{
4183
4184	pmap_zero_page_func(VM_PAGE_TO_PHYS(m), off, size);
4185}
4186
4187
4188/*
4189 *	pmap_zero_page_idle zeros the specified hardware page by mapping
4190 *	the page into KVM and using bzero to clear its contents.  This
4191 *	is intended to be called from the vm_pagezero process only and
4192 *	outside of Giant.
4193 */
4194void
4195pmap_zero_page_idle(vm_page_t m)
4196{
4197
4198	pmap_zero_page(m);
4199}
4200
4201#if 0
4202/*
4203 * pmap_clean_page()
4204 *
4205 * This is a local function used to work out the best strategy to clean
4206 * a single page referenced by its entry in the PV table. It should be used by
4207 * pmap_copy_page, pmap_zero_page and maybe some others later on.
4208 *
4209 * Its policy is effectively:
4210 *  o If there are no mappings, we don't bother doing anything with the cache.
4211 *  o If there is one mapping, we clean just that page.
4212 *  o If there are multiple mappings, we clean the entire cache.
4213 *
4214 * So that some functions can be further optimised, it returns 0 if it didn't
4215 * clean the entire cache, or 1 if it did.
4216 *
4217 * XXX One bug in this routine is that if the pv_entry has a single page
4218 * mapped at 0x00000000 a whole cache clean will be performed rather than
4219 * just the 1 page. This should not occur in everyday use, and if it does,
4220 * it merely results in a less efficient clean for the page.
4221 *
4222 * We don't yet use this function but may want to.
4223 */
4224static int
4225pmap_clean_page(struct pv_entry *pv, boolean_t is_src)
4226{
4227	pmap_t pm, pm_to_clean = NULL;
4228	struct pv_entry *npv;
4229	u_int cache_needs_cleaning = 0;
4230	u_int flags = 0;
4231	vm_offset_t page_to_clean = 0;
4232
4233	if (pv == NULL) {
4234		/* nothing mapped in so nothing to flush */
4235		return (0);
4236	}
4237
4238	/*
4239	 * Since we flush the cache each time we change to a different
4240	 * user vmspace, we only need to flush the page if it is in the
4241	 * current pmap.
4242	 */
4243	if (curthread)
4244		pm = vmspace_pmap(curproc->p_vmspace);
4245	else
4246		pm = pmap_kernel();
4247
4248	for (npv = pv; npv; npv = TAILQ_NEXT(npv, pv_list)) {
4249		if (npv->pv_pmap == pmap_kernel() || npv->pv_pmap == pm) {
4250			flags |= npv->pv_flags;
4251			/*
4252			 * The page is mapped non-cacheable in
4253			 * this map.  No need to flush the cache.
4254			 */
4255			if (npv->pv_flags & PVF_NC) {
4256#ifdef DIAGNOSTIC
4257				if (cache_needs_cleaning)
4258					panic("pmap_clean_page: "
4259					    "cache inconsistency");
4260#endif
4261				break;
4262			} else if (is_src && (npv->pv_flags & PVF_WRITE) == 0)
4263				continue;
4264			if (cache_needs_cleaning) {
4265				page_to_clean = 0;
4266				break;
4267			} else {
4268				page_to_clean = npv->pv_va;
4269				pm_to_clean = npv->pv_pmap;
4270			}
4271			cache_needs_cleaning = 1;
4272		}
4273	}
4274	if (page_to_clean) {
4275		if (PV_BEEN_EXECD(flags))
4276			pmap_idcache_wbinv_range(pm_to_clean, page_to_clean,
4277			    PAGE_SIZE);
4278		else
4279			pmap_dcache_wb_range(pm_to_clean, page_to_clean,
4280			    PAGE_SIZE, !is_src, (flags & PVF_WRITE) == 0);
4281	} else if (cache_needs_cleaning) {
4282		if (PV_BEEN_EXECD(flags))
4283			pmap_idcache_wbinv_all(pm);
4284		else
4285			pmap_dcache_wbinv_all(pm);
4286		return (1);
4287	}
4288	return (0);
4289}
4290#endif
4291
4292/*
4293 *	pmap_copy_page copies the specified (machine independent)
4294 *	page by mapping the page into virtual memory and using
4295 *	bcopy to copy the page, one machine dependent page at a
4296 *	time.
4297 */
4298
4299/*
4300 * pmap_copy_page()
4301 *
4302 * Copy one physical page into another, by mapping the pages into
4303 * hook points. The same comment regarding cacheability as in
4304 * pmap_zero_page also applies here.
4305 */
4306#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 || defined(CPU_XSCALE_CORE3)
4307void
4308pmap_copy_page_generic(vm_paddr_t src, vm_paddr_t dst)
4309{
4310#if 0
4311	struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
4312#endif
4313#ifdef DEBUG
4314	struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
4315
4316	if (dst_pg->md.pvh_list != NULL)
4317		panic("pmap_copy_page: dst page has mappings");
4318#endif
4319
4320
4321	/*
4322	 * Clean the source page.  Hold the source page's lock for
4323	 * the duration of the copy so that no other mappings can
4324	 * be created while we have a potentially aliased mapping.
4325	 */
4326#if 0
4327	/*
4328	 * XXX: Not needed while we call cpu_dcache_wbinv_all() in
4329	 * pmap_copy_page().
4330	 */
4331	(void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE);
4332#endif
4333	/*
4334	 * Map the pages into the page hook points, copy them, and purge
4335	 * the cache for the appropriate page. Invalidate the TLB
4336	 * as required.
4337	 */
4338	mtx_lock(&cmtx);
4339	*csrc_pte = L2_S_PROTO | src |
4340	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
4341	PTE_SYNC(csrc_pte);
4342	*cdst_pte = L2_S_PROTO | dst |
4343	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
4344	PTE_SYNC(cdst_pte);
4345	cpu_tlb_flushD_SE(csrcp);
4346	cpu_tlb_flushD_SE(cdstp);
4347	cpu_cpwait();
4348	bcopy_page(csrcp, cdstp);
4349	mtx_unlock(&cmtx);
4350	cpu_dcache_inv_range(csrcp, PAGE_SIZE);
4351	cpu_dcache_wbinv_range(cdstp, PAGE_SIZE);
4352	cpu_l2cache_inv_range(csrcp, PAGE_SIZE);
4353	cpu_l2cache_wbinv_range(cdstp, PAGE_SIZE);
4354}
4355
4356void
4357pmap_copy_page_offs_generic(vm_paddr_t a_phys, vm_offset_t a_offs,
4358    vm_paddr_t b_phys, vm_offset_t b_offs, int cnt)
4359{
4360
4361	mtx_lock(&cmtx);
4362	*csrc_pte = L2_S_PROTO | a_phys |
4363	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
4364	PTE_SYNC(csrc_pte);
4365	*cdst_pte = L2_S_PROTO | b_phys |
4366	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
4367	PTE_SYNC(cdst_pte);
4368	cpu_tlb_flushD_SE(csrcp);
4369	cpu_tlb_flushD_SE(cdstp);
4370	cpu_cpwait();
4371	bcopy((char *)csrcp + a_offs, (char *)cdstp + b_offs, cnt);
4372	mtx_unlock(&cmtx);
4373	cpu_dcache_inv_range(csrcp + a_offs, cnt);
4374	cpu_dcache_wbinv_range(cdstp + b_offs, cnt);
4375	cpu_l2cache_inv_range(csrcp + a_offs, cnt);
4376	cpu_l2cache_wbinv_range(cdstp + b_offs, cnt);
4377}
4378#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
4379
4380#if ARM_MMU_XSCALE == 1
4381void
4382pmap_copy_page_xscale(vm_paddr_t src, vm_paddr_t dst)
4383{
4384#if 0
4385	/* XXX: Only needed for pmap_clean_page(), which is commented out. */
4386	struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
4387#endif
4388#ifdef DEBUG
4389	struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
4390
4391	if (dst_pg->md.pvh_list != NULL)
4392		panic("pmap_copy_page: dst page has mappings");
4393#endif
4394
4395
4396	/*
4397	 * Clean the source page.  Hold the source page's lock for
4398	 * the duration of the copy so that no other mappings can
4399	 * be created while we have a potentially aliased mapping.
4400	 */
4401#if 0
4402	/*
4403	 * XXX: Not needed while we call cpu_dcache_wbinv_all() in
4404	 * pmap_copy_page().
4405	 */
4406	(void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE);
4407#endif
4408	/*
4409	 * Map the pages into the page hook points, copy them, and purge
4410	 * the cache for the appropriate page. Invalidate the TLB
4411	 * as required.
4412	 */
4413	mtx_lock(&cmtx);
4414	*csrc_pte = L2_S_PROTO | src |
4415	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
4416	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
4417	PTE_SYNC(csrc_pte);
4418	*cdst_pte = L2_S_PROTO | dst |
4419	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
4420	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
4421	PTE_SYNC(cdst_pte);
4422	cpu_tlb_flushD_SE(csrcp);
4423	cpu_tlb_flushD_SE(cdstp);
4424	cpu_cpwait();
4425	bcopy_page(csrcp, cdstp);
4426	mtx_unlock(&cmtx);
4427	xscale_cache_clean_minidata();
4428}
4429
4430void
4431pmap_copy_page_offs_xscale(vm_paddr_t a_phys, vm_offset_t a_offs,
4432    vm_paddr_t b_phys, vm_offset_t b_offs, int cnt)
4433{
4434
4435	mtx_lock(&cmtx);
4436	*csrc_pte = L2_S_PROTO | a_phys |
4437	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
4438	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
4439	PTE_SYNC(csrc_pte);
4440	*cdst_pte = L2_S_PROTO | b_phys |
4441	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
4442	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
4443	PTE_SYNC(cdst_pte);
4444	cpu_tlb_flushD_SE(csrcp);
4445	cpu_tlb_flushD_SE(cdstp);
4446	cpu_cpwait();
4447	bcopy((char *)csrcp + a_offs, (char *)cdstp + b_offs, cnt);
4448	mtx_unlock(&cmtx);
4449	xscale_cache_clean_minidata();
4450}
4451#endif /* ARM_MMU_XSCALE == 1 */
4452
4453void
4454pmap_copy_page(vm_page_t src, vm_page_t dst)
4455{
4456#ifdef ARM_USE_SMALL_ALLOC
4457	vm_offset_t srcpg, dstpg;
4458#endif
4459
4460	cpu_dcache_wbinv_all();
4461	cpu_l2cache_wbinv_all();
4462	if (_arm_memcpy && PAGE_SIZE >= _min_memcpy_size &&
4463	    _arm_memcpy((void *)VM_PAGE_TO_PHYS(dst),
4464	    (void *)VM_PAGE_TO_PHYS(src), PAGE_SIZE, IS_PHYSICAL) == 0)
4465		return;
4466#ifdef ARM_USE_SMALL_ALLOC
4467	srcpg = arm_ptovirt(VM_PAGE_TO_PHYS(src));
4468	dstpg = arm_ptovirt(VM_PAGE_TO_PHYS(dst));
4469	bcopy_page(srcpg, dstpg);
4470	cpu_dcache_wbinv_range(dstpg, PAGE_SIZE);
4471	cpu_l2cache_wbinv_range(dstpg, PAGE_SIZE);
4472#else
4473	pmap_copy_page_func(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
4474#endif
4475}
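
/*
 * Illustrative sketch (not original code): a copy-on-write style consumer
 * only needs the two vm_page_t handles; pmap_copy_page() takes care of the
 * temporary kernel mappings and cache maintenance.  The helper and the
 * allocation flags are assumptions.
 */
#if 0
static vm_page_t
example_duplicate_page(vm_page_t orig)
{
	vm_page_t copy;

	copy = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ);
	if (copy != NULL)
		pmap_copy_page(orig, copy);
	return (copy);
}
#endif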
4476
4477int unmapped_buf_allowed = 1;
4478
4479void
4480pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
4481    vm_offset_t b_offset, int xfersize)
4482{
4483	vm_page_t a_pg, b_pg;
4484	vm_offset_t a_pg_offset, b_pg_offset;
4485	int cnt;
4486#ifdef ARM_USE_SMALL_ALLOC
4487	vm_offset_t a_va, b_va;
4488#endif
4489
4490	cpu_dcache_wbinv_all();
4491	cpu_l2cache_wbinv_all();
4492	while (xfersize > 0) {
4493		a_pg = ma[a_offset >> PAGE_SHIFT];
4494		a_pg_offset = a_offset & PAGE_MASK;
4495		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
4496		b_pg = mb[b_offset >> PAGE_SHIFT];
4497		b_pg_offset = b_offset & PAGE_MASK;
4498		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
4499#ifdef ARM_USE_SMALL_ALLOC
4500		a_va = arm_ptovirt(VM_PAGE_TO_PHYS(a_pg)) + a_pg_offset;
4501		b_va = arm_ptovirt(VM_PAGE_TO_PHYS(b_pg)) + b_pg_offset;
4502		bcopy((char *)a_va, (char *)b_va, cnt);
4503		cpu_dcache_wbinv_range(b_va, cnt);
4504		cpu_l2cache_wbinv_range(b_va, cnt);
4505#else
4506		pmap_copy_page_offs_func(VM_PAGE_TO_PHYS(a_pg), a_pg_offset,
4507		    VM_PAGE_TO_PHYS(b_pg), b_pg_offset, cnt);
4508#endif
4509		xfersize -= cnt;
4510		a_offset += cnt;
4511		b_offset += cnt;
4512	}
4513}
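
/*
 * Illustrative sketch (not original code): pmap_copy_pages() lets a caller
 * such as the buffer cache copy between two buffers that are described only
 * by their page arrays, with page-boundary straddling handled internally.
 * The helper and its offsets are hypothetical.
 */
#if 0
static void
example_copy_unmapped_buffers(vm_page_t src_pages[], vm_page_t dst_pages[],
    int nbytes)
{

	/* Copy nbytes starting at offset 0 within both page arrays. */
	pmap_copy_pages(src_pages, 0, dst_pages, 0, nbytes);
}
#endif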
4514
4515/*
4516 * This routine returns TRUE if the given physical page is mapped into
4517 * the specified pmap; only the first 16 PV entries are checked.
4518 */
4519boolean_t
4520pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
4521{
4522	pv_entry_t pv;
4523	int loops = 0;
4524	boolean_t rv;
4525
4526	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4527	    ("pmap_page_exists_quick: page %p is not managed", m));
4528	rv = FALSE;
4529	vm_page_lock_queues();
4530	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4531		if (pv->pv_pmap == pmap) {
4532			rv = TRUE;
4533			break;
4534		}
4535		loops++;
4536		if (loops >= 16)
4537			break;
4538	}
4539	vm_page_unlock_queues();
4540	return (rv);
4541}
4542
4543/*
4544 *	pmap_page_wired_mappings:
4545 *
4546 *	Return the number of managed mappings to the given physical page
4547 *	that are wired.
4548 */
4549int
4550pmap_page_wired_mappings(vm_page_t m)
4551{
4552	pv_entry_t pv;
4553	int count;
4554
4555	count = 0;
4556	if ((m->oflags & VPO_UNMANAGED) != 0)
4557		return (count);
4558	vm_page_lock_queues();
4559	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
4560		if ((pv->pv_flags & PVF_WIRED) != 0)
4561			count++;
4562	vm_page_unlock_queues();
4563	return (count);
4564}
4565
4566/*
4567 *	pmap_ts_referenced:
4568 *
4569 *	Return the count of reference bits for a page, clearing all of them.
4570 */
4571int
4572pmap_ts_referenced(vm_page_t m)
4573{
4574
4575	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4576	    ("pmap_ts_referenced: page %p is not managed", m));
4577	return (pmap_clearbit(m, PVF_REF));
4578}
4579
4580
4581boolean_t
4582pmap_is_modified(vm_page_t m)
4583{
4584
4585	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4586	    ("pmap_is_modified: page %p is not managed", m));
4587	if (m->md.pvh_attrs & PVF_MOD)
4588		return (TRUE);
4589
4590	return (FALSE);
4591}
4592
4593
4594/*
4595 *	Clear the modify bits on the specified physical page.
4596 */
4597void
4598pmap_clear_modify(vm_page_t m)
4599{
4600
4601	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4602	    ("pmap_clear_modify: page %p is not managed", m));
4603	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
4604	KASSERT((m->oflags & VPO_BUSY) == 0,
4605	    ("pmap_clear_modify: page %p is busy", m));
4606
4607	/*
4608	 * If the page is not PGA_WRITEABLE, then no mappings can be modified.
4609	 * If the object containing the page is locked and the page is not
4610	 * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
4611	 */
4612	if ((m->aflags & PGA_WRITEABLE) == 0)
4613		return;
4614	if (m->md.pvh_attrs & PVF_MOD)
4615		pmap_clearbit(m, PVF_MOD);
4616}
4617
4618
4619/*
4620 *	pmap_is_referenced:
4621 *
4622 *	Return whether or not the specified physical page was referenced
4623 *	in any physical maps.
4624 */
4625boolean_t
4626pmap_is_referenced(vm_page_t m)
4627{
4628
4629	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4630	    ("pmap_is_referenced: page %p is not managed", m));
4631	return ((m->md.pvh_attrs & PVF_REF) != 0);
4632}
4633
4634/*
4635 *	pmap_clear_reference:
4636 *
4637 *	Clear the reference bit on the specified physical page.
4638 */
4639void
4640pmap_clear_reference(vm_page_t m)
4641{
4642
4643	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4644	    ("pmap_clear_reference: page %p is not managed", m));
4645	if (m->md.pvh_attrs & PVF_REF)
4646		pmap_clearbit(m, PVF_REF);
4647}
4648
4649
4650/*
4651 * Clear the write and modified bits in each of the given page's mappings.
4652 */
4653void
4654pmap_remove_write(vm_page_t m)
4655{
4656
4657	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4658	    ("pmap_remove_write: page %p is not managed", m));
4659
4660	/*
4661	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
4662	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
4663	 * is clear, no page table entries need updating.
4664	 */
4665	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
4666	if ((m->oflags & VPO_BUSY) != 0 ||
4667	    (m->aflags & PGA_WRITEABLE) != 0)
4668		pmap_clearbit(m, PVF_WRITE);
4669}
4670
4671
4672/*
4673 * perform the pmap work for mincore
4674 */
4675int
4676pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
4677{
4678	struct l2_bucket *l2b;
4679	pt_entry_t *ptep, pte;
4680	vm_paddr_t pa;
4681	vm_page_t m;
4682	int val;
4683	boolean_t managed;
4684
4685	PMAP_LOCK(pmap);
4686retry:
4687	l2b = pmap_get_l2_bucket(pmap, addr);
4688	if (l2b == NULL) {
4689		val = 0;
4690		goto out;
4691	}
4692	ptep = &l2b->l2b_kva[l2pte_index(addr)];
4693	pte = *ptep;
4694	if (!l2pte_valid(pte)) {
4695		val = 0;
4696		goto out;
4697	}
4698	val = MINCORE_INCORE;
4699	if (pte & L2_S_PROT_W)
4700		val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
4701	managed = false;
4702	pa = l2pte_pa(pte);
4703	m = PHYS_TO_VM_PAGE(pa);
4704	if (m != NULL && !(m->oflags & VPO_UNMANAGED))
4705		managed = true;
4706	if (managed) {
4707		/*
4708		 * The ARM pmap tries to maintain a per-mapping
4709		 * reference bit.  The trouble is that it's kept in
4710		 * the PV entry, not the PTE, so it's costly to access
4711		 * here.  You would need to acquire the pvh global
4712		 * lock, call pmap_find_pv(), and introduce a custom
4713		 * version of vm_page_pa_tryrelock() that releases and
4714		 * reacquires the pvh global lock.  In the end, I
4715		 * doubt it's worthwhile.  This may falsely report
4716		 * the given address as referenced.
4717		 */
4718		if ((m->md.pvh_attrs & PVF_REF) != 0)
4719			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
4720	}
4721	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
4722	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
4723		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
4724		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
4725			goto retry;
4726	} else
4727out:
4728		PA_UNLOCK_COND(*locked_pa);
4729	PMAP_UNLOCK(pmap);
4730	return (val);
4731}
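
/*
 * Illustrative sketch (not original code): the MI mincore(2) code is the
 * consumer of the bits assembled above; a simplified caller looks roughly
 * like the helper below, which is an assumption rather than the real MI
 * logic.
 */
#if 0
static int
example_page_is_resident(pmap_t pm, vm_offset_t va)
{
	vm_paddr_t locked_pa = 0;
	int val;

	val = pmap_mincore(pm, va, &locked_pa);
	PA_UNLOCK_COND(locked_pa);
	return ((val & MINCORE_INCORE) != 0);
}
#endif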
4732
4733
4734void
4735pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
4736{
4737}
4738
4739
4740/*
4741 *	Increase the starting virtual address of the given mapping if a
4742 *	different alignment might result in more superpage mappings.
4743 */
4744void
4745pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
4746    vm_offset_t *addr, vm_size_t size)
4747{
4748}
4749
4750
4751/*
4752 * Map a set of physical memory pages into the kernel virtual
4753 * address space. Return a pointer to where it is mapped. This
4754 * routine is intended to be used for mapping device memory,
4755 * NOT real memory.
4756 */
4757void *
4758pmap_mapdev(vm_offset_t pa, vm_size_t size)
4759{
4760	vm_offset_t va, tmpva, offset;
4761
4762	offset = pa & PAGE_MASK;
4763	size = roundup(size, PAGE_SIZE);
4764
4765	GIANT_REQUIRED;
4766
4767	va = kmem_alloc_nofault(kernel_map, size);
4768	if (!va)
4769		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
4770	for (tmpva = va; size > 0;) {
4771		pmap_kenter_internal(tmpva, pa, 0);
4772		size -= PAGE_SIZE;
4773		tmpva += PAGE_SIZE;
4774		pa += PAGE_SIZE;
4775	}
4776
4777	return ((void *)(va + offset));
4778}
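
/*
 * Illustrative sketch (not original code): a driver attach routine can use
 * pmap_mapdev() to obtain an uncached kernel mapping of its register window.
 * The physical address and window size below are hypothetical.
 */
#if 0
static volatile uint32_t *example_regs;

static void
example_attach(void)
{

	example_regs = (volatile uint32_t *)pmap_mapdev(0x40000000, 0x1000);
}
#endif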
4779
4780#define BOOTSTRAP_DEBUG
4781
4782/*
4783 * pmap_map_section:
4784 *
4785 *	Create a single section mapping.
4786 */
4787void
4788pmap_map_section(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
4789    int prot, int cache)
4790{
4791	pd_entry_t *pde = (pd_entry_t *) l1pt;
4792	pd_entry_t fl;
4793
4794	KASSERT(((va | pa) & L1_S_OFFSET) == 0, ("va/pa not section aligned"));
4795
4796	switch (cache) {
4797	case PTE_NOCACHE:
4798	default:
4799		fl = 0;
4800		break;
4801
4802	case PTE_CACHE:
4803		fl = pte_l1_s_cache_mode;
4804		break;
4805
4806	case PTE_PAGETABLE:
4807		fl = pte_l1_s_cache_mode_pt;
4808		break;
4809	}
4810
4811	pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
4812	    L1_S_PROT(PTE_KERNEL, prot) | fl | L1_S_DOM(PMAP_DOMAIN_KERNEL);
4813	PTE_SYNC(&pde[va >> L1_S_SHIFT]);
4814
4815}
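
/*
 * Illustrative sketch (not original code): bootstrap code can cover a 1MB
 * device window with a single section descriptor.  The virtual and physical
 * addresses below are hypothetical and must both be L1_S_SIZE (1MB) aligned.
 */
#if 0
static void
example_map_device_section(vm_offset_t l1pt)
{

	pmap_map_section(l1pt, 0xd0000000, 0x48000000,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_NOCACHE);
}
#endif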
4816
4817/*
4818 * pmap_link_l2pt:
4819 *
4820 *	Link the L2 page table specified by l2pv.pv_pa into the L1
4821 *	page table at the slot for "va".
4822 */
4823void
4824pmap_link_l2pt(vm_offset_t l1pt, vm_offset_t va, struct pv_addr *l2pv)
4825{
4826	pd_entry_t *pde = (pd_entry_t *) l1pt, proto;
4827	u_int slot = va >> L1_S_SHIFT;
4828
4829	proto = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO;
4830
4831#ifdef VERBOSE_INIT_ARM
4832	printf("pmap_link_l2pt: pa=0x%x va=0x%x\n", l2pv->pv_pa, l2pv->pv_va);
4833#endif
4834
4835	pde[slot + 0] = proto | (l2pv->pv_pa + 0x000);
4836
4837	PTE_SYNC(&pde[slot]);
4838
4839	SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
4840
4841
4842}
4843
4844/*
4845 * pmap_map_entry
4846 *
4847 * 	Create a single page mapping.
4848 */
4849void
4850pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot,
4851    int cache)
4852{
4853	pd_entry_t *pde = (pd_entry_t *) l1pt;
4854	pt_entry_t fl;
4855	pt_entry_t *pte;
4856
4857	KASSERT(((va | pa) & PAGE_MASK) == 0, ("va/pa not page aligned"));
4858
4859	switch (cache) {
4860	case PTE_NOCACHE:
4861	default:
4862		fl = 0;
4863		break;
4864
4865	case PTE_CACHE:
4866		fl = pte_l2_s_cache_mode;
4867		break;
4868
4869	case PTE_PAGETABLE:
4870		fl = pte_l2_s_cache_mode_pt;
4871		break;
4872	}
4873
4874	if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
4875		panic("pmap_map_entry: no L2 table for VA 0x%08x", va);
4876
4877	pte = (pt_entry_t *) kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK);
4878
4879	if (pte == NULL)
4880		panic("pmap_map_entry: can't find L2 table for VA 0x%08x", va);
4881
4882	pte[l2pte_index(va)] =
4883	    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | fl;
4884	PTE_SYNC(&pte[l2pte_index(va)]);
4885}
4886
4887/*
4888 * pmap_map_chunk:
4889 *
4890 *	Map a chunk of memory using the most efficient mappings
4891 *	possible (section, large page, small page) into the
4892 *	provided L1 and L2 tables at the specified virtual address.
4893 */
4894vm_size_t
4895pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
4896    vm_size_t size, int prot, int cache)
4897{
4898	pd_entry_t *pde = (pd_entry_t *) l1pt;
4899	pt_entry_t *pte, f1, f2s, f2l;
4900	vm_size_t resid;
4901	int i;
4902
4903	resid = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
4904
4905	if (l1pt == 0)
4906		panic("pmap_map_chunk: no L1 table provided");
4907
4908#ifdef VERBOSE_INIT_ARM
4909	printf("pmap_map_chunk: pa=0x%x va=0x%x size=0x%x resid=0x%x "
4910	    "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
4911#endif
4912
4913	switch (cache) {
4914	case PTE_NOCACHE:
4915	default:
4916		f1 = 0;
4917		f2l = 0;
4918		f2s = 0;
4919		break;
4920
4921	case PTE_CACHE:
4922		f1 = pte_l1_s_cache_mode;
4923		f2l = pte_l2_l_cache_mode;
4924		f2s = pte_l2_s_cache_mode;
4925		break;
4926
4927	case PTE_PAGETABLE:
4928		f1 = pte_l1_s_cache_mode_pt;
4929		f2l = pte_l2_l_cache_mode_pt;
4930		f2s = pte_l2_s_cache_mode_pt;
4931		break;
4932	}
4933
4934	size = resid;
4935
4936	while (resid > 0) {
4937		/* See if we can use a section mapping. */
4938		if (L1_S_MAPPABLE_P(va, pa, resid)) {
4939#ifdef VERBOSE_INIT_ARM
4940			printf("S");
4941#endif
4942			pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
4943			    L1_S_PROT(PTE_KERNEL, prot) | f1 |
4944			    L1_S_DOM(PMAP_DOMAIN_KERNEL);
4945			PTE_SYNC(&pde[va >> L1_S_SHIFT]);
4946			va += L1_S_SIZE;
4947			pa += L1_S_SIZE;
4948			resid -= L1_S_SIZE;
4949			continue;
4950		}
4951
4952		/*
4953		 * Ok, we're going to use an L2 table.  Make sure
4954		 * one is actually in the corresponding L1 slot
4955		 * for the current VA.
4956		 */
4957		if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
4958			panic("pmap_map_chunk: no L2 table for VA 0x%08x", va);
4959
4960		pte = (pt_entry_t *) kernel_pt_lookup(
4961		    pde[L1_IDX(va)] & L1_C_ADDR_MASK);
4962		if (pte == NULL)
4963			panic("pmap_map_chunk: can't find L2 table for VA"
4964			    " 0x%08x", va);
4965		/* See if we can use a L2 large page mapping. */
4966		if (L2_L_MAPPABLE_P(va, pa, resid)) {
4967#ifdef VERBOSE_INIT_ARM
4968			printf("L");
4969#endif
4970			for (i = 0; i < 16; i++) {
4971				pte[l2pte_index(va) + i] =
4972				    L2_L_PROTO | pa |
4973				    L2_L_PROT(PTE_KERNEL, prot) | f2l;
4974				PTE_SYNC(&pte[l2pte_index(va) + i]);
4975			}
4976			va += L2_L_SIZE;
4977			pa += L2_L_SIZE;
4978			resid -= L2_L_SIZE;
4979			continue;
4980		}
4981
4982		/* Use a small page mapping. */
4983#ifdef VERBOSE_INIT_ARM
4984		printf("P");
4985#endif
4986		pte[l2pte_index(va)] =
4987		    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s;
4988		PTE_SYNC(&pte[l2pte_index(va)]);
4989		va += PAGE_SIZE;
4990		pa += PAGE_SIZE;
4991		resid -= PAGE_SIZE;
4992	}
4993#ifdef VERBOSE_INIT_ARM
4994	printf("\n");
4995#endif
4996	return (size);
4997
4998}
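
/*
 * Illustrative sketch (not original code): initarm()-style startup code maps
 * large regions with pmap_map_chunk() and lets it choose section, large page
 * or small page descriptors based on alignment and size.  The addresses and
 * size below are hypothetical.
 */
#if 0
static void
example_bootstrap_map_sdram(vm_offset_t l1pt)
{

	/* Map 4MB of SDRAM cacheably; alignment picks the descriptor type. */
	pmap_map_chunk(l1pt, 0xc0000000, 0xa0000000, 0x00400000,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
}
#endif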
4999
5000/********************** Static device map routines ***************************/
5001
5002static const struct pmap_devmap *pmap_devmap_table;
5003
5004/*
5005 * Register the devmap table.  This is provided in case early console
5006 * initialization needs to register mappings created by bootstrap code
5007 * before pmap_devmap_bootstrap() is called.
5008 */
5009void
5010pmap_devmap_register(const struct pmap_devmap *table)
5011{
5012
5013	pmap_devmap_table = table;
5014}
5015
5016/*
5017 * Map all of the static regions in the devmap table, and remember
5018 * the devmap table so other parts of the kernel can look up entries
5019 * later.
5020 */
5021void
5022pmap_devmap_bootstrap(vm_offset_t l1pt, const struct pmap_devmap *table)
5023{
5024	int i;
5025
5026	pmap_devmap_table = table;
5027
5028	for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
5029#ifdef VERBOSE_INIT_ARM
5030		printf("devmap: %08x -> %08x @ %08x\n",
5031		    pmap_devmap_table[i].pd_pa,
5032		    pmap_devmap_table[i].pd_pa +
5033			pmap_devmap_table[i].pd_size - 1,
5034		    pmap_devmap_table[i].pd_va);
5035#endif
5036		pmap_map_chunk(l1pt, pmap_devmap_table[i].pd_va,
5037		    pmap_devmap_table[i].pd_pa,
5038		    pmap_devmap_table[i].pd_size,
5039		    pmap_devmap_table[i].pd_prot,
5040		    pmap_devmap_table[i].pd_cache);
5041	}
5042}
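
/*
 * Illustrative sketch (not original code): a platform normally provides a
 * table terminated by a zero pd_size entry and hands it to
 * pmap_devmap_bootstrap() from its early MMU setup.  The addresses, sizes
 * and names below are hypothetical.
 */
#if 0
static const struct pmap_devmap example_devmap[] = {
	{
		.pd_va = 0xfe000000,
		.pd_pa = 0x80000000,
		.pd_size = 0x00100000,
		.pd_prot = VM_PROT_READ | VM_PROT_WRITE,
		.pd_cache = PTE_NOCACHE,
	},
	{ .pd_size = 0 }	/* zero size terminates the table */
};

static void
example_devmap_init(vm_offset_t l1pt)
{

	pmap_devmap_bootstrap(l1pt, example_devmap);
}
#endif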
5043
5044const struct pmap_devmap *
5045pmap_devmap_find_pa(vm_paddr_t pa, vm_size_t size)
5046{
5047	int i;
5048
5049	if (pmap_devmap_table == NULL)
5050		return (NULL);
5051
5052	for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
5053		if (pa >= pmap_devmap_table[i].pd_pa &&
5054		    pa + size <= pmap_devmap_table[i].pd_pa +
5055				 pmap_devmap_table[i].pd_size)
5056			return (&pmap_devmap_table[i]);
5057	}
5058
5059	return (NULL);
5060}
5061
5062const struct pmap_devmap *
5063pmap_devmap_find_va(vm_offset_t va, vm_size_t size)
5064{
5065	int i;
5066
5067	if (pmap_devmap_table == NULL)
5068		return (NULL);
5069
5070	for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
5071		if (va >= pmap_devmap_table[i].pd_va &&
5072		    va + size <= pmap_devmap_table[i].pd_va +
5073				 pmap_devmap_table[i].pd_size)
5074			return (&pmap_devmap_table[i]);
5075	}
5076
5077	return (NULL);
5078}
5079
5080void
5081pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
5082{
5083	/*
5084	 * Remember the memattr in a field that gets used to set the appropriate
5085	 * bits in the PTEs as mappings are established.
5086	 */
5087	m->md.pv_memattr = ma;
5088
5089	/*
5090	 * It appears that this function can only be called before any mappings
5091	 * for the page are established on ARM.  If this ever changes, this code
5092	 * will need to walk the pv_list and make each of the existing mappings
5093	 * uncacheable, being careful to sync caches and PTEs (and maybe
5094	 * invalidate TLB?) for any current mapping it modifies.
5095	 */
5096	if (m->md.pv_kva != 0 || TAILQ_FIRST(&m->md.pv_list) != NULL)
5097		panic("Can't change memattr on page with existing mappings");
5098}
5099
5100
5101