1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * the Systems Programming Group of the University of Utah Computer
11 * Science Department and William Jolitz of UUNET Technologies Inc.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 *    notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 *    notice, this list of conditions and the following disclaimer in the
20 *    documentation and/or other materials provided with the distribution.
21 * 4. Neither the name of the University nor the names of its contributors
22 *    may be used to endorse or promote products derived from this software
23 *    without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
38 *	from: src/sys/i386/i386/pmap.c,v 1.250.2.8 2000/11/21 00:09:14 ps
39 *	JNPR: pmap.c,v 1.11.2.1 2007/08/16 11:51:06 girish
40 */
41
42/*
43 *	Manages physical address maps.
44 *
45 *	In addition to hardware address maps, this
46 *	module is called upon to provide software-use-only
47 *	maps which may or may not be stored in the same
48 *	form as hardware maps.	These pseudo-maps are
49 *	used to store intermediate results from copy
50 *	operations to and from address spaces.
51 *
52 *	Since the information managed by this module is
53 *	also stored by the logical address mapping module,
54 *	this module may throw away valid virtual-to-physical
55 *	mappings at almost any time.  However, invalidations
56 *	of virtual-to-physical mappings must be done as
57 *	requested.
58 *
59 *	In order to cope with hardware architectures which
60 *	make virtual-to-physical map invalidates expensive,
61 * this module may delay invalidation or protection-reduction
62 *	operations until such time as they are actually
63 *	necessary.  This module is given full information as
64 *	to which processors are currently using which maps,
65 *	and to when physical maps must be made correct.
66 */
67
68#include <sys/cdefs.h>
69__FBSDID("$FreeBSD: head/sys/mips/mips/pmap.c 225418 2011-09-06 10:30:11Z kib $");
70
71#include "opt_ddb.h"
72
73#include <sys/param.h>
74#include <sys/systm.h>
75#include <sys/proc.h>
76#include <sys/msgbuf.h>
77#include <sys/vmmeter.h>
78#include <sys/mman.h>
79#include <sys/smp.h>
80#ifdef DDB
81#include <ddb/ddb.h>
82#endif
83
84#include <vm/vm.h>
85#include <vm/vm_param.h>
86#include <vm/vm_phys.h>
87#include <sys/lock.h>
88#include <sys/mutex.h>
89#include <vm/vm_kern.h>
90#include <vm/vm_page.h>
91#include <vm/vm_map.h>
92#include <vm/vm_object.h>
93#include <vm/vm_extern.h>
94#include <vm/vm_pageout.h>
95#include <vm/vm_pager.h>
96#include <vm/uma.h>
97#include <sys/pcpu.h>
98#include <sys/sched.h>
99#ifdef SMP
100#include <sys/smp.h>
101#endif
102
103#include <machine/cache.h>
104#include <machine/md_var.h>
105#include <machine/tlb.h>
106
107#undef PMAP_DEBUG
108
109#ifndef PMAP_SHPGPERPROC
110#define	PMAP_SHPGPERPROC 200
111#endif
112
113#if !defined(DIAGNOSTIC)
114#define	PMAP_INLINE __inline
115#else
116#define	PMAP_INLINE
117#endif
118
119/*
120 * Get PDEs and PTEs for user/kernel address space
121 */
122#define	pmap_seg_index(v)	(((v) >> SEGSHIFT) & (NPDEPG - 1))
123#define	pmap_pde_index(v)	(((v) >> PDRSHIFT) & (NPDEPG - 1))
124#define	pmap_pte_index(v)	(((v) >> PAGE_SHIFT) & (NPTEPG - 1))
125#define	pmap_pde_pindex(v)	((v) >> PDRSHIFT)
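/*
 * pmap_seg_index() picks the first-level (segment table) slot for a
 * virtual address and pmap_pte_index() the slot within the final page
 * table page.  On the 64-bit three-level layout pmap_pde_index() picks
 * the middle (page directory) slot; on the 32-bit two-level layout the
 * segment and directory levels coincide.  pmap_pde_pindex() yields the
 * vm_page pindex used for the page table page that maps 'v'.
 */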
126
127#ifdef __mips_n64
128#define	NUPDE			(NPDEPG * NPDEPG)
129#define	NUSERPGTBLS		(NUPDE + NPDEPG)
130#else
131#define	NUPDE			(NPDEPG)
132#define	NUSERPGTBLS		(NUPDE)
133#endif
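/*
 * Page table pages are assigned vm_page pindex values below NUPDE; on the
 * n64 layout the page directory pages that map them use pindexes from
 * NUPDE up to NUSERPGTBLS.  _pmap_allocpte() and _pmap_unwire_pte_hold()
 * rely on this split to distinguish the two kinds of pages.
 */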
134
135#define	is_kernel_pmap(x)	((x) == kernel_pmap)
136
137struct pmap kernel_pmap_store;
138pd_entry_t *kernel_segmap;
139
140vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
141vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
142
143static int nkpt;
144unsigned pmap_max_asid;		/* max ASID supported by the system */
145
146#define	PMAP_ASID_RESERVED	0
147
148vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
149
150static void pmap_asid_alloc(pmap_t pmap);
151
152/*
153 * Data for the pv entry allocation mechanism
154 */
155static uma_zone_t pvzone;
156static struct vm_object pvzone_obj;
157static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
158
159static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
160static pv_entry_t get_pv_entry(pmap_t locked_pmap);
161static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
162static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
163    vm_offset_t va);
164static __inline void pmap_changebit(vm_page_t m, int bit, boolean_t setem);
165static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
166    vm_page_t m, vm_prot_t prot, vm_page_t mpte);
167static int pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va);
168static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
169static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
170static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte,
171    vm_offset_t va, vm_page_t m);
172static void pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte);
173static void pmap_invalidate_all(pmap_t pmap);
174static void pmap_invalidate_page(pmap_t pmap, vm_offset_t va);
175static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m);
176
177static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
178static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
179static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t);
180static pt_entry_t init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot);
181
182#ifdef SMP
183static void pmap_invalidate_page_action(void *arg);
184static void pmap_invalidate_all_action(void *arg);
185static void pmap_update_page_action(void *arg);
186#endif
187
188#ifndef __mips_n64
189/*
190 * This structure is for high memory (memory above 512Meg in 32 bit) support.
191 * The highmem area does not have a KSEG0 mapping, and we need a mechanism to
192 * do temporary per-CPU mappings for pmap_zero_page, pmap_copy_page etc.
193 *
194 * At bootup, we reserve 2 virtual pages per CPU for mapping highmem pages. To
195 * access a highmem physical address on a CPU, we map the physical address to
196 * the reserved virtual address for the CPU in the kernel pagetable.  This is
197 * done with interrupts disabled (although a spinlock and sched_pin would be
198 * sufficient).
199 */
200struct local_sysmaps {
201	vm_offset_t	base;
202	uint32_t	saved_intr;
203	uint16_t	valid1, valid2;
204};
205static struct local_sysmaps sysmap_lmem[MAXCPU];
206
207static __inline void
208pmap_alloc_lmem_map(void)
209{
210	int i;
211
212	for (i = 0; i < MAXCPU; i++) {
213		sysmap_lmem[i].base = virtual_avail;
214		virtual_avail += PAGE_SIZE * 2;
215		sysmap_lmem[i].valid1 = sysmap_lmem[i].valid2 = 0;
216	}
217}
218
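/*
 * Temporarily map a single physical page that has no direct (KSEG0)
 * mapping at this CPU's reserved lmem virtual address.  Interrupts stay
 * disabled until the matching pmap_lmem_unmap() call.  The page zero and
 * copy routines later in this file use roughly this pattern:
 *
 *	va = pmap_lmem_map1(phys);
 *	bzero((caddr_t)va, PAGE_SIZE);
 *	pmap_lmem_unmap();
 */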
219static __inline vm_offset_t
220pmap_lmem_map1(vm_paddr_t phys)
221{
222	struct local_sysmaps *sysm;
223	pt_entry_t *pte, npte;
224	vm_offset_t va;
225	uint32_t intr;
226	int cpu;
227
228	intr = intr_disable();
229	cpu = PCPU_GET(cpuid);
230	sysm = &sysmap_lmem[cpu];
231	sysm->saved_intr = intr;
232	va = sysm->base;
233	npte = TLBLO_PA_TO_PFN(phys) |
234	    PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
235	pte = pmap_pte(kernel_pmap, va);
236	*pte = npte;
237	sysm->valid1 = 1;
238	return (va);
239}
240
241static __inline vm_offset_t
242pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
243{
244	struct local_sysmaps *sysm;
245	pt_entry_t *pte, npte;
246	vm_offset_t va1, va2;
247	uint32_t intr;
248	int cpu;
249
250	intr = intr_disable();
251	cpu = PCPU_GET(cpuid);
252	sysm = &sysmap_lmem[cpu];
253	sysm->saved_intr = intr;
254	va1 = sysm->base;
255	va2 = sysm->base + PAGE_SIZE;
256	npte = TLBLO_PA_TO_PFN(phys1) |
257	    PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
258	pte = pmap_pte(kernel_pmap, va1);
259	*pte = npte;
260	npte =  TLBLO_PA_TO_PFN(phys2) |
261	    PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
262	pte = pmap_pte(kernel_pmap, va2);
263	*pte = npte;
264	sysm->valid1 = 1;
265	sysm->valid2 = 1;
266	return (va1);
267}
268
269static __inline void
270pmap_lmem_unmap(void)
271{
272	struct local_sysmaps *sysm;
273	pt_entry_t *pte;
274	int cpu;
275
276	cpu = PCPU_GET(cpuid);
277	sysm = &sysmap_lmem[cpu];
278	pte = pmap_pte(kernel_pmap, sysm->base);
279	*pte = PTE_G;
280	tlb_invalidate_address(kernel_pmap, sysm->base);
281	sysm->valid1 = 0;
282	if (sysm->valid2) {
283		pte = pmap_pte(kernel_pmap, sysm->base + PAGE_SIZE);
284		*pte = PTE_G;
285		tlb_invalidate_address(kernel_pmap, sysm->base + PAGE_SIZE);
286		sysm->valid2 = 0;
287	}
288	intr_restore(sysm->saved_intr);
289}
290#else  /* __mips_n64 */
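/*
 * On n64 every physical page is reachable through XKPHYS, so no temporary
 * per-CPU mappings are needed; the lmem routines collapse to no-op stubs.
 */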
291
292static __inline void
293pmap_alloc_lmem_map(void)
294{
295}
296
297static __inline vm_offset_t
298pmap_lmem_map1(vm_paddr_t phys)
299{
300
301	return (0);
302}
303
304static __inline vm_offset_t
305pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
306{
307
308	return (0);
309}
310
311static __inline vm_offset_t
312pmap_lmem_unmap(void)
313{
314
315	return (0);
316}
317#endif /* !__mips_n64 */
318
319/*
320 * Page table entry lookup routines.
321 */
322static __inline pd_entry_t *
323pmap_segmap(pmap_t pmap, vm_offset_t va)
324{
325
326	return (&pmap->pm_segtab[pmap_seg_index(va)]);
327}
328
329#ifdef __mips_n64
330static __inline pd_entry_t *
331pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va)
332{
333	pd_entry_t *pde;
334
335	pde = (pd_entry_t *)*pdpe;
336	return (&pde[pmap_pde_index(va)]);
337}
338
339static __inline pd_entry_t *
340pmap_pde(pmap_t pmap, vm_offset_t va)
341{
342	pd_entry_t *pdpe;
343
344	pdpe = pmap_segmap(pmap, va);
345	if (pdpe == NULL || *pdpe == NULL)
346		return (NULL);
347
348	return (pmap_pdpe_to_pde(pdpe, va));
349}
350#else
351static __inline pd_entry_t *
352pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va)
353{
354
355	return (pdpe);
356}
357
358static __inline
359pd_entry_t *pmap_pde(pmap_t pmap, vm_offset_t va)
360{
361
362	return (pmap_segmap(pmap, va));
363}
364#endif
365
366static __inline pt_entry_t *
367pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)
368{
369	pt_entry_t *pte;
370
371	pte = (pt_entry_t *)*pde;
372	return (&pte[pmap_pte_index(va)]);
373}
374
375pt_entry_t *
376pmap_pte(pmap_t pmap, vm_offset_t va)
377{
378	pd_entry_t *pde;
379
380	pde = pmap_pde(pmap, va);
381	if (pde == NULL || *pde == NULL)
382		return (NULL);
383
384	return (pmap_pde_to_pte(pde, va));
385}
386
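/*
 * Carve physically contiguous, direct-mappable memory out of the front of
 * the first sufficiently large phys_avail[] chunk.  This runs before the
 * VM system is up, so the pages are simply removed from phys_avail[],
 * zeroed, and returned as a direct-mapped (KSEG0/XKPHYS) virtual address.
 */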
387vm_offset_t
388pmap_steal_memory(vm_size_t size)
389{
390	vm_paddr_t bank_size, pa;
391	vm_offset_t va;
392
393	size = round_page(size);
394	bank_size = phys_avail[1] - phys_avail[0];
395	while (size > bank_size) {
396		int i;
397
398		for (i = 0; phys_avail[i + 2]; i += 2) {
399			phys_avail[i] = phys_avail[i + 2];
400			phys_avail[i + 1] = phys_avail[i + 3];
401		}
402		phys_avail[i] = 0;
403		phys_avail[i + 1] = 0;
404		if (!phys_avail[0])
405			panic("pmap_steal_memory: out of memory");
406		bank_size = phys_avail[1] - phys_avail[0];
407	}
408
409	pa = phys_avail[0];
410	phys_avail[0] += size;
411	if (MIPS_DIRECT_MAPPABLE(pa) == 0)
412		panic("Out of memory below 512Meg?");
413	va = MIPS_PHYS_TO_DIRECT(pa);
414	bzero((caddr_t)va, size);
415	return (va);
416}
417
418/*
419 * Bootstrap the system enough to run with virtual memory.  This
420 * assumes that the phys_avail array has been initialized.
421 */
422static void
423pmap_create_kernel_pagetable(void)
424{
425	int i, j;
426	vm_offset_t ptaddr;
427	pt_entry_t *pte;
428#ifdef __mips_n64
429	pd_entry_t *pde;
430	vm_offset_t pdaddr;
431	int npt, npde;
432#endif
433
434	/*
435	 * Allocate segment table for the kernel
436	 */
437	kernel_segmap = (pd_entry_t *)pmap_steal_memory(PAGE_SIZE);
438
439	/*
440	 * Allocate second level page tables for the kernel
441	 */
442#ifdef __mips_n64
443	npde = howmany(NKPT, NPDEPG);
444	pdaddr = pmap_steal_memory(PAGE_SIZE * npde);
445#endif
446	nkpt = NKPT;
447	ptaddr = pmap_steal_memory(PAGE_SIZE * nkpt);
448
449	/*
450	 * The R[4-7]?00 stores only one copy of the Global bit in the
451 * translation lookaside buffer for each pair of pages. Thus invalid
452 * entries must have the Global bit set so that when the EntryLo and
453 * EntryHi G bits are ANDed together they produce a global bit to store
454 * in the TLB.
455	 */
456	for (i = 0, pte = (pt_entry_t *)ptaddr; i < (nkpt * NPTEPG); i++, pte++)
457		*pte = PTE_G;
458
459#ifdef __mips_n64
460	for (i = 0,  npt = nkpt; npt > 0; i++) {
461		kernel_segmap[i] = (pd_entry_t)(pdaddr + i * PAGE_SIZE);
462		pde = (pd_entry_t *)kernel_segmap[i];
463
464		for (j = 0; j < NPDEPG && npt > 0; j++, npt--)
465			pde[j] = (pd_entry_t)(ptaddr + (i * NPDEPG + j) * PAGE_SIZE);
466	}
467#else
468	for (i = 0, j = pmap_seg_index(VM_MIN_KERNEL_ADDRESS); i < nkpt; i++, j++)
469		kernel_segmap[j] = (pd_entry_t)(ptaddr + (i * PAGE_SIZE));
470#endif
471
472	PMAP_LOCK_INIT(kernel_pmap);
473	kernel_pmap->pm_segtab = kernel_segmap;
474	CPU_FILL(&kernel_pmap->pm_active);
475	TAILQ_INIT(&kernel_pmap->pm_pvlist);
476	kernel_pmap->pm_asid[0].asid = PMAP_ASID_RESERVED;
477	kernel_pmap->pm_asid[0].gen = 0;
478	kernel_vm_end += nkpt * NPTEPG * PAGE_SIZE;
479}
480
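/*
 * Bootstrap the pmap module: sort and page-align the phys_avail[] chunks,
 * record them in physmem_desc[], steal early allocations (message buffer,
 * thread0 kstack, optional per-CPU lmem window), build the kernel page
 * tables, and reset the TLB/ASID state.
 */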
481void
482pmap_bootstrap(void)
483{
484	int i;
485	int need_local_mappings = 0;
486
487	/* Sort. */
488again:
489	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
490		/*
491		 * Keep the memory aligned on page boundary.
492		 */
493		phys_avail[i] = round_page(phys_avail[i]);
494		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
495
496		if (i < 2)
497			continue;
498		if (phys_avail[i - 2] > phys_avail[i]) {
499			vm_paddr_t ptemp[2];
500
501			ptemp[0] = phys_avail[i + 0];
502			ptemp[1] = phys_avail[i + 1];
503
504			phys_avail[i + 0] = phys_avail[i - 2];
505			phys_avail[i + 1] = phys_avail[i - 1];
506
507			phys_avail[i - 2] = ptemp[0];
508			phys_avail[i - 1] = ptemp[1];
509			goto again;
510		}
511	}
512
513       	/*
514	 * In 32 bit, we may have memory which cannot be mapped directly.
515	 * This memory will need temporary mapping before it can be
516	 * accessed.
517	 */
518	if (!MIPS_DIRECT_MAPPABLE(phys_avail[i - 1] - 1))
519		need_local_mappings = 1;
520
521	/*
522	 * Copy the phys_avail[] array before we start stealing memory from it.
523	 */
524	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
525		physmem_desc[i] = phys_avail[i];
526		physmem_desc[i + 1] = phys_avail[i + 1];
527	}
528
529	Maxmem = atop(phys_avail[i - 1]);
530
531	if (bootverbose) {
532		printf("Physical memory chunk(s):\n");
533		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
534			vm_paddr_t size;
535
536			size = phys_avail[i + 1] - phys_avail[i];
537			printf("%#08jx - %#08jx, %ju bytes (%ju pages)\n",
538			    (uintmax_t) phys_avail[i],
539			    (uintmax_t) phys_avail[i + 1] - 1,
540			    (uintmax_t) size, (uintmax_t) size / PAGE_SIZE);
541		}
542		printf("Maxmem is 0x%0jx\n", ptoa((uintmax_t)Maxmem));
543	}
544	/*
545	 * Steal the message buffer from the beginning of memory.
546	 */
547	msgbufp = (struct msgbuf *)pmap_steal_memory(msgbufsize);
548	msgbufinit(msgbufp, msgbufsize);
549
550	/*
551	 * Steal thread0 kstack.
552	 */
553	kstack0 = pmap_steal_memory(KSTACK_PAGES << PAGE_SHIFT);
554
555	virtual_avail = VM_MIN_KERNEL_ADDRESS;
556	virtual_end = VM_MAX_KERNEL_ADDRESS;
557
558#ifdef SMP
559	/*
560	 * Steal some virtual address space to map the pcpu area.
561	 */
562	virtual_avail = roundup2(virtual_avail, PAGE_SIZE * 2);
563	pcpup = (struct pcpu *)virtual_avail;
564	virtual_avail += PAGE_SIZE * 2;
565
566	/*
567	 * Initialize the wired TLB entry mapping the pcpu region for
568	 * the BSP at 'pcpup'. Up until this point we were operating
569	 * with the 'pcpup' for the BSP pointing to a virtual address
570	 * in KSEG0 so there was no need for a TLB mapping.
571	 */
572	mips_pcpu_tlb_init(PCPU_ADDR(0));
573
574	if (bootverbose)
575		printf("pcpu is available at virtual address %p.\n", pcpup);
576#endif
577
578	if (need_local_mappings)
579		pmap_alloc_lmem_map();
580	pmap_create_kernel_pagetable();
581	pmap_max_asid = VMNUM_PIDS;
582	mips_wr_entryhi(0);
583	mips_wr_pagemask(0);
584}
585
586/*
587 * Initialize a vm_page's machine-dependent fields.
588 */
589void
590pmap_page_init(vm_page_t m)
591{
592
593	TAILQ_INIT(&m->md.pv_list);
594	m->md.pv_list_count = 0;
595	m->md.pv_flags = 0;
596}
597
598/*
599 *	Initialize the pmap module.
600 *	Called by vm_init, to initialize any structures that the pmap
601 *	system needs to map virtual memory.
602 *	pmap_init has been enhanced to support, in a fairly consistent
603 *	way, discontiguous physical memory.
604 */
605void
606pmap_init(void)
607{
608
609	/*
610	 * Initialize the address space (zone) for the pv entries.  Set a
611	 * high water mark so that the system can recover from excessive
612	 * numbers of pv entries.
613	 */
614	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
615	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
616	pv_entry_max = PMAP_SHPGPERPROC * maxproc + cnt.v_page_count;
617	pv_entry_high_water = 9 * (pv_entry_max / 10);
618	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
619}
620
621/***************************************************
622 * Low level helper routines.....
623 ***************************************************/
624
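/*
 * TLB shootdown helpers.  For the kernel pmap the whole TLB (or just the
 * affected entry) is flushed directly.  For a user pmap that is not
 * currently active on this CPU, it is cheaper to zero its ASID generation
 * so that a fresh ASID is allocated the next time the pmap is activated
 * here.
 */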
625static __inline void
626pmap_invalidate_all_local(pmap_t pmap)
627{
628	u_int cpuid;
629
630	cpuid = PCPU_GET(cpuid);
631
632	if (pmap == kernel_pmap) {
633		tlb_invalidate_all();
634		return;
635	}
636	if (CPU_ISSET(cpuid, &pmap->pm_active))
637		tlb_invalidate_all_user(pmap);
638	else
639		pmap->pm_asid[cpuid].gen = 0;
640}
641
642#ifdef SMP
643static void
644pmap_invalidate_all(pmap_t pmap)
645{
646
647	smp_rendezvous(0, pmap_invalidate_all_action, 0, pmap);
648}
649
650static void
651pmap_invalidate_all_action(void *arg)
652{
653
654	pmap_invalidate_all_local((pmap_t)arg);
655}
656#else
657static void
658pmap_invalidate_all(pmap_t pmap)
659{
660
661	pmap_invalidate_all_local(pmap);
662}
663#endif
664
665static __inline void
666pmap_invalidate_page_local(pmap_t pmap, vm_offset_t va)
667{
668	u_int cpuid;
669
670	cpuid = PCPU_GET(cpuid);
671
672	if (is_kernel_pmap(pmap)) {
673		tlb_invalidate_address(pmap, va);
674		return;
675	}
676	if (pmap->pm_asid[cpuid].gen != PCPU_GET(asid_generation))
677		return;
678	else if (!CPU_ISSET(cpuid, &pmap->pm_active)) {
679		pmap->pm_asid[cpuid].gen = 0;
680		return;
681	}
682	tlb_invalidate_address(pmap, va);
683}
684
685#ifdef SMP
686struct pmap_invalidate_page_arg {
687	pmap_t pmap;
688	vm_offset_t va;
689};
690
691static void
692pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
693{
694	struct pmap_invalidate_page_arg arg;
695
696	arg.pmap = pmap;
697	arg.va = va;
698	smp_rendezvous(0, pmap_invalidate_page_action, 0, &arg);
699}
700
701static void
702pmap_invalidate_page_action(void *arg)
703{
704	struct pmap_invalidate_page_arg *p = arg;
705
706	pmap_invalidate_page_local(p->pmap, p->va);
707}
708#else
709static void
710pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
711{
712
713	pmap_invalidate_page_local(pmap, va);
714}
715#endif
716
717static __inline void
718pmap_update_page_local(pmap_t pmap, vm_offset_t va, pt_entry_t pte)
719{
720	u_int cpuid;
721
722	cpuid = PCPU_GET(cpuid);
723
724	if (is_kernel_pmap(pmap)) {
725		tlb_update(pmap, va, pte);
726		return;
727	}
728	if (pmap->pm_asid[cpuid].gen != PCPU_GET(asid_generation))
729		return;
730	else if (!CPU_ISSET(cpuid, &pmap->pm_active)) {
731		pmap->pm_asid[cpuid].gen = 0;
732		return;
733	}
734	tlb_update(pmap, va, pte);
735}
736
737#ifdef SMP
738struct pmap_update_page_arg {
739	pmap_t pmap;
740	vm_offset_t va;
741	pt_entry_t pte;
742};
743
744static void
745pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte)
746{
747	struct pmap_update_page_arg arg;
748
749	arg.pmap = pmap;
750	arg.va = va;
751	arg.pte = pte;
752	smp_rendezvous(0, pmap_update_page_action, 0, &arg);
753}
754
755static void
756pmap_update_page_action(void *arg)
757{
758	struct pmap_update_page_arg *p = arg;
759
760	pmap_update_page_local(p->pmap, p->va, p->pte);
761}
762#else
763static void
764pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte)
765{
766
767	pmap_update_page_local(pmap, va, pte);
768}
769#endif
770
771/*
772 *	Routine:	pmap_extract
773 *	Function:
774 *		Extract the physical page address associated
775 *		with the given map/virtual_address pair.
776 */
777vm_paddr_t
778pmap_extract(pmap_t pmap, vm_offset_t va)
779{
780	pt_entry_t *pte;
781	vm_offset_t retval = 0;
782
783	PMAP_LOCK(pmap);
784	pte = pmap_pte(pmap, va);
785	if (pte) {
786		retval = TLBLO_PTE_TO_PA(*pte) | (va & PAGE_MASK);
787	}
788	PMAP_UNLOCK(pmap);
789	return (retval);
790}
791
792/*
793 *	Routine:	pmap_extract_and_hold
794 *	Function:
795 *		Atomically extract and hold the physical page
796 *		with the given pmap and virtual address pair
797 *		if that mapping permits the given protection.
798 */
799vm_page_t
800pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
801{
802	pt_entry_t pte;
803	vm_page_t m;
804	vm_paddr_t pa;
805
806	m = NULL;
807	pa = 0;
808	PMAP_LOCK(pmap);
809retry:
810	pte = *pmap_pte(pmap, va);
811	if (pte != 0 && pte_test(&pte, PTE_V) &&
812	    (pte_test(&pte, PTE_D) || (prot & VM_PROT_WRITE) == 0)) {
813		if (vm_page_pa_tryrelock(pmap, TLBLO_PTE_TO_PA(pte), &pa))
814			goto retry;
815
816		m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(pte));
817		vm_page_hold(m);
818	}
819	PA_UNLOCK_COND(pa);
820	PMAP_UNLOCK(pmap);
821	return (m);
822}
823
824/***************************************************
825 * Low level mapping routines.....
826 ***************************************************/
827
828/*
829 * add a wired page to the kva
830 */
831void
832pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int attr)
833{
834	pt_entry_t *pte;
835	pt_entry_t opte, npte;
836
837#ifdef PMAP_DEBUG
838	printf("pmap_kenter:  va: %p -> pa: %p\n", (void *)va, (void *)pa);
839#endif
840	npte = TLBLO_PA_TO_PFN(pa) | PTE_D | PTE_V | PTE_G | PTE_W | attr;
841
842	pte = pmap_pte(kernel_pmap, va);
843	opte = *pte;
844	*pte = npte;
845	if (pte_test(&opte, PTE_V) && opte != npte)
846		pmap_update_page(kernel_pmap, va, npte);
847}
848
849void
850pmap_kenter(vm_offset_t va, vm_paddr_t pa)
851{
852
853	KASSERT(is_cacheable_mem(pa),
854		("pmap_kenter: memory at 0x%lx is not cacheable", (u_long)pa));
855
856	pmap_kenter_attr(va, pa, PTE_C_CACHE);
857}
858
859/*
860 * remove a page from the kernel pagetables
861 */
862 /* PMAP_INLINE */ void
863pmap_kremove(vm_offset_t va)
864{
865	pt_entry_t *pte;
866
867	/*
868	 * Write back all caches from the page being destroyed
869	 */
870	mips_dcache_wbinv_range_index(va, PAGE_SIZE);
871
872	pte = pmap_pte(kernel_pmap, va);
873	*pte = PTE_G;
874	pmap_invalidate_page(kernel_pmap, va);
875}
876
877/*
878 *	Used to map a range of physical addresses into kernel
879 *	virtual address space.
880 *
881 *	The value passed in '*virt' is a suggested virtual address for
882 *	the mapping. Architectures which can support a direct-mapped
883 *	physical to virtual region can return the appropriate address
884 *	within that region, leaving '*virt' unchanged. Other
885 *	architectures should map the pages starting at '*virt' and
886 *	update '*virt' with the first usable address after the mapped
887 *	region.
888 *
889 *	Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
890 */
891vm_offset_t
892pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
893{
894	vm_offset_t va, sva;
895
896	if (MIPS_DIRECT_MAPPABLE(end - 1))
897		return (MIPS_PHYS_TO_DIRECT(start));
898
899	va = sva = *virt;
900	while (start < end) {
901		pmap_kenter(va, start);
902		va += PAGE_SIZE;
903		start += PAGE_SIZE;
904	}
905	*virt = va;
906	return (sva);
907}
908
909/*
910 * Add a list of wired pages to the kva.
911 * This routine is only used for temporary
912 * kernel mappings that do not need to have
913 * page modification or references recorded.
914 * Note that old mappings are simply written
915 * over.  The pages *must* be wired.
916 */
917void
918pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
919{
920	int i;
921	vm_offset_t origva = va;
922
923	for (i = 0; i < count; i++) {
924		pmap_flush_pvcache(m[i]);
925		pmap_kenter(va, VM_PAGE_TO_PHYS(m[i]));
926		va += PAGE_SIZE;
927	}
928
929	mips_dcache_wbinv_range_index(origva, PAGE_SIZE*count);
930}
931
932/*
933 * this routine jerks page mappings from the
934 * kernel -- it is meant only for temporary mappings.
935 */
936void
937pmap_qremove(vm_offset_t va, int count)
938{
939	/*
940	 * No need to wb/inv caches here,
941	 *   pmap_kremove will do it for us
942	 */
943
944	while (count-- > 0) {
945		pmap_kremove(va);
946		va += PAGE_SIZE;
947	}
948}
949
950/***************************************************
951 * Page table page management routines.....
952 ***************************************************/
953
954/*  Revision 1.507
955 *
956 * Simplify the reference counting of page table pages.	 Specifically, use
957 * the page table page's wired count rather than its hold count to contain
958 * the reference count.
959 */
960
961/*
962 * This routine unholds page table pages, and if the hold count
963 * drops to zero, then it decrements the wire count.
964 */
965static PMAP_INLINE int
966pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
967{
968	--m->wire_count;
969	if (m->wire_count == 0)
970		return (_pmap_unwire_pte_hold(pmap, va, m));
971	else
972		return (0);
973}
974
975static int
976_pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
977{
978	pd_entry_t *pde;
979
980	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
981	/*
982	 * unmap the page table page
983	 */
984#ifdef __mips_n64
985	if (m->pindex < NUPDE)
986		pde = pmap_pde(pmap, va);
987	else
988		pde = pmap_segmap(pmap, va);
989#else
990	pde = pmap_pde(pmap, va);
991#endif
992	*pde = 0;
993	pmap->pm_stats.resident_count--;
994
995#ifdef __mips_n64
996	if (m->pindex < NUPDE) {
997		pd_entry_t *pdp;
998		vm_page_t pdpg;
999
1000		/*
1001		 * Recursively decrement next level pagetable refcount
1002		 */
1003		pdp = (pd_entry_t *)*pmap_segmap(pmap, va);
1004		pdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pdp));
1005		pmap_unwire_pte_hold(pmap, va, pdpg);
1006	}
1007#endif
1008	if (pmap->pm_ptphint == m)
1009		pmap->pm_ptphint = NULL;
1010
1011	/*
1012	 * If the page is finally unwired, simply free it.
1013	 */
1014	vm_page_free_zero(m);
1015	atomic_subtract_int(&cnt.v_wire_count, 1);
1016	return (1);
1017}
1018
1019/*
1020 * After removing a page table entry, this routine is used to
1021 * conditionally free the page, and manage the hold/wire counts.
1022 */
1023static int
1024pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
1025{
1026	unsigned ptepindex;
1027	pd_entry_t pteva;
1028
1029	if (va >= VM_MAXUSER_ADDRESS)
1030		return (0);
1031
1032	if (mpte == NULL) {
1033		ptepindex = pmap_pde_pindex(va);
1034		if (pmap->pm_ptphint &&
1035		    (pmap->pm_ptphint->pindex == ptepindex)) {
1036			mpte = pmap->pm_ptphint;
1037		} else {
1038			pteva = *pmap_pde(pmap, va);
1039			mpte = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pteva));
1040			pmap->pm_ptphint = mpte;
1041		}
1042	}
1043	return (pmap_unwire_pte_hold(pmap, va, mpte));
1044}
1045
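/*
 * Initialize the pmap for process 0.  Unlike pmap_pinit(), no segment
 * table page is allocated here; proc0 simply shares the kernel segmap.
 */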
1046void
1047pmap_pinit0(pmap_t pmap)
1048{
1049	int i;
1050
1051	PMAP_LOCK_INIT(pmap);
1052	pmap->pm_segtab = kernel_segmap;
1053	CPU_ZERO(&pmap->pm_active);
1054	pmap->pm_ptphint = NULL;
1055	for (i = 0; i < MAXCPU; i++) {
1056		pmap->pm_asid[i].asid = PMAP_ASID_RESERVED;
1057		pmap->pm_asid[i].gen = 0;
1058	}
1059	PCPU_SET(curpmap, pmap);
1060	TAILQ_INIT(&pmap->pm_pvlist);
1061	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1062}
1063
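/*
 * Ask the VM system to replenish the cache of free pages that lie below
 * the direct-map limit (KSEG0 on 32-bit, XKPHYS on n64), from which page
 * table pages are allocated.
 */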
1064void
1065pmap_grow_direct_page_cache()
1066{
1067
1068#ifdef __mips_n64
1069	vm_contig_grow_cache(3, 0, MIPS_XKPHYS_LARGEST_PHYS);
1070#else
1071	vm_contig_grow_cache(3, 0, MIPS_KSEG0_LARGEST_PHYS);
1072#endif
1073}
1074
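/*
 * Allocate a zeroed, wired page from the direct-mapped freelist and give
 * it the requested pindex.  Returns NULL if no page is currently free.
 */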
1075vm_page_t
1076pmap_alloc_direct_page(unsigned int index, int req)
1077{
1078	vm_page_t m;
1079
1080	m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, req);
1081	if (m == NULL)
1082		return (NULL);
1083
1084	if ((m->flags & PG_ZERO) == 0)
1085		pmap_zero_page(m);
1086
1087	m->pindex = index;
1088	atomic_add_int(&cnt.v_wire_count, 1);
1089	m->wire_count = 1;
1090	return (m);
1091}
1092
1093/*
1094 * Initialize a preallocated and zeroed pmap structure,
1095 * such as one in a vmspace structure.
1096 */
1097int
1098pmap_pinit(pmap_t pmap)
1099{
1100	vm_offset_t ptdva;
1101	vm_page_t ptdpg;
1102	int i;
1103
1104	PMAP_LOCK_INIT(pmap);
1105
1106	/*
1107	 * allocate the page directory page
1108	 */
1109	while ((ptdpg = pmap_alloc_direct_page(NUSERPGTBLS, VM_ALLOC_NORMAL)) == NULL)
1110	       pmap_grow_direct_page_cache();
1111
1112	ptdva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(ptdpg));
1113	pmap->pm_segtab = (pd_entry_t *)ptdva;
1114	CPU_ZERO(&pmap->pm_active);
1115	pmap->pm_ptphint = NULL;
1116	for (i = 0; i < MAXCPU; i++) {
1117		pmap->pm_asid[i].asid = PMAP_ASID_RESERVED;
1118		pmap->pm_asid[i].gen = 0;
1119	}
1120	TAILQ_INIT(&pmap->pm_pvlist);
1121	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1122
1123	return (1);
1124}
1125
1126/*
1127 * this routine is called if the page table page is not
1128 * mapped correctly.
1129 */
1130static vm_page_t
1131_pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
1132{
1133	vm_offset_t pageva;
1134	vm_page_t m;
1135
1136	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
1137	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
1138	    ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
1139
1140	/*
1141	 * Find or fabricate a new pagetable page
1142	 */
1143	if ((m = pmap_alloc_direct_page(ptepindex, VM_ALLOC_NORMAL)) == NULL) {
1144		if (flags & M_WAITOK) {
1145			PMAP_UNLOCK(pmap);
1146			vm_page_unlock_queues();
1147			pmap_grow_direct_page_cache();
1148			vm_page_lock_queues();
1149			PMAP_LOCK(pmap);
1150		}
1151
1152		/*
1153		 * Indicate the need to retry.	While waiting, the page
1154		 * table page may have been allocated.
1155		 */
1156		return (NULL);
1157	}
1158
1159	/*
1160	 * Map the pagetable page into the process address space, if it
1161	 * isn't already there.
1162	 */
1163	pageva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
1164
1165#ifdef __mips_n64
1166	if (ptepindex >= NUPDE) {
1167		pmap->pm_segtab[ptepindex - NUPDE] = (pd_entry_t)pageva;
1168	} else {
1169		pd_entry_t *pdep, *pde;
1170		int segindex = ptepindex >> (SEGSHIFT - PDRSHIFT);
1171		int pdeindex = ptepindex & (NPDEPG - 1);
1172		vm_page_t pg;
1173
1174		pdep = &pmap->pm_segtab[segindex];
1175		if (*pdep == NULL) {
1176			/* recurse for allocating page dir */
1177			if (_pmap_allocpte(pmap, NUPDE + segindex,
1178			    flags) == NULL) {
1179				/* alloc failed, release current */
1180				--m->wire_count;
1181				atomic_subtract_int(&cnt.v_wire_count, 1);
1182				vm_page_free_zero(m);
1183				return (NULL);
1184			}
1185		} else {
1186			pg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pdep));
1187			pg->wire_count++;
1188		}
1189		/* Next level entry */
1190		pde = (pd_entry_t *)*pdep;
1191		pde[pdeindex] = (pd_entry_t)pageva;
1192		pmap->pm_ptphint = m;
1193	}
1194#else
1195	pmap->pm_segtab[ptepindex] = (pd_entry_t)pageva;
1196#endif
1197	pmap->pm_stats.resident_count++;
1198
1199	/*
1200	 * Set the page table hint
1201	 */
1202	pmap->pm_ptphint = m;
1203	return (m);
1204}
1205
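/*
 * Return the page table page that maps 'va', bumping its wire count, and
 * allocate it via _pmap_allocpte() if it is not yet present.  With
 * M_WAITOK the allocation may sleep (dropping the pmap and page queue
 * locks), after which the lookup is retried.
 */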
1206static vm_page_t
1207pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
1208{
1209	unsigned ptepindex;
1210	pd_entry_t *pde;
1211	vm_page_t m;
1212
1213	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
1214	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
1215	    ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
1216
1217	/*
1218	 * Calculate pagetable page index
1219	 */
1220	ptepindex = pmap_pde_pindex(va);
1221retry:
1222	/*
1223	 * Get the page directory entry
1224	 */
1225	pde = pmap_pde(pmap, va);
1226
1227	/*
1228	 * If the page table page is mapped, we just increment the hold
1229	 * count, and activate it.
1230	 */
1231	if (pde != NULL && *pde != NULL) {
1232		/*
1233		 * In order to get the page table page, try the hint first.
1234		 */
1235		if (pmap->pm_ptphint &&
1236		    (pmap->pm_ptphint->pindex == ptepindex)) {
1237			m = pmap->pm_ptphint;
1238		} else {
1239			m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pde));
1240			pmap->pm_ptphint = m;
1241		}
1242		m->wire_count++;
1243	} else {
1244		/*
1245		 * Here if the pte page isn't mapped, or if it has been
1246		 * deallocated.
1247		 */
1248		m = _pmap_allocpte(pmap, ptepindex, flags);
1249		if (m == NULL && (flags & M_WAITOK))
1250			goto retry;
1251	}
1252	return (m);
1253}
1254
1255
1256/***************************************************
1257* Pmap allocation/deallocation routines.
1258 ***************************************************/
1259/*
1260 *  Revision 1.397
1261 *  - Merged pmap_release and pmap_release_free_page.  When pmap_release is
1262 *    called only the page directory page(s) can be left in the pmap pte
1263 *    object, since all page table pages will have been freed by
1264 *    pmap_remove_pages and pmap_remove.  In addition, there can only be one
1265 *    reference to the pmap and the page directory is wired, so the page(s)
1266 *    can never be busy.  So all there is to do is clear the magic mappings
1267 *    from the page directory and free the page(s).
1268 */
1269
1270
1271/*
1272 * Release any resources held by the given physical map.
1273 * Called when a pmap initialized by pmap_pinit is being released.
1274 * Should only be called if the map contains no valid mappings.
1275 */
1276void
1277pmap_release(pmap_t pmap)
1278{
1279	vm_offset_t ptdva;
1280	vm_page_t ptdpg;
1281
1282	KASSERT(pmap->pm_stats.resident_count == 0,
1283	    ("pmap_release: pmap resident count %ld != 0",
1284	    pmap->pm_stats.resident_count));
1285
1286	ptdva = (vm_offset_t)pmap->pm_segtab;
1287	ptdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(ptdva));
1288
1289	ptdpg->wire_count--;
1290	atomic_subtract_int(&cnt.v_wire_count, 1);
1291	vm_page_free_zero(ptdpg);
1292	PMAP_LOCK_DESTROY(pmap);
1293}
1294
1295/*
1296 * grow the number of kernel page table entries, if needed
1297 */
1298void
1299pmap_growkernel(vm_offset_t addr)
1300{
1301	vm_page_t nkpg;
1302	pd_entry_t *pde, *pdpe;
1303	pt_entry_t *pte;
1304	int i;
1305
1306	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
1307	addr = roundup2(addr, NBSEG);
1308	if (addr - 1 >= kernel_map->max_offset)
1309		addr = kernel_map->max_offset;
1310	while (kernel_vm_end < addr) {
1311		pdpe = pmap_segmap(kernel_pmap, kernel_vm_end);
1312#ifdef __mips_n64
1313		if (*pdpe == 0) {
1314			/* new intermediate page table entry */
1315			nkpg = pmap_alloc_direct_page(nkpt, VM_ALLOC_INTERRUPT);
1316			if (nkpg == NULL)
1317				panic("pmap_growkernel: no memory to grow kernel");
1318			*pdpe = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg));
1319			continue; /* try again */
1320		}
1321#endif
1322		pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
1323		if (*pde != 0) {
1324			kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
1325			if (kernel_vm_end - 1 >= kernel_map->max_offset) {
1326				kernel_vm_end = kernel_map->max_offset;
1327				break;
1328			}
1329			continue;
1330		}
1331
1332		/*
1333		 * This index is bogus, but out of the way
1334		 */
1335		nkpg = pmap_alloc_direct_page(nkpt, VM_ALLOC_INTERRUPT);
1336		if (!nkpg)
1337			panic("pmap_growkernel: no memory to grow kernel");
1338		nkpt++;
1339		*pde = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg));
1340
1341		/*
1342		 * The R[4-7]?00 stores only one copy of the Global bit in
1343 * the translation lookaside buffer for each pair of pages.
1344 * Thus invalid entries must have the Global bit set so that when
1345 * the EntryLo and EntryHi G bits are ANDed together they
1346 * produce a global bit to store in the TLB.
1347		 */
1348		pte = (pt_entry_t *)*pde;
1349		for (i = 0; i < NPTEPG; i++)
1350			pte[i] = PTE_G;
1351
1352		kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
1353		if (kernel_vm_end - 1 >= kernel_map->max_offset) {
1354			kernel_vm_end = kernel_map->max_offset;
1355			break;
1356		}
1357	}
1358}
1359
1360/***************************************************
1361* page management routines.
1362 ***************************************************/
1363
1364/*
1365 * free the pv_entry back to the free list
1366 */
1367static PMAP_INLINE void
1368free_pv_entry(pv_entry_t pv)
1369{
1370
1371	pv_entry_count--;
1372	uma_zfree(pvzone, pv);
1373}
1374
1375/*
1376 * get a new pv_entry, allocating a block from the system
1377 * when needed.
1378 * the memory allocation is performed bypassing the malloc code
1379 * because of the possibility of allocations at interrupt time.
1380 */
1381static pv_entry_t
1382get_pv_entry(pmap_t locked_pmap)
1383{
1384	static const struct timeval printinterval = { 60, 0 };
1385	static struct timeval lastprint;
1386	struct vpgqueues *vpq;
1387	pt_entry_t *pte, oldpte;
1388	pmap_t pmap;
1389	pv_entry_t allocated_pv, next_pv, pv;
1390	vm_offset_t va;
1391	vm_page_t m;
1392
1393	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
1394	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1395	allocated_pv = uma_zalloc(pvzone, M_NOWAIT);
1396	if (allocated_pv != NULL) {
1397		pv_entry_count++;
1398		if (pv_entry_count > pv_entry_high_water)
1399			pagedaemon_wakeup();
1400		else
1401			return (allocated_pv);
1402	}
1403	/*
1404	 * Reclaim pv entries: At first, destroy mappings to inactive
1405	 * pages.  After that, if a pv entry is still needed, destroy
1406	 * mappings to active pages.
1407	 */
1408	if (ratecheck(&lastprint, &printinterval))
1409		printf("Approaching the limit on PV entries, "
1410		    "increase the vm.pmap.shpgperproc tunable.\n");
1411	vpq = &vm_page_queues[PQ_INACTIVE];
1412retry:
1413	TAILQ_FOREACH(m, &vpq->pl, pageq) {
1414		if ((m->flags & PG_MARKER) != 0 || m->hold_count || m->busy)
1415			continue;
1416		TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
1417			va = pv->pv_va;
1418			pmap = pv->pv_pmap;
1419			/* Avoid deadlock and lock recursion. */
1420			if (pmap > locked_pmap)
1421				PMAP_LOCK(pmap);
1422			else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
1423				continue;
1424			pmap->pm_stats.resident_count--;
1425			pte = pmap_pte(pmap, va);
1426			KASSERT(pte != NULL, ("pte"));
1427			oldpte = *pte;
1428			if (is_kernel_pmap(pmap))
1429				*pte = PTE_G;
1430			else
1431				*pte = 0;
1432			KASSERT(!pte_test(&oldpte, PTE_W),
1433			    ("wired pte for unwired page"));
1434			if (m->md.pv_flags & PV_TABLE_REF)
1435				vm_page_aflag_set(m, PGA_REFERENCED);
1436			if (pte_test(&oldpte, PTE_D))
1437				vm_page_dirty(m);
1438			pmap_invalidate_page(pmap, va);
1439			TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
1440			m->md.pv_list_count--;
1441			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1442			pmap_unuse_pt(pmap, va, pv->pv_ptem);
1443			if (pmap != locked_pmap)
1444				PMAP_UNLOCK(pmap);
1445			if (allocated_pv == NULL)
1446				allocated_pv = pv;
1447			else
1448				free_pv_entry(pv);
1449		}
1450		if (TAILQ_EMPTY(&m->md.pv_list)) {
1451			vm_page_aflag_clear(m, PGA_WRITEABLE);
1452			m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
1453		}
1454	}
1455	if (allocated_pv == NULL) {
1456		if (vpq == &vm_page_queues[PQ_INACTIVE]) {
1457			vpq = &vm_page_queues[PQ_ACTIVE];
1458			goto retry;
1459		}
1460		panic("get_pv_entry: increase the vm.pmap.shpgperproc tunable");
1461	}
1462	return (allocated_pv);
1463}
1464
1465/*
1466 *  Revision 1.370
1467 *
1468 *  Move pmap_collect() out of the machine-dependent code, rename it
1469 *  to reflect its new location, and add page queue and flag locking.
1470 *
1471 *  Notes: (1) alpha, i386, and ia64 had identical implementations
1472 *  of pmap_collect() in terms of machine-independent interfaces;
1473 *  (2) sparc64 doesn't require it; (3) powerpc had it as a TODO.
1474 *
1475 *  MIPS implementation was identical to alpha [Junos 8.2]
1476 */
1477
1478/*
1479 * If it is the first entry on the list, it is actually
1480 * in the header and we must copy the following entry up
1481 * to the header.  Otherwise we must search the list for
1482 * the entry.  In either case we free the now unused entry.
1483 */
1484
1485static pv_entry_t
1486pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1487{
1488	pv_entry_t pv;
1489
1490	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1491	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1492	if (pvh->pv_list_count < pmap->pm_stats.resident_count) {
1493		TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
1494			if (pmap == pv->pv_pmap && va == pv->pv_va)
1495				break;
1496		}
1497	} else {
1498		TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
1499			if (va == pv->pv_va)
1500				break;
1501		}
1502	}
1503	if (pv != NULL) {
1504		TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
1505		pvh->pv_list_count--;
1506		TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
1507	}
1508	return (pv);
1509}
1510
1511static void
1512pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1513{
1514	pv_entry_t pv;
1515
1516	pv = pmap_pvh_remove(pvh, pmap, va);
1517	KASSERT(pv != NULL, ("pmap_pvh_free: pv not found, pa %lx va %lx",
1518	     (u_long)VM_PAGE_TO_PHYS(member2struct(vm_page, md, pvh)),
1519	     (u_long)va));
1520	free_pv_entry(pv);
1521}
1522
1523static void
1524pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
1525{
1526
1527	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1528	pmap_pvh_free(&m->md, pmap, va);
1529	if (TAILQ_EMPTY(&m->md.pv_list))
1530		vm_page_aflag_clear(m, PGA_WRITEABLE);
1531}
1532
1533/*
1534 * Conditionally create a pv entry.
1535 */
1536static boolean_t
1537pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte, vm_offset_t va,
1538    vm_page_t m)
1539{
1540	pv_entry_t pv;
1541
1542	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1543	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1544	if (pv_entry_count < pv_entry_high_water &&
1545	    (pv = uma_zalloc(pvzone, M_NOWAIT)) != NULL) {
1546		pv_entry_count++;
1547		pv->pv_va = va;
1548		pv->pv_pmap = pmap;
1549		pv->pv_ptem = mpte;
1550		TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
1551		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
1552		m->md.pv_list_count++;
1553		return (TRUE);
1554	} else
1555		return (FALSE);
1556}
1557
1558/*
1559 * pmap_remove_pte: do the things to unmap a page in a process
1560 */
1561static int
1562pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va)
1563{
1564	pt_entry_t oldpte;
1565	vm_page_t m;
1566	vm_paddr_t pa;
1567
1568	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1569	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1570
1571	oldpte = *ptq;
1572	if (is_kernel_pmap(pmap))
1573		*ptq = PTE_G;
1574	else
1575		*ptq = 0;
1576
1577	if (pte_test(&oldpte, PTE_W))
1578		pmap->pm_stats.wired_count -= 1;
1579
1580	pmap->pm_stats.resident_count -= 1;
1581	pa = TLBLO_PTE_TO_PA(oldpte);
1582
1583	if (page_is_managed(pa)) {
1584		m = PHYS_TO_VM_PAGE(pa);
1585		if (pte_test(&oldpte, PTE_D)) {
1586			KASSERT(!pte_test(&oldpte, PTE_RO),
1587			    ("%s: modified page not writable: va: %p, pte: %#jx",
1588			    __func__, (void *)va, (uintmax_t)oldpte));
1589			vm_page_dirty(m);
1590		}
1591		if (m->md.pv_flags & PV_TABLE_REF)
1592			vm_page_aflag_set(m, PGA_REFERENCED);
1593		m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
1594
1595		pmap_remove_entry(pmap, m, va);
1596	}
1597	return (pmap_unuse_pt(pmap, va, NULL));
1598}
1599
1600/*
1601 * Remove a single page from a process address space
1602 */
1603static void
1604pmap_remove_page(struct pmap *pmap, vm_offset_t va)
1605{
1606	pt_entry_t *ptq;
1607
1608	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1609	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1610	ptq = pmap_pte(pmap, va);
1611
1612	/*
1613	 * if there is no pte for this address, just skip it!!!
1614	 */
1615	if (!ptq || !pte_test(ptq, PTE_V)) {
1616		return;
1617	}
1618
1619	/*
1620	 * Write back all caches from the page being destroyed
1621	 */
1622	mips_dcache_wbinv_range_index(va, PAGE_SIZE);
1623
1624	/*
1625	 * get a local va for mappings for this pmap.
1626	 */
1627	(void)pmap_remove_pte(pmap, ptq, va);
1628	pmap_invalidate_page(pmap, va);
1629
1630	return;
1631}
1632
1633/*
1634 *	Remove the given range of addresses from the specified map.
1635 *
1636 *	It is assumed that the start and end are properly
1637 *	rounded to the page size.
1638 */
1639void
1640pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
1641{
1642	vm_offset_t va_next;
1643	pd_entry_t *pde, *pdpe;
1644	pt_entry_t *pte;
1645
1646	if (pmap == NULL)
1647		return;
1648
1649	if (pmap->pm_stats.resident_count == 0)
1650		return;
1651
1652	vm_page_lock_queues();
1653	PMAP_LOCK(pmap);
1654
1655	 * Special handling for removing a single page: a very common
1656	 * operation for which we can short-circuit some code.
1657	 * and easy to short circuit some code.
1658	 */
1659	if ((sva + PAGE_SIZE) == eva) {
1660		pmap_remove_page(pmap, sva);
1661		goto out;
1662	}
1663	for (; sva < eva; sva = va_next) {
1664		pdpe = pmap_segmap(pmap, sva);
1665#ifdef __mips_n64
1666		if (*pdpe == 0) {
1667			va_next = (sva + NBSEG) & ~SEGMASK;
1668			if (va_next < sva)
1669				va_next = eva;
1670			continue;
1671		}
1672#endif
1673		va_next = (sva + NBPDR) & ~PDRMASK;
1674		if (va_next < sva)
1675			va_next = eva;
1676
1677		pde = pmap_pdpe_to_pde(pdpe, sva);
1678		if (*pde == 0)
1679			continue;
1680		if (va_next > eva)
1681			va_next = eva;
1682		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next;
1683		    pte++, sva += PAGE_SIZE) {
1684			pmap_remove_page(pmap, sva);
1685		}
1686	}
1687out:
1688	vm_page_unlock_queues();
1689	PMAP_UNLOCK(pmap);
1690}
1691
1692/*
1693 *	Routine:	pmap_remove_all
1694 *	Function:
1695 *		Removes this physical page from
1696 *		all physical maps in which it resides.
1697 *		Reflects back modify bits to the pager.
1698 *
1699 *	Notes:
1700 *		Original versions of this routine were very
1701 *		inefficient because they iteratively called
1702 *		pmap_remove (slow...)
1703 */
1704
1705void
1706pmap_remove_all(vm_page_t m)
1707{
1708	pv_entry_t pv;
1709	pt_entry_t *pte, tpte;
1710
1711	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1712	    ("pmap_remove_all: page %p is not managed", m));
1713	vm_page_lock_queues();
1714
1715	if (m->md.pv_flags & PV_TABLE_REF)
1716		vm_page_aflag_set(m, PGA_REFERENCED);
1717
1718	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
1719		PMAP_LOCK(pv->pv_pmap);
1720
1721		/*
1722		 * If this is the last mapping, write back all caches from
1723		 * the page being destroyed.
1724		 */
1725		if (m->md.pv_list_count == 1)
1726			mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
1727
1728		pv->pv_pmap->pm_stats.resident_count--;
1729
1730		pte = pmap_pte(pv->pv_pmap, pv->pv_va);
1731
1732		tpte = *pte;
1733		if (is_kernel_pmap(pv->pv_pmap))
1734			*pte = PTE_G;
1735		else
1736			*pte = 0;
1737
1738		if (pte_test(&tpte, PTE_W))
1739			pv->pv_pmap->pm_stats.wired_count--;
1740
1741		/*
1742		 * Update the vm_page_t clean and reference bits.
1743		 */
1744		if (pte_test(&tpte, PTE_D)) {
1745			KASSERT(!pte_test(&tpte, PTE_RO),
1746			    ("%s: modified page not writable: va: %p, pte: %#jx",
1747			    __func__, (void *)pv->pv_va, (uintmax_t)tpte));
1748			vm_page_dirty(m);
1749		}
1750		pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
1751
1752		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
1753		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1754		m->md.pv_list_count--;
1755		pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
1756		PMAP_UNLOCK(pv->pv_pmap);
1757		free_pv_entry(pv);
1758	}
1759
1760	vm_page_aflag_clear(m, PGA_WRITEABLE);
1761	m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
1762	vm_page_unlock_queues();
1763}
1764
1765/*
1766 *	Set the physical protection on the
1767 *	specified range of this map as requested.
1768 */
1769void
1770pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
1771{
1772	pt_entry_t *pte;
1773	pd_entry_t *pde, *pdpe;
1774	vm_offset_t va_next;
1775
1776	if (pmap == NULL)
1777		return;
1778
1779	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1780		pmap_remove(pmap, sva, eva);
1781		return;
1782	}
1783	if (prot & VM_PROT_WRITE)
1784		return;
1785
1786	vm_page_lock_queues();
1787	PMAP_LOCK(pmap);
1788	for (; sva < eva; sva = va_next) {
1789		pt_entry_t pbits;
1790		vm_page_t m;
1791		vm_paddr_t pa;
1792
1793		pdpe = pmap_segmap(pmap, sva);
1794#ifdef __mips_n64
1795		if (*pdpe == 0) {
1796			va_next = (sva + NBSEG) & ~SEGMASK;
1797			if (va_next < sva)
1798				va_next = eva;
1799			continue;
1800		}
1801#endif
1802		va_next = (sva + NBPDR) & ~PDRMASK;
1803		if (va_next < sva)
1804			va_next = eva;
1805
1806		pde = pmap_pdpe_to_pde(pdpe, sva);
1807		if (pde == NULL || *pde == NULL)
1808			continue;
1809		if (va_next > eva)
1810			va_next = eva;
1811
1812		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
1813		     sva += PAGE_SIZE) {
1814
1815			/* Skip invalid PTEs */
1816			if (!pte_test(pte, PTE_V))
1817				continue;
1818			pbits = *pte;
1819			pa = TLBLO_PTE_TO_PA(pbits);
1820			if (page_is_managed(pa) && pte_test(&pbits, PTE_D)) {
1821				m = PHYS_TO_VM_PAGE(pa);
1822				vm_page_dirty(m);
1823				m->md.pv_flags &= ~PV_TABLE_MOD;
1824			}
1825			pte_clear(&pbits, PTE_D);
1826			pte_set(&pbits, PTE_RO);
1827
1828			if (pbits != *pte) {
1829				*pte = pbits;
1830				pmap_update_page(pmap, sva, pbits);
1831			}
1832		}
1833	}
1834	vm_page_unlock_queues();
1835	PMAP_UNLOCK(pmap);
1836}
1837
1838/*
1839 *	Insert the given physical page (p) at
1840 *	the specified virtual address (v) in the
1841 *	target physical map with the protection requested.
1842 *
1843 *	If specified, the page will be wired down, meaning
1844 *	that the related pte can not be reclaimed.
1845 *
1846 *	NB:  This is the only routine which MAY NOT lazy-evaluate
1847 *	or lose information.  That is, this routine must actually
1848 *	insert this page into the given map NOW.
1849 */
1850void
1851pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
1852    vm_prot_t prot, boolean_t wired)
1853{
1854	vm_paddr_t pa, opa;
1855	pt_entry_t *pte;
1856	pt_entry_t origpte, newpte;
1857	pv_entry_t pv;
1858	vm_page_t mpte, om;
1859	pt_entry_t rw = 0;
1860
1861	if (pmap == NULL)
1862		return;
1863
1864	va &= ~PAGE_MASK;
1865 	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
1866	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0,
1867	    ("pmap_enter: page %p is not busy", m));
1868
1869	mpte = NULL;
1870
1871	vm_page_lock_queues();
1872	PMAP_LOCK(pmap);
1873
1874	/*
1875	 * In the case that a page table page is not resident, we are
1876	 * creating it here.
1877	 */
1878	if (va < VM_MAXUSER_ADDRESS) {
1879		mpte = pmap_allocpte(pmap, va, M_WAITOK);
1880	}
1881	pte = pmap_pte(pmap, va);
1882
1883	/*
1884	 * Page Directory table entry not valid, we need a new PT page
1885	 */
1886	if (pte == NULL) {
1887		panic("pmap_enter: invalid page directory, pdir=%p, va=%p",
1888		    (void *)pmap->pm_segtab, (void *)va);
1889	}
1890	pa = VM_PAGE_TO_PHYS(m);
1891	om = NULL;
1892	origpte = *pte;
1893	opa = TLBLO_PTE_TO_PA(origpte);
1894
1895	/*
1896	 * Mapping has not changed, must be protection or wiring change.
1897	 */
1898	if (pte_test(&origpte, PTE_V) && opa == pa) {
1899		/*
1900		 * Wiring change, just update stats. We don't worry about
1901		 * wiring PT pages as they remain resident as long as there
1902		 * are valid mappings in them. Hence, if a user page is
1903		 * wired, the PT page will be also.
1904		 */
1905		if (wired && !pte_test(&origpte, PTE_W))
1906			pmap->pm_stats.wired_count++;
1907		else if (!wired && pte_test(&origpte, PTE_W))
1908			pmap->pm_stats.wired_count--;
1909
1910		KASSERT(!pte_test(&origpte, PTE_D | PTE_RO),
1911		    ("%s: modified page not writable: va: %p, pte: %#jx",
1912		    __func__, (void *)va, (uintmax_t)origpte));
1913
1914		/*
1915		 * Remove extra pte reference
1916		 */
1917		if (mpte)
1918			mpte->wire_count--;
1919
1920		if (page_is_managed(opa)) {
1921			om = m;
1922		}
1923		goto validate;
1924	}
1925
1926	pv = NULL;
1927
1928	/*
1929	 * Mapping has changed, invalidate old range and fall through to
1930	 * handle validating new mapping.
1931	 */
1932	if (opa) {
1933		if (pte_test(&origpte, PTE_W))
1934			pmap->pm_stats.wired_count--;
1935
1936		if (page_is_managed(opa)) {
1937			om = PHYS_TO_VM_PAGE(opa);
1938			pv = pmap_pvh_remove(&om->md, pmap, va);
1939		}
1940		if (mpte != NULL) {
1941			mpte->wire_count--;
1942			KASSERT(mpte->wire_count > 0,
1943			    ("pmap_enter: missing reference to page table page,"
1944			    " va: %p", (void *)va));
1945		}
1946	} else
1947		pmap->pm_stats.resident_count++;
1948
1949	/*
1950	 * Enter on the PV list if part of our managed memory. Note that we
1951	 * raise IPL while manipulating pv_table since pmap_enter can be
1952	 * called at interrupt time.
1953	 */
1954	if ((m->oflags & VPO_UNMANAGED) == 0) {
1955		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
1956		    ("pmap_enter: managed mapping within the clean submap"));
1957		if (pv == NULL)
1958			pv = get_pv_entry(pmap);
1959		pv->pv_va = va;
1960		pv->pv_pmap = pmap;
1961		pv->pv_ptem = mpte;
1962		TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
1963		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
1964		m->md.pv_list_count++;
1965	} else if (pv != NULL)
1966		free_pv_entry(pv);
1967
1968	/*
1969	 * Increment counters
1970	 */
1971	if (wired)
1972		pmap->pm_stats.wired_count++;
1973
1974validate:
1975	if ((access & VM_PROT_WRITE) != 0)
1976		m->md.pv_flags |= PV_TABLE_MOD | PV_TABLE_REF;
1977	rw = init_pte_prot(va, m, prot);
1978
1979#ifdef PMAP_DEBUG
1980	printf("pmap_enter:  va: %p -> pa: %p\n", (void *)va, (void *)pa);
1981#endif
1982	/*
1983	 * Now validate mapping with desired protection/wiring.
1984	 */
1985	newpte = TLBLO_PA_TO_PFN(pa) | rw | PTE_V;
1986
1987	if (is_cacheable_mem(pa))
1988		newpte |= PTE_C_CACHE;
1989	else
1990		newpte |= PTE_C_UNCACHED;
1991
1992	if (wired)
1993		newpte |= PTE_W;
1994
1995	if (is_kernel_pmap(pmap))
1996	         newpte |= PTE_G;
1997
1998	/*
1999	 * if the mapping or permission bits are different, we need to
2000	 * update the pte.
2001	 */
2002	if (origpte != newpte) {
2003		if (pte_test(&origpte, PTE_V)) {
2004			*pte = newpte;
2005			if (page_is_managed(opa) && (opa != pa)) {
2006				if (om->md.pv_flags & PV_TABLE_REF)
2007					vm_page_aflag_set(om, PGA_REFERENCED);
2008				om->md.pv_flags &=
2009				    ~(PV_TABLE_REF | PV_TABLE_MOD);
2010			}
2011			if (pte_test(&origpte, PTE_D)) {
2012				KASSERT(!pte_test(&origpte, PTE_RO),
2013				    ("pmap_enter: modified page not writable:"
2014				    " va: %p, pte: %#jx", (void *)va, (uintmax_t)origpte));
2015				if (page_is_managed(opa))
2016					vm_page_dirty(om);
2017			}
2018			if (page_is_managed(opa) &&
2019			    TAILQ_EMPTY(&om->md.pv_list))
2020				vm_page_aflag_clear(om, PGA_WRITEABLE);
2021		} else {
2022			*pte = newpte;
2023		}
2024	}
2025	pmap_update_page(pmap, va, newpte);
2026
2027	/*
2028	 * Sync I & D caches for executable pages.  Do this only if the
2029	 * target pmap belongs to the current process.  Otherwise, an
2030	 * unresolvable TLB miss may occur.
2031	 */
2032	if (!is_kernel_pmap(pmap) && (pmap == &curproc->p_vmspace->vm_pmap) &&
2033	    (prot & VM_PROT_EXECUTE)) {
2034		mips_icache_sync_range(va, PAGE_SIZE);
2035		mips_dcache_wbinv_range(va, PAGE_SIZE);
2036	}
2037	vm_page_unlock_queues();
2038	PMAP_UNLOCK(pmap);
2039}
2040
2041/*
2042 * this code makes some *MAJOR* assumptions:
2043 * 1. Current pmap & pmap exists.
2044 * 2. Not wired.
2045 * 3. Read access.
2046 * 4. No page table pages.
2047 * but is *MUCH* faster than pmap_enter...
2048 */
2049
2050void
2051pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
2052{
2053
2054	vm_page_lock_queues();
2055	PMAP_LOCK(pmap);
2056	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
2057	vm_page_unlock_queues();
2058	PMAP_UNLOCK(pmap);
2059}
2060
2061static vm_page_t
2062pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
2063    vm_prot_t prot, vm_page_t mpte)
2064{
2065	pt_entry_t *pte;
2066	vm_paddr_t pa;
2067
2068	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
2069	    (m->oflags & VPO_UNMANAGED) != 0,
2070	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
2071	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2072	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2073
2074	/*
2075	 * In the case that a page table page is not resident, we are
2076	 * creating it here.
2077	 */
2078	if (va < VM_MAXUSER_ADDRESS) {
2079		pd_entry_t *pde;
2080		unsigned ptepindex;
2081
2082		/*
2083		 * Calculate pagetable page index
2084		 */
2085		ptepindex = pmap_pde_pindex(va);
2086		if (mpte && (mpte->pindex == ptepindex)) {
2087			mpte->wire_count++;
2088		} else {
2089			/*
2090			 * Get the page directory entry
2091			 */
2092			pde = pmap_pde(pmap, va);
2093
2094			/*
2095			 * If the page table page is mapped, we just
2096			 * increment the hold count, and activate it.
2097			 */
2098			if (pde && *pde != 0) {
2099				if (pmap->pm_ptphint &&
2100				    (pmap->pm_ptphint->pindex == ptepindex)) {
2101					mpte = pmap->pm_ptphint;
2102				} else {
2103					mpte = PHYS_TO_VM_PAGE(
2104						MIPS_DIRECT_TO_PHYS(*pde));
2105					pmap->pm_ptphint = mpte;
2106				}
2107				mpte->wire_count++;
2108			} else {
2109				mpte = _pmap_allocpte(pmap, ptepindex,
2110				    M_NOWAIT);
2111				if (mpte == NULL)
2112					return (mpte);
2113			}
2114		}
2115	} else {
2116		mpte = NULL;
2117	}
2118
2119	pte = pmap_pte(pmap, va);
2120	if (pte_test(pte, PTE_V)) {
2121		if (mpte != NULL) {
2122			mpte->wire_count--;
2123			mpte = NULL;
2124		}
2125		return (mpte);
2126	}
2127
2128	/*
2129	 * Enter on the PV list if part of our managed memory.
2130	 */
2131	if ((m->oflags & VPO_UNMANAGED) == 0 &&
2132	    !pmap_try_insert_pv_entry(pmap, mpte, va, m)) {
2133		if (mpte != NULL) {
2134			pmap_unwire_pte_hold(pmap, va, mpte);
2135			mpte = NULL;
2136		}
2137		return (mpte);
2138	}
2139
2140	/*
2141	 * Increment counters
2142	 */
2143	pmap->pm_stats.resident_count++;
2144
2145	pa = VM_PAGE_TO_PHYS(m);
2146
2147	/*
2148	 * Now validate mapping with RO protection
2149	 */
2150	*pte = TLBLO_PA_TO_PFN(pa) | PTE_V;
2151
2152	if (is_cacheable_mem(pa))
2153		*pte |= PTE_C_CACHE;
2154	else
2155		*pte |= PTE_C_UNCACHED;
2156
2157	if (is_kernel_pmap(pmap))
2158		*pte |= PTE_G;
2159	else {
2160		*pte |= PTE_RO;
2161		/*
2162		 * Sync I & D caches.  Do this only if the target pmap
2163		 * belongs to the current process.  Otherwise, an
2164		 * unresolvable TLB miss may occur.
		 */
2165		if (pmap == &curproc->p_vmspace->vm_pmap) {
2166			va &= ~PAGE_MASK;
2167			mips_icache_sync_range(va, PAGE_SIZE);
2168			mips_dcache_wbinv_range(va, PAGE_SIZE);
2169		}
2170	}
2171	return (mpte);
2172}
2173
2174/*
2175 * Make a temporary mapping for a physical address.  This is only intended
2176 * to be used for panic dumps.
2177 *
2178 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
2179 */
2180void *
2181pmap_kenter_temporary(vm_paddr_t pa, int i)
2182{
2183	vm_offset_t va;
2184
2185	if (i != 0)
2186		printf("%s: ERROR!!! More than one page of virtual address mapping not supported\n",
2187		    __func__);
2188
2189	if (MIPS_DIRECT_MAPPABLE(pa)) {
2190		va = MIPS_PHYS_TO_DIRECT(pa);
2191	} else {
2192#ifndef __mips_n64    /* XXX : to be converted to new style */
2193		int cpu;
2194		register_t intr;
2195		struct local_sysmaps *sysm;
2196		pt_entry_t *pte, npte;
2197
2198		/* If this is used for anything other than crash dumps, we may
2199		 * need to leave interrupts disabled on return.  If crash dumps
2200		 * don't work when we get to this point, consider leaving
2201		 * interrupts disabled as a starting point.
2202		 */
2203		intr = intr_disable();
2204		cpu = PCPU_GET(cpuid);
2205		sysm = &sysmap_lmem[cpu];
2206		/* Since this is for the debugger, no locks or any other fun */
2207		npte = TLBLO_PA_TO_PFN(pa) | PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
2208		pte = pmap_pte(kernel_pmap, sysm->base);
2209		*pte = npte;
2210		sysm->valid1 = 1;
2211		pmap_update_page(kernel_pmap, sysm->base, npte);
2212		va = sysm->base;
2213		intr_restore(intr);
2214#endif
2215	}
2216	return ((void *)va);
2217}
2218
2219void
2220pmap_kenter_temporary_free(vm_paddr_t pa)
2221{
2222#ifndef __mips_n64    /* XXX : to be converted to new style */
2223	int cpu;
2224	register_t intr;
2225	struct local_sysmaps *sysm;
2226#endif
2227
2228	if (MIPS_DIRECT_MAPPABLE(pa)) {
2229		/* nothing to do for this case */
2230		return;
2231	}
2232#ifndef __mips_n64    /* XXX : to be converted to new style */
2233	cpu = PCPU_GET(cpuid);
2234	sysm = &sysmap_lmem[cpu];
2235	if (sysm->valid1) {
2236		pt_entry_t *pte;
2237
2238		intr = intr_disable();
2239		pte = pmap_pte(kernel_pmap, sysm->base);
2240		*pte = PTE_G;
2241		pmap_invalidate_page(kernel_pmap, sysm->base);
2242		intr_restore(intr);
2243		sysm->valid1 = 0;
2244	}
2245#endif
2246}
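
/*
 * Illustrative sketch (not part of this file): a dump routine walking
 * physical memory one page at a time might use the pair above as
 *
 *	void *va = pmap_kenter_temporary(pa, 0);
 *	...copy PAGE_SIZE bytes out of va...
 *	pmap_kenter_temporary_free(pa);
 *
 * Only a single page of temporary mapping is supported, hence the
 * warning printed when i != 0.
 */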
2247
2248/*
2249 * The code formerly here was moved to the machine-independent
2250 *	vm_map_pmap_enter().
2251 */
2252
2253/*
2254 * Maps a sequence of resident pages belonging to the same object.
2255 * The sequence begins with the given page m_start.  This page is
2256 * mapped at the given virtual address start.  Each subsequent page is
2257 * mapped at a virtual address that is offset from start by the same
2258 * amount as the page is offset from m_start within the object.  The
2259 * last page in the sequence is the page with the largest offset from
2260 * m_start that can be mapped at a virtual address less than the given
2261 * virtual address end.  Not every virtual page between start and end
2262 * is mapped; only those for which a resident page exists with the
2263 * corresponding offset from m_start are mapped.
2264 */
2265void
2266pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
2267    vm_page_t m_start, vm_prot_t prot)
2268{
2269	vm_page_t m, mpte;
2270	vm_pindex_t diff, psize;
2271
2272	VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
2273	psize = atop(end - start);
2274	mpte = NULL;
2275	m = m_start;
2276	vm_page_lock_queues();
2277	PMAP_LOCK(pmap);
2278	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
2279		mpte = pmap_enter_quick_locked(pmap, start + ptoa(diff), m,
2280		    prot, mpte);
2281		m = TAILQ_NEXT(m, listq);
2282	}
2283	vm_page_unlock_queues();
2284 	PMAP_UNLOCK(pmap);
2285}
2286
2287/*
2288 * pmap_object_init_pt preloads the ptes for a given object
2289 * into the specified pmap.  This eliminates the blast of soft
2290 * faults on process startup and immediately after an mmap.
2291 */
2292void
2293pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
2294    vm_object_t object, vm_pindex_t pindex, vm_size_t size)
2295{
2296	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2297	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
2298	    ("pmap_object_init_pt: non-device object"));
2299}
2300
2301/*
2302 *	Routine:	pmap_change_wiring
2303 *	Function:	Change the wiring attribute for a map/virtual-address
2304 *			pair.
2305 *	In/out conditions:
2306 *			The mapping must already exist in the pmap.
2307 */
2308void
2309pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
2310{
2311	pt_entry_t *pte;
2312
2313	if (pmap == NULL)
2314		return;
2315
2316	PMAP_LOCK(pmap);
2317	pte = pmap_pte(pmap, va);
2318
2319	if (wired && !pte_test(pte, PTE_W))
2320		pmap->pm_stats.wired_count++;
2321	else if (!wired && pte_test(pte, PTE_W))
2322		pmap->pm_stats.wired_count--;
2323
2324	/*
2325	 * Wiring is not a hardware characteristic so there is no need to
2326	 * invalidate TLB.
2327	 */
2328	if (wired)
2329		pte_set(pte, PTE_W);
2330	else
2331		pte_clear(pte, PTE_W);
2332	PMAP_UNLOCK(pmap);
2333}
2334
2335/*
2336 *	Copy the range specified by src_addr/len
2337 *	from the source map to the range dst_addr/len
2338 *	in the destination map.
2339 *
2340 *	This routine is only advisory and need not do anything.
2341 */
2342
2343void
2344pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
2345    vm_size_t len, vm_offset_t src_addr)
2346{
2347}
2348
2349/*
2350 *	pmap_zero_page zeros the specified hardware page by mapping
2351 *	the page into KVM and using bzero to clear its contents.
2352 *
2353 * 	Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
2354 */
2355void
2356pmap_zero_page(vm_page_t m)
2357{
2358	vm_offset_t va;
2359	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
2360
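	/*
	 * Zero through the direct map when the physical address allows it;
	 * otherwise borrow a temporary per-CPU mapping via pmap_lmem_map1().
	 * In both cases write back and invalidate the data cache so the
	 * zeroed data is visible to any other mapping of this page.
	 */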
2361	if (MIPS_DIRECT_MAPPABLE(phys)) {
2362		va = MIPS_PHYS_TO_DIRECT(phys);
2363		bzero((caddr_t)va, PAGE_SIZE);
2364		mips_dcache_wbinv_range(va, PAGE_SIZE);
2365	} else {
2366		va = pmap_lmem_map1(phys);
2367		bzero((caddr_t)va, PAGE_SIZE);
2368		mips_dcache_wbinv_range(va, PAGE_SIZE);
2369		pmap_lmem_unmap();
2370	}
2371}
2372
2373/*
2374 *	pmap_zero_page_area zeros the specified hardware page by mapping
2375 *	the page into KVM and using bzero to clear its contents.
2376 *
2377 *	off and size may not cover an area beyond a single hardware page.
2378 */
2379void
2380pmap_zero_page_area(vm_page_t m, int off, int size)
2381{
2382	vm_offset_t va;
2383	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
2384
2385	if (MIPS_DIRECT_MAPPABLE(phys)) {
2386		va = MIPS_PHYS_TO_DIRECT(phys);
2387		bzero((char *)(caddr_t)va + off, size);
2388		mips_dcache_wbinv_range(va + off, size);
2389	} else {
2390		va = pmap_lmem_map1(phys);
2391		bzero((char *)va + off, size);
2392		mips_dcache_wbinv_range(va + off, size);
2393		pmap_lmem_unmap();
2394	}
2395}
2396
2397void
2398pmap_zero_page_idle(vm_page_t m)
2399{
2400	vm_offset_t va;
2401	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
2402
2403	if (MIPS_DIRECT_MAPPABLE(phys)) {
2404		va = MIPS_PHYS_TO_DIRECT(phys);
2405		bzero((caddr_t)va, PAGE_SIZE);
2406		mips_dcache_wbinv_range(va, PAGE_SIZE);
2407	} else {
2408		va = pmap_lmem_map1(phys);
2409		bzero((caddr_t)va, PAGE_SIZE);
2410		mips_dcache_wbinv_range(va, PAGE_SIZE);
2411		pmap_lmem_unmap();
2412	}
2413}
2414
2415/*
2416 *	pmap_copy_page copies the specified (machine independent)
2417 *	page by mapping the page into virtual memory and using
2418 *	bcopy to copy the page, one machine dependent page at a
2419 *	time.
2420 *
2421 * 	Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
2422 */
2423void
2424pmap_copy_page(vm_page_t src, vm_page_t dst)
2425{
2426	vm_offset_t va_src, va_dst;
2427	vm_paddr_t phys_src = VM_PAGE_TO_PHYS(src);
2428	vm_paddr_t phys_dst = VM_PAGE_TO_PHYS(dst);
2429
2430	if (MIPS_DIRECT_MAPPABLE(phys_src) && MIPS_DIRECT_MAPPABLE(phys_dst)) {
2431		/* Easy case: both pages can be accessed via the direct map. */
2432		/*
2433		 * Flush all cache lines for the VAs that map this page to
2434		 * make sure that the data in SDRAM is up to date.
2435		 */
2436		pmap_flush_pvcache(src);
2437		mips_dcache_wbinv_range_index(
2438		    MIPS_PHYS_TO_DIRECT(phys_dst), PAGE_SIZE);
2439		va_src = MIPS_PHYS_TO_DIRECT(phys_src);
2440		va_dst = MIPS_PHYS_TO_DIRECT(phys_dst);
2441		bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE);
2442		mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
2443	} else {
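		/*
		 * pmap_lmem_map2() maps the two physical pages at
		 * consecutive per-CPU virtual addresses: the source page at
		 * the returned address and the destination one page above it.
		 */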
2444		va_src = pmap_lmem_map2(phys_src, phys_dst);
2445		va_dst = va_src + PAGE_SIZE;
2446		bcopy((void *)va_src, (void *)va_dst, PAGE_SIZE);
2447		mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
2448		pmap_lmem_unmap();
2449	}
2450}
2451
2452/*
2453 * Returns true if the pmap's pv is one of the first
2454 * 16 pvs linked to from this page.  This count may
2455 * be changed upwards or downwards in the future; it
2456 * is only necessary that true be returned for a small
2457 * subset of pmaps for proper page aging.
2458 */
2459boolean_t
2460pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
2461{
2462	pv_entry_t pv;
2463	int loops = 0;
2464	boolean_t rv;
2465
2466	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2467	    ("pmap_page_exists_quick: page %p is not managed", m));
2468	rv = FALSE;
2469	vm_page_lock_queues();
2470	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2471		if (pv->pv_pmap == pmap) {
2472			rv = TRUE;
2473			break;
2474		}
2475		loops++;
2476		if (loops >= 16)
2477			break;
2478	}
2479	vm_page_unlock_queues();
2480	return (rv);
2481}
2482
2483/*
2484 * Remove all pages from the specified address space;
2485 * this aids process exit speeds.  Also, this code
2486 * is special cased for current process only, but
2487 * can have the more generic (and slightly slower)
2488 * mode enabled.  This is much faster than pmap_remove
2489 * in the case of running down an entire address space.
2490 */
2491void
2492pmap_remove_pages(pmap_t pmap)
2493{
2494	pt_entry_t *pte, tpte;
2495	pv_entry_t pv, npv;
2496	vm_page_t m;
2497
2498	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
2499		printf("warning: pmap_remove_pages called with non-current pmap\n");
2500		return;
2501	}
2502	vm_page_lock_queues();
2503	PMAP_LOCK(pmap);
2504	for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv != NULL; pv = npv) {
2505
2506		pte = pmap_pte(pv->pv_pmap, pv->pv_va);
2507		if (!pte_test(pte, PTE_V))
2508			panic("pmap_remove_pages: page on pm_pvlist has no pte");
2509		tpte = *pte;
2510
2511/*
2512 * We cannot remove wired pages from a process' mapping at this time
2513 */
2514		if (pte_test(&tpte, PTE_W)) {
2515			npv = TAILQ_NEXT(pv, pv_plist);
2516			continue;
2517		}
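		/*
		 * Clear the PTE.  Kernel PTEs keep PTE_G even when invalid
		 * so that both halves of a MIPS even/odd TLB pair agree on
		 * the global bit.
		 */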
2518		*pte = is_kernel_pmap(pmap) ? PTE_G : 0;
2519
2520		m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(tpte));
2521		KASSERT(m != NULL,
2522		    ("pmap_remove_pages: bad tpte %#jx", (uintmax_t)tpte));
2523
2524		pv->pv_pmap->pm_stats.resident_count--;
2525
2526		/*
2527		 * Update the vm_page_t clean and reference bits.
2528		 */
2529		if (pte_test(&tpte, PTE_D)) {
2530			vm_page_dirty(m);
2531		}
2532		npv = TAILQ_NEXT(pv, pv_plist);
2533		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
2534
2535		m->md.pv_list_count--;
2536		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2537		if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
2538			vm_page_aflag_clear(m, PGA_WRITEABLE);
2539		}
2540		pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
2541		free_pv_entry(pv);
2542	}
2543	pmap_invalidate_all(pmap);
2544	PMAP_UNLOCK(pmap);
2545	vm_page_unlock_queues();
2546}
2547
2548/*
2549 * pmap_testbit tests bits in PTEs.
2550 * Note that the testbit/changebit routines are inline,
2551 * and a lot of things compile-time evaluate.
2552 */
2553static boolean_t
2554pmap_testbit(vm_page_t m, int bit)
2555{
2556	pv_entry_t pv;
2557	pt_entry_t *pte;
2558	boolean_t rv = FALSE;
2559
2560	if (m->oflags & VPO_UNMANAGED)
2561		return (rv);
2562
2563	if (TAILQ_FIRST(&m->md.pv_list) == NULL)
2564		return (rv);
2565
2566	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2567	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2568		PMAP_LOCK(pv->pv_pmap);
2569		pte = pmap_pte(pv->pv_pmap, pv->pv_va);
2570		rv = pte_test(pte, bit);
2571		PMAP_UNLOCK(pv->pv_pmap);
2572		if (rv)
2573			break;
2574	}
2575	return (rv);
2576}
2577
2578/*
2579 * This routine is used to set or clear bits in PTEs.
2580 */
2581static __inline void
2582pmap_changebit(vm_page_t m, int bit, boolean_t setem)
2583{
2584	pv_entry_t pv;
2585	pt_entry_t *pte;
2586
2587	if (m->oflags & VPO_UNMANAGED)
2588		return;
2589
2590	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2591	/*
2592	 * Loop over all current mappings, setting/clearing as appropriate.
2593	 * If setting RO, do we need to clear the VAC?
2594	 */
2595	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2596		PMAP_LOCK(pv->pv_pmap);
2597		pte = pmap_pte(pv->pv_pmap, pv->pv_va);
2598		if (setem) {
2599			*pte |= bit;
2600			pmap_update_page(pv->pv_pmap, pv->pv_va, *pte);
2601		} else {
2602			pt_entry_t pbits = *pte;
2603
2604			if (pbits & bit) {
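				/*
				 * When clearing PTE_D, also set PTE_RO so
				 * the next write traps and the dirty state
				 * can be re-recorded by
				 * pmap_emulate_modified().
				 */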
2605				if (bit == PTE_D) {
2606					if (pbits & PTE_D)
2607						vm_page_dirty(m);
2608					*pte = (pbits & ~PTE_D) | PTE_RO;
2609				} else {
2610					*pte = pbits & ~bit;
2611				}
2612				pmap_update_page(pv->pv_pmap, pv->pv_va, *pte);
2613			}
2614		}
2615		PMAP_UNLOCK(pv->pv_pmap);
2616	}
2617	if (!setem && bit == PTE_D)
2618		vm_page_aflag_clear(m, PGA_WRITEABLE);
2619}
2620
2621/*
2622 *	pmap_page_wired_mappings:
2623 *
2624 *	Return the number of managed mappings to the given physical page
2625 *	that are wired.
2626 */
2627int
2628pmap_page_wired_mappings(vm_page_t m)
2629{
2630	pv_entry_t pv;
2631	pmap_t pmap;
2632	pt_entry_t *pte;
2633	int count;
2634
2635	count = 0;
2636	if ((m->oflags & VPO_UNMANAGED) != 0)
2637		return (count);
2638	vm_page_lock_queues();
2639	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2640		pmap = pv->pv_pmap;
2641		PMAP_LOCK(pmap);
2642		pte = pmap_pte(pmap, pv->pv_va);
2643		if (pte_test(pte, PTE_W))
2644			count++;
2645		PMAP_UNLOCK(pmap);
2646	}
2647	vm_page_unlock_queues();
2648	return (count);
2649}
2650
2651/*
2652 * Clear the write and modified bits in each of the given page's mappings.
2653 */
2654void
2655pmap_remove_write(vm_page_t m)
2656{
2657	pv_entry_t pv, npv;
2658	vm_offset_t va;
2659	pt_entry_t *pte;
2660
2661	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2662	    ("pmap_remove_write: page %p is not managed", m));
2663
2664	/*
2665	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
2666	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
2667	 * is clear, no page table entries need updating.
2668	 */
2669	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2670	if ((m->oflags & VPO_BUSY) == 0 &&
2671	    (m->aflags & PGA_WRITEABLE) == 0)
2672		return;
2673
2674	/*
2675	 * Loop over all current mappings, setting/clearing as appropriate.
2676	 */
2677	vm_page_lock_queues();
2678	for (pv = TAILQ_FIRST(&m->md.pv_list); pv; pv = npv) {
2679		npv = TAILQ_NEXT(pv, pv_plist);
2680		pte = pmap_pte(pv->pv_pmap, pv->pv_va);
2681		if (pte == NULL || !pte_test(pte, PTE_V))
2682			panic("page on pm_pvlist has no pte");
2683
2684		va = pv->pv_va;
2685		pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
2686		    VM_PROT_READ | VM_PROT_EXECUTE);
2687	}
2688	vm_page_aflag_clear(m, PGA_WRITEABLE);
2689	vm_page_unlock_queues();
2690}
2691
2692/*
2693 *	pmap_ts_referenced:
2694 *
2695 *	Return the count of reference bits for a page, clearing all of them.
2696 */
2697int
2698pmap_ts_referenced(vm_page_t m)
2699{
2700
2701	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2702	    ("pmap_ts_referenced: page %p is not managed", m));
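	/*
	 * MIPS keeps no referenced bit in hardware; PV_TABLE_REF is set by
	 * software (see pmap_enter() and pmap_emulate_modified()), so at
	 * most one reference can be reported here.
	 */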
2703	if (m->md.pv_flags & PV_TABLE_REF) {
2704		vm_page_lock_queues();
2705		m->md.pv_flags &= ~PV_TABLE_REF;
2706		vm_page_unlock_queues();
2707		return (1);
2708	}
2709	return (0);
2710}
2711
2712/*
2713 *	pmap_is_modified:
2714 *
2715 *	Return whether or not the specified physical page was modified
2716 *	in any physical maps.
2717 */
2718boolean_t
2719pmap_is_modified(vm_page_t m)
2720{
2721	boolean_t rv;
2722
2723	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2724	    ("pmap_is_modified: page %p is not managed", m));
2725
2726	/*
2727	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
2728	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
2729	 * is clear, no PTEs can have PTE_D set.
2730	 */
2731	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2732	if ((m->oflags & VPO_BUSY) == 0 &&
2733	    (m->aflags & PGA_WRITEABLE) == 0)
2734		return (FALSE);
2735	vm_page_lock_queues();
2736	if (m->md.pv_flags & PV_TABLE_MOD)
2737		rv = TRUE;
2738	else
2739		rv = pmap_testbit(m, PTE_D);
2740	vm_page_unlock_queues();
2741	return (rv);
2742}
2743
2744/* N/C */
2745
2746/*
2747 *	pmap_is_prefaultable:
2748 *
2749 *	Return whether or not the specified virtual address is eligible
2750 *	for prefault.
2751 */
2752boolean_t
2753pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
2754{
2755	pd_entry_t *pde;
2756	pt_entry_t *pte;
2757	boolean_t rv;
2758
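	/*
	 * A virtual address is prefaultable only if its page table page
	 * already exists and the PTE within it is still empty.
	 */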
2759	rv = FALSE;
2760	PMAP_LOCK(pmap);
2761	pde = pmap_pde(pmap, addr);
2762	if (pde != NULL && *pde != 0) {
2763		pte = pmap_pde_to_pte(pde, addr);
2764		rv = (*pte == 0);
2765	}
2766	PMAP_UNLOCK(pmap);
2767	return (rv);
2768}
2769
2770/*
2771 *	Clear the modify bits on the specified physical page.
2772 */
2773void
2774pmap_clear_modify(vm_page_t m)
2775{
2776
2777	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2778	    ("pmap_clear_modify: page %p is not managed", m));
2779	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2780	KASSERT((m->oflags & VPO_BUSY) == 0,
2781	    ("pmap_clear_modify: page %p is busy", m));
2782
2783	/*
2784	 * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_D set.
2785	 * If the object containing the page is locked and the page is not
2786	 * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
2787	 */
2788	if ((m->aflags & PGA_WRITEABLE) == 0)
2789		return;
2790	vm_page_lock_queues();
2791	if (m->md.pv_flags & PV_TABLE_MOD) {
2792		pmap_changebit(m, PTE_D, FALSE);
2793		m->md.pv_flags &= ~PV_TABLE_MOD;
2794	}
2795	vm_page_unlock_queues();
2796}
2797
2798/*
2799 *	pmap_is_referenced:
2800 *
2801 *	Return whether or not the specified physical page was referenced
2802 *	in any physical maps.
2803 */
2804boolean_t
2805pmap_is_referenced(vm_page_t m)
2806{
2807
2808	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2809	    ("pmap_is_referenced: page %p is not managed", m));
2810	return ((m->md.pv_flags & PV_TABLE_REF) != 0);
2811}
2812
2813/*
2814 *	pmap_clear_reference:
2815 *
2816 *	Clear the reference bit on the specified physical page.
2817 */
2818void
2819pmap_clear_reference(vm_page_t m)
2820{
2821
2822	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2823	    ("pmap_clear_reference: page %p is not managed", m));
2824	vm_page_lock_queues();
2825	if (m->md.pv_flags & PV_TABLE_REF) {
2826		m->md.pv_flags &= ~PV_TABLE_REF;
2827	}
2828	vm_page_unlock_queues();
2829}
2830
2831/*
2832 * Miscellaneous support routines follow
2833 */
2834
2842/*
2843 * Map a set of physical memory pages into the kernel virtual
2844 * address space. Return a pointer to where it is mapped. This
2845 * routine is intended to be used for mapping device memory,
2846 * NOT real memory.
2847 *
2848 * Use XKPHYS uncached for 64 bit, and KSEG1 where possible for 32 bit.
2849 */
2850void *
2851pmap_mapdev(vm_paddr_t pa, vm_size_t size)
2852{
2853	vm_offset_t va, tmpva, offset;
2854
2855	/*
2856	 * KSEG1 maps only the first 512MB of the physical address space.
2857	 * For pa > 0x20000000 we must build a proper mapping using pmap_kenter.
2858	 */
2859	if (MIPS_DIRECT_MAPPABLE(pa + size - 1))
2860		return ((void *)MIPS_PHYS_TO_DIRECT_UNCACHED(pa));
2861	else {
2862		offset = pa & PAGE_MASK;
2863		size = roundup(size + offset, PAGE_SIZE);
2864
2865		va = kmem_alloc_nofault(kernel_map, size);
2866		if (!va)
2867			panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
2868		pa = trunc_page(pa);
2869		for (tmpva = va; size > 0;) {
2870			pmap_kenter_attr(tmpva, pa, PTE_C_UNCACHED);
2871			size -= PAGE_SIZE;
2872			tmpva += PAGE_SIZE;
2873			pa += PAGE_SIZE;
2874		}
2875	}
2876
2877	return ((void *)(va + offset));
2878}
2879
2880void
2881pmap_unmapdev(vm_offset_t va, vm_size_t size)
2882{
2883#ifndef __mips_n64
2884	vm_offset_t base, offset, tmpva;
2885
2886	/* If the address is within KSEG1 then there is nothing to do */
2887	if (va >= MIPS_KSEG1_START && va <= MIPS_KSEG1_END)
2888		return;
2889
2890	base = trunc_page(va);
2891	offset = va & PAGE_MASK;
2892	size = roundup(size + offset, PAGE_SIZE);
2893	for (tmpva = base; tmpva < base + size; tmpva += PAGE_SIZE)
2894		pmap_kremove(tmpva);
2895	kmem_free(kernel_map, base, size);
2896#endif
2897}
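
/*
 * Illustrative sketch (hypothetical names, not from this file): a driver
 * could map a device register window with
 *
 *	regs = pmap_mapdev(sc->sc_physaddr, sc->sc_size);
 *	...
 *	pmap_unmapdev((vm_offset_t)regs, sc->sc_size);
 *
 * Drivers typically reach this code indirectly through resource
 * activation / bus_space(9) rather than calling it themselves.
 */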
2898
2899/*
2900 * perform the pmap work for mincore
2901 */
2902int
2903pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
2904{
2905	pt_entry_t *ptep, pte;
2906	vm_paddr_t pa;
2907	vm_page_t m;
2908	int val;
2909	boolean_t managed;
2910
2911	PMAP_LOCK(pmap);
2912retry:
2913	ptep = pmap_pte(pmap, addr);
2914	pte = (ptep != NULL) ? *ptep : 0;
2915	if (!pte_test(&pte, PTE_V)) {
2916		val = 0;
2917		goto out;
2918	}
2919	val = MINCORE_INCORE;
2920	if (pte_test(&pte, PTE_D))
2921		val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
2922	pa = TLBLO_PTE_TO_PA(pte);
2923	managed = page_is_managed(pa);
2924	if (managed) {
2925		/*
2926		 * This may falsely report the given address as
2927		 * MINCORE_REFERENCED.  Unfortunately, due to the lack of
2928		 * per-PTE reference information, it is impossible to
2929		 * determine if the address is MINCORE_REFERENCED.
2930		 */
2931		m = PHYS_TO_VM_PAGE(pa);
2932		if ((m->aflags & PGA_REFERENCED) != 0)
2933			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
2934	}
2935	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
2936	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
2937		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
2938		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
2939			goto retry;
2940	} else
2941out:
2942		PA_UNLOCK_COND(*locked_pa);
2943	PMAP_UNLOCK(pmap);
2944	return (val);
2945}
2946
2947void
2948pmap_activate(struct thread *td)
2949{
2950	pmap_t pmap, oldpmap;
2951	struct proc *p = td->td_proc;
2952	u_int cpuid;
2953
2954	critical_enter();
2955
2956	pmap = vmspace_pmap(p->p_vmspace);
2957	oldpmap = PCPU_GET(curpmap);
2958	cpuid = PCPU_GET(cpuid);
2959
2960	if (oldpmap)
2961		CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
2962	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
2963	pmap_asid_alloc(pmap);
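	/*
	 * Load the hardware context (per-CPU segment table base and the
	 * ASID in EntryHi) only when activating the running thread;
	 * otherwise just record the pmap as active on this CPU.
	 */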
2964	if (td == curthread) {
2965		PCPU_SET(segbase, pmap->pm_segtab);
2966		mips_wr_entryhi(pmap->pm_asid[cpuid].asid);
2967	}
2968
2969	PCPU_SET(curpmap, pmap);
2970	critical_exit();
2971}
2972
2973void
2974pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
2975{
2976}
2977
2978/*
2979 *	Increase the starting virtual address of the given mapping if a
2980 *	different alignment might result in more superpage mappings.
2981 */
2982void
2983pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
2984    vm_offset_t *addr, vm_size_t size)
2985{
2986	vm_offset_t superpage_offset;
2987
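	/*
	 * Give *addr the same offset within a segment (NBSEG bytes) as the
	 * object offset, so that fully populated segments can later be
	 * mapped by superpages.  Requests too small to cover a whole
	 * segment, or already suitably aligned, are left untouched.
	 */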
2988	if (size < NBSEG)
2989		return;
2990	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
2991		offset += ptoa(object->pg_color);
2992	superpage_offset = offset & SEGMASK;
2993	if (size - ((NBSEG - superpage_offset) & SEGMASK) < NBSEG ||
2994	    (*addr & SEGMASK) == superpage_offset)
2995		return;
2996	if ((*addr & SEGMASK) < superpage_offset)
2997		*addr = (*addr & ~SEGMASK) + superpage_offset;
2998	else
2999		*addr = ((*addr + SEGMASK) & ~SEGMASK) + superpage_offset;
3000}
3001
3002/*
3003 * 	Increase the starting virtual address of the given mapping so
3004 * 	that it is aligned to not be the second page in a TLB entry.
3005 * 	This routine assumes that the length is appropriately-sized so
3006 * 	that the allocation does not share a TLB entry at all if required.
3007 */
3008void
3009pmap_align_tlb(vm_offset_t *addr)
3010{
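	/*
	 * A MIPS TLB entry maps an even/odd pair of virtual pages.
	 * Starting the allocation on an even page boundary keeps it from
	 * becoming the odd half of a pair shared with an unrelated mapping.
	 */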
3011	if ((*addr & PAGE_SIZE) == 0)
3012		return;
3013	*addr += PAGE_SIZE;
3014	return;
3015}
3016
3017#ifdef DDB
3018DB_SHOW_COMMAND(ptable, ddb_pid_dump)
3019{
3020	pmap_t pmap;
3021	struct thread *td = NULL;
3022	struct proc *p;
3023	int i, j, k;
3024	vm_paddr_t pa;
3025	vm_offset_t va;
3026
3027	if (have_addr) {
3028		td = db_lookup_thread(addr, TRUE);
3029		if (td == NULL) {
3030			db_printf("Invalid pid or tid");
3031			return;
3032		}
3033		p = td->td_proc;
3034		if (p->p_vmspace == NULL) {
3035			db_printf("No vmspace for process");
3036			return;
3037		}
3038		pmap = vmspace_pmap(p->p_vmspace);
3039	} else
3040		pmap = kernel_pmap;
3041
3042	db_printf("pmap:%p segtab:%p asid:%x generation:%x\n",
3043	    pmap, pmap->pm_segtab, pmap->pm_asid[0].asid,
3044	    pmap->pm_asid[0].gen);
3045	for (i = 0; i < NPDEPG; i++) {
3046		pd_entry_t *pdpe;
3047		pt_entry_t *pde;
3048		pt_entry_t pte;
3049
3050		pdpe = (pd_entry_t *)pmap->pm_segtab[i];
3051		if (pdpe == NULL)
3052			continue;
3053		db_printf("[%4d] %p\n", i, pdpe);
3054#ifdef __mips_n64
3055		for (j = 0; j < NPDEPG; j++) {
3056			pde = (pt_entry_t *)pdpe[j];
3057			if (pde == NULL)
3058				continue;
3059			db_printf("\t[%4d] %p\n", j, pde);
3060#else
3061		{
3062			j = 0;
3063			pde =  (pt_entry_t *)pdpe;
3064#endif
3065			for (k = 0; k < NPTEPG; k++) {
3066				pte = pde[k];
3067				if (pte == 0 || !pte_test(&pte, PTE_V))
3068					continue;
3069				pa = TLBLO_PTE_TO_PA(pte);
3070				va = ((u_long)i << SEGSHIFT) | (j << PDRSHIFT) | (k << PAGE_SHIFT);
3071				db_printf("\t\t[%04d] va: %p pte: %8jx pa:%jx\n",
3072				       k, (void *)va, (uintmax_t)pte, (uintmax_t)pa);
3073			}
3074		}
3075	}
3076}
3077#endif
3078
3079#if defined(DEBUG)
3080
3081static void pads(pmap_t pm);
3082void pmap_pvdump(vm_offset_t pa);
3083
3084/* print address space of pmap */
3085static void
3086pads(pmap_t pm)
3087{
3088	unsigned va, i, j;
3089	pt_entry_t *ptep;
3090
3091	if (pm == kernel_pmap)
3092		return;
3093	for (i = 0; i < NPTEPG; i++)
3094		if (pm->pm_segtab[i])
3095			for (j = 0; j < NPTEPG; j++) {
3096				va = (i << SEGSHIFT) + (j << PAGE_SHIFT);
3097				if (pm == kernel_pmap && va < KERNBASE)
3098					continue;
3099				if (pm != kernel_pmap &&
3100				    va >= VM_MAXUSER_ADDRESS)
3101					continue;
3102				ptep = pmap_pte(pm, va);
3103				if (pte_test(ptep, PTE_V))
3104					printf("%x:%x ", va, *(int *)ptep);
3105			}
3106
3107}
3108
3109void
3110pmap_pvdump(vm_offset_t pa)
3111{
3112	register pv_entry_t pv;
3113	vm_page_t m;
3114
3115	printf("pa %x", pa);
3116	m = PHYS_TO_VM_PAGE(pa);
3117	for (pv = TAILQ_FIRST(&m->md.pv_list); pv;
3118	    pv = TAILQ_NEXT(pv, pv_list)) {
3119		printf(" -> pmap %p, va %x", (void *)pv->pv_pmap, pv->pv_va);
3120		pads(pv->pv_pmap);
3121	}
3122	printf(" ");
3123}
3124
3125/* N/C */
3126#endif
3127
3128
3129/*
3130 * Allocate TLB address space tag (called ASID or TLBPID) and return it.
3131 * It takes almost as much or more time to search the TLB for a
3132 * specific ASID and flush those entries as it does to flush the entire TLB.
3133 * Therefore, when we allocate a new ASID, we just take the next number. When
3134 * we run out of numbers, we flush the TLB, increment the generation count
3135 * and start over. ASID zero is reserved for kernel use.
3136 */
3137static void
3138pmap_asid_alloc(pmap_t pmap)
3139{
3140	if (pmap->pm_asid[PCPU_GET(cpuid)].asid == PMAP_ASID_RESERVED ||
3141	    pmap->pm_asid[PCPU_GET(cpuid)].gen != PCPU_GET(asid_generation)) {
3144		if (PCPU_GET(next_asid) == pmap_max_asid) {
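			/*
			 * Every ASID in this generation has been handed
			 * out: flush the user TLB entries, start a new
			 * generation and resume handing out ASIDs from 1.
			 */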
3145			tlb_invalidate_all_user(NULL);
3146			PCPU_SET(asid_generation,
3147			    (PCPU_GET(asid_generation) + 1) & ASIDGEN_MASK);
3148			if (PCPU_GET(asid_generation) == 0) {
3149				PCPU_SET(asid_generation, 1);
3150			}
3151			PCPU_SET(next_asid, 1);	/* 0 means invalid */
3152		}
3153		pmap->pm_asid[PCPU_GET(cpuid)].asid = PCPU_GET(next_asid);
3154		pmap->pm_asid[PCPU_GET(cpuid)].gen = PCPU_GET(asid_generation);
3155		PCPU_SET(next_asid, PCPU_GET(next_asid) + 1);
3156	}
3157}
3158
3159int
3160page_is_managed(vm_paddr_t pa)
3161{
3162	vm_offset_t pgnum = atop(pa);
3163
3164	if (pgnum >= first_page) {
3165		vm_page_t m;
3166
3167		m = PHYS_TO_VM_PAGE(pa);
3168		if (m == NULL)
3169			return (0);
3170		if ((m->oflags & VPO_UNMANAGED) == 0)
3171			return (1);
3172	}
3173	return (0);
3174}
3175
3176static pt_entry_t
3177init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot)
3178{
3179	pt_entry_t rw;
3180
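	/*
	 * For managed pages, PTE_D (the hardware write-enable/dirty bit) is
	 * pre-set only if the page is already known to be modified;
	 * otherwise the first write traps so that pmap_emulate_modified()
	 * can record the modification.  Unmanaged pages need no emulation.
	 */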
3181	if (!(prot & VM_PROT_WRITE))
3182		rw =  PTE_V | PTE_RO | PTE_C_CACHE;
3183	else if ((m->oflags & VPO_UNMANAGED) == 0) {
3184		if ((m->md.pv_flags & PV_TABLE_MOD) != 0)
3185			rw =  PTE_V | PTE_D | PTE_C_CACHE;
3186		else
3187			rw = PTE_V | PTE_C_CACHE;
3188		vm_page_aflag_set(m, PGA_WRITEABLE);
3189	} else
3190		/* Needn't emulate a modified bit for unmanaged pages. */
3191		rw =  PTE_V | PTE_D | PTE_C_CACHE;
3192	return (rw);
3193}
3194
3195/*
3196 * pmap_emulate_modified : do dirty bit emulation
3197 *
3198 * On SMP, update just the local TLB, other CPUs will update their
3199 * TLBs from PTE lazily, if they get the exception.
3200 * Returns 0 in case of success, 1 if the page is read-only and we
3201 * need to fault.
3202 */
3203int
3204pmap_emulate_modified(pmap_t pmap, vm_offset_t va)
3205{
3206	vm_page_t m;
3207	pt_entry_t *pte;
3208 	vm_paddr_t pa;
3209
3210	PMAP_LOCK(pmap);
3211	pte = pmap_pte(pmap, va);
3212	if (pte == NULL)
3213		panic("pmap_emulate_modified: can't find PTE");
3214#ifdef SMP
3215	/* It is possible that some other CPU changed m-bit */
3216	if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D)) {
3217		pmap_update_page_local(pmap, va, *pte);
3218		PMAP_UNLOCK(pmap);
3219		return (0);
3220	}
3221#else
3222	if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D))
3223		panic("pmap_emulate_modified: invalid pte");
3224#endif
3225	if (pte_test(pte, PTE_RO)) {
3226		/* write to read only page in the kernel */
3227		PMAP_UNLOCK(pmap);
3228		return (1);
3229	}
3230	pte_set(pte, PTE_D);
3231	pmap_update_page_local(pmap, va, *pte);
3232	pa = TLBLO_PTE_TO_PA(*pte);
3233	if (!page_is_managed(pa))
3234		panic("pmap_emulate_modified: unmanaged page");
3235	m = PHYS_TO_VM_PAGE(pa);
3236	m->md.pv_flags |= (PV_TABLE_REF | PV_TABLE_MOD);
3237	PMAP_UNLOCK(pmap);
3238	return (0);
3239}
3240
3241/*
3242 *	Routine:	pmap_kextract
3243 *	Function:
3244 *		Extract the physical page address associated with the
3245 *		given virtual address.
3246 */
3247 /* PMAP_INLINE */ vm_offset_t
3248pmap_kextract(vm_offset_t va)
3249{
3250	int mapped;
3251
3252	/*
3253	 * First, the direct-mapped regions.
3254	 */
3255#if defined(__mips_n64)
3256	if (va >= MIPS_XKPHYS_START && va < MIPS_XKPHYS_END)
3257		return (MIPS_XKPHYS_TO_PHYS(va));
3258#endif
3259	if (va >= MIPS_KSEG0_START && va < MIPS_KSEG0_END)
3260		return (MIPS_KSEG0_TO_PHYS(va));
3261
3262	if (va >= MIPS_KSEG1_START && va < MIPS_KSEG1_END)
3263		return (MIPS_KSEG1_TO_PHYS(va));
3264
3265	/*
3266	 * User virtual addresses.
3267	 */
3268	if (va < VM_MAXUSER_ADDRESS) {
3269		pt_entry_t *ptep;
3270
3271		if (curproc && curproc->p_vmspace) {
3272			ptep = pmap_pte(&curproc->p_vmspace->vm_pmap, va);
3273			if (ptep) {
3274				return (TLBLO_PTE_TO_PA(*ptep) |
3275				    (va & PAGE_MASK));
3276			}
3277			return (0);
3278		}
3279	}
3280
3281	/*
3282	 * Should be kernel virtual here, otherwise fail
3283	 */
3284	mapped = (va >= MIPS_KSEG2_START && va < MIPS_KSEG2_END);
3285#if defined(__mips_n64)
3286	mapped = mapped || (va >= MIPS_XKSEG_START && va < MIPS_XKSEG_END);
3287#endif
3288	/*
3289	 * Kernel virtual.
3290	 */
3291
3292	if (mapped) {
3293		pt_entry_t *ptep;
3294
3295		/* Is the kernel pmap initialized? */
3296		if (!CPU_EMPTY(&kernel_pmap->pm_active)) {
3297			/* It's inside the virtual address range */
3298			ptep = pmap_pte(kernel_pmap, va);
3299			if (ptep) {
3300				return (TLBLO_PTE_TO_PA(*ptep) |
3301				    (va & PAGE_MASK));
3302			}
3303		}
3304		return (0);
3305	}
3306
3307	panic("%s for unknown address space %p.", __func__, (void *)va);
3308}
3309
3310
3311void
3312pmap_flush_pvcache(vm_page_t m)
3313{
3314	pv_entry_t pv;
3315
3316	if (m != NULL) {
3317		for (pv = TAILQ_FIRST(&m->md.pv_list); pv;
3318		    pv = TAILQ_NEXT(pv, pv_list)) {
3319			mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
3320		}
3321	}
3322}
3323