1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * the Systems Programming Group of the University of Utah Computer
11 * Science Department and William Jolitz of UUNET Technologies Inc.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 *    notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 *    notice, this list of conditions and the following disclaimer in the
20 *    documentation and/or other materials provided with the distribution.
21 * 4. Neither the name of the University nor the names of its contributors
22 *    may be used to endorse or promote products derived from this software
23 *    without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
38 *	from: src/sys/i386/i386/pmap.c,v 1.250.2.8 2000/11/21 00:09:14 ps
39 *	JNPR: pmap.c,v 1.11.2.1 2007/08/16 11:51:06 girish
40 */
41
42/*
43 *	Manages physical address maps.
44 *
45 *	In addition to hardware address maps, this
46 *	module is called upon to provide software-use-only
47 *	maps which may or may not be stored in the same
48 *	form as hardware maps.	These pseudo-maps are
49 *	used to store intermediate results from copy
50 *	operations to and from address spaces.
51 *
52 *	Since the information managed by this module is
53 *	also stored by the logical address mapping module,
54 *	this module may throw away valid virtual-to-physical
55 *	mappings at almost any time.  However, invalidations
56 *	of virtual-to-physical mappings must be done as
57 *	requested.
58 *
59 *	In order to cope with hardware architectures which
60 *	make virtual-to-physical map invalidates expensive,
61 *	this module may delay invalidate or reduced protection
62 *	operations until such time as they are actually
63 *	necessary.  This module is given full information as
64 *	to which processors are currently using which maps,
65 *	and to when physical maps must be made correct.
66 */
67
68#include <sys/cdefs.h>
69__FBSDID("$FreeBSD: head/sys/mips/mips/pmap.c 239236 2012-08-13 17:38:38Z alc $");
70
71#include "opt_ddb.h"
72#include "opt_pmap.h"
73
74#include <sys/param.h>
75#include <sys/systm.h>
76#include <sys/proc.h>
77#include <sys/msgbuf.h>
78#include <sys/vmmeter.h>
79#include <sys/mman.h>
80#include <sys/smp.h>
81#include <sys/sysctl.h>
82#ifdef DDB
83#include <ddb/ddb.h>
84#endif
85
86#include <vm/vm.h>
87#include <vm/vm_param.h>
88#include <vm/vm_phys.h>
89#include <sys/lock.h>
90#include <sys/mutex.h>
91#include <vm/vm_kern.h>
92#include <vm/vm_page.h>
93#include <vm/vm_map.h>
94#include <vm/vm_object.h>
95#include <vm/vm_extern.h>
96#include <vm/vm_pageout.h>
97#include <vm/vm_pager.h>
98#include <vm/uma.h>
99#include <sys/pcpu.h>
100#include <sys/sched.h>
101#ifdef SMP
102#include <sys/smp.h>
103#endif
104
105#include <machine/cache.h>
106#include <machine/md_var.h>
107#include <machine/tlb.h>
108
109#undef PMAP_DEBUG
110
111#ifndef PMAP_SHPGPERPROC
112#define	PMAP_SHPGPERPROC 200
113#endif
114
115#if !defined(DIAGNOSTIC)
116#define	PMAP_INLINE __inline
117#else
118#define	PMAP_INLINE
119#endif
120
121#ifdef PV_STATS
122#define PV_STAT(x)	do { x ; } while (0)
123#else
124#define PV_STAT(x)	do { } while (0)
125#endif
126
127/*
128 * Get PDEs and PTEs for user/kernel address space
129 */
130#define	pmap_seg_index(v)	(((v) >> SEGSHIFT) & (NPDEPG - 1))
131#define	pmap_pde_index(v)	(((v) >> PDRSHIFT) & (NPDEPG - 1))
132#define	pmap_pte_index(v)	(((v) >> PAGE_SHIFT) & (NPTEPG - 1))
133#define	pmap_pde_pindex(v)	((v) >> PDRSHIFT)
134
135#ifdef __mips_n64
136#define	NUPDE			(NPDEPG * NPDEPG)
137#define	NUSERPGTBLS		(NUPDE + NPDEPG)
138#else
139#define	NUPDE			(NPDEPG)
140#define	NUSERPGTBLS		(NUPDE)
141#endif
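/*
 * Taken together, these macros describe the software page table walk:
 * the segment table entry selects a page directory page (n64 only), the
 * page directory entry selects a page table page, and the page table
 * entry holds the final mapping.  On 32-bit kernels the middle level
 * collapses and the segment table entry points directly at the page
 * table page; see pmap_pde() and pmap_pte() below.
 */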
142
143#define	is_kernel_pmap(x)	((x) == kernel_pmap)
144
145struct pmap kernel_pmap_store;
146pd_entry_t *kernel_segmap;
147
148vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
149vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
150
151static int nkpt;
152unsigned pmap_max_asid;		/* max ASID supported by the system */
153
154#define	PMAP_ASID_RESERVED	0
155
156vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
157
158static void pmap_asid_alloc(pmap_t pmap);
159
160/*
161 * Data for the pv entry allocation mechanism
162 */
163static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
164static int pv_entry_count;
165
166static void free_pv_chunk(struct pv_chunk *pc);
167static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
168static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try);
169static vm_page_t pmap_pv_reclaim(pmap_t locked_pmap);
170static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
171static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
172    vm_offset_t va);
173static __inline void pmap_changebit(vm_page_t m, int bit, boolean_t setem);
174static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
175    vm_page_t m, vm_prot_t prot, vm_page_t mpte);
176static int pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va,
177    pd_entry_t pde);
178static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
179static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
180static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte,
181    vm_offset_t va, vm_page_t m);
182static void pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte);
183static void pmap_invalidate_all(pmap_t pmap);
184static void pmap_invalidate_page(pmap_t pmap, vm_offset_t va);
185static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m);
186
187static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
188static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
189static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t);
190static pt_entry_t init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot);
191
192#ifdef SMP
193static void pmap_invalidate_page_action(void *arg);
194static void pmap_update_page_action(void *arg);
195#endif
196
197#ifndef __mips_n64
198/*
199 * This structure is for high memory (memory above 512MB on 32-bit systems) support.
200 * The highmem area does not have a KSEG0 mapping, and we need a mechanism to
201 * do temporary per-CPU mappings for pmap_zero_page, pmap_copy_page etc.
202 *
203 * At bootup, we reserve 2 virtual pages per CPU for mapping highmem pages. To
204 * access a highmem physical address on a CPU, we map the physical address to
205 * the reserved virtual address for the CPU in the kernel pagetable.  This is
206 * done with interrupts disabled (although a spinlock and sched_pin would be
207 * sufficient).
208 */
209struct local_sysmaps {
210	vm_offset_t	base;
211	uint32_t	saved_intr;
212	uint16_t	valid1, valid2;
213};
214static struct local_sysmaps sysmap_lmem[MAXCPU];
215
216static __inline void
217pmap_alloc_lmem_map(void)
218{
219	int i;
220
221	for (i = 0; i < MAXCPU; i++) {
222		sysmap_lmem[i].base = virtual_avail;
223		virtual_avail += PAGE_SIZE * 2;
224		sysmap_lmem[i].valid1 = sysmap_lmem[i].valid2 = 0;
225	}
226}
227
228static __inline vm_offset_t
229pmap_lmem_map1(vm_paddr_t phys)
230{
231	struct local_sysmaps *sysm;
232	pt_entry_t *pte, npte;
233	vm_offset_t va;
234	uint32_t intr;
235	int cpu;
236
237	intr = intr_disable();
238	cpu = PCPU_GET(cpuid);
239	sysm = &sysmap_lmem[cpu];
240	sysm->saved_intr = intr;
241	va = sysm->base;
242	npte = TLBLO_PA_TO_PFN(phys) |
243	    PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
244	pte = pmap_pte(kernel_pmap, va);
245	*pte = npte;
246	sysm->valid1 = 1;
247	return (va);
248}
249
250static __inline vm_offset_t
251pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
252{
253	struct local_sysmaps *sysm;
254	pt_entry_t *pte, npte;
255	vm_offset_t va1, va2;
256	uint32_t intr;
257	int cpu;
258
259	intr = intr_disable();
260	cpu = PCPU_GET(cpuid);
261	sysm = &sysmap_lmem[cpu];
262	sysm->saved_intr = intr;
263	va1 = sysm->base;
264	va2 = sysm->base + PAGE_SIZE;
265	npte = TLBLO_PA_TO_PFN(phys1) |
266	    PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
267	pte = pmap_pte(kernel_pmap, va1);
268	*pte = npte;
269	npte =  TLBLO_PA_TO_PFN(phys2) |
270	    PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
271	pte = pmap_pte(kernel_pmap, va2);
272	*pte = npte;
273	sysm->valid1 = 1;
274	sysm->valid2 = 1;
275	return (va1);
276}
277
278static __inline void
279pmap_lmem_unmap(void)
280{
281	struct local_sysmaps *sysm;
282	pt_entry_t *pte;
283	int cpu;
284
285	cpu = PCPU_GET(cpuid);
286	sysm = &sysmap_lmem[cpu];
287	pte = pmap_pte(kernel_pmap, sysm->base);
288	*pte = PTE_G;
289	tlb_invalidate_address(kernel_pmap, sysm->base);
290	sysm->valid1 = 0;
291	if (sysm->valid2) {
292		pte = pmap_pte(kernel_pmap, sysm->base + PAGE_SIZE);
293		*pte = PTE_G;
294		tlb_invalidate_address(kernel_pmap, sysm->base + PAGE_SIZE);
295		sysm->valid2 = 0;
296	}
297	intr_restore(sysm->saved_intr);
298}
299#else  /* __mips_n64 */
300
301static __inline void
302pmap_alloc_lmem_map(void)
303{
304}
305
306static __inline vm_offset_t
307pmap_lmem_map1(vm_paddr_t phys)
308{
309
310	return (0);
311}
312
313static __inline vm_offset_t
314pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
315{
316
317	return (0);
318}
319
320static __inline vm_offset_t
321pmap_lmem_unmap(void)
322{
323
324	return (0);
325}
326#endif /* !__mips_n64 */
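#if 0
/*
 * Illustrative sketch only (hypothetical function, not part of this file):
 * how the per-CPU low-memory maps above are used to touch a physical page
 * that has no direct (KSEG0) mapping, e.g. by pmap_zero_page().
 */
static void
example_zero_highmem_page(vm_paddr_t pa)
{
	vm_offset_t va;

	va = pmap_lmem_map1(pa);	/* disables interrupts, installs PTE */
	bzero((void *)va, PAGE_SIZE);
	pmap_lmem_unmap();		/* clears the PTE, restores interrupts */
}
#endif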
327
328/*
329 * Page table entry lookup routines.
330 */
331static __inline pd_entry_t *
332pmap_segmap(pmap_t pmap, vm_offset_t va)
333{
334
335	return (&pmap->pm_segtab[pmap_seg_index(va)]);
336}
337
338#ifdef __mips_n64
339static __inline pd_entry_t *
340pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va)
341{
342	pd_entry_t *pde;
343
344	pde = (pd_entry_t *)*pdpe;
345	return (&pde[pmap_pde_index(va)]);
346}
347
348static __inline pd_entry_t *
349pmap_pde(pmap_t pmap, vm_offset_t va)
350{
351	pd_entry_t *pdpe;
352
353	pdpe = pmap_segmap(pmap, va);
354	if (pdpe == NULL || *pdpe == NULL)
355		return (NULL);
356
357	return (pmap_pdpe_to_pde(pdpe, va));
358}
359#else
360static __inline pd_entry_t *
361pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va)
362{
363
364	return (pdpe);
365}
366
367static __inline pd_entry_t *
368pmap_pde(pmap_t pmap, vm_offset_t va)
369{
370
371	return (pmap_segmap(pmap, va));
372}
373#endif
374
375static __inline pt_entry_t *
376pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)
377{
378	pt_entry_t *pte;
379
380	pte = (pt_entry_t *)*pde;
381	return (&pte[pmap_pte_index(va)]);
382}
383
384pt_entry_t *
385pmap_pte(pmap_t pmap, vm_offset_t va)
386{
387	pd_entry_t *pde;
388
389	pde = pmap_pde(pmap, va);
390	if (pde == NULL || *pde == NULL)
391		return (NULL);
392
393	return (pmap_pde_to_pte(pde, va));
394}
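/*
 * pmap_pte() returns NULL when an upper level of the page table is
 * missing, so callers must be prepared for a NULL result as well as for
 * a PTE that is not valid; pmap_extract() and pmap_extract_and_hold()
 * below show the usual locking and checks.
 */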
395
396vm_offset_t
397pmap_steal_memory(vm_size_t size)
398{
399	vm_paddr_t bank_size, pa;
400	vm_offset_t va;
401
402	size = round_page(size);
403	bank_size = phys_avail[1] - phys_avail[0];
404	while (size > bank_size) {
405		int i;
406
407		for (i = 0; phys_avail[i + 2]; i += 2) {
408			phys_avail[i] = phys_avail[i + 2];
409			phys_avail[i + 1] = phys_avail[i + 3];
410		}
411		phys_avail[i] = 0;
412		phys_avail[i + 1] = 0;
413		if (!phys_avail[0])
414			panic("pmap_steal_memory: out of memory");
415		bank_size = phys_avail[1] - phys_avail[0];
416	}
417
418	pa = phys_avail[0];
419	phys_avail[0] += size;
420	if (MIPS_DIRECT_MAPPABLE(pa) == 0)
421		panic("Out of memory below 512Meg?");
422	va = MIPS_PHYS_TO_DIRECT(pa);
423	bzero((caddr_t)va, size);
424	return (va);
425}
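/*
 * pmap_steal_memory() is a bootstrap-only allocator: it permanently
 * removes pages from the front of a sufficiently large phys_avail[]
 * bank and returns them through the direct map.  pmap_bootstrap()
 * below uses it for the message buffer, thread0's kernel stack, and
 * the initial kernel page tables.
 */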
426
427/*
428 * Bootstrap the system enough to run with virtual memory.  This
429 * assumes that the phys_avail array has been initialized.
430 */
431static void
432pmap_create_kernel_pagetable(void)
433{
434	int i, j;
435	vm_offset_t ptaddr;
436	pt_entry_t *pte;
437#ifdef __mips_n64
438	pd_entry_t *pde;
439	vm_offset_t pdaddr;
440	int npt, npde;
441#endif
442
443	/*
444	 * Allocate segment table for the kernel
445	 */
446	kernel_segmap = (pd_entry_t *)pmap_steal_memory(PAGE_SIZE);
447
448	/*
449	 * Allocate second level page tables for the kernel
450	 */
451#ifdef __mips_n64
452	npde = howmany(NKPT, NPDEPG);
453	pdaddr = pmap_steal_memory(PAGE_SIZE * npde);
454#endif
455	nkpt = NKPT;
456	ptaddr = pmap_steal_memory(PAGE_SIZE * nkpt);
457
458	/*
459	 * The R[4-7]?00 stores only one copy of the Global bit in the
460	 * translation lookaside buffer for each pair of pages.  Thus invalid
461	 * entries must have the Global bit set so that when the EntryLo0 and
462	 * EntryLo1 G bits are ANDed together they produce a global bit to
463	 * store in the TLB.
464	 */
465	for (i = 0, pte = (pt_entry_t *)ptaddr; i < (nkpt * NPTEPG); i++, pte++)
466		*pte = PTE_G;
467
468#ifdef __mips_n64
469	for (i = 0,  npt = nkpt; npt > 0; i++) {
470		kernel_segmap[i] = (pd_entry_t)(pdaddr + i * PAGE_SIZE);
471		pde = (pd_entry_t *)kernel_segmap[i];
472
473		for (j = 0; j < NPDEPG && npt > 0; j++, npt--)
474			pde[j] = (pd_entry_t)(ptaddr + (i * NPDEPG + j) * PAGE_SIZE);
475	}
476#else
477	for (i = 0, j = pmap_seg_index(VM_MIN_KERNEL_ADDRESS); i < nkpt; i++, j++)
478		kernel_segmap[j] = (pd_entry_t)(ptaddr + (i * PAGE_SIZE));
479#endif
480
481	PMAP_LOCK_INIT(kernel_pmap);
482	kernel_pmap->pm_segtab = kernel_segmap;
483	CPU_FILL(&kernel_pmap->pm_active);
484	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
485	kernel_pmap->pm_asid[0].asid = PMAP_ASID_RESERVED;
486	kernel_pmap->pm_asid[0].gen = 0;
487	kernel_vm_end += nkpt * NPTEPG * PAGE_SIZE;
488}
489
490void
491pmap_bootstrap(void)
492{
493	int i;
494	int need_local_mappings = 0;
495
496	/* Sort. */
497again:
498	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
499		/*
500		 * Keep the memory aligned on page boundary.
501		 */
502		phys_avail[i] = round_page(phys_avail[i]);
503		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
504
505		if (i < 2)
506			continue;
507		if (phys_avail[i - 2] > phys_avail[i]) {
508			vm_paddr_t ptemp[2];
509
510			ptemp[0] = phys_avail[i + 0];
511			ptemp[1] = phys_avail[i + 1];
512
513			phys_avail[i + 0] = phys_avail[i - 2];
514			phys_avail[i + 1] = phys_avail[i - 1];
515
516			phys_avail[i - 2] = ptemp[0];
517			phys_avail[i - 1] = ptemp[1];
518			goto again;
519		}
520	}
521
522       	/*
523	 * In 32 bit, we may have memory which cannot be mapped directly.
524	 * This memory will need temporary mapping before it can be
525	 * accessed.
526	 */
527	if (!MIPS_DIRECT_MAPPABLE(phys_avail[i - 1] - 1))
528		need_local_mappings = 1;
529
530	/*
531	 * Copy the phys_avail[] array before we start stealing memory from it.
532	 */
533	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
534		physmem_desc[i] = phys_avail[i];
535		physmem_desc[i + 1] = phys_avail[i + 1];
536	}
537
538	Maxmem = atop(phys_avail[i - 1]);
539
540	if (bootverbose) {
541		printf("Physical memory chunk(s):\n");
542		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
543			vm_paddr_t size;
544
545			size = phys_avail[i + 1] - phys_avail[i];
546			printf("%#08jx - %#08jx, %ju bytes (%ju pages)\n",
547			    (uintmax_t) phys_avail[i],
548			    (uintmax_t) phys_avail[i + 1] - 1,
549			    (uintmax_t) size, (uintmax_t) size / PAGE_SIZE);
550		}
551		printf("Maxmem is 0x%0jx\n", ptoa((uintmax_t)Maxmem));
552	}
553	/*
554	 * Steal the message buffer from the beginning of memory.
555	 */
556	msgbufp = (struct msgbuf *)pmap_steal_memory(msgbufsize);
557	msgbufinit(msgbufp, msgbufsize);
558
559	/*
560	 * Steal thread0 kstack.
561	 */
562	kstack0 = pmap_steal_memory(KSTACK_PAGES << PAGE_SHIFT);
563
564	virtual_avail = VM_MIN_KERNEL_ADDRESS;
565	virtual_end = VM_MAX_KERNEL_ADDRESS;
566
567#ifdef SMP
568	/*
569	 * Steal some virtual address space to map the pcpu area.
570	 */
571	virtual_avail = roundup2(virtual_avail, PAGE_SIZE * 2);
572	pcpup = (struct pcpu *)virtual_avail;
573	virtual_avail += PAGE_SIZE * 2;
574
575	/*
576	 * Initialize the wired TLB entry mapping the pcpu region for
577	 * the BSP at 'pcpup'. Up until this point we were operating
578	 * with the 'pcpup' for the BSP pointing to a virtual address
579	 * in KSEG0 so there was no need for a TLB mapping.
580	 */
581	mips_pcpu_tlb_init(PCPU_ADDR(0));
582
583	if (bootverbose)
584		printf("pcpu is available at virtual address %p.\n", pcpup);
585#endif
586
587	if (need_local_mappings)
588		pmap_alloc_lmem_map();
589	pmap_create_kernel_pagetable();
590	pmap_max_asid = VMNUM_PIDS;
591	mips_wr_entryhi(0);
592	mips_wr_pagemask(0);
593}
594
595/*
596 * Initialize a vm_page's machine-dependent fields.
597 */
598void
599pmap_page_init(vm_page_t m)
600{
601
602	TAILQ_INIT(&m->md.pv_list);
603	m->md.pv_flags = 0;
604}
605
606/*
607 *	Initialize the pmap module.
608 *	Called by vm_init, to initialize any structures that the pmap
609 *	system needs to map virtual memory.
610 */
611void
612pmap_init(void)
613{
614}
615
616/***************************************************
617 * Low level helper routines.....
618 ***************************************************/
619
620#ifdef	SMP
621static __inline void
622pmap_call_on_active_cpus(pmap_t pmap, void (*fn)(void *), void *arg)
623{
624	int	cpuid, cpu, self;
625	cpuset_t active_cpus;
626
627	sched_pin();
628	if (is_kernel_pmap(pmap)) {
629		smp_rendezvous(NULL, fn, NULL, arg);
630		goto out;
631	}
632	/* Force ASID update on inactive CPUs */
633	CPU_FOREACH(cpu) {
634		if (!CPU_ISSET(cpu, &pmap->pm_active))
635			pmap->pm_asid[cpu].gen = 0;
636	}
637	cpuid = PCPU_GET(cpuid);
638	/*
639	 * XXX: barrier/locking for active?
640	 *
641	 * Take a snapshot of active here, any further changes are ignored.
642	 * tlb update/invalidate should be harmless on inactive CPUs
643	 */
644	active_cpus = pmap->pm_active;
645	self = CPU_ISSET(cpuid, &active_cpus);
646	CPU_CLR(cpuid, &active_cpus);
647	/* Optimize for the case where this cpu is the only active one */
648	if (CPU_EMPTY(&active_cpus)) {
649		if (self)
650			fn(arg);
651	} else {
652		if (self)
653			CPU_SET(cpuid, &active_cpus);
654		smp_rendezvous_cpus(active_cpus, NULL, fn, NULL, arg);
655	}
656out:
657	sched_unpin();
658}
659#else /* !SMP */
660static __inline void
661pmap_call_on_active_cpus(pmap_t pmap, void (*fn)(void *), void *arg)
662{
663	int	cpuid;
664
665	if (is_kernel_pmap(pmap)) {
666		fn(arg);
667		return;
668	}
669	cpuid = PCPU_GET(cpuid);
670	if (!CPU_ISSET(cpuid, &pmap->pm_active))
671		pmap->pm_asid[cpuid].gen = 0;
672	else
673		fn(arg);
674}
675#endif /* SMP */
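/*
 * A pmap that is not active on some CPU is never interrupted by the
 * helpers above; they only zero pm_asid[cpu].gen for it.  The intent
 * (see pmap_asid_alloc(), declared above and defined later in this
 * file) is that the stale generation forces a fresh ASID, and with it
 * a clean view of the TLB, the next time the pmap is activated on
 * that CPU.
 */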
676
677static void
678pmap_invalidate_all(pmap_t pmap)
679{
680
681	pmap_call_on_active_cpus(pmap,
682	    (void (*)(void *))tlb_invalidate_all_user, pmap);
683}
684
685struct pmap_invalidate_page_arg {
686	pmap_t pmap;
687	vm_offset_t va;
688};
689
690static void
691pmap_invalidate_page_action(void *arg)
692{
693	struct pmap_invalidate_page_arg *p = arg;
694
695	tlb_invalidate_address(p->pmap, p->va);
696}
697
698static void
699pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
700{
701	struct pmap_invalidate_page_arg arg;
702
703	arg.pmap = pmap;
704	arg.va = va;
705	pmap_call_on_active_cpus(pmap, pmap_invalidate_page_action, &arg);
706}
707
708struct pmap_update_page_arg {
709	pmap_t pmap;
710	vm_offset_t va;
711	pt_entry_t pte;
712};
713
714static void
715pmap_update_page_action(void *arg)
716{
717	struct pmap_update_page_arg *p = arg;
718
719	tlb_update(p->pmap, p->va, p->pte);
720}
721
722static void
723pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte)
724{
725	struct pmap_update_page_arg arg;
726
727	arg.pmap = pmap;
728	arg.va = va;
729	arg.pte = pte;
730	pmap_call_on_active_cpus(pmap, pmap_update_page_action, &arg);
731}
732
733/*
734 *	Routine:	pmap_extract
735 *	Function:
736 *		Extract the physical page address associated
737 *		with the given map/virtual_address pair.
738 */
739vm_paddr_t
740pmap_extract(pmap_t pmap, vm_offset_t va)
741{
742	pt_entry_t *pte;
743	vm_offset_t retval = 0;
744
745	PMAP_LOCK(pmap);
746	pte = pmap_pte(pmap, va);
747	if (pte) {
748		retval = TLBLO_PTE_TO_PA(*pte) | (va & PAGE_MASK);
749	}
750	PMAP_UNLOCK(pmap);
751	return (retval);
752}
753
754/*
755 *	Routine:	pmap_extract_and_hold
756 *	Function:
757 *		Atomically extract and hold the physical page
758 *		with the given pmap and virtual address pair
759 *		if that mapping permits the given protection.
760 */
761vm_page_t
762pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
763{
764	pt_entry_t *ptep;
765	pt_entry_t pte;
766	vm_page_t m;
767	vm_paddr_t pa;
768
769	m = NULL;
770	pa = 0;
771	PMAP_LOCK(pmap);
772retry:
773	ptep = pmap_pte(pmap, va);
774	if ((ptep != NULL)  && ((pte = *ptep) != 0) &&
775	    pte_test(&pte, PTE_V) &&
776	    (pte_test(&pte, PTE_D) || (prot & VM_PROT_WRITE) == 0)) {
777		if (vm_page_pa_tryrelock(pmap, TLBLO_PTE_TO_PA(pte), &pa))
778			goto retry;
779
780		m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(pte));
781		vm_page_hold(m);
782	}
783	PA_UNLOCK_COND(pa);
784	PMAP_UNLOCK(pmap);
785	return (m);
786}
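/*
 * The retry loop above exists because vm_page_pa_tryrelock() may have
 * to drop the pmap lock in order to acquire the page lock for the new
 * physical address; it returns nonzero in that case and the PTE must
 * be re-read under the re-acquired lock.
 */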
787
788/***************************************************
789 * Low level mapping routines.....
790 ***************************************************/
791
792/*
793 * add a wired page to the kva
794 */
795void
796pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int attr)
797{
798	pt_entry_t *pte;
799	pt_entry_t opte, npte;
800
801#ifdef PMAP_DEBUG
802	printf("pmap_kenter:  va: %p -> pa: %p\n", (void *)va, (void *)pa);
803#endif
804	npte = TLBLO_PA_TO_PFN(pa) | PTE_D | PTE_V | PTE_G | PTE_W | attr;
805
806	pte = pmap_pte(kernel_pmap, va);
807	opte = *pte;
808	*pte = npte;
809	if (pte_test(&opte, PTE_V) && opte != npte)
810		pmap_update_page(kernel_pmap, va, npte);
811}
812
813void
814pmap_kenter(vm_offset_t va, vm_paddr_t pa)
815{
816
817	KASSERT(is_cacheable_mem(pa),
818		("pmap_kenter: memory at 0x%lx is not cacheable", (u_long)pa));
819
820	pmap_kenter_attr(va, pa, PTE_C_CACHE);
821}
822
823/*
824 * remove a page from the kernel pagetables
825 */
826 /* PMAP_INLINE */ void
827pmap_kremove(vm_offset_t va)
828{
829	pt_entry_t *pte;
830
831	/*
832	 * Write back all caches from the page being destroyed
833	 */
834	mips_dcache_wbinv_range_index(va, PAGE_SIZE);
835
836	pte = pmap_pte(kernel_pmap, va);
837	*pte = PTE_G;
838	pmap_invalidate_page(kernel_pmap, va);
839}
840
841/*
842 *	Used to map a range of physical addresses into kernel
843 *	virtual address space.
844 *
845 *	The value passed in '*virt' is a suggested virtual address for
846 *	the mapping. Architectures which can support a direct-mapped
847 *	physical to virtual region can return the appropriate address
848 *	within that region, leaving '*virt' unchanged. Other
849 *	architectures should map the pages starting at '*virt' and
850 *	update '*virt' with the first usable address after the mapped
851 *	region.
852 *
853 *	Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
854 */
855vm_offset_t
856pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
857{
858	vm_offset_t va, sva;
859
860	if (MIPS_DIRECT_MAPPABLE(end - 1))
861		return (MIPS_PHYS_TO_DIRECT(start));
862
863	va = sva = *virt;
864	while (start < end) {
865		pmap_kenter(va, start);
866		va += PAGE_SIZE;
867		start += PAGE_SIZE;
868	}
869	*virt = va;
870	return (sva);
871}
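/*
 * On MIPS the direct-map case above is the common one: n64 kernels can
 * reach physical memory through XKPHYS, and 32-bit kernels fall back to
 * the loop that consumes kernel virtual address space only for memory
 * beyond the KSEG0-mappable range.
 */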
872
873/*
874 * Add a list of wired pages to the kva.
875 * This routine is only used for temporary
876 * kernel mappings that do not need to have
877 * page modification or references recorded.
878 * Note that old mappings are simply written
879 * over.  The page *must* be wired.
880 */
881void
882pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
883{
884	int i;
885	vm_offset_t origva = va;
886
887	for (i = 0; i < count; i++) {
888		pmap_flush_pvcache(m[i]);
889		pmap_kenter(va, VM_PAGE_TO_PHYS(m[i]));
890		va += PAGE_SIZE;
891	}
892
893	mips_dcache_wbinv_range_index(origva, PAGE_SIZE*count);
894}
895
896/*
897 * this routine jerks page mappings from the
898 * kernel -- it is meant only for temporary mappings.
899 */
900void
901pmap_qremove(vm_offset_t va, int count)
902{
903	/*
904	 * No need to wb/inv caches here,
905	 *   pmap_kremove will do it for us
906	 */
907
908	while (count-- > 0) {
909		pmap_kremove(va);
910		va += PAGE_SIZE;
911	}
912}
913
914/***************************************************
915 * Page table page management routines.....
916 ***************************************************/
917
918/*  Revision 1.507
919 *
920 * Simplify the reference counting of page table pages.	 Specifically, use
921 * the page table page's wired count rather than its hold count to contain
922 * the reference count.
923 */
924
925/*
926 * This routine drops the wire count on a page table page; once the
927 * count reaches zero, the page table page is unmapped and freed.
928 */
929static PMAP_INLINE int
930pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
931{
932	--m->wire_count;
933	if (m->wire_count == 0)
934		return (_pmap_unwire_pte_hold(pmap, va, m));
935	else
936		return (0);
937}
938
939static int
940_pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
941{
942	pd_entry_t *pde;
943
944	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
945	/*
946	 * unmap the page table page
947	 */
948#ifdef __mips_n64
949	if (m->pindex < NUPDE)
950		pde = pmap_pde(pmap, va);
951	else
952		pde = pmap_segmap(pmap, va);
953#else
954	pde = pmap_pde(pmap, va);
955#endif
956	*pde = 0;
957	pmap->pm_stats.resident_count--;
958
959#ifdef __mips_n64
960	if (m->pindex < NUPDE) {
961		pd_entry_t *pdp;
962		vm_page_t pdpg;
963
964		/*
965		 * Recursively decrement next level pagetable refcount
966		 */
967		pdp = (pd_entry_t *)*pmap_segmap(pmap, va);
968		pdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pdp));
969		pmap_unwire_pte_hold(pmap, va, pdpg);
970	}
971#endif
972
973	/*
974	 * If the page is finally unwired, simply free it.
975	 */
976	vm_page_free_zero(m);
977	atomic_subtract_int(&cnt.v_wire_count, 1);
978	return (1);
979}
980
981/*
982 * After removing a page table entry, this routine is used to
983 * conditionally free the page, and manage the hold/wire counts.
984 */
985static int
986pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
987{
988	vm_page_t mpte;
989
990	if (va >= VM_MAXUSER_ADDRESS)
991		return (0);
992	KASSERT(pde != 0, ("pmap_unuse_pt: pde != 0"));
993	mpte = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pde));
994	return (pmap_unwire_pte_hold(pmap, va, mpte));
995}
996
997void
998pmap_pinit0(pmap_t pmap)
999{
1000	int i;
1001
1002	PMAP_LOCK_INIT(pmap);
1003	pmap->pm_segtab = kernel_segmap;
1004	CPU_ZERO(&pmap->pm_active);
1005	for (i = 0; i < MAXCPU; i++) {
1006		pmap->pm_asid[i].asid = PMAP_ASID_RESERVED;
1007		pmap->pm_asid[i].gen = 0;
1008	}
1009	PCPU_SET(curpmap, pmap);
1010	TAILQ_INIT(&pmap->pm_pvchunk);
1011	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1012}
1013
1014void
1015pmap_grow_direct_page_cache()
1016{
1017
1018#ifdef __mips_n64
1019	vm_pageout_grow_cache(3, 0, MIPS_XKPHYS_LARGEST_PHYS);
1020#else
1021	vm_pageout_grow_cache(3, 0, MIPS_KSEG0_LARGEST_PHYS);
1022#endif
1023}
1024
1025vm_page_t
1026pmap_alloc_direct_page(unsigned int index, int req)
1027{
1028	vm_page_t m;
1029
1030	m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, req | VM_ALLOC_WIRED |
1031	    VM_ALLOC_ZERO);
1032	if (m == NULL)
1033		return (NULL);
1034
1035	if ((m->flags & PG_ZERO) == 0)
1036		pmap_zero_page(m);
1037
1038	m->pindex = index;
1039	return (m);
1040}
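/*
 * Page table pages and pv chunk pages are allocated from the
 * direct-mapped freelist (VM_FREELIST_DIRECT) so that they can always
 * be reached through KSEG0/XKPHYS without themselves requiring page
 * table entries; see also get_pv_entry() below.
 */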
1041
1042/*
1043 * Initialize a preallocated and zeroed pmap structure,
1044 * such as one in a vmspace structure.
1045 */
1046int
1047pmap_pinit(pmap_t pmap)
1048{
1049	vm_offset_t ptdva;
1050	vm_page_t ptdpg;
1051	int i;
1052
1053	PMAP_LOCK_INIT(pmap);
1054
1055	/*
1056	 * allocate the page directory page
1057	 */
1058	while ((ptdpg = pmap_alloc_direct_page(NUSERPGTBLS, VM_ALLOC_NORMAL)) == NULL)
1059	       pmap_grow_direct_page_cache();
1060
1061	ptdva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(ptdpg));
1062	pmap->pm_segtab = (pd_entry_t *)ptdva;
1063	CPU_ZERO(&pmap->pm_active);
1064	for (i = 0; i < MAXCPU; i++) {
1065		pmap->pm_asid[i].asid = PMAP_ASID_RESERVED;
1066		pmap->pm_asid[i].gen = 0;
1067	}
1068	TAILQ_INIT(&pmap->pm_pvchunk);
1069	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1070
1071	return (1);
1072}
1073
1074/*
1075 * This routine is called when the needed page table page is not
1076 * resident; it allocates a new one and enters it into the directory.
1077 */
1078static vm_page_t
1079_pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
1080{
1081	vm_offset_t pageva;
1082	vm_page_t m;
1083
1084	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
1085	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
1086	    ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
1087
1088	/*
1089	 * Find or fabricate a new pagetable page
1090	 */
1091	if ((m = pmap_alloc_direct_page(ptepindex, VM_ALLOC_NORMAL)) == NULL) {
1092		if (flags & M_WAITOK) {
1093			PMAP_UNLOCK(pmap);
1094			vm_page_unlock_queues();
1095			pmap_grow_direct_page_cache();
1096			vm_page_lock_queues();
1097			PMAP_LOCK(pmap);
1098		}
1099
1100		/*
1101		 * Indicate the need to retry.	While waiting, the page
1102		 * table page may have been allocated.
1103		 */
1104		return (NULL);
1105	}
1106
1107	/*
1108	 * Map the pagetable page into the process address space, if it
1109	 * isn't already there.
1110	 */
1111	pageva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
1112
1113#ifdef __mips_n64
1114	if (ptepindex >= NUPDE) {
1115		pmap->pm_segtab[ptepindex - NUPDE] = (pd_entry_t)pageva;
1116	} else {
1117		pd_entry_t *pdep, *pde;
1118		int segindex = ptepindex >> (SEGSHIFT - PDRSHIFT);
1119		int pdeindex = ptepindex & (NPDEPG - 1);
1120		vm_page_t pg;
1121
1122		pdep = &pmap->pm_segtab[segindex];
1123		if (*pdep == NULL) {
1124			/* recurse for allocating page dir */
1125			if (_pmap_allocpte(pmap, NUPDE + segindex,
1126			    flags) == NULL) {
1127				/* alloc failed, release current */
1128				--m->wire_count;
1129				atomic_subtract_int(&cnt.v_wire_count, 1);
1130				vm_page_free_zero(m);
1131				return (NULL);
1132			}
1133		} else {
1134			pg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pdep));
1135			pg->wire_count++;
1136		}
1137		/* Next level entry */
1138		pde = (pd_entry_t *)*pdep;
1139		pde[pdeindex] = (pd_entry_t)pageva;
1140	}
1141#else
1142	pmap->pm_segtab[ptepindex] = (pd_entry_t)pageva;
1143#endif
1144	pmap->pm_stats.resident_count++;
1145	return (m);
1146}
1147
1148static vm_page_t
1149pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
1150{
1151	unsigned ptepindex;
1152	pd_entry_t *pde;
1153	vm_page_t m;
1154
1155	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
1156	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
1157	    ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
1158
1159	/*
1160	 * Calculate pagetable page index
1161	 */
1162	ptepindex = pmap_pde_pindex(va);
1163retry:
1164	/*
1165	 * Get the page directory entry
1166	 */
1167	pde = pmap_pde(pmap, va);
1168
1169	/*
1170	 * If the page table page is mapped, we just increment the hold
1171	 * count, and activate it.
1172	 */
1173	if (pde != NULL && *pde != NULL) {
1174		m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pde));
1175		m->wire_count++;
1176	} else {
1177		/*
1178		 * Here if the pte page isn't mapped, or if it has been
1179		 * deallocated.
1180		 */
1181		m = _pmap_allocpte(pmap, ptepindex, flags);
1182		if (m == NULL && (flags & M_WAITOK))
1183			goto retry;
1184	}
1185	return (m);
1186}
1187
1188
1189/***************************************************
1190 * Pmap allocation/deallocation routines.
1191 ***************************************************/
1192/*
1193 *  Revision 1.397
1194 *  - Merged pmap_release and pmap_release_free_page.  When pmap_release is
1195 *    called only the page directory page(s) can be left in the pmap pte
1196 *    object, since all page table pages will have been freed by
1197 *    pmap_remove_pages and pmap_remove.  In addition, there can only be one
1198 *    reference to the pmap and the page directory is wired, so the page(s)
1199 *    can never be busy.  So all there is to do is clear the magic mappings
1200 *    from the page directory and free the page(s).
1201 */
1202
1203
1204/*
1205 * Release any resources held by the given physical map.
1206 * Called when a pmap initialized by pmap_pinit is being released.
1207 * Should only be called if the map contains no valid mappings.
1208 */
1209void
1210pmap_release(pmap_t pmap)
1211{
1212	vm_offset_t ptdva;
1213	vm_page_t ptdpg;
1214
1215	KASSERT(pmap->pm_stats.resident_count == 0,
1216	    ("pmap_release: pmap resident count %ld != 0",
1217	    pmap->pm_stats.resident_count));
1218
1219	ptdva = (vm_offset_t)pmap->pm_segtab;
1220	ptdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(ptdva));
1221
1222	ptdpg->wire_count--;
1223	atomic_subtract_int(&cnt.v_wire_count, 1);
1224	vm_page_free_zero(ptdpg);
1225	PMAP_LOCK_DESTROY(pmap);
1226}
1227
1228/*
1229 * grow the number of kernel page table entries, if needed
1230 */
1231void
1232pmap_growkernel(vm_offset_t addr)
1233{
1234	vm_page_t nkpg;
1235	pd_entry_t *pde, *pdpe;
1236	pt_entry_t *pte;
1237	int i;
1238
1239	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
1240	addr = roundup2(addr, NBSEG);
1241	if (addr - 1 >= kernel_map->max_offset)
1242		addr = kernel_map->max_offset;
1243	while (kernel_vm_end < addr) {
1244		pdpe = pmap_segmap(kernel_pmap, kernel_vm_end);
1245#ifdef __mips_n64
1246		if (*pdpe == 0) {
1247			/* new intermediate page table entry */
1248			nkpg = pmap_alloc_direct_page(nkpt, VM_ALLOC_INTERRUPT);
1249			if (nkpg == NULL)
1250				panic("pmap_growkernel: no memory to grow kernel");
1251			*pdpe = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg));
1252			continue; /* try again */
1253		}
1254#endif
1255		pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
1256		if (*pde != 0) {
1257			kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
1258			if (kernel_vm_end - 1 >= kernel_map->max_offset) {
1259				kernel_vm_end = kernel_map->max_offset;
1260				break;
1261			}
1262			continue;
1263		}
1264
1265		/*
1266		 * This index is bogus, but out of the way
1267		 */
1268		nkpg = pmap_alloc_direct_page(nkpt, VM_ALLOC_INTERRUPT);
1269		if (!nkpg)
1270			panic("pmap_growkernel: no memory to grow kernel");
1271		nkpt++;
1272		*pde = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg));
1273
1274		/*
1275		 * The R[4-7]?00 stores only one copy of the Global bit in
1276		 * the translation lookaside buffer for each pair of pages.
1277		 * Thus invalid entries must have the Global bit set so that
1278		 * when the EntryLo0 and EntryLo1 G bits are ANDed together
1279		 * they produce a global bit to store in the TLB.
1280		 */
1281		pte = (pt_entry_t *)*pde;
1282		for (i = 0; i < NPTEPG; i++)
1283			pte[i] = PTE_G;
1284
1285		kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
1286		if (kernel_vm_end - 1 >= kernel_map->max_offset) {
1287			kernel_vm_end = kernel_map->max_offset;
1288			break;
1289		}
1290	}
1291}
1292
1293/***************************************************
1294 * page management routines.
1295 ***************************************************/
1296
1297CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
1298#ifdef __mips_n64
1299CTASSERT(_NPCM == 3);
1300CTASSERT(_NPCPV == 168);
1301#else
1302CTASSERT(_NPCM == 11);
1303CTASSERT(_NPCPV == 336);
1304#endif
1305
1306static __inline struct pv_chunk *
1307pv_to_chunk(pv_entry_t pv)
1308{
1309
1310	return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
1311}
1312
1313#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
1314
1315#ifdef __mips_n64
1316#define	PC_FREE0_1	0xfffffffffffffffful
1317#define	PC_FREE2	0x000000fffffffffful
1318#else
1319#define	PC_FREE0_9	0xfffffffful	/* Free values for index 0 through 9 */
1320#define	PC_FREE10	0x0000fffful	/* Free values for index 10 */
1321#endif
1322
1323static const u_long pc_freemask[_NPCM] = {
1324#ifdef __mips_n64
1325	PC_FREE0_1, PC_FREE0_1, PC_FREE2
1326#else
1327	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
1328	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
1329	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
1330	PC_FREE0_9, PC_FREE10
1331#endif
1332};
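/*
 * Example of the chunk bookkeeping (using the n64 layout): pv entry
 * index 70 within a chunk lives at pc_map field 70 / 64 = 1, bit
 * 70 % 64 = 6, and a set bit means that the slot is free.
 * free_pv_entry() and get_pv_entry() below perform exactly this
 * index <-> (field, bit) conversion using sizeof(u_long) * NBBY bits
 * per field.
 */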
1333
1334static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
1335
1336SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
1337    "Current number of pv entries");
1338
1339#ifdef PV_STATS
1340static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
1341
1342SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
1343    "Current number of pv entry chunks");
1344SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
1345    "Current number of pv entry chunks allocated");
1346SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
1347    "Current number of pv entry chunks frees");
1348SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
1349    "Number of times tried to get a chunk page but failed.");
1350
1351static long pv_entry_frees, pv_entry_allocs;
1352static int pv_entry_spare;
1353
1354SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
1355    "Current number of pv entry frees");
1356SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
1357    "Current number of pv entry allocs");
1358SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
1359    "Current number of spare pv entries");
1360#endif
1361
1362/*
1363 * We are in a serious low memory condition.  Resort to
1364 * drastic measures to free some pages so we can allocate
1365 * another pv entry chunk.
1366 */
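/*
 * The scan below walks pv_chunks in LRU order.  A chunk owned by a
 * pmap other than locked_pmap is locked outright only when that pmap
 * sorts after locked_pmap; otherwise a trylock is used, so the scan
 * cannot deadlock against the caller.  Every non-wired 4 KB mapping
 * described by a chunk is torn down; a chunk whose slots all become
 * free supplies the page that is returned, while the others are
 * strung onto "newtail" and re-appended to pv_chunks on the way out.
 */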
1367static vm_page_t
1368pmap_pv_reclaim(pmap_t locked_pmap)
1369{
1370	struct pch newtail;
1371	struct pv_chunk *pc;
1372	pd_entry_t *pde;
1373	pmap_t pmap;
1374	pt_entry_t *pte, oldpte;
1375	pv_entry_t pv;
1376	vm_offset_t va;
1377	vm_page_t m, m_pc;
1378	u_long inuse;
1379	int bit, field, freed, idx;
1380
1381	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
1382	pmap = NULL;
1383	m_pc = NULL;
1384	TAILQ_INIT(&newtail);
1385	while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL) {
1386		TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1387		if (pmap != pc->pc_pmap) {
1388			if (pmap != NULL) {
1389				pmap_invalidate_all(pmap);
1390				if (pmap != locked_pmap)
1391					PMAP_UNLOCK(pmap);
1392			}
1393			pmap = pc->pc_pmap;
1394			/* Avoid deadlock and lock recursion. */
1395			if (pmap > locked_pmap)
1396				PMAP_LOCK(pmap);
1397			else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) {
1398				pmap = NULL;
1399				TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
1400				continue;
1401			}
1402		}
1403
1404		/*
1405		 * Destroy every non-wired, 4 KB page mapping in the chunk.
1406		 */
1407		freed = 0;
1408		for (field = 0; field < _NPCM; field++) {
1409			for (inuse = ~pc->pc_map[field] & pc_freemask[field];
1410			    inuse != 0; inuse &= ~(1UL << bit)) {
1411				bit = ffsl(inuse) - 1;
1412				idx = field * sizeof(inuse) * NBBY + bit;
1413				pv = &pc->pc_pventry[idx];
1414				va = pv->pv_va;
1415				pde = pmap_pde(pmap, va);
1416				KASSERT(pde != NULL && *pde != 0,
1417				    ("pmap_pv_reclaim: pde"));
1418				pte = pmap_pde_to_pte(pde, va);
1419				oldpte = *pte;
1420				KASSERT(!pte_test(&oldpte, PTE_W),
1421				    ("wired pte for unwired page"));
1422				if (is_kernel_pmap(pmap))
1423					*pte = PTE_G;
1424				else
1425					*pte = 0;
1426				pmap_invalidate_page(pmap, va);
1427				m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(oldpte));
1428				if (pte_test(&oldpte, PTE_D))
1429					vm_page_dirty(m);
1430				if (m->md.pv_flags & PV_TABLE_REF)
1431					vm_page_aflag_set(m, PGA_REFERENCED);
1432				TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1433				if (TAILQ_EMPTY(&m->md.pv_list)) {
1434					vm_page_aflag_clear(m, PGA_WRITEABLE);
1435					m->md.pv_flags &= ~(PV_TABLE_REF |
1436					    PV_TABLE_MOD);
1437				}
1438				pc->pc_map[field] |= 1UL << bit;
1439				pmap_unuse_pt(pmap, va, *pde);
1440				freed++;
1441			}
1442		}
1443		if (freed == 0) {
1444			TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
1445			continue;
1446		}
1447		/* Every freed mapping is for a 4 KB page. */
1448		pmap->pm_stats.resident_count -= freed;
1449		PV_STAT(pv_entry_frees += freed);
1450		PV_STAT(pv_entry_spare += freed);
1451		pv_entry_count -= freed;
1452		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1453		for (field = 0; field < _NPCM; field++)
1454			if (pc->pc_map[field] != pc_freemask[field]) {
1455				TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
1456				    pc_list);
1457				TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
1458
1459				/*
1460				 * One freed pv entry in locked_pmap is
1461				 * sufficient.
1462				 */
1463				if (pmap == locked_pmap)
1464					goto out;
1465				break;
1466			}
1467		if (field == _NPCM) {
1468			PV_STAT(pv_entry_spare -= _NPCPV);
1469			PV_STAT(pc_chunk_count--);
1470			PV_STAT(pc_chunk_frees++);
1471			/* Entire chunk is free; return it. */
1472			m_pc = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(
1473			    (vm_offset_t)pc));
1474			break;
1475		}
1476	}
1477out:
1478	TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru);
1479	if (pmap != NULL) {
1480		pmap_invalidate_all(pmap);
1481		if (pmap != locked_pmap)
1482			PMAP_UNLOCK(pmap);
1483	}
1484	return (m_pc);
1485}
1486
1487/*
1488 * free the pv_entry back to the free list
1489 */
1490static void
1491free_pv_entry(pmap_t pmap, pv_entry_t pv)
1492{
1493	struct pv_chunk *pc;
1494	int bit, field, idx;
1495
1496	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1497	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1498	PV_STAT(pv_entry_frees++);
1499	PV_STAT(pv_entry_spare++);
1500	pv_entry_count--;
1501	pc = pv_to_chunk(pv);
1502	idx = pv - &pc->pc_pventry[0];
1503	field = idx / (sizeof(u_long) * NBBY);
1504	bit = idx % (sizeof(u_long) * NBBY);
1505	pc->pc_map[field] |= 1ul << bit;
1506	for (idx = 0; idx < _NPCM; idx++)
1507		if (pc->pc_map[idx] != pc_freemask[idx]) {
1508			/*
1509			 * 98% of the time, pc is already at the head of the
1510			 * list.  If it isn't already, move it to the head.
1511			 */
1512			if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) !=
1513			    pc)) {
1514				TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1515				TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
1516				    pc_list);
1517			}
1518			return;
1519		}
1520	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1521	free_pv_chunk(pc);
1522}
1523
1524static void
1525free_pv_chunk(struct pv_chunk *pc)
1526{
1527	vm_page_t m;
1528
1529 	TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1530	PV_STAT(pv_entry_spare -= _NPCPV);
1531	PV_STAT(pc_chunk_count--);
1532	PV_STAT(pc_chunk_frees++);
1533	/* entire chunk is free, return it */
1534	m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS((vm_offset_t)pc));
1535	vm_page_unwire(m, 0);
1536	vm_page_free(m);
1537}
1538
1539/*
1540 * get a new pv_entry, allocating a block from the system
1541 * when needed.
1542 */
1543static pv_entry_t
1544get_pv_entry(pmap_t pmap, boolean_t try)
1545{
1546	struct pv_chunk *pc;
1547	pv_entry_t pv;
1548	vm_page_t m;
1549	int bit, field, idx;
1550
1551	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1552	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1553	PV_STAT(pv_entry_allocs++);
1554	pv_entry_count++;
1555retry:
1556	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
1557	if (pc != NULL) {
1558		for (field = 0; field < _NPCM; field++) {
1559			if (pc->pc_map[field]) {
1560				bit = ffsl(pc->pc_map[field]) - 1;
1561				break;
1562			}
1563		}
1564		if (field < _NPCM) {
1565			idx = field * sizeof(pc->pc_map[field]) * NBBY + bit;
1566			pv = &pc->pc_pventry[idx];
1567			pc->pc_map[field] &= ~(1ul << bit);
1568			/* If this was the last item, move it to tail */
1569			for (field = 0; field < _NPCM; field++)
1570				if (pc->pc_map[field] != 0) {
1571					PV_STAT(pv_entry_spare--);
1572					return (pv);	/* not full, return */
1573				}
1574			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1575			TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
1576			PV_STAT(pv_entry_spare--);
1577			return (pv);
1578		}
1579	}
1580	/* No free items, allocate another chunk */
1581	m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, VM_ALLOC_NORMAL |
1582	    VM_ALLOC_WIRED);
1583	if (m == NULL) {
1584		if (try) {
1585			pv_entry_count--;
1586			PV_STAT(pc_chunk_tryfail++);
1587			return (NULL);
1588		}
1589		m = pmap_pv_reclaim(pmap);
1590		if (m == NULL)
1591			goto retry;
1592	}
1593	PV_STAT(pc_chunk_count++);
1594	PV_STAT(pc_chunk_allocs++);
1595	pc = (struct pv_chunk *)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
1596	pc->pc_pmap = pmap;
1597	pc->pc_map[0] = pc_freemask[0] & ~1ul;	/* preallocated bit 0 */
1598	for (field = 1; field < _NPCM; field++)
1599		pc->pc_map[field] = pc_freemask[field];
1600	TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
1601	pv = &pc->pc_pventry[0];
1602	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1603	PV_STAT(pv_entry_spare += _NPCPV - 1);
1604	return (pv);
1605}
1606
1607/*
1608 * Search the given pv list for the entry matching the specified
1609 * pmap and virtual address; if it is found, unlink it from the
1610 * list and return it.  The caller either frees the entry or
1611 * reuses it for a new mapping.
1612 */
1613
1614static pv_entry_t
1615pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1616{
1617	pv_entry_t pv;
1618
1619	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1620	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
1621		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
1622			TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
1623			break;
1624		}
1625	}
1626	return (pv);
1627}
1628
1629static void
1630pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1631{
1632	pv_entry_t pv;
1633
1634	pv = pmap_pvh_remove(pvh, pmap, va);
1635	KASSERT(pv != NULL, ("pmap_pvh_free: pv not found, pa %lx va %lx",
1636	     (u_long)VM_PAGE_TO_PHYS(member2struct(vm_page, md, pvh)),
1637	     (u_long)va));
1638	free_pv_entry(pmap, pv);
1639}
1640
1641static void
1642pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
1643{
1644
1645	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1646	pmap_pvh_free(&m->md, pmap, va);
1647	if (TAILQ_EMPTY(&m->md.pv_list))
1648		vm_page_aflag_clear(m, PGA_WRITEABLE);
1649}
1650
1651/*
1652 * Conditionally create a pv entry.
1653 */
1654static boolean_t
1655pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte, vm_offset_t va,
1656    vm_page_t m)
1657{
1658	pv_entry_t pv;
1659
1660	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1661	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1662	if ((pv = get_pv_entry(pmap, TRUE)) != NULL) {
1663		pv->pv_va = va;
1664		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
1665		return (TRUE);
1666	} else
1667		return (FALSE);
1668}
1669
1670/*
1671 * pmap_remove_pte: do the things to unmap a page in a process
1672 */
1673static int
1674pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va,
1675    pd_entry_t pde)
1676{
1677	pt_entry_t oldpte;
1678	vm_page_t m;
1679	vm_paddr_t pa;
1680
1681	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1682	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1683
1684	oldpte = *ptq;
1685	if (is_kernel_pmap(pmap))
1686		*ptq = PTE_G;
1687	else
1688		*ptq = 0;
1689
1690	if (pte_test(&oldpte, PTE_W))
1691		pmap->pm_stats.wired_count -= 1;
1692
1693	pmap->pm_stats.resident_count -= 1;
1694	pa = TLBLO_PTE_TO_PA(oldpte);
1695
1696	if (page_is_managed(pa)) {
1697		m = PHYS_TO_VM_PAGE(pa);
1698		if (pte_test(&oldpte, PTE_D)) {
1699			KASSERT(!pte_test(&oldpte, PTE_RO),
1700			    ("%s: modified page not writable: va: %p, pte: %#jx",
1701			    __func__, (void *)va, (uintmax_t)oldpte));
1702			vm_page_dirty(m);
1703		}
1704		if (m->md.pv_flags & PV_TABLE_REF)
1705			vm_page_aflag_set(m, PGA_REFERENCED);
1706		m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
1707
1708		pmap_remove_entry(pmap, m, va);
1709	}
1710	return (pmap_unuse_pt(pmap, va, pde));
1711}
1712
1713/*
1714 * Remove a single page from a process address space
1715 */
1716static void
1717pmap_remove_page(struct pmap *pmap, vm_offset_t va)
1718{
1719	pd_entry_t *pde;
1720	pt_entry_t *ptq;
1721
1722	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1723	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1724	pde = pmap_pde(pmap, va);
1725	if (pde == NULL || *pde == 0)
1726		return;
1727	ptq = pmap_pde_to_pte(pde, va);
1728
1729	/*
1730	 * if there is no pte for this address, just skip it!!!
1731	 */
1732	if (!pte_test(ptq, PTE_V)) {
1733		return;
1734	}
1735
1736	/*
1737	 * Write back all caches from the page being destroyed
1738	 */
1739	mips_dcache_wbinv_range_index(va, PAGE_SIZE);
1740
1741	/*
1742	 * Remove the PTE and invalidate the TLB entry for this address.
1743	 */
1744	(void)pmap_remove_pte(pmap, ptq, va, *pde);
1745	pmap_invalidate_page(pmap, va);
1746
1747	return;
1748}
1749
1750/*
1751 *	Remove the given range of addresses from the specified map.
1752 *
1753 *	It is assumed that the start and end are properly
1754 *	rounded to the page size.
1755 */
1756void
1757pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
1758{
1759	vm_offset_t va_next;
1760	pd_entry_t *pde, *pdpe;
1761	pt_entry_t *pte;
1762
1763	if (pmap->pm_stats.resident_count == 0)
1764		return;
1765
1766	vm_page_lock_queues();
1767	PMAP_LOCK(pmap);
1768
1769	/*
1770	 * Special handling for removing a single page: a very common
1771	 * operation for which it is easy to short-circuit some code.
1772	 */
1773	if ((sva + PAGE_SIZE) == eva) {
1774		pmap_remove_page(pmap, sva);
1775		goto out;
1776	}
1777	for (; sva < eva; sva = va_next) {
1778		pdpe = pmap_segmap(pmap, sva);
1779#ifdef __mips_n64
1780		if (*pdpe == 0) {
1781			va_next = (sva + NBSEG) & ~SEGMASK;
1782			if (va_next < sva)
1783				va_next = eva;
1784			continue;
1785		}
1786#endif
1787		va_next = (sva + NBPDR) & ~PDRMASK;
1788		if (va_next < sva)
1789			va_next = eva;
1790
1791		pde = pmap_pdpe_to_pde(pdpe, sva);
1792		if (*pde == 0)
1793			continue;
1794		if (va_next > eva)
1795			va_next = eva;
1796		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next;
1797		    pte++, sva += PAGE_SIZE) {
1798			pmap_remove_page(pmap, sva);
1799		}
1800	}
1801out:
1802	vm_page_unlock_queues();
1803	PMAP_UNLOCK(pmap);
1804}
1805
1806/*
1807 *	Routine:	pmap_remove_all
1808 *	Function:
1809 *		Removes this physical page from
1810 *		all physical maps in which it resides.
1811 *		Reflects back modify bits to the pager.
1812 *
1813 *	Notes:
1814 *		Original versions of this routine were very
1815 *		inefficient because they iteratively called
1816 *		pmap_remove (slow...)
1817 */
1818
1819void
1820pmap_remove_all(vm_page_t m)
1821{
1822	pv_entry_t pv;
1823	pmap_t pmap;
1824	pd_entry_t *pde;
1825	pt_entry_t *pte, tpte;
1826
1827	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1828	    ("pmap_remove_all: page %p is not managed", m));
1829	vm_page_lock_queues();
1830
1831	if (m->md.pv_flags & PV_TABLE_REF)
1832		vm_page_aflag_set(m, PGA_REFERENCED);
1833
1834	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
1835		pmap = PV_PMAP(pv);
1836		PMAP_LOCK(pmap);
1837
1838		/*
1839		 * If this is the last mapping, write back all caches from
1840		 * the page being destroyed.
1841		 */
1842		if (TAILQ_NEXT(pv, pv_list) == NULL)
1843			mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
1844
1845		pmap->pm_stats.resident_count--;
1846
1847		pde = pmap_pde(pmap, pv->pv_va);
1848		KASSERT(pde != NULL && *pde != 0, ("pmap_remove_all: pde"));
1849		pte = pmap_pde_to_pte(pde, pv->pv_va);
1850
1851		tpte = *pte;
1852		if (is_kernel_pmap(pmap))
1853			*pte = PTE_G;
1854		else
1855			*pte = 0;
1856
1857		if (pte_test(&tpte, PTE_W))
1858			pmap->pm_stats.wired_count--;
1859
1860		/*
1861		 * Update the vm_page_t clean and reference bits.
1862		 */
1863		if (pte_test(&tpte, PTE_D)) {
1864			KASSERT(!pte_test(&tpte, PTE_RO),
1865			    ("%s: modified page not writable: va: %p, pte: %#jx",
1866			    __func__, (void *)pv->pv_va, (uintmax_t)tpte));
1867			vm_page_dirty(m);
1868		}
1869		pmap_invalidate_page(pmap, pv->pv_va);
1870
1871		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1872		pmap_unuse_pt(pmap, pv->pv_va, *pde);
1873		free_pv_entry(pmap, pv);
1874		PMAP_UNLOCK(pmap);
1875	}
1876
1877	vm_page_aflag_clear(m, PGA_WRITEABLE);
1878	m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
1879	vm_page_unlock_queues();
1880}
1881
1882/*
1883 *	Set the physical protection on the
1884 *	specified range of this map as requested.
1885 */
1886void
1887pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
1888{
1889	pt_entry_t *pte;
1890	pd_entry_t *pde, *pdpe;
1891	vm_offset_t va_next;
1892
1893	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1894		pmap_remove(pmap, sva, eva);
1895		return;
1896	}
1897	if (prot & VM_PROT_WRITE)
1898		return;
1899
1900	vm_page_lock_queues();
1901	PMAP_LOCK(pmap);
1902	for (; sva < eva; sva = va_next) {
1903		pt_entry_t pbits;
1904		vm_page_t m;
1905		vm_paddr_t pa;
1906
1907		pdpe = pmap_segmap(pmap, sva);
1908#ifdef __mips_n64
1909		if (*pdpe == 0) {
1910			va_next = (sva + NBSEG) & ~SEGMASK;
1911			if (va_next < sva)
1912				va_next = eva;
1913			continue;
1914		}
1915#endif
1916		va_next = (sva + NBPDR) & ~PDRMASK;
1917		if (va_next < sva)
1918			va_next = eva;
1919
1920		pde = pmap_pdpe_to_pde(pdpe, sva);
1921		if (pde == NULL || *pde == NULL)
1922			continue;
1923		if (va_next > eva)
1924			va_next = eva;
1925
1926		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
1927		     sva += PAGE_SIZE) {
1928
1929			/* Skip invalid PTEs */
1930			if (!pte_test(pte, PTE_V))
1931				continue;
1932			pbits = *pte;
1933			pa = TLBLO_PTE_TO_PA(pbits);
1934			if (page_is_managed(pa) && pte_test(&pbits, PTE_D)) {
1935				m = PHYS_TO_VM_PAGE(pa);
1936				vm_page_dirty(m);
1937				m->md.pv_flags &= ~PV_TABLE_MOD;
1938			}
1939			pte_clear(&pbits, PTE_D);
1940			pte_set(&pbits, PTE_RO);
1941
1942			if (pbits != *pte) {
1943				*pte = pbits;
1944				pmap_update_page(pmap, sva, pbits);
1945			}
1946		}
1947	}
1948	vm_page_unlock_queues();
1949	PMAP_UNLOCK(pmap);
1950}
1951
1952/*
1953 *	Insert the given physical page (p) at
1954 *	the specified virtual address (v) in the
1955 *	target physical map with the protection requested.
1956 *
1957 *	If specified, the page will be wired down, meaning
1958 *	that the related pte can not be reclaimed.
1959 *
1960 *	NB:  This is the only routine which MAY NOT lazy-evaluate
1961 *	or lose information.  That is, this routine must actually
1962 *	insert this page into the given map NOW.
1963 */
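/*
 * Outline of the code below: allocate or find the page table page for
 * a user address, locate the PTE, and then either adjust an existing
 * mapping in place (a wiring or protection change to the same physical
 * page) or tear down the old mapping; enter the page on its pv list if
 * it is managed, construct the new PTE (cacheability, wired and global
 * bits), push it to the TLB with pmap_update_page(), and finally sync
 * the caches for executable mappings belonging to the current process.
 */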
1964void
1965pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
1966    vm_prot_t prot, boolean_t wired)
1967{
1968	vm_paddr_t pa, opa;
1969	pt_entry_t *pte;
1970	pt_entry_t origpte, newpte;
1971	pv_entry_t pv;
1972	vm_page_t mpte, om;
1973	pt_entry_t rw = 0;
1974
1975	va &= ~PAGE_MASK;
1976 	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
1977	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0,
1978	    ("pmap_enter: page %p is not busy", m));
1979
1980	mpte = NULL;
1981
1982	vm_page_lock_queues();
1983	PMAP_LOCK(pmap);
1984
1985	/*
1986	 * In the case that a page table page is not resident, we are
1987	 * creating it here.
1988	 */
1989	if (va < VM_MAXUSER_ADDRESS) {
1990		mpte = pmap_allocpte(pmap, va, M_WAITOK);
1991	}
1992	pte = pmap_pte(pmap, va);
1993
1994	/*
1995	 * Page Directory table entry not valid, we need a new PT page
1996	 */
1997	if (pte == NULL) {
1998		panic("pmap_enter: invalid page directory, pdir=%p, va=%p",
1999		    (void *)pmap->pm_segtab, (void *)va);
2000	}
2001	pa = VM_PAGE_TO_PHYS(m);
2002	om = NULL;
2003	origpte = *pte;
2004	opa = TLBLO_PTE_TO_PA(origpte);
2005
2006	/*
2007	 * Mapping has not changed, must be protection or wiring change.
2008	 */
2009	if (pte_test(&origpte, PTE_V) && opa == pa) {
2010		/*
2011		 * Wiring change, just update stats. We don't worry about
2012		 * wiring PT pages as they remain resident as long as there
2013		 * are valid mappings in them. Hence, if a user page is
2014		 * wired, the PT page will be also.
2015		 */
2016		if (wired && !pte_test(&origpte, PTE_W))
2017			pmap->pm_stats.wired_count++;
2018		else if (!wired && pte_test(&origpte, PTE_W))
2019			pmap->pm_stats.wired_count--;
2020
2021		KASSERT(!pte_test(&origpte, PTE_D | PTE_RO),
2022		    ("%s: modified page not writable: va: %p, pte: %#jx",
2023		    __func__, (void *)va, (uintmax_t)origpte));
2024
2025		/*
2026		 * Remove extra pte reference
2027		 */
2028		if (mpte)
2029			mpte->wire_count--;
2030
2031		if (page_is_managed(opa)) {
2032			om = m;
2033		}
2034		goto validate;
2035	}
2036
2037	pv = NULL;
2038
2039	/*
2040	 * Mapping has changed, invalidate old range and fall through to
2041	 * handle validating new mapping.
2042	 */
2043	if (opa) {
2044		if (pte_test(&origpte, PTE_W))
2045			pmap->pm_stats.wired_count--;
2046
2047		if (page_is_managed(opa)) {
2048			om = PHYS_TO_VM_PAGE(opa);
2049			pv = pmap_pvh_remove(&om->md, pmap, va);
2050		}
2051		if (mpte != NULL) {
2052			mpte->wire_count--;
2053			KASSERT(mpte->wire_count > 0,
2054			    ("pmap_enter: missing reference to page table page,"
2055			    " va: %p", (void *)va));
2056		}
2057	} else
2058		pmap->pm_stats.resident_count++;
2059
2060	/*
2061	 * Enter on the PV list if part of our managed memory. Note that we
2062	 * raise IPL while manipulating pv_table since pmap_enter can be
2063	 * called at interrupt time.
2064	 */
2065	if ((m->oflags & VPO_UNMANAGED) == 0) {
2066		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
2067		    ("pmap_enter: managed mapping within the clean submap"));
2068		if (pv == NULL)
2069			pv = get_pv_entry(pmap, FALSE);
2070		pv->pv_va = va;
2071		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
2072	} else if (pv != NULL)
2073		free_pv_entry(pmap, pv);
2074
2075	/*
2076	 * Increment counters
2077	 */
2078	if (wired)
2079		pmap->pm_stats.wired_count++;
2080
2081validate:
2082	if ((access & VM_PROT_WRITE) != 0)
2083		m->md.pv_flags |= PV_TABLE_MOD | PV_TABLE_REF;
2084	rw = init_pte_prot(va, m, prot);
2085
2086#ifdef PMAP_DEBUG
2087	printf("pmap_enter:  va: %p -> pa: %p\n", (void *)va, (void *)pa);
2088#endif
2089	/*
2090	 * Now validate mapping with desired protection/wiring.
2091	 */
2092	newpte = TLBLO_PA_TO_PFN(pa) | rw | PTE_V;
2093
2094	if (is_cacheable_mem(pa))
2095		newpte |= PTE_C_CACHE;
2096	else
2097		newpte |= PTE_C_UNCACHED;
2098
2099	if (wired)
2100		newpte |= PTE_W;
2101
2102	if (is_kernel_pmap(pmap))
2103		newpte |= PTE_G;
2104
2105	/*
2106	 * if the mapping or permission bits are different, we need to
2107	 * update the pte.
2108	 */
2109	if (origpte != newpte) {
2110		if (pte_test(&origpte, PTE_V)) {
2111			*pte = newpte;
2112			if (page_is_managed(opa) && (opa != pa)) {
2113				if (om->md.pv_flags & PV_TABLE_REF)
2114					vm_page_aflag_set(om, PGA_REFERENCED);
2115				om->md.pv_flags &=
2116				    ~(PV_TABLE_REF | PV_TABLE_MOD);
2117			}
2118			if (pte_test(&origpte, PTE_D)) {
2119				KASSERT(!pte_test(&origpte, PTE_RO),
2120				    ("pmap_enter: modified page not writable:"
2121				    " va: %p, pte: %#jx", (void *)va, (uintmax_t)origpte));
2122				if (page_is_managed(opa))
2123					vm_page_dirty(om);
2124			}
2125			if (page_is_managed(opa) &&
2126			    TAILQ_EMPTY(&om->md.pv_list))
2127				vm_page_aflag_clear(om, PGA_WRITEABLE);
2128		} else {
2129			*pte = newpte;
2130		}
2131	}
2132	pmap_update_page(pmap, va, newpte);
2133
2134	/*
2135	 * Sync I & D caches for executable pages.  Do this only if the
2136	 * target pmap belongs to the current process.  Otherwise, an
2137	 * unresolvable TLB miss may occur.
2138	 */
2139	if (!is_kernel_pmap(pmap) && (pmap == &curproc->p_vmspace->vm_pmap) &&
2140	    (prot & VM_PROT_EXECUTE)) {
2141		mips_icache_sync_range(va, PAGE_SIZE);
2142		mips_dcache_wbinv_range(va, PAGE_SIZE);
2143	}
2144	vm_page_unlock_queues();
2145	PMAP_UNLOCK(pmap);
2146}
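
/*
 * Example: a minimal, hypothetical caller of pmap_enter() (sketch only,
 * kept under #if 0).  The page "m" is assumed to be resident and busied
 * by the caller, as the KASSERT above expects; in practice this call is
 * made from machine-independent code such as vm_fault().
 */
#if 0
static void
example_enter_page(pmap_t pmap, vm_offset_t va, vm_page_t m)
{

	/* Map "m" at "va" read/write, not wired. */
	pmap_enter(pmap, va, VM_PROT_READ | VM_PROT_WRITE, m,
	    VM_PROT_READ | VM_PROT_WRITE, FALSE);
}
#endif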
2147
2148/*
2149 * This code makes some *MAJOR* assumptions:
2150 * 1. The current pmap and the target pmap exist.
2151 * 2. The mapping is not wired.
2152 * 3. Only read access is required.
2153 * 4. No page table pages need to be allocated.
2154 * In exchange, it is *MUCH* faster than pmap_enter()...
2155 */
2156
2157void
2158pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
2159{
2160
2161	vm_page_lock_queues();
2162	PMAP_LOCK(pmap);
2163	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
2164	vm_page_unlock_queues();
2165	PMAP_UNLOCK(pmap);
2166}
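
/*
 * Example: a hypothetical prefault-style use of pmap_enter_quick()
 * (sketch only, kept under #if 0).  Unlike pmap_enter(), this never
 * sleeps, enters at most a read-only mapping for the already-resident
 * page "m", and may silently do nothing.
 */
#if 0
static void
example_prefault_page(pmap_t pmap, vm_offset_t va, vm_page_t m)
{

	pmap_enter_quick(pmap, va, m, VM_PROT_READ);
}
#endif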
2167
2168static vm_page_t
2169pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
2170    vm_prot_t prot, vm_page_t mpte)
2171{
2172	pt_entry_t *pte;
2173	vm_paddr_t pa;
2174
2175	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
2176	    (m->oflags & VPO_UNMANAGED) != 0,
2177	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
2178	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2179	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2180
2181	/*
2182	 * In the case that a page table page is not resident, we are
2183	 * creating it here.
2184	 */
2185	if (va < VM_MAXUSER_ADDRESS) {
2186		pd_entry_t *pde;
2187		unsigned ptepindex;
2188
2189		/*
2190		 * Calculate pagetable page index
2191		 */
2192		ptepindex = pmap_pde_pindex(va);
2193		if (mpte && (mpte->pindex == ptepindex)) {
2194			mpte->wire_count++;
2195		} else {
2196			/*
2197			 * Get the page directory entry
2198			 */
2199			pde = pmap_pde(pmap, va);
2200
2201			/*
2202			 * If the page table page is mapped, we just
2203			 * increment the hold count, and activate it.
2204			 */
2205			if (pde && *pde != 0) {
2206				mpte = PHYS_TO_VM_PAGE(
2207				    MIPS_DIRECT_TO_PHYS(*pde));
2208				mpte->wire_count++;
2209			} else {
2210				mpte = _pmap_allocpte(pmap, ptepindex,
2211				    M_NOWAIT);
2212				if (mpte == NULL)
2213					return (mpte);
2214			}
2215		}
2216	} else {
2217		mpte = NULL;
2218	}
2219
2220	pte = pmap_pte(pmap, va);
2221	if (pte_test(pte, PTE_V)) {
2222		if (mpte != NULL) {
2223			mpte->wire_count--;
2224			mpte = NULL;
2225		}
2226		return (mpte);
2227	}
2228
2229	/*
2230	 * Enter on the PV list if part of our managed memory.
2231	 */
2232	if ((m->oflags & VPO_UNMANAGED) == 0 &&
2233	    !pmap_try_insert_pv_entry(pmap, mpte, va, m)) {
2234		if (mpte != NULL) {
2235			pmap_unwire_pte_hold(pmap, va, mpte);
2236			mpte = NULL;
2237		}
2238		return (mpte);
2239	}
2240
2241	/*
2242	 * Increment counters
2243	 */
2244	pmap->pm_stats.resident_count++;
2245
2246	pa = VM_PAGE_TO_PHYS(m);
2247
2248	/*
2249	 * Now validate mapping with RO protection
2250	 */
2251	*pte = TLBLO_PA_TO_PFN(pa) | PTE_V;
2252
2253	if (is_cacheable_mem(pa))
2254		*pte |= PTE_C_CACHE;
2255	else
2256		*pte |= PTE_C_UNCACHED;
2257
2258	if (is_kernel_pmap(pmap))
2259		*pte |= PTE_G;
2260	else {
2261		*pte |= PTE_RO;
2262		/*
2263		 * Sync I & D caches.  Do this only if the target pmap
2264		 * belongs to the current process.  Otherwise, an
2265		 * unresolvable TLB miss may occur. */
2266		if (pmap == &curproc->p_vmspace->vm_pmap) {
2267			va &= ~PAGE_MASK;
2268			mips_icache_sync_range(va, PAGE_SIZE);
2269			mips_dcache_wbinv_range(va, PAGE_SIZE);
2270		}
2271	}
2272	return (mpte);
2273}
2274
2275/*
2276 * Make a temporary mapping for a physical address.  This is only intended
2277 * to be used for panic dumps.
2278 *
2279 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
2280 */
2281void *
2282pmap_kenter_temporary(vm_paddr_t pa, int i)
2283{
2284	vm_offset_t va;
2285
2286	if (i != 0)
2287		printf("%s: ERROR!!! More than one page of virtual address mapping not supported\n",
2288		    __func__);
2289
2290	if (MIPS_DIRECT_MAPPABLE(pa)) {
2291		va = MIPS_PHYS_TO_DIRECT(pa);
2292	} else {
2293#ifndef __mips_n64    /* XXX : to be converted to new style */
2294		int cpu;
2295		register_t intr;
2296		struct local_sysmaps *sysm;
2297		pt_entry_t *pte, npte;
2298
2299		/* If this is ever used for anything other than crash dumps, we
2300		 * may need to leave interrupts disabled on return.  If crash
2301		 * dumps don't work when we get to this point, we might want to
2302		 * consider leaving things disabled as a starting point ;-)
2303		 */
2304		intr = intr_disable();
2305		cpu = PCPU_GET(cpuid);
2306		sysm = &sysmap_lmem[cpu];
2307		/* Since this is for the debugger, no locks or any other fun */
2308		npte = TLBLO_PA_TO_PFN(pa) | PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
2309		pte = pmap_pte(kernel_pmap, sysm->base);
2310		*pte = npte;
2311		sysm->valid1 = 1;
2312		pmap_update_page(kernel_pmap, sysm->base, npte);
2313		va = sysm->base;
2314		intr_restore(intr);
2315#endif
2316	}
2317	return ((void *)va);
2318}
2319
2320void
2321pmap_kenter_temporary_free(vm_paddr_t pa)
2322{
2323#ifndef __mips_n64    /* XXX : to be converted to new style */
2324	int cpu;
2325	register_t intr;
2326	struct local_sysmaps *sysm;
2327#endif
2328
2329	if (MIPS_DIRECT_MAPPABLE(pa)) {
2330		/* nothing to do for this case */
2331		return;
2332	}
2333#ifndef __mips_n64    /* XXX : to be converted to new style */
2334	cpu = PCPU_GET(cpuid);
2335	sysm = &sysmap_lmem[cpu];
2336	if (sysm->valid1) {
2337		pt_entry_t *pte;
2338
2339		intr = intr_disable();
2340		pte = pmap_pte(kernel_pmap, sysm->base);
2341		*pte = PTE_G;
2342		pmap_invalidate_page(kernel_pmap, sysm->base);
2343		intr_restore(intr);
2344		sysm->valid1 = 0;
2345	}
2346#endif
2347}
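
/*
 * Example: a hypothetical dump-time use of pmap_kenter_temporary()
 * (sketch only, kept under #if 0).  Map one physical page, copy it into
 * an assumed destination buffer "dst", then release the mapping.  Only
 * index 0 is supported, as the warning above notes.
 */
#if 0
static void
example_dump_one_page(vm_paddr_t pa, void *dst)
{
	void *va;

	va = pmap_kenter_temporary(pa, 0);
	bcopy(va, dst, PAGE_SIZE);
	pmap_kenter_temporary_free(pa);
}
#endif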
2348
2349/*
2350 * Moved the code to Machine Independent
2351 *	 vm_map_pmap_enter()
2352 */
2353
2354/*
2355 * Maps a sequence of resident pages belonging to the same object.
2356 * The sequence begins with the given page m_start.  This page is
2357 * mapped at the given virtual address start.  Each subsequent page is
2358 * mapped at a virtual address that is offset from start by the same
2359 * amount as the page is offset from m_start within the object.  The
2360 * last page in the sequence is the page with the largest offset from
2361 * m_start that can be mapped at a virtual address less than the given
2362 * virtual address end.  Not every virtual page between start and end
2363 * is mapped; only those for which a resident page exists with the
2364 * corresponding offset from m_start are mapped.
2365 */
2366void
2367pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
2368    vm_page_t m_start, vm_prot_t prot)
2369{
2370	vm_page_t m, mpte;
2371	vm_pindex_t diff, psize;
2372
2373	VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
2374	psize = atop(end - start);
2375	mpte = NULL;
2376	m = m_start;
2377	vm_page_lock_queues();
2378	PMAP_LOCK(pmap);
2379	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
2380		mpte = pmap_enter_quick_locked(pmap, start + ptoa(diff), m,
2381		    prot, mpte);
2382		m = TAILQ_NEXT(m, listq);
2383	}
2384	vm_page_unlock_queues();
2385 	PMAP_UNLOCK(pmap);
2386}
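
/*
 * Example: a hypothetical caller of pmap_enter_object() (sketch only,
 * kept under #if 0).  The object lock must be held, as required by the
 * VM_OBJECT_LOCK_ASSERT() above, and "pindex" is assumed to be the
 * object offset corresponding to "start".
 */
#if 0
static void
example_premap_range(pmap_t pmap, vm_object_t object, vm_pindex_t pindex,
    vm_offset_t start, vm_offset_t end)
{
	vm_page_t m_start;

	VM_OBJECT_LOCK(object);
	m_start = vm_page_lookup(object, pindex);
	if (m_start != NULL)
		pmap_enter_object(pmap, start, end, m_start, VM_PROT_READ);
	VM_OBJECT_UNLOCK(object);
}
#endif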
2387
2388/*
2389 * pmap_object_init_pt preloads the ptes for a given object
2390 * into the specified pmap.  This eliminates the blast of soft
2391 * faults on process startup and immediately after an mmap.
2392 */
2393void
2394pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
2395    vm_object_t object, vm_pindex_t pindex, vm_size_t size)
2396{
2397	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2398	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
2399	    ("pmap_object_init_pt: non-device object"));
2400}
2401
2402/*
2403 *	Routine:	pmap_change_wiring
2404 *	Function:	Change the wiring attribute for a map/virtual-address
2405 *			pair.
2406 *	In/out conditions:
2407 *			The mapping must already exist in the pmap.
2408 */
2409void
2410pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
2411{
2412	pt_entry_t *pte;
2413
2414	PMAP_LOCK(pmap);
2415	pte = pmap_pte(pmap, va);
2416
2417	if (wired && !pte_test(pte, PTE_W))
2418		pmap->pm_stats.wired_count++;
2419	else if (!wired && pte_test(pte, PTE_W))
2420		pmap->pm_stats.wired_count--;
2421
2422	/*
2423	 * Wiring is not a hardware characteristic so there is no need to
2424	 * invalidate TLB.
2425	 */
2426	if (wired)
2427		pte_set(pte, PTE_W);
2428	else
2429		pte_clear(pte, PTE_W);
2430	PMAP_UNLOCK(pmap);
2431}
2432
2433/*
2434 *	Copy the range specified by src_addr/len
2435 *	from the source map to the range dst_addr/len
2436 *	in the destination map.
2437 *
2438 *	This routine is only advisory and need not do anything.
2439 */
2440
2441void
2442pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
2443    vm_size_t len, vm_offset_t src_addr)
2444{
2445}
2446
2447/*
2448 *	pmap_zero_page zeros the specified hardware page by mapping
2449 *	the page into KVM and using bzero to clear its contents.
2450 *
2451 * 	Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
2452 */
2453void
2454pmap_zero_page(vm_page_t m)
2455{
2456	vm_offset_t va;
2457	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
2458
2459	if (MIPS_DIRECT_MAPPABLE(phys)) {
2460		va = MIPS_PHYS_TO_DIRECT(phys);
2461		bzero((caddr_t)va, PAGE_SIZE);
2462		mips_dcache_wbinv_range(va, PAGE_SIZE);
2463	} else {
2464		va = pmap_lmem_map1(phys);
2465		bzero((caddr_t)va, PAGE_SIZE);
2466		mips_dcache_wbinv_range(va, PAGE_SIZE);
2467		pmap_lmem_unmap();
2468	}
2469}
2470
2471/*
2472 *	pmap_zero_page_area zeros the specified hardware page by mapping
2473 *	the page into KVM and using bzero to clear its contents.
2474 *
2475 *	off and size may not cover an area beyond a single hardware page.
2476 */
2477void
2478pmap_zero_page_area(vm_page_t m, int off, int size)
2479{
2480	vm_offset_t va;
2481	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
2482
2483	if (MIPS_DIRECT_MAPPABLE(phys)) {
2484		va = MIPS_PHYS_TO_DIRECT(phys);
2485		bzero((char *)(caddr_t)va + off, size);
2486		mips_dcache_wbinv_range(va + off, size);
2487	} else {
2488		va = pmap_lmem_map1(phys);
2489		bzero((char *)va + off, size);
2490		mips_dcache_wbinv_range(va + off, size);
2491		pmap_lmem_unmap();
2492	}
2493}
2494
2495void
2496pmap_zero_page_idle(vm_page_t m)
2497{
2498	vm_offset_t va;
2499	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
2500
2501	if (MIPS_DIRECT_MAPPABLE(phys)) {
2502		va = MIPS_PHYS_TO_DIRECT(phys);
2503		bzero((caddr_t)va, PAGE_SIZE);
2504		mips_dcache_wbinv_range(va, PAGE_SIZE);
2505	} else {
2506		va = pmap_lmem_map1(phys);
2507		bzero((caddr_t)va, PAGE_SIZE);
2508		mips_dcache_wbinv_range(va, PAGE_SIZE);
2509		pmap_lmem_unmap();
2510	}
2511}
2512
2513/*
2514 *	pmap_copy_page copies the specified (machine independent)
2515 *	page by mapping the page into virtual memory and using
2516 *	bcopy to copy the page, one machine dependent page at a
2517 *	time.
2518 *
2519 * 	Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
2520 */
2521void
2522pmap_copy_page(vm_page_t src, vm_page_t dst)
2523{
2524	vm_offset_t va_src, va_dst;
2525	vm_paddr_t phys_src = VM_PAGE_TO_PHYS(src);
2526	vm_paddr_t phys_dst = VM_PAGE_TO_PHYS(dst);
2527
2528	if (MIPS_DIRECT_MAPPABLE(phys_src) && MIPS_DIRECT_MAPPABLE(phys_dst)) {
2529		/* easy case, all can be accessed via KSEG0 */
2530		/*
2531		 * Flush all caches for VA that are mapped to this page
2532		 * to make sure that data in SDRAM is up to date
2533		 */
2534		pmap_flush_pvcache(src);
2535		mips_dcache_wbinv_range_index(
2536		    MIPS_PHYS_TO_DIRECT(phys_dst), PAGE_SIZE);
2537		va_src = MIPS_PHYS_TO_DIRECT(phys_src);
2538		va_dst = MIPS_PHYS_TO_DIRECT(phys_dst);
2539		bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE);
2540		mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
2541	} else {
2542		va_src = pmap_lmem_map2(phys_src, phys_dst);
2543		va_dst = va_src + PAGE_SIZE;
2544		bcopy((void *)va_src, (void *)va_dst, PAGE_SIZE);
2545		mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
2546		pmap_lmem_unmap();
2547	}
2548}
2549
2550/*
2551 * Returns true if the pmap's pv is one of the first
2552 * 16 pvs linked to from this page.  This count may
2553 * be changed upwards or downwards in the future; it
2554 * is only necessary that true be returned for a small
2555 * subset of pmaps for proper page aging.
2556 */
2557boolean_t
2558pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
2559{
2560	pv_entry_t pv;
2561	int loops = 0;
2562	boolean_t rv;
2563
2564	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2565	    ("pmap_page_exists_quick: page %p is not managed", m));
2566	rv = FALSE;
2567	vm_page_lock_queues();
2568	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2569		if (PV_PMAP(pv) == pmap) {
2570			rv = TRUE;
2571			break;
2572		}
2573		loops++;
2574		if (loops >= 16)
2575			break;
2576	}
2577	vm_page_unlock_queues();
2578	return (rv);
2579}
2580
2581/*
2582 * Remove all pages from the specified address space;
2583 * this aids process exit speed.  Also, this code is
2584 * special-cased for the current process only, but
2585 * can have the more generic (and slightly slower)
2586 * mode enabled.  This is much faster than pmap_remove
2587 * in the case of running down an entire address space.
2588 */
2589void
2590pmap_remove_pages(pmap_t pmap)
2591{
2592	pd_entry_t *pde;
2593	pt_entry_t *pte, tpte;
2594	pv_entry_t pv;
2595	vm_page_t m;
2596	struct pv_chunk *pc, *npc;
2597	u_long inuse, bitmask;
2598	int allfree, bit, field, idx;
2599
2600	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
2601		printf("warning: pmap_remove_pages called with non-current pmap\n");
2602		return;
2603	}
2604	vm_page_lock_queues();
2605	PMAP_LOCK(pmap);
2606	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
2607		allfree = 1;
2608		for (field = 0; field < _NPCM; field++) {
2609			inuse = ~pc->pc_map[field] & pc_freemask[field];
2610			while (inuse != 0) {
2611				bit = ffsl(inuse) - 1;
2612				bitmask = 1UL << bit;
2613				idx = field * sizeof(inuse) * NBBY + bit;
2614				pv = &pc->pc_pventry[idx];
2615				inuse &= ~bitmask;
2616
2617				pde = pmap_pde(pmap, pv->pv_va);
2618				KASSERT(pde != NULL && *pde != 0,
2619				    ("pmap_remove_pages: pde"));
2620				pte = pmap_pde_to_pte(pde, pv->pv_va);
2621				if (!pte_test(pte, PTE_V))
2622					panic("pmap_remove_pages: bad pte");
2623				tpte = *pte;
2624
2625/*
2626 * We cannot remove wired pages from a process' mapping at this time
2627 */
2628				if (pte_test(&tpte, PTE_W)) {
2629					allfree = 0;
2630					continue;
2631				}
2632				*pte = is_kernel_pmap(pmap) ? PTE_G : 0;
2633
2634				m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(tpte));
2635				KASSERT(m != NULL,
2636				    ("pmap_remove_pages: bad tpte %#jx",
2637				    (uintmax_t)tpte));
2638
2639				/*
2640				 * Update the vm_page_t clean and reference bits.
2641				 */
2642				if (pte_test(&tpte, PTE_D))
2643					vm_page_dirty(m);
2644
2645				/* Mark free */
2646				PV_STAT(pv_entry_frees++);
2647				PV_STAT(pv_entry_spare++);
2648				pv_entry_count--;
2649				pc->pc_map[field] |= bitmask;
2650				pmap->pm_stats.resident_count--;
2651				TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2652				if (TAILQ_EMPTY(&m->md.pv_list))
2653					vm_page_aflag_clear(m, PGA_WRITEABLE);
2654				pmap_unuse_pt(pmap, pv->pv_va, *pde);
2655			}
2656		}
2657		if (allfree) {
2658			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2659			free_pv_chunk(pc);
2660		}
2661	}
2662	pmap_invalidate_all(pmap);
2663	PMAP_UNLOCK(pmap);
2664	vm_page_unlock_queues();
2665}
2666
2667/*
2668 * pmap_testbit tests bits in PTEs.  Note that the
2669 * testbit/changebit routines are inline, so much of
2670 * this is evaluated at compile time.
2671 */
2672static boolean_t
2673pmap_testbit(vm_page_t m, int bit)
2674{
2675	pv_entry_t pv;
2676	pmap_t pmap;
2677	pt_entry_t *pte;
2678	boolean_t rv = FALSE;
2679
2680	if (m->oflags & VPO_UNMANAGED)
2681		return (rv);
2682
2683	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2684	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2685		pmap = PV_PMAP(pv);
2686		PMAP_LOCK(pmap);
2687		pte = pmap_pte(pmap, pv->pv_va);
2688		rv = pte_test(pte, bit);
2689		PMAP_UNLOCK(pmap);
2690		if (rv)
2691			break;
2692	}
2693	return (rv);
2694}
2695
2696/*
2697 * this routine is used to clear dirty bits in ptes
2698 */
2699static __inline void
2700pmap_changebit(vm_page_t m, int bit, boolean_t setem)
2701{
2702	pv_entry_t pv;
2703	pmap_t pmap;
2704	pt_entry_t *pte;
2705
2706	if (m->oflags & VPO_UNMANAGED)
2707		return;
2708
2709	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2710	/*
2711	 * Loop over all current mappings, setting/clearing as appropriate.
2712	 * If setting RO, do we need to clear the VAC?
2713	 */
2714	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2715		pmap = PV_PMAP(pv);
2716		PMAP_LOCK(pmap);
2717		pte = pmap_pte(pmap, pv->pv_va);
2718		if (setem) {
2719			*pte |= bit;
2720			pmap_update_page(pmap, pv->pv_va, *pte);
2721		} else {
2722			pt_entry_t pbits = *pte;
2723
2724			if (pbits & bit) {
2725				if (bit == PTE_D) {
2726					if (pbits & PTE_D)
2727						vm_page_dirty(m);
2728					*pte = (pbits & ~PTE_D) | PTE_RO;
2729				} else {
2730					*pte = pbits & ~bit;
2731				}
2732				pmap_update_page(pmap, pv->pv_va, *pte);
2733			}
2734		}
2735		PMAP_UNLOCK(pmap);
2736	}
2737	if (!setem && bit == PTE_D)
2738		vm_page_aflag_clear(m, PGA_WRITEABLE);
2739}
2740
2741/*
2742 *	pmap_page_wired_mappings:
2743 *
2744 *	Return the number of managed mappings to the given physical page
2745 *	that are wired.
2746 */
2747int
2748pmap_page_wired_mappings(vm_page_t m)
2749{
2750	pv_entry_t pv;
2751	pmap_t pmap;
2752	pt_entry_t *pte;
2753	int count;
2754
2755	count = 0;
2756	if ((m->oflags & VPO_UNMANAGED) != 0)
2757		return (count);
2758	vm_page_lock_queues();
2759	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2760		pmap = PV_PMAP(pv);
2761		PMAP_LOCK(pmap);
2762		pte = pmap_pte(pmap, pv->pv_va);
2763		if (pte_test(pte, PTE_W))
2764			count++;
2765		PMAP_UNLOCK(pmap);
2766	}
2767	vm_page_unlock_queues();
2768	return (count);
2769}
2770
2771/*
2772 * Clear the write and modified bits in each of the given page's mappings.
2773 */
2774void
2775pmap_remove_write(vm_page_t m)
2776{
2777	pmap_t pmap;
2778	pt_entry_t pbits, *pte;
2779	pv_entry_t pv;
2780
2781	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2782	    ("pmap_remove_write: page %p is not managed", m));
2783
2784	/*
2785	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
2786	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
2787	 * is clear, no page table entries need updating.
2788	 */
2789	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2790	if ((m->oflags & VPO_BUSY) == 0 &&
2791	    (m->aflags & PGA_WRITEABLE) == 0)
2792		return;
2793	vm_page_lock_queues();
2794	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2795		pmap = PV_PMAP(pv);
2796		PMAP_LOCK(pmap);
2797		pte = pmap_pte(pmap, pv->pv_va);
2798		KASSERT(pte != NULL && pte_test(pte, PTE_V),
2799		    ("page on pv_list has no pte"));
2800		pbits = *pte;
2801		if (pte_test(&pbits, PTE_D)) {
2802			pte_clear(&pbits, PTE_D);
2803			vm_page_dirty(m);
2804			m->md.pv_flags &= ~PV_TABLE_MOD;
2805		}
2806		pte_set(&pbits, PTE_RO);
2807		if (pbits != *pte) {
2808			*pte = pbits;
2809			pmap_update_page(pmap, pv->pv_va, pbits);
2810		}
2811		PMAP_UNLOCK(pmap);
2812	}
2813	vm_page_aflag_clear(m, PGA_WRITEABLE);
2814	vm_page_unlock_queues();
2815}
2816
2817/*
2818 *	pmap_ts_referenced:
2819 *
2820 *	Return the count of reference bits for a page, clearing all of them.
2821 */
2822int
2823pmap_ts_referenced(vm_page_t m)
2824{
2825
2826	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2827	    ("pmap_ts_referenced: page %p is not managed", m));
2828	if (m->md.pv_flags & PV_TABLE_REF) {
2829		vm_page_lock_queues();
2830		m->md.pv_flags &= ~PV_TABLE_REF;
2831		vm_page_unlock_queues();
2832		return (1);
2833	}
2834	return (0);
2835}
2836
2837/*
2838 *	pmap_is_modified:
2839 *
2840 *	Return whether or not the specified physical page was modified
2841 *	in any physical maps.
2842 */
2843boolean_t
2844pmap_is_modified(vm_page_t m)
2845{
2846	boolean_t rv;
2847
2848	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2849	    ("pmap_is_modified: page %p is not managed", m));
2850
2851	/*
2852	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
2853	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
2854	 * is clear, no PTEs can have PTE_D set.
2855	 */
2856	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2857	if ((m->oflags & VPO_BUSY) == 0 &&
2858	    (m->aflags & PGA_WRITEABLE) == 0)
2859		return (FALSE);
2860	vm_page_lock_queues();
2861	if (m->md.pv_flags & PV_TABLE_MOD)
2862		rv = TRUE;
2863	else
2864		rv = pmap_testbit(m, PTE_D);
2865	vm_page_unlock_queues();
2866	return (rv);
2867}
2868
2869/* N/C */
2870
2871/*
2872 *	pmap_is_prefaultable:
2873 *
2874 *	Return whether or not the specified virtual address is eligible
2875 *	for prefault.
2876 */
2877boolean_t
2878pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
2879{
2880	pd_entry_t *pde;
2881	pt_entry_t *pte;
2882	boolean_t rv;
2883
2884	rv = FALSE;
2885	PMAP_LOCK(pmap);
2886	pde = pmap_pde(pmap, addr);
2887	if (pde != NULL && *pde != 0) {
2888		pte = pmap_pde_to_pte(pde, addr);
2889		rv = (*pte == 0);
2890	}
2891	PMAP_UNLOCK(pmap);
2892	return (rv);
2893}
2894
2895/*
2896 *	Clear the modify bits on the specified physical page.
2897 */
2898void
2899pmap_clear_modify(vm_page_t m)
2900{
2901
2902	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2903	    ("pmap_clear_modify: page %p is not managed", m));
2904	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2905	KASSERT((m->oflags & VPO_BUSY) == 0,
2906	    ("pmap_clear_modify: page %p is busy", m));
2907
2908	/*
2909	 * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_D set.
2910	 * If the object containing the page is locked and the page is not
2911	 * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
2912	 */
2913	if ((m->aflags & PGA_WRITEABLE) == 0)
2914		return;
2915	vm_page_lock_queues();
2916	if (m->md.pv_flags & PV_TABLE_MOD) {
2917		pmap_changebit(m, PTE_D, FALSE);
2918		m->md.pv_flags &= ~PV_TABLE_MOD;
2919	}
2920	vm_page_unlock_queues();
2921}
2922
2923/*
2924 *	pmap_is_referenced:
2925 *
2926 *	Return whether or not the specified physical page was referenced
2927 *	in any physical maps.
2928 */
2929boolean_t
2930pmap_is_referenced(vm_page_t m)
2931{
2932
2933	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2934	    ("pmap_is_referenced: page %p is not managed", m));
2935	return ((m->md.pv_flags & PV_TABLE_REF) != 0);
2936}
2937
2938/*
2939 *	pmap_clear_reference:
2940 *
2941 *	Clear the reference bit on the specified physical page.
2942 */
2943void
2944pmap_clear_reference(vm_page_t m)
2945{
2946
2947	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2948	    ("pmap_clear_reference: page %p is not managed", m));
2949	vm_page_lock_queues();
2950	if (m->md.pv_flags & PV_TABLE_REF) {
2951		m->md.pv_flags &= ~PV_TABLE_REF;
2952	}
2953	vm_page_unlock_queues();
2954}
2955
2956/*
2957 * Miscellaneous support routines follow
2958 */
2959
2967/*
2968 * Map a set of physical memory pages into the kernel virtual
2969 * address space. Return a pointer to where it is mapped. This
2970 * routine is intended to be used for mapping device memory,
2971 * NOT real memory.
2972 *
2973 * Use XKPHYS uncached for 64 bit, and KSEG1 where possible for 32 bit.
2974 */
2975void *
2976pmap_mapdev(vm_paddr_t pa, vm_size_t size)
2977{
2978	vm_offset_t va, tmpva, offset;
2979
2980	/*
2981	 * KSEG1 maps only the first 512MB of the physical address space.
2982	 * For pa > 0x20000000 we must make a proper mapping using pmap_kenter.
2983	 */
2984	if (MIPS_DIRECT_MAPPABLE(pa + size - 1))
2985		return ((void *)MIPS_PHYS_TO_DIRECT_UNCACHED(pa));
2986	else {
2987		offset = pa & PAGE_MASK;
2988		size = roundup(size + offset, PAGE_SIZE);
2989
2990		va = kmem_alloc_nofault(kernel_map, size);
2991		if (!va)
2992			panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
2993		pa = trunc_page(pa);
2994		for (tmpva = va; size > 0;) {
2995			pmap_kenter_attr(tmpva, pa, PTE_C_UNCACHED);
2996			size -= PAGE_SIZE;
2997			tmpva += PAGE_SIZE;
2998			pa += PAGE_SIZE;
2999		}
3000	}
3001
3002	return ((void *)(va + offset));
3003}
3004
3005void
3006pmap_unmapdev(vm_offset_t va, vm_size_t size)
3007{
3008#ifndef __mips_n64
3009	vm_offset_t base, offset, tmpva;
3010
3011	/* If the address is within KSEG1 then there is nothing to do */
3012	if (va >= MIPS_KSEG1_START && va <= MIPS_KSEG1_END)
3013		return;
3014
3015	base = trunc_page(va);
3016	offset = va & PAGE_MASK;
3017	size = roundup(size + offset, PAGE_SIZE);
3018	for (tmpva = base; tmpva < base + size; tmpva += PAGE_SIZE)
3019		pmap_kremove(tmpva);
3020	kmem_free(kernel_map, base, size);
3021#endif
3022}
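
/*
 * Example: a hypothetical device-register access built on pmap_mapdev()
 * (sketch only, kept under #if 0).  The one-page register window at
 * physical address "regs_pa" and its layout are made up for the example.
 */
#if 0
static uint32_t
example_read_device_reg(vm_paddr_t regs_pa)
{
	volatile uint32_t *regs;
	uint32_t val;

	regs = pmap_mapdev(regs_pa, PAGE_SIZE);
	val = regs[0];
	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
	return (val);
}
#endif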
3023
3024/*
3025 * perform the pmap work for mincore
3026 */
3027int
3028pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
3029{
3030	pt_entry_t *ptep, pte;
3031	vm_paddr_t pa;
3032	vm_page_t m;
3033	int val;
3034	boolean_t managed;
3035
3036	PMAP_LOCK(pmap);
3037retry:
3038	ptep = pmap_pte(pmap, addr);
3039	pte = (ptep != NULL) ? *ptep : 0;
3040	if (!pte_test(&pte, PTE_V)) {
3041		val = 0;
3042		goto out;
3043	}
3044	val = MINCORE_INCORE;
3045	if (pte_test(&pte, PTE_D))
3046		val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
3047	pa = TLBLO_PTE_TO_PA(pte);
3048	managed = page_is_managed(pa);
3049	if (managed) {
3050		/*
3051		 * This may falsely report the given address as
3052		 * MINCORE_REFERENCED.  Unfortunately, due to the lack of
3053		 * per-PTE reference information, it is impossible to
3054		 * determine if the address is MINCORE_REFERENCED.
3055		 */
3056		m = PHYS_TO_VM_PAGE(pa);
3057		if ((m->aflags & PGA_REFERENCED) != 0)
3058			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
3059	}
3060	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
3061	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
3062		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
3063		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
3064			goto retry;
3065	} else
3066out:
3067		PA_UNLOCK_COND(*locked_pa);
3068	PMAP_UNLOCK(pmap);
3069	return (val);
3070}
3071
3072void
3073pmap_activate(struct thread *td)
3074{
3075	pmap_t pmap, oldpmap;
3076	struct proc *p = td->td_proc;
3077	u_int cpuid;
3078
3079	critical_enter();
3080
3081	pmap = vmspace_pmap(p->p_vmspace);
3082	oldpmap = PCPU_GET(curpmap);
3083	cpuid = PCPU_GET(cpuid);
3084
3085	if (oldpmap)
3086		CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
3087	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
3088	pmap_asid_alloc(pmap);
3089	if (td == curthread) {
3090		PCPU_SET(segbase, pmap->pm_segtab);
3091		mips_wr_entryhi(pmap->pm_asid[cpuid].asid);
3092	}
3093
3094	PCPU_SET(curpmap, pmap);
3095	critical_exit();
3096}
3097
3098void
3099pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
3100{
3101}
3102
3103/*
3104 *	Increase the starting virtual address of the given mapping if a
3105 *	different alignment might result in more superpage mappings.
3106 */
3107void
3108pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
3109    vm_offset_t *addr, vm_size_t size)
3110{
3111	vm_offset_t superpage_offset;
3112
3113	if (size < NBSEG)
3114		return;
3115	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
3116		offset += ptoa(object->pg_color);
3117	superpage_offset = offset & SEGMASK;
3118	if (size - ((NBSEG - superpage_offset) & SEGMASK) < NBSEG ||
3119	    (*addr & SEGMASK) == superpage_offset)
3120		return;
3121	if ((*addr & SEGMASK) < superpage_offset)
3122		*addr = (*addr & ~SEGMASK) + superpage_offset;
3123	else
3124		*addr = ((*addr + SEGMASK) & ~SEGMASK) + superpage_offset;
3125}
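
/*
 * Example: a hypothetical use of pmap_align_superpage() (sketch only,
 * kept under #if 0).  The candidate address "addr" is biased so that
 * the virtual address and object offset become congruent modulo NBSEG,
 * which may allow larger mappings later.
 */
#if 0
static vm_offset_t
example_bias_for_superpages(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t addr, vm_size_t size)
{

	pmap_align_superpage(object, offset, &addr, size);
	return (addr);
}
#endif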
3126
3127/*
3128 * 	Increase the starting virtual address of the given mapping so
3129 * 	that it is aligned to not be the second page in a TLB entry.
3130 * 	This routine assumes that the length is appropriately-sized so
3131 * 	that the allocation does not share a TLB entry at all if required.
3132 */
3133void
3134pmap_align_tlb(vm_offset_t *addr)
3135{
3136	if ((*addr & PAGE_SIZE) == 0)
3137		return;
3138	*addr += PAGE_SIZE;
3139	return;
3140}
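
/*
 * Example: a hypothetical use of pmap_align_tlb() (sketch only, kept
 * under #if 0).  MIPS TLB entries map pairs of pages, so a buffer that
 * must not share a TLB entry with anything else is first bumped to an
 * even page boundary.  "kva" is an assumed candidate address from a
 * hypothetical allocator.
 */
#if 0
static vm_offset_t
example_avoid_shared_tlb_entry(vm_offset_t kva)
{

	pmap_align_tlb(&kva);	/* advances kva by PAGE_SIZE if it is odd */
	return (kva);
}
#endif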
3141
3142#ifdef DDB
3143DB_SHOW_COMMAND(ptable, ddb_pid_dump)
3144{
3145	pmap_t pmap;
3146	struct thread *td = NULL;
3147	struct proc *p;
3148	int i, j, k;
3149	vm_paddr_t pa;
3150	vm_offset_t va;
3151
3152	if (have_addr) {
3153		td = db_lookup_thread(addr, TRUE);
3154		if (td == NULL) {
3155			db_printf("Invalid pid or tid");
3156			return;
3157		}
3158		p = td->td_proc;
3159		if (p->p_vmspace == NULL) {
3160			db_printf("No vmspace for process");
3161			return;
3162		}
3163		pmap = vmspace_pmap(p->p_vmspace);
3164	} else
3165		pmap = kernel_pmap;
3166
3167	db_printf("pmap:%p segtab:%p asid:%x generation:%x\n",
3168	    pmap, pmap->pm_segtab, pmap->pm_asid[0].asid,
3169	    pmap->pm_asid[0].gen);
3170	for (i = 0; i < NPDEPG; i++) {
3171		pd_entry_t *pdpe;
3172		pt_entry_t *pde;
3173		pt_entry_t pte;
3174
3175		pdpe = (pd_entry_t *)pmap->pm_segtab[i];
3176		if (pdpe == NULL)
3177			continue;
3178		db_printf("[%4d] %p\n", i, pdpe);
3179#ifdef __mips_n64
3180		for (j = 0; j < NPDEPG; j++) {
3181			pde = (pt_entry_t *)pdpe[j];
3182			if (pde == NULL)
3183				continue;
3184			db_printf("\t[%4d] %p\n", j, pde);
3185#else
3186		{
3187			j = 0;
3188			pde =  (pt_entry_t *)pdpe;
3189#endif
3190			for (k = 0; k < NPTEPG; k++) {
3191				pte = pde[k];
3192				if (pte == 0 || !pte_test(&pte, PTE_V))
3193					continue;
3194				pa = TLBLO_PTE_TO_PA(pte);
3195				va = ((u_long)i << SEGSHIFT) | (j << PDRSHIFT) | (k << PAGE_SHIFT);
3196				db_printf("\t\t[%04d] va: %p pte: %8jx pa:%jx\n",
3197				       k, (void *)va, (uintmax_t)pte, (uintmax_t)pa);
3198			}
3199		}
3200	}
3201}
3202#endif
3203
3204#if defined(DEBUG)
3205
3206static void pads(pmap_t pm);
3207void pmap_pvdump(vm_offset_t pa);
3208
3209/* print address space of pmap*/
3210static void
3211pads(pmap_t pm)
3212{
3213	unsigned va, i, j;
3214	pt_entry_t *ptep;
3215
3216	if (pm == kernel_pmap)
3217		return;
3218	for (i = 0; i < NPTEPG; i++)
3219		if (pm->pm_segtab[i])
3220			for (j = 0; j < NPTEPG; j++) {
3221				va = (i << SEGSHIFT) + (j << PAGE_SHIFT);
3222				if (pm == kernel_pmap && va < KERNBASE)
3223					continue;
3224				if (pm != kernel_pmap &&
3225				    va >= VM_MAXUSER_ADDRESS)
3226					continue;
3227				ptep = pmap_pte(pm, va);
3228				if (pte_test(ptep, PTE_V))
3229					printf("%x:%x ", va, *(int *)ptep);
3230			}
3231
3232}
3233
3234void
3235pmap_pvdump(vm_offset_t pa)
3236{
3237	register pv_entry_t pv;
3238	vm_page_t m;
3239
3240	printf("pa %x", pa);
3241	m = PHYS_TO_VM_PAGE(pa);
3242	for (pv = TAILQ_FIRST(&m->md.pv_list); pv;
3243	    pv = TAILQ_NEXT(pv, pv_list)) {
3244		printf(" -> pmap %p, va %x", (void *)pv->pv_pmap, pv->pv_va);
3245		pads(pv->pv_pmap);
3246	}
3247	printf(" ");
3248}
3249
3250/* N/C */
3251#endif
3252
3253
3254/*
3255 * Allocate TLB address space tag (called ASID or TLBPID) and return it.
3256 * It takes almost as much or more time to search the TLB for a
3257 * specific ASID and flush those entries as it does to flush the entire TLB.
3258 * Therefore, when we allocate a new ASID, we just take the next number. When
3259 * we run out of numbers, we flush the TLB, increment the generation count
3260 * and start over. ASID zero is reserved for kernel use.
3261 */
3262static void
3263pmap_asid_alloc(pmap)
3264	pmap_t pmap;
3265{
3266	if (pmap->pm_asid[PCPU_GET(cpuid)].asid == PMAP_ASID_RESERVED ||
3267	    pmap->pm_asid[PCPU_GET(cpuid)].gen !=
3268	    PCPU_GET(asid_generation)) {
3269		if (PCPU_GET(next_asid) == pmap_max_asid) {
3270			tlb_invalidate_all_user(NULL);
3271			PCPU_SET(asid_generation,
3272			    (PCPU_GET(asid_generation) + 1) & ASIDGEN_MASK);
3273			if (PCPU_GET(asid_generation) == 0) {
3274				PCPU_SET(asid_generation, 1);
3275			}
3276			PCPU_SET(next_asid, 1);	/* 0 means invalid */
3277		}
3278		pmap->pm_asid[PCPU_GET(cpuid)].asid = PCPU_GET(next_asid);
3279		pmap->pm_asid[PCPU_GET(cpuid)].gen = PCPU_GET(asid_generation);
3280		PCPU_SET(next_asid, PCPU_GET(next_asid) + 1);
3281	}
3282}
3283
3284int
3285page_is_managed(vm_paddr_t pa)
3286{
3287	vm_offset_t pgnum = atop(pa);
3288
3289	if (pgnum >= first_page) {
3290		vm_page_t m;
3291
3292		m = PHYS_TO_VM_PAGE(pa);
3293		if (m == NULL)
3294			return (0);
3295		if ((m->oflags & VPO_UNMANAGED) == 0)
3296			return (1);
3297	}
3298	return (0);
3299}
3300
3301static pt_entry_t
3302init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot)
3303{
3304	pt_entry_t rw;
3305
3306	if (!(prot & VM_PROT_WRITE))
3307		rw =  PTE_V | PTE_RO;
3308	else if ((m->oflags & VPO_UNMANAGED) == 0) {
3309		if ((m->md.pv_flags & PV_TABLE_MOD) != 0)
3310			rw =  PTE_V | PTE_D;
3311		else
3312			rw = PTE_V;
3313		vm_page_aflag_set(m, PGA_WRITEABLE);
3314	} else
3315		/* Needn't emulate a modified bit for unmanaged pages. */
3316		rw =  PTE_V | PTE_D;
3317	return (rw);
3318}
3319
3320/*
3321 * pmap_emulate_modified : do dirty bit emulation
3322 *
3323 * On SMP, update just the local TLB; other CPUs will update their
3324 * TLBs from the PTE lazily if they take the exception.
3325 * Returns 0 in case of success, 1 if the page is read-only and we
3326 * need to fault.
3327 */
3328int
3329pmap_emulate_modified(pmap_t pmap, vm_offset_t va)
3330{
3331	vm_page_t m;
3332	pt_entry_t *pte;
3333 	vm_paddr_t pa;
3334
3335	PMAP_LOCK(pmap);
3336	pte = pmap_pte(pmap, va);
3337	if (pte == NULL)
3338		panic("pmap_emulate_modified: can't find PTE");
3339#ifdef SMP
3340	/* It is possible that some other CPU changed m-bit */
3341	if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D)) {
3342		tlb_update(pmap, va, *pte);
3343		PMAP_UNLOCK(pmap);
3344		return (0);
3345	}
3346#else
3347	if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D))
3348		panic("pmap_emulate_modified: invalid pte");
3349#endif
3350	if (pte_test(pte, PTE_RO)) {
3351		/* write to read only page in the kernel */
3352		PMAP_UNLOCK(pmap);
3353		return (1);
3354	}
3355	pte_set(pte, PTE_D);
3356	tlb_update(pmap, va, *pte);
3357	pa = TLBLO_PTE_TO_PA(*pte);
3358	if (!page_is_managed(pa))
3359		panic("pmap_emulate_modified: unmanaged page");
3360	m = PHYS_TO_VM_PAGE(pa);
3361	m->md.pv_flags |= (PV_TABLE_REF | PV_TABLE_MOD);
3362	PMAP_UNLOCK(pmap);
3363	return (0);
3364}
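
/*
 * Example: the rough shape of a hypothetical TLB-modified exception
 * path built on pmap_emulate_modified() (sketch only, kept under #if 0).
 * A non-zero return means the page really is read-only and the fault
 * must be pushed up to vm_fault(); "trap_pmap" and "badvaddr" are
 * assumed to come from the trap frame.
 */
#if 0
static int
example_handle_tlb_mod(pmap_t trap_pmap, vm_offset_t badvaddr)
{

	if (pmap_emulate_modified(trap_pmap, badvaddr) != 0)
		return (1);	/* caller should invoke vm_fault() */
	return (0);		/* dirty bit emulated; retry the access */
}
#endif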
3365
3366/*
3367 *	Routine:	pmap_kextract
3368 *	Function:
3369 *		Extract the physical page address associated
3370 *		Extract the physical page address associated with
3371 *		the given virtual address.
3372vm_paddr_t
3373pmap_kextract(vm_offset_t va)
3374{
3375	int mapped;
3376
3377	/*
3378	 * First, the direct-mapped regions.
3379	 */
3380#if defined(__mips_n64)
3381	if (va >= MIPS_XKPHYS_START && va < MIPS_XKPHYS_END)
3382		return (MIPS_XKPHYS_TO_PHYS(va));
3383#endif
3384	if (va >= MIPS_KSEG0_START && va < MIPS_KSEG0_END)
3385		return (MIPS_KSEG0_TO_PHYS(va));
3386
3387	if (va >= MIPS_KSEG1_START && va < MIPS_KSEG1_END)
3388		return (MIPS_KSEG1_TO_PHYS(va));
3389
3390	/*
3391	 * User virtual addresses.
3392	 */
3393	if (va < VM_MAXUSER_ADDRESS) {
3394		pt_entry_t *ptep;
3395
3396		if (curproc && curproc->p_vmspace) {
3397			ptep = pmap_pte(&curproc->p_vmspace->vm_pmap, va);
3398			if (ptep) {
3399				return (TLBLO_PTE_TO_PA(*ptep) |
3400				    (va & PAGE_MASK));
3401			}
3402			return (0);
3403		}
3404	}
3405
3406	/*
3407	 * Should be kernel virtual here, otherwise fail
3408	 */
3409	mapped = (va >= MIPS_KSEG2_START && va < MIPS_KSEG2_END);
3410#if defined(__mips_n64)
3411	mapped = mapped || (va >= MIPS_XKSEG_START && va < MIPS_XKSEG_END);
3412#endif
3413	/*
3414	 * Kernel virtual.
3415	 */
3416
3417	if (mapped) {
3418		pt_entry_t *ptep;
3419
3420		/* Is the kernel pmap initialized? */
3421		if (!CPU_EMPTY(&kernel_pmap->pm_active)) {
3422			/* It's inside the virtual address range */
3423			ptep = pmap_pte(kernel_pmap, va);
3424			if (ptep) {
3425				return (TLBLO_PTE_TO_PA(*ptep) |
3426				    (va & PAGE_MASK));
3427			}
3428		}
3429		return (0);
3430	}
3431
3432	panic("%s for unknown address space %p.", __func__, (void *)va);
3433}
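
/*
 * Example: a hypothetical use of pmap_kextract() (sketch only, kept
 * under #if 0).  Translate the kernel virtual address of a buffer to
 * its physical address and print both.
 */
#if 0
static void
example_print_phys(void *buf)
{
	vm_paddr_t pa;

	pa = pmap_kextract((vm_offset_t)buf);
	printf("va %p -> pa %jx\n", buf, (uintmax_t)pa);
}
#endif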
3434
3435
3436void
3437pmap_flush_pvcache(vm_page_t m)
3438{
3439	pv_entry_t pv;
3440
3441	if (m != NULL) {
3442		for (pv = TAILQ_FIRST(&m->md.pv_list); pv;
3443		    pv = TAILQ_NEXT(pv, pv_list)) {
3444			mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
3445		}
3446	}
3447}
3448