/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 *	from: src/sys/i386/i386/pmap.c,v 1.250.2.8 2000/11/21 00:09:14 ps
 *	JNPR: pmap.c,v 1.11.2.1 2007/08/16 11:51:06 girish
 */

/*
 *	Manages physical address maps.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures that
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidate or reduced-protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and when physical maps must be made correct.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/mips/mips/pmap.c 310133 2016-12-16 01:06:35Z jhb $");

#include "opt_ddb.h"
#include "opt_pmap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cache.h>
#include <machine/md_var.h>
#include <machine/tlb.h>

#undef PMAP_DEBUG

#if !defined(DIAGNOSTIC)
#define	PMAP_INLINE __inline
#else
#define	PMAP_INLINE
#endif

#ifdef PV_STATS
#define PV_STAT(x)	do { x ; } while (0)
#else
#define PV_STAT(x)	do { } while (0)
#endif

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define	pmap_seg_index(v)	(((v) >> SEGSHIFT) & (NPDEPG - 1))
#define	pmap_pde_index(v)	(((v) >> PDRSHIFT) & (NPDEPG - 1))
#define	pmap_pte_index(v)	(((v) >> PAGE_SHIFT) & (NPTEPG - 1))
#define	pmap_pde_pindex(v)	((v) >> PDRSHIFT)
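
/*
 * Worked example of the decomposition these macros perform (a sketch;
 * the shift constants come from the machine headers).  A virtual
 * address breaks down as
 *
 *	va = (seg_index << SEGSHIFT) | (pde_index << PDRSHIFT)	(n64 only)
 *	    | (pte_index << PAGE_SHIFT) | page_offset
 *
 * so pmap_seg_index(va) selects the first-level (segment) entry and
 * pmap_pte_index(va) selects one of the NPTEPG entries within the
 * leaf page-table page that holds va's mapping.
 */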

#ifdef __mips_n64
#define	NUPDE			(NPDEPG * NPDEPG)
#define	NUSERPGTBLS		(NUPDE + NPDEPG)
#else
#define	NUPDE			(NPDEPG)
#define	NUSERPGTBLS		(NUPDE)
#endif

#define	is_kernel_pmap(x)	((x) == kernel_pmap)

struct pmap kernel_pmap_store;
pd_entry_t *kernel_segmap;

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */

static int nkpt;
unsigned pmap_max_asid;		/* max ASID supported by the system */

#define	PMAP_ASID_RESERVED	0

vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;

static void pmap_asid_alloc(pmap_t pmap);

static struct rwlock_padalign pvh_global_lock;

/*
 * Data for the pv entry allocation mechanism
 */
static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
static int pv_entry_count;

static void free_pv_chunk(struct pv_chunk *pc);
static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try);
static vm_page_t pmap_pv_reclaim(pmap_t locked_pmap);
static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
    vm_offset_t va);
static vm_page_t pmap_alloc_direct_page(unsigned int index, int req);
static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
    vm_page_t m, vm_prot_t prot, vm_page_t mpte);
static int pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va,
    pd_entry_t pde);
static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte,
    vm_offset_t va, vm_page_t m);
static void pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte);
static void pmap_invalidate_all(pmap_t pmap);
static void pmap_invalidate_page(pmap_t pmap, vm_offset_t va);
static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m);

static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags);
static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, u_int flags);
static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t);
static pt_entry_t init_pte_prot(vm_page_t m, vm_prot_t access, vm_prot_t prot);

static void pmap_invalidate_page_action(void *arg);
static void pmap_invalidate_range_action(void *arg);
static void pmap_update_page_action(void *arg);

#ifndef __mips_n64
/*
 * This structure is for high memory (memory above 512Meg in 32 bit) support.
 * The highmem area does not have a KSEG0 mapping, and we need a mechanism to
 * do temporary per-CPU mappings for pmap_zero_page, pmap_copy_page etc.
 *
 * At bootup, we reserve 2 virtual pages per CPU for mapping highmem pages. To
 * access a highmem physical address on a CPU, we map the physical address to
 * the reserved virtual address for the CPU in the kernel pagetable.  This is
 * done with interrupts disabled (although a spinlock and sched_pin would be
 * sufficient).
 */
struct local_sysmaps {
	vm_offset_t	base;
	uint32_t	saved_intr;
	uint16_t	valid1, valid2;
};
static struct local_sysmaps sysmap_lmem[MAXCPU];

static __inline void
pmap_alloc_lmem_map(void)
{
	int i;

	for (i = 0; i < MAXCPU; i++) {
		sysmap_lmem[i].base = virtual_avail;
		virtual_avail += PAGE_SIZE * 2;
		sysmap_lmem[i].valid1 = sysmap_lmem[i].valid2 = 0;
	}
}

static __inline vm_offset_t
pmap_lmem_map1(vm_paddr_t phys)
{
	struct local_sysmaps *sysm;
	pt_entry_t *pte, npte;
	vm_offset_t va;
	uint32_t intr;
	int cpu;

	intr = intr_disable();
	cpu = PCPU_GET(cpuid);
	sysm = &sysmap_lmem[cpu];
	sysm->saved_intr = intr;
	va = sysm->base;
	npte = TLBLO_PA_TO_PFN(phys) | PTE_C_CACHE | PTE_D | PTE_V | PTE_G;
	pte = pmap_pte(kernel_pmap, va);
	*pte = npte;
	sysm->valid1 = 1;
	return (va);
}

static __inline vm_offset_t
pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
{
	struct local_sysmaps *sysm;
	pt_entry_t *pte, npte;
	vm_offset_t va1, va2;
	uint32_t intr;
	int cpu;

	intr = intr_disable();
	cpu = PCPU_GET(cpuid);
	sysm = &sysmap_lmem[cpu];
	sysm->saved_intr = intr;
	va1 = sysm->base;
	va2 = sysm->base + PAGE_SIZE;
	npte = TLBLO_PA_TO_PFN(phys1) | PTE_C_CACHE | PTE_D | PTE_V | PTE_G;
	pte = pmap_pte(kernel_pmap, va1);
	*pte = npte;
	npte = TLBLO_PA_TO_PFN(phys2) | PTE_C_CACHE | PTE_D | PTE_V | PTE_G;
	pte = pmap_pte(kernel_pmap, va2);
	*pte = npte;
	sysm->valid1 = 1;
	sysm->valid2 = 1;
	return (va1);
}

static __inline void
pmap_lmem_unmap(void)
{
	struct local_sysmaps *sysm;
	pt_entry_t *pte;
	int cpu;

	cpu = PCPU_GET(cpuid);
	sysm = &sysmap_lmem[cpu];
	pte = pmap_pte(kernel_pmap, sysm->base);
	*pte = PTE_G;
	tlb_invalidate_address(kernel_pmap, sysm->base);
	sysm->valid1 = 0;
	if (sysm->valid2) {
		pte = pmap_pte(kernel_pmap, sysm->base + PAGE_SIZE);
		*pte = PTE_G;
		tlb_invalidate_address(kernel_pmap, sysm->base + PAGE_SIZE);
		sysm->valid2 = 0;
	}
	intr_restore(sysm->saved_intr);
}
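
/*
 * A typical caller pairs these helpers as follows (a sketch; the real
 * users are the pmap_zero_page()/pmap_copy_page() family later in this
 * file).  pmap_lmem_unmap() also restores the interrupt state that
 * pmap_lmem_map1() saved:
 *
 *	va = pmap_lmem_map1(VM_PAGE_TO_PHYS(m));
 *	bzero((caddr_t)va, PAGE_SIZE);
 *	pmap_lmem_unmap();
 */
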
#else  /* __mips_n64 */

static __inline void
pmap_alloc_lmem_map(void)
{
}

static __inline vm_offset_t
pmap_lmem_map1(vm_paddr_t phys)
{

	return (0);
}

static __inline vm_offset_t
pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
{

	return (0);
}

static __inline void
pmap_lmem_unmap(void)
{
}
#endif /* !__mips_n64 */

/*
 * Page table entry lookup routines.
 */
static __inline pd_entry_t *
pmap_segmap(pmap_t pmap, vm_offset_t va)
{

	return (&pmap->pm_segtab[pmap_seg_index(va)]);
}

#ifdef __mips_n64
static __inline pd_entry_t *
pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va)
{
	pd_entry_t *pde;

	pde = (pd_entry_t *)*pdpe;
	return (&pde[pmap_pde_index(va)]);
}

static __inline pd_entry_t *
pmap_pde(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *pdpe;

	pdpe = pmap_segmap(pmap, va);
	if (*pdpe == NULL)
		return (NULL);

	return (pmap_pdpe_to_pde(pdpe, va));
}
#else
static __inline pd_entry_t *
pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va)
{

	return (pdpe);
}

static __inline pd_entry_t *
pmap_pde(pmap_t pmap, vm_offset_t va)
{

	return (pmap_segmap(pmap, va));
}
#endif

static __inline pt_entry_t *
pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)
{
	pt_entry_t *pte;

	pte = (pt_entry_t *)*pde;
	return (&pte[pmap_pte_index(va)]);
}

pt_entry_t *
pmap_pte(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *pde;

	pde = pmap_pde(pmap, va);
	if (pde == NULL || *pde == NULL)
		return (NULL);

	return (pmap_pde_to_pte(pde, va));
}

vm_offset_t
pmap_steal_memory(vm_size_t size)
{
	vm_paddr_t bank_size, pa;
	vm_offset_t va;

	size = round_page(size);
	bank_size = phys_avail[1] - phys_avail[0];
	while (size > bank_size) {
		int i;

		for (i = 0; phys_avail[i + 2]; i += 2) {
			phys_avail[i] = phys_avail[i + 2];
			phys_avail[i + 1] = phys_avail[i + 3];
		}
		phys_avail[i] = 0;
		phys_avail[i + 1] = 0;
		if (!phys_avail[0])
			panic("pmap_steal_memory: out of memory");
		bank_size = phys_avail[1] - phys_avail[0];
	}

	pa = phys_avail[0];
	phys_avail[0] += size;
	if (MIPS_DIRECT_MAPPABLE(pa) == 0)
		panic("Out of memory below 512Meg?");
	va = MIPS_PHYS_TO_DIRECT(pa);
	bzero((caddr_t)va, size);
	return (va);
}
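
/*
 * Example use (a sketch; pmap_bootstrap() below does exactly this to
 * carve the message buffer out of the first phys_avail[] bank):
 *
 *	msgbufp = (struct msgbuf *)pmap_steal_memory(msgbufsize);
 *	msgbufinit(msgbufp, msgbufsize);
 */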

/*
 * Bootstrap the system enough to run with virtual memory.  This
 * assumes that the phys_avail array has been initialized.
 */
static void
pmap_create_kernel_pagetable(void)
{
	int i, j;
	vm_offset_t ptaddr;
	pt_entry_t *pte;
#ifdef __mips_n64
	pd_entry_t *pde;
	vm_offset_t pdaddr;
	int npt, npde;
#endif

	/*
	 * Allocate segment table for the kernel
	 */
	kernel_segmap = (pd_entry_t *)pmap_steal_memory(PAGE_SIZE);

	/*
	 * Allocate second level page tables for the kernel
	 */
#ifdef __mips_n64
	npde = howmany(NKPT, NPDEPG);
	pdaddr = pmap_steal_memory(PAGE_SIZE * npde);
#endif
	nkpt = NKPT;
	ptaddr = pmap_steal_memory(PAGE_SIZE * nkpt);

	/*
	 * The R[4-7]?00 stores only one copy of the Global bit in the
	 * translation lookaside buffer for each 2 page entry. Thus invalid
	 * entries must have the Global bit set so when Entry LO and Entry HI
	 * G bits are ANDed together they will produce a global bit to store
	 * in the TLB.
	 */
	for (i = 0, pte = (pt_entry_t *)ptaddr; i < (nkpt * NPTEPG); i++, pte++)
		*pte = PTE_G;

#ifdef __mips_n64
	for (i = 0, npt = nkpt; npt > 0; i++) {
		kernel_segmap[i] = (pd_entry_t)(pdaddr + i * PAGE_SIZE);
		pde = (pd_entry_t *)kernel_segmap[i];

		for (j = 0; j < NPDEPG && npt > 0; j++, npt--)
			pde[j] = (pd_entry_t)(ptaddr + (i * NPDEPG + j) * PAGE_SIZE);
	}
#else
	for (i = 0, j = pmap_seg_index(VM_MIN_KERNEL_ADDRESS); i < nkpt; i++, j++)
		kernel_segmap[j] = (pd_entry_t)(ptaddr + (i * PAGE_SIZE));
#endif

	PMAP_LOCK_INIT(kernel_pmap);
	kernel_pmap->pm_segtab = kernel_segmap;
	CPU_FILL(&kernel_pmap->pm_active);
	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
	kernel_pmap->pm_asid[0].asid = PMAP_ASID_RESERVED;
	kernel_pmap->pm_asid[0].gen = 0;
	kernel_vm_end += nkpt * NPTEPG * PAGE_SIZE;
}

void
pmap_bootstrap(void)
{
	int i;
	int need_local_mappings = 0;

	/* Sort. */
again:
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		/*
		 * Keep the memory aligned on page boundary.
		 */
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);

		if (i < 2)
			continue;
		if (phys_avail[i - 2] > phys_avail[i]) {
			vm_paddr_t ptemp[2];

			ptemp[0] = phys_avail[i + 0];
			ptemp[1] = phys_avail[i + 1];

			phys_avail[i + 0] = phys_avail[i - 2];
			phys_avail[i + 1] = phys_avail[i - 1];

			phys_avail[i - 2] = ptemp[0];
			phys_avail[i - 1] = ptemp[1];
			goto again;
		}
	}

	/*
	 * In 32 bit, we may have memory which cannot be mapped directly.
	 * This memory will need temporary mapping before it can be
	 * accessed.
	 */
	if (!MIPS_DIRECT_MAPPABLE(phys_avail[i - 1] - 1))
		need_local_mappings = 1;

	/*
	 * Copy the phys_avail[] array before we start stealing memory from it.
	 */
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		physmem_desc[i] = phys_avail[i];
		physmem_desc[i + 1] = phys_avail[i + 1];
	}

	Maxmem = atop(phys_avail[i - 1]);

	if (bootverbose) {
		printf("Physical memory chunk(s):\n");
		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
			vm_paddr_t size;

			size = phys_avail[i + 1] - phys_avail[i];
			printf("%#08jx - %#08jx, %ju bytes (%ju pages)\n",
			    (uintmax_t) phys_avail[i],
			    (uintmax_t) phys_avail[i + 1] - 1,
			    (uintmax_t) size, (uintmax_t) size / PAGE_SIZE);
		}
		printf("Maxmem is 0x%0jx\n", ptoa((uintmax_t)Maxmem));
	}
	/*
	 * Steal the message buffer from the beginning of memory.
	 */
	msgbufp = (struct msgbuf *)pmap_steal_memory(msgbufsize);
	msgbufinit(msgbufp, msgbufsize);

	/*
	 * Steal thread0 kstack.
	 */
	kstack0 = pmap_steal_memory(KSTACK_PAGES << PAGE_SHIFT);

	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_KERNEL_ADDRESS;

#ifdef SMP
	/*
	 * Steal some virtual address space to map the pcpu area.
	 */
	virtual_avail = roundup2(virtual_avail, PAGE_SIZE * 2);
	pcpup = (struct pcpu *)virtual_avail;
	virtual_avail += PAGE_SIZE * 2;

	/*
	 * Initialize the wired TLB entry mapping the pcpu region for
	 * the BSP at 'pcpup'. Up until this point we were operating
	 * with the 'pcpup' for the BSP pointing to a virtual address
	 * in KSEG0 so there was no need for a TLB mapping.
	 */
	mips_pcpu_tlb_init(PCPU_ADDR(0));

	if (bootverbose)
		printf("pcpu is available at virtual address %p.\n", pcpup);
#endif

	if (need_local_mappings)
		pmap_alloc_lmem_map();
	pmap_create_kernel_pagetable();
	pmap_max_asid = VMNUM_PIDS;
	mips_wr_entryhi(0);
	mips_wr_pagemask(0);

	/*
	 * Initialize the global pv list lock.
	 */
	rw_init(&pvh_global_lock, "pmap pv global");
}

/*
 * Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
	m->md.pv_flags = 0;
}

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(void)
{
}

/***************************************************
 * Low level helper routines.....
 ***************************************************/

#ifdef	SMP
static __inline void
pmap_call_on_active_cpus(pmap_t pmap, void (*fn)(void *), void *arg)
{
	int	cpuid, cpu, self;
	cpuset_t active_cpus;

	sched_pin();
	if (is_kernel_pmap(pmap)) {
		smp_rendezvous(NULL, fn, NULL, arg);
		goto out;
	}
	/* Force ASID update on inactive CPUs */
	CPU_FOREACH(cpu) {
		if (!CPU_ISSET(cpu, &pmap->pm_active))
			pmap->pm_asid[cpu].gen = 0;
	}
	cpuid = PCPU_GET(cpuid);
	/*
	 * XXX: barrier/locking for active?
	 *
	 * Take a snapshot of active here; any further changes are ignored.
	 * TLB update/invalidate should be harmless on inactive CPUs.
	 */
	active_cpus = pmap->pm_active;
	self = CPU_ISSET(cpuid, &active_cpus);
	CPU_CLR(cpuid, &active_cpus);
	/* Optimize for the case where this cpu is the only active one */
	if (CPU_EMPTY(&active_cpus)) {
		if (self)
			fn(arg);
	} else {
		if (self)
			CPU_SET(cpuid, &active_cpus);
		smp_rendezvous_cpus(active_cpus, NULL, fn, NULL, arg);
	}
out:
	sched_unpin();
}
#else /* !SMP */
static __inline void
pmap_call_on_active_cpus(pmap_t pmap, void (*fn)(void *), void *arg)
{
	int	cpuid;

	if (is_kernel_pmap(pmap)) {
		fn(arg);
		return;
	}
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &pmap->pm_active))
		pmap->pm_asid[cpuid].gen = 0;
	else
		fn(arg);
}
#endif /* SMP */

static void
pmap_invalidate_all(pmap_t pmap)
{

	pmap_call_on_active_cpus(pmap,
	    (void (*)(void *))tlb_invalidate_all_user, pmap);
}

struct pmap_invalidate_page_arg {
	pmap_t pmap;
	vm_offset_t va;
};

static void
pmap_invalidate_page_action(void *arg)
{
	struct pmap_invalidate_page_arg *p = arg;

	tlb_invalidate_address(p->pmap, p->va);
}

static void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
	struct pmap_invalidate_page_arg arg;

	arg.pmap = pmap;
	arg.va = va;
	pmap_call_on_active_cpus(pmap, pmap_invalidate_page_action, &arg);
}

struct pmap_invalidate_range_arg {
	pmap_t pmap;
	vm_offset_t sva;
	vm_offset_t eva;
};

static void
pmap_invalidate_range_action(void *arg)
{
	struct pmap_invalidate_range_arg *p = arg;

	tlb_invalidate_range(p->pmap, p->sva, p->eva);
}

static void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	struct pmap_invalidate_range_arg arg;

	arg.pmap = pmap;
	arg.sva = sva;
	arg.eva = eva;
	pmap_call_on_active_cpus(pmap, pmap_invalidate_range_action, &arg);
}

struct pmap_update_page_arg {
	pmap_t pmap;
	vm_offset_t va;
	pt_entry_t pte;
};

static void
pmap_update_page_action(void *arg)
{
	struct pmap_update_page_arg *p = arg;

	tlb_update(p->pmap, p->va, p->pte);
}

static void
pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte)
{
	struct pmap_update_page_arg arg;

	arg.pmap = pmap;
	arg.va = va;
	arg.pte = pte;
	pmap_call_on_active_cpus(pmap, pmap_update_page_action, &arg);
}

/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
	pt_entry_t *pte;
	vm_offset_t retval = 0;

	PMAP_LOCK(pmap);
	pte = pmap_pte(pmap, va);
	if (pte) {
		retval = TLBLO_PTE_TO_PA(*pte) | (va & PAGE_MASK);
	}
	PMAP_UNLOCK(pmap);
	return (retval);
}

/*
 *	Routine:	pmap_extract_and_hold
 *	Function:
 *		Atomically extract and hold the physical page
 *		with the given pmap and virtual address pair
 *		if that mapping permits the given protection.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	pt_entry_t pte, *ptep;
	vm_paddr_t pa, pte_pa;
	vm_page_t m;

	m = NULL;
	pa = 0;
	PMAP_LOCK(pmap);
retry:
	ptep = pmap_pte(pmap, va);
	if (ptep != NULL) {
		pte = *ptep;
		if (pte_test(&pte, PTE_V) && (!pte_test(&pte, PTE_RO) ||
		    (prot & VM_PROT_WRITE) == 0)) {
			pte_pa = TLBLO_PTE_TO_PA(pte);
			if (vm_page_pa_tryrelock(pmap, pte_pa, &pa))
				goto retry;
			m = PHYS_TO_VM_PAGE(pte_pa);
			vm_page_hold(m);
		}
	}
	PA_UNLOCK_COND(pa);
	PMAP_UNLOCK(pmap);
	return (m);
}

/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * add a wired page to the kva
 */
void
pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int attr)
{
	pt_entry_t *pte;
	pt_entry_t opte, npte;

#ifdef PMAP_DEBUG
	printf("pmap_kenter:  va: %p -> pa: %p\n", (void *)va, (void *)pa);
#endif

	pte = pmap_pte(kernel_pmap, va);
	opte = *pte;
	npte = TLBLO_PA_TO_PFN(pa) | attr | PTE_D | PTE_V | PTE_G;
	*pte = npte;
	if (pte_test(&opte, PTE_V) && opte != npte)
		pmap_update_page(kernel_pmap, va, npte);
}

void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

	KASSERT(is_cacheable_mem(pa),
		("pmap_kenter: memory at 0x%lx is not cacheable", (u_long)pa));

	pmap_kenter_attr(va, pa, PTE_C_CACHE);
}

/*
 * remove a page from the kernel pagetables
 */
 /* PMAP_INLINE */ void
pmap_kremove(vm_offset_t va)
{
	pt_entry_t *pte;

	/*
	 * Write back all caches from the page being destroyed
	 */
	mips_dcache_wbinv_range_index(va, PAGE_SIZE);

	pte = pmap_pte(kernel_pmap, va);
	*pte = PTE_G;
	pmap_invalidate_page(kernel_pmap, va);
}

/*
 *	Used to map a range of physical addresses into kernel
 *	virtual address space.
 *
 *	The value passed in '*virt' is a suggested virtual address for
 *	the mapping. Architectures which can support a direct-mapped
 *	physical to virtual region can return the appropriate address
 *	within that region, leaving '*virt' unchanged. Other
 *	architectures should map the pages starting at '*virt' and
 *	update '*virt' with the first usable address after the mapped
 *	region.
 *
 *	Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{
	vm_offset_t va, sva;

	if (MIPS_DIRECT_MAPPABLE(end - 1))
		return (MIPS_PHYS_TO_DIRECT(start));

	va = sva = *virt;
	while (start < end) {
		pmap_kenter(va, start);
		va += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	*virt = va;
	return (sva);
}

/*
 * Add a list of wired pages to the kva
 * this routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
 */
void
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
{
	int i;
	vm_offset_t origva = va;

	for (i = 0; i < count; i++) {
		pmap_flush_pvcache(m[i]);
		pmap_kenter(va, VM_PAGE_TO_PHYS(m[i]));
		va += PAGE_SIZE;
	}

	mips_dcache_wbinv_range_index(origva, PAGE_SIZE*count);
}

/*
 * this routine jerks page mappings from the
 * kernel -- it is meant only for temporary mappings.
 */
void
pmap_qremove(vm_offset_t va, int count)
{
	pt_entry_t *pte;
	vm_offset_t origva;

	if (count < 1)
		return;
	mips_dcache_wbinv_range_index(va, PAGE_SIZE * count);
	origva = va;
	do {
		pte = pmap_pte(kernel_pmap, va);
		*pte = PTE_G;
		va += PAGE_SIZE;
	} while (--count > 0);
	pmap_invalidate_range(kernel_pmap, origva, va);
}

/***************************************************
 * Page table page management routines.....
 ***************************************************/

/*
 * Decrements a page table page's wire count, which is used to record the
 * number of valid page table entries within the page.  If the wire count
 * drops to zero, then the page table page is unmapped.  Returns TRUE if the
 * page table page was unmapped and FALSE otherwise.
 */
static PMAP_INLINE boolean_t
pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m)
{

	--m->wire_count;
	if (m->wire_count == 0) {
		_pmap_unwire_ptp(pmap, va, m);
		return (TRUE);
	} else
		return (FALSE);
}

static void
_pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pd_entry_t *pde;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	/*
	 * unmap the page table page
	 */
#ifdef __mips_n64
	if (m->pindex < NUPDE)
		pde = pmap_pde(pmap, va);
	else
		pde = pmap_segmap(pmap, va);
#else
	pde = pmap_pde(pmap, va);
#endif
	*pde = 0;
	pmap->pm_stats.resident_count--;

#ifdef __mips_n64
	if (m->pindex < NUPDE) {
		pd_entry_t *pdp;
		vm_page_t pdpg;

		/*
		 * Recursively decrement next level pagetable refcount
		 */
		pdp = (pd_entry_t *)*pmap_segmap(pmap, va);
		pdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pdp));
		pmap_unwire_ptp(pmap, va, pdpg);
	}
#endif

	/*
	 * If the page is finally unwired, simply free it.
	 */
	vm_page_free_zero(m);
	atomic_subtract_int(&cnt.v_wire_count, 1);
}

/*
 * After removing a page table entry, this routine is used to
 * conditionally free the page, and manage the hold/wire counts.
 */
static int
pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
{
	vm_page_t mpte;

	if (va >= VM_MAXUSER_ADDRESS)
		return (0);
	KASSERT(pde != 0, ("pmap_unuse_pt: pde != 0"));
	mpte = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pde));
	return (pmap_unwire_ptp(pmap, va, mpte));
}

void
pmap_pinit0(pmap_t pmap)
{
	int i;

	PMAP_LOCK_INIT(pmap);
	pmap->pm_segtab = kernel_segmap;
	CPU_ZERO(&pmap->pm_active);
	for (i = 0; i < MAXCPU; i++) {
		pmap->pm_asid[i].asid = PMAP_ASID_RESERVED;
		pmap->pm_asid[i].gen = 0;
	}
	PCPU_SET(curpmap, pmap);
	TAILQ_INIT(&pmap->pm_pvchunk);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
}

void
pmap_grow_direct_page_cache(void)
{

#ifdef __mips_n64
	vm_pageout_grow_cache(3, 0, MIPS_XKPHYS_LARGEST_PHYS);
#else
	vm_pageout_grow_cache(3, 0, MIPS_KSEG0_LARGEST_PHYS);
#endif
}

static vm_page_t
pmap_alloc_direct_page(unsigned int index, int req)
{
	vm_page_t m;

	m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, req | VM_ALLOC_WIRED |
	    VM_ALLOC_ZERO);
	if (m == NULL)
		return (NULL);

	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);

	m->pindex = index;
	return (m);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
int
pmap_pinit(pmap_t pmap)
{
	vm_offset_t ptdva;
	vm_page_t ptdpg;
	int i;

	/*
	 * allocate the page directory page
	 */
	while ((ptdpg = pmap_alloc_direct_page(NUSERPGTBLS, VM_ALLOC_NORMAL)) == NULL)
		pmap_grow_direct_page_cache();

	ptdva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(ptdpg));
	pmap->pm_segtab = (pd_entry_t *)ptdva;
	CPU_ZERO(&pmap->pm_active);
	for (i = 0; i < MAXCPU; i++) {
		pmap->pm_asid[i].asid = PMAP_ASID_RESERVED;
		pmap->pm_asid[i].gen = 0;
	}
	TAILQ_INIT(&pmap->pm_pvchunk);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);

	return (1);
}

/*
 * this routine is called if the page table page is not
 * mapped correctly.
 */
static vm_page_t
_pmap_allocpte(pmap_t pmap, unsigned ptepindex, u_int flags)
{
	vm_offset_t pageva;
	vm_page_t m;

	/*
	 * Find or fabricate a new pagetable page
	 */
	if ((m = pmap_alloc_direct_page(ptepindex, VM_ALLOC_NORMAL)) == NULL) {
		if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
			PMAP_UNLOCK(pmap);
			rw_wunlock(&pvh_global_lock);
			pmap_grow_direct_page_cache();
			rw_wlock(&pvh_global_lock);
			PMAP_LOCK(pmap);
		}

		/*
		 * Indicate the need to retry.  While waiting, the page
		 * table page may have been allocated.
		 */
		return (NULL);
	}

	/*
	 * Map the pagetable page into the process address space, if it
	 * isn't already there.
	 */
	pageva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));

#ifdef __mips_n64
	if (ptepindex >= NUPDE) {
		pmap->pm_segtab[ptepindex - NUPDE] = (pd_entry_t)pageva;
	} else {
		pd_entry_t *pdep, *pde;
		int segindex = ptepindex >> (SEGSHIFT - PDRSHIFT);
		int pdeindex = ptepindex & (NPDEPG - 1);
		vm_page_t pg;

		pdep = &pmap->pm_segtab[segindex];
		if (*pdep == NULL) {
			/* recurse for allocating page dir */
			if (_pmap_allocpte(pmap, NUPDE + segindex,
			    flags) == NULL) {
				/* alloc failed, release current */
				--m->wire_count;
				atomic_subtract_int(&cnt.v_wire_count, 1);
				vm_page_free_zero(m);
				return (NULL);
			}
		} else {
			pg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pdep));
			pg->wire_count++;
		}
		/* Next level entry */
		pde = (pd_entry_t *)*pdep;
		pde[pdeindex] = (pd_entry_t)pageva;
	}
#else
	pmap->pm_segtab[ptepindex] = (pd_entry_t)pageva;
#endif
	pmap->pm_stats.resident_count++;
	return (m);
}

static vm_page_t
pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags)
{
	unsigned ptepindex;
	pd_entry_t *pde;
	vm_page_t m;

	/*
	 * Calculate pagetable page index
	 */
	ptepindex = pmap_pde_pindex(va);
retry:
	/*
	 * Get the page directory entry
	 */
	pde = pmap_pde(pmap, va);

	/*
	 * If the page table page is mapped, we just increment the hold
	 * count, and activate it.
	 */
	if (pde != NULL && *pde != NULL) {
		m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pde));
		m->wire_count++;
	} else {
		/*
		 * Here if the pte page isn't mapped, or if it has been
		 * deallocated.
		 */
		m = _pmap_allocpte(pmap, ptepindex, flags);
		if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0)
			goto retry;
	}
	return (m);
}


/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap_t pmap)
{
	vm_offset_t ptdva;
	vm_page_t ptdpg;

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));

	ptdva = (vm_offset_t)pmap->pm_segtab;
	ptdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(ptdva));

	ptdpg->wire_count--;
	atomic_subtract_int(&cnt.v_wire_count, 1);
	vm_page_free_zero(ptdpg);
}

/*
 * grow the number of kernel page table entries, if needed
 */
void
pmap_growkernel(vm_offset_t addr)
{
	vm_page_t nkpg;
	pd_entry_t *pde, *pdpe;
	pt_entry_t *pte;
	int i;

	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
	addr = roundup2(addr, NBSEG);
	if (addr - 1 >= kernel_map->max_offset)
		addr = kernel_map->max_offset;
	while (kernel_vm_end < addr) {
		pdpe = pmap_segmap(kernel_pmap, kernel_vm_end);
#ifdef __mips_n64
		if (*pdpe == 0) {
			/* new intermediate page table entry */
			nkpg = pmap_alloc_direct_page(nkpt, VM_ALLOC_INTERRUPT);
			if (nkpg == NULL)
				panic("pmap_growkernel: no memory to grow kernel");
			*pdpe = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg));
			continue; /* try again */
		}
#endif
		pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
		if (*pde != 0) {
			kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
			if (kernel_vm_end - 1 >= kernel_map->max_offset) {
				kernel_vm_end = kernel_map->max_offset;
				break;
			}
			continue;
		}

		/*
		 * This index is bogus, but out of the way
		 */
		nkpg = pmap_alloc_direct_page(nkpt, VM_ALLOC_INTERRUPT);
		if (!nkpg)
			panic("pmap_growkernel: no memory to grow kernel");
		nkpt++;
		*pde = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg));

		/*
		 * The R[4-7]?00 stores only one copy of the Global bit in
		 * the translation lookaside buffer for each 2 page entry.
		 * Thus invalid entries must have the Global bit set so when
		 * Entry LO and Entry HI G bits are ANDed together they will
		 * produce a global bit to store in the TLB.
		 */
		pte = (pt_entry_t *)*pde;
		for (i = 0; i < NPTEPG; i++)
			pte[i] = PTE_G;

		kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
		if (kernel_vm_end - 1 >= kernel_map->max_offset) {
			kernel_vm_end = kernel_map->max_offset;
			break;
		}
	}
}

/***************************************************
 * page management routines.
 ***************************************************/

CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
#ifdef __mips_n64
CTASSERT(_NPCM == 3);
CTASSERT(_NPCPV == 168);
#else
CTASSERT(_NPCM == 11);
CTASSERT(_NPCPV == 336);
#endif
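
/*
 * A quick sanity check of the geometry asserted above (a sketch of the
 * arithmetic, assuming 4 KB pages and no padding surprises): the chunk
 * header (pc_pmap, two TAILQ linkages and pc_map[_NPCM]) comes to 64
 * bytes in both ABIs, and each pv_entry is 24 bytes on n64 (12 on o32),
 * so 64 + 168 * 24 == 64 + 336 * 12 == 4096 == PAGE_SIZE.
 */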

static __inline struct pv_chunk *
pv_to_chunk(pv_entry_t pv)
{

	return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
}

#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)

#ifdef __mips_n64
#define	PC_FREE0_1	0xfffffffffffffffful
#define	PC_FREE2	0x000000fffffffffful
#else
#define	PC_FREE0_9	0xfffffffful	/* Free values for index 0 through 9 */
#define	PC_FREE10	0x0000fffful	/* Free values for index 10 */
#endif

static const u_long pc_freemask[_NPCM] = {
#ifdef __mips_n64
	PC_FREE0_1, PC_FREE0_1, PC_FREE2
#else
	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
	PC_FREE0_9, PC_FREE10
#endif
};
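
/*
 * Worked example of the free-bitmap indexing used below (assuming the
 * o32 case, where u_long is 32 bits wide): pv entry index 45 within a
 * chunk lives at field 45 / 32 == 1, bit 45 % 32 == 13, so freeing it
 * sets pc_map[1] |= 1ul << 13.  free_pv_entry() performs exactly this
 * arithmetic.
 */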
1327239236Salc
1328239236Salcstatic SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
1329239236Salc
1330239236SalcSYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
1331239236Salc    "Current number of pv entries");
1332239236Salc
1333239236Salc#ifdef PV_STATS
1334239236Salcstatic int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
1335239236Salc
1336239236SalcSYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
1337239236Salc    "Current number of pv entry chunks");
1338239236SalcSYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
1339239236Salc    "Current number of pv entry chunks allocated");
1340239236SalcSYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
1341239236Salc    "Current number of pv entry chunks frees");
1342239236SalcSYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
1343239236Salc    "Number of times tried to get a chunk page but failed.");
1344239236Salc
1345239236Salcstatic long pv_entry_frees, pv_entry_allocs;
1346239236Salcstatic int pv_entry_spare;
1347239236Salc
1348239236SalcSYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
1349239236Salc    "Current number of pv entry frees");
1350239236SalcSYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
1351239236Salc    "Current number of pv entry allocs");
1352239236SalcSYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
1353239236Salc    "Current number of spare pv entries");
1354239236Salc#endif
1355239236Salc
1356178172Simp/*
1357239236Salc * We are in a serious low memory condition.  Resort to
1358239236Salc * drastic measures to free some pages so we can allocate
1359239236Salc * another pv entry chunk.
1360239236Salc */
1361239236Salcstatic vm_page_t
1362239236Salcpmap_pv_reclaim(pmap_t locked_pmap)
1363239236Salc{
1364239236Salc	struct pch newtail;
1365239236Salc	struct pv_chunk *pc;
1366239236Salc	pd_entry_t *pde;
1367239236Salc	pmap_t pmap;
1368239236Salc	pt_entry_t *pte, oldpte;
1369239236Salc	pv_entry_t pv;
1370239236Salc	vm_offset_t va;
1371239236Salc	vm_page_t m, m_pc;
1372239236Salc	u_long inuse;
1373239236Salc	int bit, field, freed, idx;
1374239236Salc
1375239236Salc	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
1376239236Salc	pmap = NULL;
1377239236Salc	m_pc = NULL;
1378239236Salc	TAILQ_INIT(&newtail);
1379239236Salc	while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL) {
1380239236Salc		TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1381239236Salc		if (pmap != pc->pc_pmap) {
1382239236Salc			if (pmap != NULL) {
1383239236Salc				pmap_invalidate_all(pmap);
1384239236Salc				if (pmap != locked_pmap)
1385239236Salc					PMAP_UNLOCK(pmap);
1386239236Salc			}
1387239236Salc			pmap = pc->pc_pmap;
1388239236Salc			/* Avoid deadlock and lock recursion. */
1389239236Salc			if (pmap > locked_pmap)
1390239236Salc				PMAP_LOCK(pmap);
1391239236Salc			else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) {
1392239236Salc				pmap = NULL;
1393239236Salc				TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
1394239236Salc				continue;
1395239236Salc			}
1396239236Salc		}
1397239236Salc
1398239236Salc		/*
1399239236Salc		 * Destroy every non-wired, 4 KB page mapping in the chunk.
1400239236Salc		 */
1401239236Salc		freed = 0;
1402239236Salc		for (field = 0; field < _NPCM; field++) {
1403239236Salc			for (inuse = ~pc->pc_map[field] & pc_freemask[field];
1404239236Salc			    inuse != 0; inuse &= ~(1UL << bit)) {
1405239236Salc				bit = ffsl(inuse) - 1;
1406239236Salc				idx = field * sizeof(inuse) * NBBY + bit;
1407239236Salc				pv = &pc->pc_pventry[idx];
1408239236Salc				va = pv->pv_va;
1409239236Salc				pde = pmap_pde(pmap, va);
1410239236Salc				KASSERT(pde != NULL && *pde != 0,
1411239236Salc				    ("pmap_pv_reclaim: pde"));
1412239236Salc				pte = pmap_pde_to_pte(pde, va);
1413239236Salc				oldpte = *pte;
1414241520Salc				if (pte_test(&oldpte, PTE_W))
1415241520Salc					continue;
1416239236Salc				if (is_kernel_pmap(pmap))
1417239236Salc					*pte = PTE_G;
1418239236Salc				else
1419239236Salc					*pte = 0;
1420239236Salc				m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(oldpte));
1421239236Salc				if (pte_test(&oldpte, PTE_D))
1422239236Salc					vm_page_dirty(m);
1423239236Salc				if (m->md.pv_flags & PV_TABLE_REF)
1424239236Salc					vm_page_aflag_set(m, PGA_REFERENCED);
1425239681Salc				m->md.pv_flags &= ~PV_TABLE_REF;
1426239236Salc				TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1427239681Salc				if (TAILQ_EMPTY(&m->md.pv_list))
1428239236Salc					vm_page_aflag_clear(m, PGA_WRITEABLE);
1429239236Salc				pc->pc_map[field] |= 1UL << bit;
1430239236Salc				pmap_unuse_pt(pmap, va, *pde);
1431239236Salc				freed++;
1432239236Salc			}
1433239236Salc		}
1434239236Salc		if (freed == 0) {
1435239236Salc			TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
1436239236Salc			continue;
1437239236Salc		}
1438239236Salc		/* Every freed mapping is for a 4 KB page. */
1439239236Salc		pmap->pm_stats.resident_count -= freed;
1440239236Salc		PV_STAT(pv_entry_frees += freed);
1441239236Salc		PV_STAT(pv_entry_spare += freed);
1442239236Salc		pv_entry_count -= freed;
1443239236Salc		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1444239236Salc		for (field = 0; field < _NPCM; field++)
1445239236Salc			if (pc->pc_map[field] != pc_freemask[field]) {
1446239236Salc				TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
1447239236Salc				    pc_list);
1448239236Salc				TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
1449239236Salc
1450239236Salc				/*
1451239236Salc				 * One freed pv entry in locked_pmap is
1452239236Salc				 * sufficient.
1453239236Salc				 */
1454239236Salc				if (pmap == locked_pmap)
1455239236Salc					goto out;
1456239236Salc				break;
1457239236Salc			}
1458239236Salc		if (field == _NPCM) {
1459239236Salc			PV_STAT(pv_entry_spare -= _NPCPV);
1460239236Salc			PV_STAT(pc_chunk_count--);
1461239236Salc			PV_STAT(pc_chunk_frees++);
1462239236Salc			/* Entire chunk is free; return it. */
1463239236Salc			m_pc = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(
1464239236Salc			    (vm_offset_t)pc));
1465239236Salc			break;
1466239236Salc		}
1467239236Salc	}
1468239236Salcout:
1469239236Salc	TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru);
1470239236Salc	if (pmap != NULL) {
1471239236Salc		pmap_invalidate_all(pmap);
1472239236Salc		if (pmap != locked_pmap)
1473239236Salc			PMAP_UNLOCK(pmap);
1474239236Salc	}
1475239236Salc	return (m_pc);
1476239236Salc}
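
/*
 * Illustrative sketch (excluded from the build): the deadlock-avoidance
 * rule used by pmap_pv_reclaim() above, restated with pthread mutexes in
 * a hypothetical userland setting.  While holding one lock, we may block
 * only on locks that rank above it in a global order (here, by address);
 * for lower-ranked locks we must trylock and back off.  All names below
 * are inventions for the example.
 */
#if 0
#include <pthread.h>
#include <stdbool.h>

/* Acquire "other" while "held" is owned, without risking deadlock. */
static bool
lock_second_in_order(pthread_mutex_t *held, pthread_mutex_t *other)
{

	if (other == held)
		return (true);			/* already own it */
	if (other > held) {
		pthread_mutex_lock(other);	/* safe to block: ranks higher */
		return (true);
	}
	/* Ranks lower: blocking could deadlock, so only try. */
	return (pthread_mutex_trylock(other) == 0);
}
#endif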
1477239236Salc
1478239236Salc/*
1479178172Simp * free the pv_entry back to the free list
1480178172Simp */
1481239236Salcstatic void
1482239236Salcfree_pv_entry(pmap_t pmap, pv_entry_t pv)
1483178172Simp{
1484239236Salc	struct pv_chunk *pc;
1485239236Salc	int bit, field, idx;
1486178172Simp
1487239317Salc	rw_assert(&pvh_global_lock, RA_WLOCKED);
1488239236Salc	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1489239236Salc	PV_STAT(pv_entry_frees++);
1490239236Salc	PV_STAT(pv_entry_spare++);
1491178172Simp	pv_entry_count--;
1492239236Salc	pc = pv_to_chunk(pv);
1493239236Salc	idx = pv - &pc->pc_pventry[0];
1494239236Salc	field = idx / (sizeof(u_long) * NBBY);
1495239236Salc	bit = idx % (sizeof(u_long) * NBBY);
1496239236Salc	pc->pc_map[field] |= 1ul << bit;
1497239236Salc	for (idx = 0; idx < _NPCM; idx++)
1498239236Salc		if (pc->pc_map[idx] != pc_freemask[idx]) {
1499239236Salc			/*
1500239236Salc			 * 98% of the time, pc is already at the head of the
1501239236Salc			 * list.  If it isn't already, move it to the head.
1502239236Salc			 */
1503239236Salc			if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) !=
1504239236Salc			    pc)) {
1505239236Salc				TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1506239236Salc				TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
1507239236Salc				    pc_list);
1508239236Salc			}
1509239236Salc			return;
1510239236Salc		}
1511239236Salc	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1512239236Salc	free_pv_chunk(pc);
1513178172Simp}
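
/*
 * Illustrative sketch (excluded from the build): how free_pv_entry()
 * splits a pv entry's index within its chunk into a word ("field") and a
 * bit position in the free mask, then marks the entry free.  EX_NPCM is
 * an assumption for the example; the kernel derives _NPCM from the real
 * chunk layout.
 */
#if 0
#include <assert.h>
#include <limits.h>

#define	EX_NPCM	3				/* words in the free mask */
#define	EX_BPW	(sizeof(unsigned long) * CHAR_BIT)

static void
mark_entry_free(unsigned long map[EX_NPCM], int idx)
{
	int field = idx / (int)EX_BPW;		/* which word */
	int bit = idx % (int)EX_BPW;		/* which bit in that word */

	assert(field < EX_NPCM);
	map[field] |= 1UL << bit;		/* set bit => entry is free */
}
#endif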
1514178172Simp
1515239236Salcstatic void
1516239236Salcfree_pv_chunk(struct pv_chunk *pc)
1517239236Salc{
1518239236Salc	vm_page_t m;
1519239236Salc
1520239236Salc	TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1521239236Salc	PV_STAT(pv_entry_spare -= _NPCPV);
1522239236Salc	PV_STAT(pc_chunk_count--);
1523239236Salc	PV_STAT(pc_chunk_frees++);
1524239236Salc	/* entire chunk is free, return it */
1525239236Salc	m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS((vm_offset_t)pc));
1526239236Salc	vm_page_unwire(m, 0);
1527239236Salc	vm_page_free(m);
1528239236Salc}
1529239236Salc
1530178172Simp/*
1531178172Simp * get a new pv_entry, allocating a block from the system
1532178172Simp * when needed.
1533178172Simp */
1534178172Simpstatic pv_entry_t
1535239236Salcget_pv_entry(pmap_t pmap, boolean_t try)
1536178172Simp{
1537239236Salc	struct pv_chunk *pc;
1538239236Salc	pv_entry_t pv;
1539188507Simp	vm_page_t m;
1540239236Salc	int bit, field, idx;
1541178172Simp
1542239317Salc	rw_assert(&pvh_global_lock, RA_WLOCKED);
1543239236Salc	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1544239236Salc	PV_STAT(pv_entry_allocs++);
1545239236Salc	pv_entry_count++;
1546188507Simpretry:
1547239236Salc	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
1548239236Salc	if (pc != NULL) {
1549239236Salc		for (field = 0; field < _NPCM; field++) {
1550239236Salc			if (pc->pc_map[field]) {
1551239236Salc				bit = ffsl(pc->pc_map[field]) - 1;
1552239236Salc				break;
1553239236Salc			}
1554188507Simp		}
1555239236Salc		if (field < _NPCM) {
1556239236Salc			idx = field * sizeof(pc->pc_map[field]) * NBBY + bit;
1557239236Salc			pv = &pc->pc_pventry[idx];
1558239236Salc			pc->pc_map[field] &= ~(1ul << bit);
1559239236Salc			/* If this was the last free entry, move the chunk to the tail. */
1560239236Salc			for (field = 0; field < _NPCM; field++)
1561239236Salc				if (pc->pc_map[field] != 0) {
1562239236Salc					PV_STAT(pv_entry_spare--);
1563239236Salc					return (pv);	/* not full, return */
1564239236Salc				}
1565239236Salc			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1566239236Salc			TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
1567239236Salc			PV_STAT(pv_entry_spare--);
1568239236Salc			return (pv);
1569208659Salc		}
1570188507Simp	}
1571239236Salc	/* No free items, allocate another chunk */
1572239236Salc	m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, VM_ALLOC_NORMAL |
1573239236Salc	    VM_ALLOC_WIRED);
1574239236Salc	if (m == NULL) {
1575239236Salc		if (try) {
1576239236Salc			pv_entry_count--;
1577239236Salc			PV_STAT(pc_chunk_tryfail++);
1578239236Salc			return (NULL);
1579239236Salc		}
1580239236Salc		m = pmap_pv_reclaim(pmap);
1581239236Salc		if (m == NULL)
1582188507Simp			goto retry;
1583188507Simp	}
1584239236Salc	PV_STAT(pc_chunk_count++);
1585239236Salc	PV_STAT(pc_chunk_allocs++);
1586239236Salc	pc = (struct pv_chunk *)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
1587239236Salc	pc->pc_pmap = pmap;
1588239236Salc	pc->pc_map[0] = pc_freemask[0] & ~1ul;	/* preallocated bit 0 */
1589239236Salc	for (field = 1; field < _NPCM; field++)
1590239236Salc		pc->pc_map[field] = pc_freemask[field];
1591239236Salc	TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
1592239236Salc	pv = &pc->pc_pventry[0];
1593239236Salc	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1594239236Salc	PV_STAT(pv_entry_spare += _NPCPV - 1);
1595239236Salc	return (pv);
1596178172Simp}
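
/*
 * Illustrative sketch (excluded from the build): claiming the first free
 * entry from a chunk's free mask with ffsl(), mirroring the scan in
 * get_pv_entry() above.  Returns the entry index, or -1 if the mask is
 * exhausted.  EX_NPCM is an assumption for the example.
 */
#if 0
#include <limits.h>
#include <strings.h>

#define	EX_NPCM	3

static int
alloc_from_mask(unsigned long map[EX_NPCM])
{
	int bit, field;

	for (field = 0; field < EX_NPCM; field++) {
		if (map[field] != 0) {
			bit = ffsl((long)map[field]) - 1; /* lowest set bit */
			map[field] &= ~(1UL << bit);	  /* claim it */
			return (field *
			    (int)(sizeof(unsigned long) * CHAR_BIT) + bit);
		}
	}
	return (-1);
}
#endif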
1597178172Simp
1598208665Salcstatic pv_entry_t
1599208665Salcpmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1600178172Simp{
1601178172Simp	pv_entry_t pv;
1602178172Simp
1603239317Salc	rw_assert(&pvh_global_lock, RA_WLOCKED);
1604239236Salc	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
1605239236Salc		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
1606239236Salc			TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
1607239236Salc			break;
1608178172Simp		}
1609178172Simp	}
1610208665Salc	return (pv);
1611208665Salc}
1612178172Simp
1613208665Salcstatic void
1614208665Salcpmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1615208665Salc{
1616208665Salc	pv_entry_t pv;
1617208665Salc
1618208665Salc	pv = pmap_pvh_remove(pvh, pmap, va);
1619208665Salc	KASSERT(pv != NULL, ("pmap_pvh_free: pv not found, pa %lx va %lx",
1620240539Sed	     (u_long)VM_PAGE_TO_PHYS(__containerof(pvh, struct vm_page, md)),
1621208686Salc	     (u_long)va));
1622239236Salc	free_pv_entry(pmap, pv);
1623178172Simp}
1624178172Simp
1625178172Simpstatic void
1626208665Salcpmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
1627178172Simp{
1628178172Simp
1629239317Salc	rw_assert(&pvh_global_lock, RA_WLOCKED);
1630208665Salc	pmap_pvh_free(&m->md, pmap, va);
1631208665Salc	if (TAILQ_EMPTY(&m->md.pv_list))
1632225418Skib		vm_page_aflag_clear(m, PGA_WRITEABLE);
1633178172Simp}
1634178172Simp
1635178172Simp/*
1636191300Salc * Conditionally create a pv entry.
1637191300Salc */
1638191300Salcstatic boolean_t
1639191300Salcpmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte, vm_offset_t va,
1640191300Salc    vm_page_t m)
1641191300Salc{
1642191300Salc	pv_entry_t pv;
1643191300Salc
1644239317Salc	rw_assert(&pvh_global_lock, RA_WLOCKED);
1645191300Salc	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1646239236Salc	if ((pv = get_pv_entry(pmap, TRUE)) != NULL) {
1647191300Salc		pv->pv_va = va;
1648191300Salc		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
1649191300Salc		return (TRUE);
1650191300Salc	} else
1651191300Salc		return (FALSE);
1652191300Salc}
1653191300Salc
1654191300Salc/*
1655178172Simp * pmap_remove_pte: do the things to unmap a page in a process
1656178172Simp */
1657178172Simpstatic int
1658239152Salcpmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va,
1659239152Salc    pd_entry_t pde)
1660178172Simp{
1661178172Simp	pt_entry_t oldpte;
1662178172Simp	vm_page_t m;
1663217345Sjchandra	vm_paddr_t pa;
1664178172Simp
1665239317Salc	rw_assert(&pvh_global_lock, RA_WLOCKED);
1666178172Simp	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1667178172Simp
1668240241Salc	/*
1669240241Salc	 * Write back all cache lines from the page being unmapped.
1670240241Salc	 */
1671240241Salc	mips_dcache_wbinv_range_index(va, PAGE_SIZE);
1672240241Salc
1673211068Sjchandra	oldpte = *ptq;
1674178172Simp	if (is_kernel_pmap(pmap))
1675178172Simp		*ptq = PTE_G;
1676211068Sjchandra	else
1677211068Sjchandra		*ptq = 0;
1678178172Simp
1679209482Sjchandra	if (pte_test(&oldpte, PTE_W))
1680178172Simp		pmap->pm_stats.wired_count -= 1;
1681178172Simp
1682178172Simp	pmap->pm_stats.resident_count -= 1;
1683178172Simp
1684239964Salc	if (pte_test(&oldpte, PTE_MANAGED)) {
1685239964Salc		pa = TLBLO_PTE_TO_PA(oldpte);
1686178172Simp		m = PHYS_TO_VM_PAGE(pa);
1687209482Sjchandra		if (pte_test(&oldpte, PTE_D)) {
1688211958Sjchandra			KASSERT(!pte_test(&oldpte, PTE_RO),
1689217345Sjchandra			    ("%s: modified page not writable: va: %p, pte: %#jx",
1690217345Sjchandra			    __func__, (void *)va, (uintmax_t)oldpte));
1691187319Sgonzo			vm_page_dirty(m);
1692178172Simp		}
1693178172Simp		if (m->md.pv_flags & PV_TABLE_REF)
1694225418Skib			vm_page_aflag_set(m, PGA_REFERENCED);
1695239681Salc		m->md.pv_flags &= ~PV_TABLE_REF;
1696178172Simp
1697178172Simp		pmap_remove_entry(pmap, m, va);
1698178172Simp	}
1699239152Salc	return (pmap_unuse_pt(pmap, va, pde));
1700178172Simp}
1701178172Simp
1702178172Simp/*
1703178172Simp * Remove a single page from a process address space
1704178172Simp */
1705178172Simpstatic void
1706178172Simppmap_remove_page(struct pmap *pmap, vm_offset_t va)
1707178172Simp{
1708239152Salc	pd_entry_t *pde;
1709209482Sjchandra	pt_entry_t *ptq;
1710178172Simp
1711239317Salc	rw_assert(&pvh_global_lock, RA_WLOCKED);
1712178172Simp	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1713239152Salc	pde = pmap_pde(pmap, va);
1714239152Salc	if (pde == NULL || *pde == 0)
1715239152Salc		return;
1716239152Salc	ptq = pmap_pde_to_pte(pde, va);
1717178172Simp
1718178172Simp	/*
1719240241Salc	 * If the pte is not valid, there is nothing to remove.
1720178172Simp	 */
1721240241Salc	if (!pte_test(ptq, PTE_V))
1722178172Simp		return;
1723202046Simp
1724239152Salc	(void)pmap_remove_pte(pmap, ptq, va, *pde);
1725178172Simp	pmap_invalidate_page(pmap, va);
1726178172Simp}
1727178172Simp
1728178172Simp/*
1729178172Simp *	Remove the given range of addresses from the specified map.
1730178172Simp *
1731178172Simp *	It is assumed that the start and end are properly
1732178172Simp *	rounded to the page size.
1733178172Simp */
1734178172Simpvoid
1735241123Salcpmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1736178172Simp{
1737210846Sjchandra	pd_entry_t *pde, *pdpe;
1738210846Sjchandra	pt_entry_t *pte;
1739241123Salc	vm_offset_t va, va_next;
1740178172Simp
1741241123Salc	/*
1742241123Salc	 * Perform an unsynchronized read.  This is, however, safe.
1743241123Salc	 */
1744178172Simp	if (pmap->pm_stats.resident_count == 0)
1745178172Simp		return;
1746178172Simp
1747239317Salc	rw_wlock(&pvh_global_lock);
1748178172Simp	PMAP_LOCK(pmap);
1749178172Simp
1750178172Simp	/*
1751178172Simp	 * Special handling for removing a single page: a very common
1752178172Simp	 * operation that is easy to short-circuit.
1753178172Simp	 */
1754178172Simp	if ((sva + PAGE_SIZE) == eva) {
1755178172Simp		pmap_remove_page(pmap, sva);
1756178172Simp		goto out;
1757178172Simp	}
1758210846Sjchandra	for (; sva < eva; sva = va_next) {
1759210846Sjchandra		pdpe = pmap_segmap(pmap, sva);
1760210846Sjchandra#ifdef __mips_n64
1761210846Sjchandra		if (*pdpe == 0) {
1762210846Sjchandra			va_next = (sva + NBSEG) & ~SEGMASK;
1763210846Sjchandra			if (va_next < sva)
1764210846Sjchandra				va_next = eva;
1765178172Simp			continue;
1766178172Simp		}
1767210846Sjchandra#endif
1768210846Sjchandra		va_next = (sva + NBPDR) & ~PDRMASK;
1769210846Sjchandra		if (va_next < sva)
1770210846Sjchandra			va_next = eva;
1771210846Sjchandra
1772210846Sjchandra		pde = pmap_pdpe_to_pde(pdpe, sva);
1773241123Salc		if (*pde == NULL)
1774210846Sjchandra			continue;
1775241123Salc
1776241123Salc		/*
1777241123Salc		 * Limit our scan to either the end of the va represented
1778241123Salc		 * by the current page table page, or to the end of the
1779241123Salc		 * range being removed.
1780241123Salc		 */
1781210846Sjchandra		if (va_next > eva)
1782210846Sjchandra			va_next = eva;
1783241123Salc
1784241123Salc		va = va_next;
1785240241Salc		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
1786240241Salc		    sva += PAGE_SIZE) {
1787241123Salc			if (!pte_test(pte, PTE_V)) {
1788241123Salc				if (va != va_next) {
1789241123Salc					pmap_invalidate_range(pmap, va, sva);
1790241123Salc					va = va_next;
1791241123Salc				}
1792240241Salc				continue;
1793241123Salc			}
1794241123Salc			if (va == va_next)
1795241123Salc				va = sva;
1796241123Salc			if (pmap_remove_pte(pmap, pte, sva, *pde)) {
1797241123Salc				sva += PAGE_SIZE;
1798241123Salc				break;
1799241123Salc			}
1800210846Sjchandra		}
1801241123Salc		if (va != va_next)
1802241123Salc			pmap_invalidate_range(pmap, va, sva);
1803178172Simp	}
1804178172Simpout:
1805239317Salc	rw_wunlock(&pvh_global_lock);
1806178172Simp	PMAP_UNLOCK(pmap);
1807178172Simp}
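
/*
 * Illustrative sketch (excluded from the build): the boundary arithmetic
 * used by the removal loop above.  Each step advances to the next
 * page-directory boundary, guards against address-space wraparound, and
 * clamps to the end of the range.  EX_NBPDR stands in for NBPDR and is
 * an assumption for the example.
 */
#if 0
#define	EX_NBPDR	(1UL << 22)		/* e.g. a 4 MB directory span */
#define	EX_PDRMASK	(EX_NBPDR - 1)

static unsigned long
next_pdr_boundary(unsigned long sva, unsigned long eva)
{
	unsigned long va_next;

	va_next = (sva + EX_NBPDR) & ~EX_PDRMASK;
	if (va_next < sva)	/* wrapped past the top of the space */
		va_next = eva;
	if (va_next > eva)	/* never scan past the end of the range */
		va_next = eva;
	return (va_next);
}
#endif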
1808178172Simp
1809178172Simp/*
1810178172Simp *	Routine:	pmap_remove_all
1811178172Simp *	Function:
1812178172Simp *		Removes this physical page from
1813178172Simp *		all physical maps in which it resides.
1814178172Simp *		Reflects back modify bits to the pager.
1815178172Simp *
1816178172Simp *	Notes:
1817178172Simp *		Original versions of this routine were very
1818178172Simp *		inefficient because they iteratively called
1819178172Simp *		pmap_remove (slow...)
1820178172Simp */
1821178172Simp
1822178172Simpvoid
1823178172Simppmap_remove_all(vm_page_t m)
1824178172Simp{
1825209482Sjchandra	pv_entry_t pv;
1826239236Salc	pmap_t pmap;
1827239152Salc	pd_entry_t *pde;
1828209482Sjchandra	pt_entry_t *pte, tpte;
1829178172Simp
1830224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1831223677Salc	    ("pmap_remove_all: page %p is not managed", m));
1832239317Salc	rw_wlock(&pvh_global_lock);
1833178172Simp
1834178172Simp	if (m->md.pv_flags & PV_TABLE_REF)
1835225418Skib		vm_page_aflag_set(m, PGA_REFERENCED);
1836178172Simp
1837178172Simp	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
1838239236Salc		pmap = PV_PMAP(pv);
1839239236Salc		PMAP_LOCK(pmap);
1840202046Simp
1841202046Simp		/*
1842202046Simp		 * If this is the last mapping, write back all caches from
1843202046Simp		 * the page being destroyed.
1844202046Simp		 */
1845239236Salc		if (TAILQ_NEXT(pv, pv_list) == NULL)
1846206746Sjmallett			mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
1847202046Simp
1848239236Salc		pmap->pm_stats.resident_count--;
1849178172Simp
1850239236Salc		pde = pmap_pde(pmap, pv->pv_va);
1851239152Salc		KASSERT(pde != NULL && *pde != 0, ("pmap_remove_all: pde"));
1852239152Salc		pte = pmap_pde_to_pte(pde, pv->pv_va);
1853178172Simp
1854211068Sjchandra		tpte = *pte;
1855239236Salc		if (is_kernel_pmap(pmap))
1856178172Simp			*pte = PTE_G;
1857211068Sjchandra		else
1858211068Sjchandra			*pte = 0;
1859178172Simp
1860209482Sjchandra		if (pte_test(&tpte, PTE_W))
1861239236Salc			pmap->pm_stats.wired_count--;
1862178172Simp
1863178172Simp		/*
1864178172Simp		 * Update the vm_page_t clean and reference bits.
1865178172Simp		 */
1866209482Sjchandra		if (pte_test(&tpte, PTE_D)) {
1867211958Sjchandra			KASSERT(!pte_test(&tpte, PTE_RO),
1868217345Sjchandra			    ("%s: modified page not writable: va: %p, pte: %#jx",
1869217345Sjchandra			    __func__, (void *)pv->pv_va, (uintmax_t)tpte));
1870178606Salc			vm_page_dirty(m);
1871178172Simp		}
1872239236Salc		pmap_invalidate_page(pmap, pv->pv_va);
1873178172Simp
1874178172Simp		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1875239236Salc		pmap_unuse_pt(pmap, pv->pv_va, *pde);
1876239236Salc		free_pv_entry(pmap, pv);
1877239236Salc		PMAP_UNLOCK(pmap);
1878178172Simp	}
1879178172Simp
1880225418Skib	vm_page_aflag_clear(m, PGA_WRITEABLE);
1881239681Salc	m->md.pv_flags &= ~PV_TABLE_REF;
1882239317Salc	rw_wunlock(&pvh_global_lock);
1883178172Simp}
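
/*
 * Illustrative sketch (excluded from the build): the reverse-mapping idea
 * behind pmap_remove_all().  Each physical page carries a list of
 * (pmap, va) pairs, one per mapping, so tearing down the page visits each
 * pair exactly once.  The structures below are simplified inventions for
 * the example.
 */
#if 0
#include <sys/queue.h>

struct ex_pv_entry {
	void			*pv_pmap;	/* owning address space */
	unsigned long		 pv_va;		/* va of this mapping */
	TAILQ_ENTRY(ex_pv_entry) pv_link;
};
TAILQ_HEAD(ex_pv_list, ex_pv_entry);

static void
ex_remove_all(struct ex_pv_list *pvl,
    void (*remove_one)(void *pmap, unsigned long va))
{
	struct ex_pv_entry *pv;

	while ((pv = TAILQ_FIRST(pvl)) != NULL) {
		remove_one(pv->pv_pmap, pv->pv_va);
		TAILQ_REMOVE(pvl, pv, pv_link);
	}
}
#endif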
1884178172Simp
1885178172Simp/*
1886178172Simp *	Set the physical protection on the
1887178172Simp *	specified range of this map as requested.
1888178172Simp */
1889178172Simpvoid
1890178172Simppmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
1891178172Simp{
1892241313Salc	pt_entry_t pbits, *pte;
1893210846Sjchandra	pd_entry_t *pde, *pdpe;
1894241313Salc	vm_offset_t va, va_next;
1895241313Salc	vm_paddr_t pa;
1896241313Salc	vm_page_t m;
1897178172Simp
1898178172Simp	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1899178172Simp		pmap_remove(pmap, sva, eva);
1900178172Simp		return;
1901178172Simp	}
1902178172Simp	if (prot & VM_PROT_WRITE)
1903178172Simp		return;
1904178172Simp
1905178172Simp	PMAP_LOCK(pmap);
1906210846Sjchandra	for (; sva < eva; sva = va_next) {
1907210846Sjchandra		pdpe = pmap_segmap(pmap, sva);
1908210846Sjchandra#ifdef __mips_n64
1909210846Sjchandra		if (*pdpe == 0) {
1910210846Sjchandra			va_next = (sva + NBSEG) & ~SEGMASK;
1911210846Sjchandra			if (va_next < sva)
1912210846Sjchandra				va_next = eva;
1913178172Simp			continue;
1914178172Simp		}
1915210846Sjchandra#endif
1916210846Sjchandra		va_next = (sva + NBPDR) & ~PDRMASK;
1917210846Sjchandra		if (va_next < sva)
1918210846Sjchandra			va_next = eva;
1919210846Sjchandra
1920210846Sjchandra		pde = pmap_pdpe_to_pde(pdpe, sva);
1921240185Salc		if (*pde == NULL)
1922178172Simp			continue;
1923241313Salc
1924241313Salc		/*
1925241313Salc		 * Limit our scan to either the end of the va represented
1926241313Salc		 * by the current page table page, or to the end of the
1927241313Salc		 * range being write protected.
1928241313Salc		 */
1929210846Sjchandra		if (va_next > eva)
1930210846Sjchandra			va_next = eva;
1931178172Simp
1932241313Salc		va = va_next;
1933210846Sjchandra		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
1934241313Salc		    sva += PAGE_SIZE) {
1935241313Salc			pbits = *pte;
1936241313Salc			if (!pte_test(&pbits, PTE_V) || pte_test(&pbits,
1937241313Salc			    PTE_RO)) {
1938241313Salc				if (va != va_next) {
1939241313Salc					pmap_invalidate_range(pmap, va, sva);
1940241313Salc					va = va_next;
1941241313Salc				}
1942210846Sjchandra				continue;
1943210846Sjchandra			}
1944210846Sjchandra			pte_set(&pbits, PTE_RO);
1945241313Salc			if (pte_test(&pbits, PTE_D)) {
1946241313Salc				pte_clear(&pbits, PTE_D);
1947241313Salc				if (pte_test(&pbits, PTE_MANAGED)) {
1948241313Salc					pa = TLBLO_PTE_TO_PA(pbits);
1949241313Salc					m = PHYS_TO_VM_PAGE(pa);
1950241313Salc					vm_page_dirty(m);
1951241313Salc				}
1952241313Salc				if (va == va_next)
1953241313Salc					va = sva;
1954241313Salc			} else {
1955241313Salc				/*
1956241313Salc				 * Unless PTE_D is set, any TLB entries
1957241313Salc				 * mapping "sva" don't allow write access, so
1958241313Salc				 * they needn't be invalidated.
1959241313Salc				 */
1960241313Salc				if (va != va_next) {
1961241313Salc					pmap_invalidate_range(pmap, va, sva);
1962241313Salc					va = va_next;
1963241313Salc				}
1964210846Sjchandra			}
1965241313Salc			*pte = pbits;
1966178172Simp		}
1967241313Salc		if (va != va_next)
1968241313Salc			pmap_invalidate_range(pmap, va, sva);
1969178172Simp	}
1970178172Simp	PMAP_UNLOCK(pmap);
1971178172Simp}
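
/*
 * Illustrative sketch (excluded from the build): the per-pte
 * read-modify-write performed by pmap_protect() above; set the read-only
 * bit and, if the entry was dirty, report that so the caller can mark
 * the backing page dirty before the bit is lost.  The bit values are
 * assumptions for the example.
 */
#if 0
#include <stdbool.h>

#define	EX_PTE_D	0x04UL		/* dirty (write-enabled) */
#define	EX_PTE_RO	0x08UL		/* read-only */

/* Returns true if the caller must dirty the backing page. */
static bool
write_protect_pte(unsigned long *pte)
{
	unsigned long pbits = *pte;
	bool was_dirty = (pbits & EX_PTE_D) != 0;

	pbits &= ~EX_PTE_D;
	pbits |= EX_PTE_RO;
	*pte = pbits;
	return (was_dirty);
}
#endif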
1972178172Simp
1973178172Simp/*
1974178172Simp *	Insert the given physical page (p) at
1975178172Simp *	the specified virtual address (v) in the
1976178172Simp *	target physical map with the protection requested.
1977178172Simp *
1978178172Simp *	If specified, the page will be wired down, meaning
1979178172Simp *	that the related pte cannot be reclaimed.
1980178172Simp *
1981178172Simp *	NB:  This is the only routine which MAY NOT lazy-evaluate
1982178172Simp *	or lose information.  That is, this routine must actually
1983178172Simp *	insert this page into the given map NOW.
1984178172Simp */
1985270439Skibint
1986270439Skibpmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
1987270439Skib    u_int flags, int8_t psind __unused)
1988178172Simp{
1989217345Sjchandra	vm_paddr_t pa, opa;
1990209482Sjchandra	pt_entry_t *pte;
1991178172Simp	pt_entry_t origpte, newpte;
1992208665Salc	pv_entry_t pv;
1993178172Simp	vm_page_t mpte, om;
1994178172Simp
1995178172Simp	va &= ~PAGE_MASK;
1996208175Salc 	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
1997240000Salc	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
1998240000Salc	    va >= kmi.clean_eva,
1999240000Salc	    ("pmap_enter: managed mapping within the clean submap"));
2000270439Skib	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
2001270439Skib		VM_OBJECT_ASSERT_LOCKED(m->object);
2002239964Salc	pa = VM_PAGE_TO_PHYS(m);
2003270439Skib	newpte = TLBLO_PA_TO_PFN(pa) | init_pte_prot(m, flags, prot);
2004270439Skib	if ((flags & PMAP_ENTER_WIRED) != 0)
2005240000Salc		newpte |= PTE_W;
2006240000Salc	if (is_kernel_pmap(pmap))
2007240000Salc		newpte |= PTE_G;
2008240000Salc	if (is_cacheable_mem(pa))
2009240000Salc		newpte |= PTE_C_CACHE;
2010240000Salc	else
2011240000Salc		newpte |= PTE_C_UNCACHED;
2012178172Simp
2013178172Simp	mpte = NULL;
2014178172Simp
2015239317Salc	rw_wlock(&pvh_global_lock);
2016178172Simp	PMAP_LOCK(pmap);
2017178172Simp
2018178172Simp	/*
2019178172Simp	 * In the case that a page table page is not resident, we are
2020178172Simp	 * creating it here.
2021178172Simp	 */
2022178172Simp	if (va < VM_MAXUSER_ADDRESS) {
2023270439Skib		mpte = pmap_allocpte(pmap, va, flags);
2024270439Skib		if (mpte == NULL) {
2025270439Skib			KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0,
2026270439Skib			    ("pmap_allocpte failed with sleep allowed"));
2027270439Skib			rw_wunlock(&pvh_global_lock);
2028270439Skib			PMAP_UNLOCK(pmap);
2029270439Skib			return (KERN_RESOURCE_SHORTAGE);
2030270439Skib		}
2031178172Simp	}
2032178172Simp	pte = pmap_pte(pmap, va);
2033178172Simp
2034178172Simp	/*
2035178172Simp	 * Page Directory table entry not valid, we need a new PT page
2036178172Simp	 */
2037178172Simp	if (pte == NULL) {
2038211958Sjchandra		panic("pmap_enter: invalid page directory, pdir=%p, va=%p",
2039202046Simp		    (void *)pmap->pm_segtab, (void *)va);
2040178172Simp	}
2041178172Simp	om = NULL;
2042178172Simp	origpte = *pte;
2043209243Sjchandra	opa = TLBLO_PTE_TO_PA(origpte);
2044178172Simp
2045178172Simp	/*
2046178172Simp	 * Mapping has not changed, must be protection or wiring change.
2047178172Simp	 */
2048209482Sjchandra	if (pte_test(&origpte, PTE_V) && opa == pa) {
2049178172Simp		/*
2050178172Simp		 * Wiring change, just update stats. We don't worry about
2051178172Simp		 * wiring PT pages as they remain resident as long as there
2052178172Simp		 * are valid mappings in them. Hence, if a user page is
2053178172Simp		 * wired, the PT page will be also.
2054178172Simp		 */
2055270439Skib		if (pte_test(&newpte, PTE_W) && !pte_test(&origpte, PTE_W))
2056178172Simp			pmap->pm_stats.wired_count++;
2057270439Skib		else if (!pte_test(&newpte, PTE_W) && pte_test(&origpte,
2058270439Skib		    PTE_W))
2059178172Simp			pmap->pm_stats.wired_count--;
2060178172Simp
2061211958Sjchandra		KASSERT(!pte_test(&origpte, PTE_D | PTE_RO),
2062217345Sjchandra		    ("%s: modified page not writable: va: %p, pte: %#jx",
2063217345Sjchandra		    __func__, (void *)va, (uintmax_t)origpte));
2064178172Simp
2065178172Simp		/*
2066178172Simp		 * Remove extra pte reference
2067178172Simp		 */
2068178172Simp		if (mpte)
2069178172Simp			mpte->wire_count--;
2070178172Simp
2071239964Salc		if (pte_test(&origpte, PTE_MANAGED)) {
2072240241Salc			m->md.pv_flags |= PV_TABLE_REF;
2073178172Simp			om = m;
2074239964Salc			newpte |= PTE_MANAGED;
2075240000Salc			if (!pte_test(&newpte, PTE_RO))
2076240000Salc				vm_page_aflag_set(m, PGA_WRITEABLE);
2077178172Simp		}
2078178172Simp		goto validate;
2079178172Simp	}
2080208665Salc
2081208665Salc	pv = NULL;
2082208665Salc
2083178172Simp	/*
2084178172Simp	 * Mapping has changed, invalidate old range and fall through to
2085178172Simp	 * handle validating new mapping.
2086178172Simp	 */
2087178172Simp	if (opa) {
2088209482Sjchandra		if (pte_test(&origpte, PTE_W))
2089178172Simp			pmap->pm_stats.wired_count--;
2090178172Simp
2091239964Salc		if (pte_test(&origpte, PTE_MANAGED)) {
2092178172Simp			om = PHYS_TO_VM_PAGE(opa);
2093208665Salc			pv = pmap_pvh_remove(&om->md, pmap, va);
2094178172Simp		}
2095178172Simp		if (mpte != NULL) {
2096178172Simp			mpte->wire_count--;
2097178172Simp			KASSERT(mpte->wire_count > 0,
2098178172Simp			    ("pmap_enter: missing reference to page table page,"
2099202046Simp			    " va: %p", (void *)va));
2100178172Simp		}
2101178172Simp	} else
2102178172Simp		pmap->pm_stats.resident_count++;
2103178172Simp
2104178172Simp	/*
2105240000Salc	 * Enter on the PV list if part of our managed memory.
2106178172Simp	 */
2107224746Skib	if ((m->oflags & VPO_UNMANAGED) == 0) {
2108240241Salc		m->md.pv_flags |= PV_TABLE_REF;
2109208665Salc		if (pv == NULL)
2110239236Salc			pv = get_pv_entry(pmap, FALSE);
2111208665Salc		pv->pv_va = va;
2112208665Salc		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
2113239964Salc		newpte |= PTE_MANAGED;
2114240000Salc		if (!pte_test(&newpte, PTE_RO))
2115240000Salc			vm_page_aflag_set(m, PGA_WRITEABLE);
2116208665Salc	} else if (pv != NULL)
2117239236Salc		free_pv_entry(pmap, pv);
2118208665Salc
2119178172Simp	/*
2120178172Simp	 * Increment counters
2121178172Simp	 */
2122270439Skib	if (pte_test(&newpte, PTE_W))
2123178172Simp		pmap->pm_stats.wired_count++;
2124178172Simp
2125178172Simpvalidate:
2126178172Simp
2127187301Sgonzo#ifdef PMAP_DEBUG
2128209482Sjchandra	printf("pmap_enter:  va: %p -> pa: %p\n", (void *)va, (void *)pa);
2129187301Sgonzo#endif
2130178172Simp
2131178172Simp	/*
2132178172Simp	 * if the mapping or permission bits are different, we need to
2133178172Simp	 * update the pte.
2134178172Simp	 */
2135178172Simp	if (origpte != newpte) {
2136240241Salc		*pte = newpte;
2137209482Sjchandra		if (pte_test(&origpte, PTE_V)) {
2138239964Salc			if (pte_test(&origpte, PTE_MANAGED) && opa != pa) {
2139178172Simp				if (om->md.pv_flags & PV_TABLE_REF)
2140225418Skib					vm_page_aflag_set(om, PGA_REFERENCED);
2141239681Salc				om->md.pv_flags &= ~PV_TABLE_REF;
2142178172Simp			}
2143209482Sjchandra			if (pte_test(&origpte, PTE_D)) {
2144209482Sjchandra				KASSERT(!pte_test(&origpte, PTE_RO),
2145178172Simp				    ("pmap_enter: modified page not writable:"
2146217345Sjchandra				    " va: %p, pte: %#jx", (void *)va, (uintmax_t)origpte));
2147239964Salc				if (pte_test(&origpte, PTE_MANAGED))
2148178172Simp					vm_page_dirty(om);
2149178172Simp			}
2150239964Salc			if (pte_test(&origpte, PTE_MANAGED) &&
2151208665Salc			    TAILQ_EMPTY(&om->md.pv_list))
2152225418Skib				vm_page_aflag_clear(om, PGA_WRITEABLE);
2153240241Salc			pmap_update_page(pmap, va, newpte);
2154178172Simp		}
2155178172Simp	}
2156178172Simp
2157178172Simp	/*
2158218909Sbrucec	 * Sync I & D caches for executable pages.  Do this only if the
2159178172Simp	 * target pmap belongs to the current process.  Otherwise, an
2160178172Simp	 * unresolvable TLB miss may occur.
2161178172Simp	 */
2162178172Simp	if (!is_kernel_pmap(pmap) && (pmap == &curproc->p_vmspace->vm_pmap) &&
2163178172Simp	    (prot & VM_PROT_EXECUTE)) {
2164206746Sjmallett		mips_icache_sync_range(va, PAGE_SIZE);
2165206746Sjmallett		mips_dcache_wbinv_range(va, PAGE_SIZE);
2166178172Simp	}
2167239317Salc	rw_wunlock(&pvh_global_lock);
2168178172Simp	PMAP_UNLOCK(pmap);
2169270439Skib	return (KERN_SUCCESS);
2170178172Simp}
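
/*
 * Illustrative sketch (excluded from the build): composing a new pte the
 * way pmap_enter() does above: a page frame number plus validity,
 * protection, wiring, and cacheability bits.  All constants, including
 * the pfn field position, are assumptions for the example; the kernel
 * uses TLBLO_PA_TO_PFN() and the real PTE_* definitions.
 */
#if 0
#define	EX_PAGE_SHIFT	12
#define	EX_PFN_SHIFT	6		/* assumed pfn field position */
#define	EX_PTE_V	0x01UL
#define	EX_PTE_RO	0x08UL
#define	EX_PTE_W	0x10UL		/* software wired bit */
#define	EX_PTE_CACHE	0x20UL

static unsigned long
make_pte(unsigned long pa, int writable, int wired, int cacheable)
{
	unsigned long pte;

	pte = (pa >> EX_PAGE_SHIFT) << EX_PFN_SHIFT;
	pte |= EX_PTE_V;
	if (!writable)
		pte |= EX_PTE_RO;
	if (wired)
		pte |= EX_PTE_W;
	if (cacheable)
		pte |= EX_PTE_CACHE;
	return (pte);
}
#endif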
2171178172Simp
2172178172Simp/*
2173178172Simp * This code makes some *MAJOR* assumptions:
2174178172Simp * 1. The current pmap and the given pmap exist.
2175178172Simp * 2. Not wired.
2176178172Simp * 3. Read access.
2177178172Simp * 4. No page table pages.
2178178172Simp * but is *MUCH* faster than pmap_enter...
2179178172Simp */
2180178172Simp
2181178172Simpvoid
2182178172Simppmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
2183178172Simp{
2184191300Salc
2185239317Salc	rw_wlock(&pvh_global_lock);
2186191300Salc	PMAP_LOCK(pmap);
2187191300Salc	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
2188239317Salc	rw_wunlock(&pvh_global_lock);
2189191300Salc	PMAP_UNLOCK(pmap);
2190191300Salc}
2191191300Salc
2192191300Salcstatic vm_page_t
2193191300Salcpmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
2194191300Salc    vm_prot_t prot, vm_page_t mpte)
2195191300Salc{
2196178172Simp	pt_entry_t *pte;
2197217345Sjchandra	vm_paddr_t pa;
2198178172Simp
2199178606Salc	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
2200224746Skib	    (m->oflags & VPO_UNMANAGED) != 0,
2201191300Salc	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
2202239317Salc	rw_assert(&pvh_global_lock, RA_WLOCKED);
2203191300Salc	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2204191300Salc
2205178172Simp	/*
2206178172Simp	 * In the case that a page table page is not resident, we are
2207178172Simp	 * creating it here.
2208178172Simp	 */
2209178172Simp	if (va < VM_MAXUSER_ADDRESS) {
2210210846Sjchandra		pd_entry_t *pde;
2211178172Simp		unsigned ptepindex;
2212178172Simp
2213178172Simp		/*
2214178172Simp		 * Calculate pagetable page index
2215178172Simp		 * Calculate the page table page index
2216210846Sjchandra		ptepindex = pmap_pde_pindex(va);
2217178172Simp		if (mpte && (mpte->pindex == ptepindex)) {
2218178172Simp			mpte->wire_count++;
2219178172Simp		} else {
2220178172Simp			/*
2221178172Simp			 * Get the page directory entry
2222178172Simp			 */
2223210846Sjchandra			pde = pmap_pde(pmap, va);
2224178172Simp
2225178172Simp			/*
2226178172Simp			 * If the page table page is mapped, we just
2227178172Simp			 * increment the hold count, and activate it.
2228178172Simp			 */
2229210846Sjchandra			if (pde && *pde != 0) {
2230239152Salc				mpte = PHYS_TO_VM_PAGE(
2231239152Salc				    MIPS_DIRECT_TO_PHYS(*pde));
2232178172Simp				mpte->wire_count++;
2233178172Simp			} else {
2234191300Salc				mpte = _pmap_allocpte(pmap, ptepindex,
2235270439Skib				    PMAP_ENTER_NOSLEEP);
2236191300Salc				if (mpte == NULL)
2237191300Salc					return (mpte);
2238178172Simp			}
2239178172Simp		}
2240178172Simp	} else {
2241178172Simp		mpte = NULL;
2242178172Simp	}
2243178172Simp
2244178172Simp	pte = pmap_pte(pmap, va);
2245209482Sjchandra	if (pte_test(pte, PTE_V)) {
2246191300Salc		if (mpte != NULL) {
2247191300Salc			mpte->wire_count--;
2248191300Salc			mpte = NULL;
2249191300Salc		}
2250191300Salc		return (mpte);
2251178172Simp	}
2252191300Salc
2253178172Simp	/*
2254191300Salc	 * Enter on the PV list if part of our managed memory.
2255178172Simp	 */
2256224746Skib	if ((m->oflags & VPO_UNMANAGED) == 0 &&
2257191300Salc	    !pmap_try_insert_pv_entry(pmap, mpte, va, m)) {
2258191300Salc		if (mpte != NULL) {
2259240126Salc			pmap_unwire_ptp(pmap, va, mpte);
2260191300Salc			mpte = NULL;
2261191300Salc		}
2262191300Salc		return (mpte);
2263191300Salc	}
2264178172Simp
2265178172Simp	/*
2266178172Simp	 * Increment counters
2267178172Simp	 */
2268178172Simp	pmap->pm_stats.resident_count++;
2269178172Simp
2270178172Simp	pa = VM_PAGE_TO_PHYS(m);
2271178172Simp
2272178172Simp	/*
2273178172Simp	 * Now validate mapping with RO protection
2274178172Simp	 */
2275240241Salc	*pte = PTE_RO | TLBLO_PA_TO_PFN(pa) | PTE_V;
2276239964Salc	if ((m->oflags & VPO_UNMANAGED) == 0)
2277239964Salc		*pte |= PTE_MANAGED;
2278178172Simp
2279178172Simp	if (is_cacheable_mem(pa))
2280209482Sjchandra		*pte |= PTE_C_CACHE;
2281178172Simp	else
2282209482Sjchandra		*pte |= PTE_C_UNCACHED;
2283178172Simp
2284178172Simp	if (is_kernel_pmap(pmap))
2285178172Simp		*pte |= PTE_G;
2286178172Simp	else {
2287178172Simp		/*
2288218909Sbrucec		 * Sync I & D caches.  Do this only if the target pmap
2289178172Simp		 * belongs to the current process.  Otherwise, an
2290178172Simp		 * unresolvable TLB miss may occur. */
2291178172Simp		if (pmap == &curproc->p_vmspace->vm_pmap) {
2292178172Simp			va &= ~PAGE_MASK;
2293206746Sjmallett			mips_icache_sync_range(va, PAGE_SIZE);
2294206746Sjmallett			mips_dcache_wbinv_range(va, PAGE_SIZE);
2295178172Simp		}
2296178172Simp	}
2297191300Salc	return (mpte);
2298178172Simp}
2299178172Simp
2300178172Simp/*
2301178172Simp * Make a temporary mapping for a physical address.  This is only intended
2302178172Simp * to be used for panic dumps.
2303209930Sjchandra *
2304209930Sjchandra * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
2305178172Simp */
2306178172Simpvoid *
2307178172Simppmap_kenter_temporary(vm_paddr_t pa, int i)
2308178172Simp{
2309178172Simp	vm_offset_t va;
2310211453Sjchandra
2311178172Simp	if (i != 0)
2312178172Simp		printf("%s: ERROR!!! More than one page of virtual address mapping not supported\n",
2313178172Simp		    __func__);
2314178172Simp
2315211453Sjchandra	if (MIPS_DIRECT_MAPPABLE(pa)) {
2316211453Sjchandra		va = MIPS_PHYS_TO_DIRECT(pa);
2317178172Simp	} else {
2318211453Sjchandra#ifndef __mips_n64    /* XXX : to be converted to new style */
2319178172Simp		int cpu;
2320211453Sjchandra		register_t intr;
2321178172Simp		struct local_sysmaps *sysm;
2322206717Sjmallett		pt_entry_t *pte, npte;
2323206717Sjmallett
2324203151Srrs		/* If this is used for anything other than dumps, we may need
2325203151Srrs		 * to leave interrupts disabled on return.  If crash dumps
2326203151Srrs		 * don't work when we get to this point, consider leaving
2327203151Srrs		 * things disabled as a starting point ;-)
2328203151Srrs		 */
2329206717Sjmallett		intr = intr_disable();
2330178172Simp		cpu = PCPU_GET(cpuid);
2331178172Simp		sysm = &sysmap_lmem[cpu];
2332178172Simp		/* Since this is for the debugger, no locks or any other fun */
2333241287Salc		npte = TLBLO_PA_TO_PFN(pa) | PTE_C_CACHE | PTE_D | PTE_V |
2334241287Salc		    PTE_G;
2335206717Sjmallett		pte = pmap_pte(kernel_pmap, sysm->base);
2336206717Sjmallett		*pte = npte;
2337203151Srrs		sysm->valid1 = 1;
2338206717Sjmallett		pmap_update_page(kernel_pmap, sysm->base, npte);
2339206717Sjmallett		va = sysm->base;
2340206717Sjmallett		intr_restore(intr);
2341211453Sjchandra#endif
2342178172Simp	}
2343178172Simp	return ((void *)va);
2344178172Simp}
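
/*
 * Illustrative sketch (excluded from the build): the 32-bit KSEG0
 * direct-map translation relied on above.  Physical addresses below
 * 512 MB map to kernel virtual addresses by adding the KSEG0 base, so no
 * pte or TLB entry is needed.  The constants follow the classic MIPS32
 * address-space layout.
 */
#if 0
#include <stdint.h>

#define	EX_KSEG0_BASE	0x80000000u
#define	EX_KSEG0_SIZE	0x20000000u	/* 512 MB */

static int
ex_direct_mappable(uint32_t pa)
{

	return (pa < EX_KSEG0_SIZE);
}

static uint32_t
ex_phys_to_kseg0(uint32_t pa)
{

	return (EX_KSEG0_BASE + pa);	/* cached, unmapped segment */
}
#endif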
2345178172Simp
2346178172Simpvoid
2347178172Simppmap_kenter_temporary_free(vm_paddr_t pa)
2348178172Simp{
2349211453Sjchandra#ifndef __mips_n64    /* XXX : to be converted to new style */
2350178172Simp	int cpu;
2351206717Sjmallett	register_t intr;
2352178172Simp	struct local_sysmaps *sysm;
2353211453Sjchandra#endif
2354178172Simp
2355211453Sjchandra	if (MIPS_DIRECT_MAPPABLE(pa)) {
2356178172Simp		/* nothing to do for this case */
2357178172Simp		return;
2358178172Simp	}
2359211453Sjchandra#ifndef __mips_n64    /* XXX : to be converted to new style */
2360178172Simp	cpu = PCPU_GET(cpuid);
2361178172Simp	sysm = &sysmap_lmem[cpu];
2362178172Simp	if (sysm->valid1) {
2363206717Sjmallett		pt_entry_t *pte;
2364206717Sjmallett
2365206717Sjmallett		intr = intr_disable();
2366206717Sjmallett		pte = pmap_pte(kernel_pmap, sysm->base);
2367206717Sjmallett		*pte = PTE_G;
2368206717Sjmallett		pmap_invalidate_page(kernel_pmap, sysm->base);
2369206717Sjmallett		intr_restore(intr);
2370178172Simp		sysm->valid1 = 0;
2371178172Simp	}
2372211453Sjchandra#endif
2373178172Simp}
2374178172Simp
2375178172Simp/*
2376178172Simp * Maps a sequence of resident pages belonging to the same object.
2377178172Simp * The sequence begins with the given page m_start.  This page is
2378178172Simp * mapped at the given virtual address start.  Each subsequent page is
2379178172Simp * mapped at a virtual address that is offset from start by the same
2380178172Simp * amount as the page is offset from m_start within the object.  The
2381178172Simp * last page in the sequence is the page with the largest offset from
2382178172Simp * m_start that can be mapped at a virtual address less than the given
2383178172Simp * virtual address end.  Not every virtual page between start and end
2384178172Simp * is mapped; only those for which a resident page exists with the
2385178172Simp * corresponding offset from m_start are mapped.
2386178172Simp */
2387178172Simpvoid
2388178172Simppmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
2389178172Simp    vm_page_t m_start, vm_prot_t prot)
2390178172Simp{
2391191300Salc	vm_page_t m, mpte;
2392178172Simp	vm_pindex_t diff, psize;
2393178172Simp
2394250884Sattilio	VM_OBJECT_ASSERT_LOCKED(m_start->object);
2395250884Sattilio
2396178172Simp	psize = atop(end - start);
2397191300Salc	mpte = NULL;
2398178172Simp	m = m_start;
2399239317Salc	rw_wlock(&pvh_global_lock);
2400191300Salc	PMAP_LOCK(pmap);
2401178172Simp	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
2402191300Salc		mpte = pmap_enter_quick_locked(pmap, start + ptoa(diff), m,
2403191300Salc		    prot, mpte);
2404178172Simp		m = TAILQ_NEXT(m, listq);
2405178172Simp	}
2406239317Salc	rw_wunlock(&pvh_global_lock);
2407191300Salc 	PMAP_UNLOCK(pmap);
2408178172Simp}
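
/*
 * Illustrative sketch (excluded from the build): the address arithmetic
 * used by pmap_enter_object() above.  A page at object index "pindex"
 * lands at the start address plus the byte offset of its index from
 * m_start's index.  EX_PAGE_SHIFT stands in for the real page size.
 */
#if 0
#define	EX_PAGE_SHIFT	12

static unsigned long
va_for_pindex(unsigned long start, unsigned long pindex_start,
    unsigned long pindex)
{

	return (start + ((pindex - pindex_start) << EX_PAGE_SHIFT));
}
#endif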
2409178172Simp
2410178172Simp/*
2411178172Simp * pmap_object_init_pt preloads the ptes for a given object
2412178172Simp * into the specified pmap.  This eliminates the blast of soft
2413178172Simp * faults on process startup and immediately after an mmap.
2414178172Simp */
2415178172Simpvoid
2416178172Simppmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
2417178172Simp    vm_object_t object, vm_pindex_t pindex, vm_size_t size)
2418178172Simp{
2419248084Sattilio	VM_OBJECT_ASSERT_WLOCKED(object);
2420195840Sjhb	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
2421178172Simp	    ("pmap_object_init_pt: non-device object"));
2422178172Simp}
2423178172Simp
2424178172Simp/*
2425270920Skib *	Clear the wired attribute from the mappings for the specified range of
2426270920Skib *	addresses in the given pmap.  Every valid mapping within that range
2427270920Skib *	must have the wired attribute set.  In contrast, invalid mappings
2428270920Skib *	cannot have the wired attribute set, so they are ignored.
2429270920Skib *
2430270920Skib *	The wired attribute of the page table entry is not a hardware feature,
2431270920Skib *	so there is no need to invalidate any TLB entries.
2432178172Simp */
2433178172Simpvoid
2434270920Skibpmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2435178172Simp{
2436270920Skib	pd_entry_t *pde, *pdpe;
2437209482Sjchandra	pt_entry_t *pte;
2438270920Skib	vm_offset_t va_next;
2439178172Simp
2440178172Simp	PMAP_LOCK(pmap);
2441270920Skib	for (; sva < eva; sva = va_next) {
2442270920Skib		pdpe = pmap_segmap(pmap, sva);
2443270920Skib#ifdef __mips_n64
2444270920Skib		if (*pdpe == NULL) {
2445270920Skib			va_next = (sva + NBSEG) & ~SEGMASK;
2446270920Skib			if (va_next < sva)
2447270920Skib				va_next = eva;
2448270920Skib			continue;
2449270920Skib		}
2450270920Skib#endif
2451270920Skib		va_next = (sva + NBPDR) & ~PDRMASK;
2452270920Skib		if (va_next < sva)
2453270920Skib			va_next = eva;
2454270920Skib		pde = pmap_pdpe_to_pde(pdpe, sva);
2455270920Skib		if (*pde == NULL)
2456270920Skib			continue;
2457270920Skib		if (va_next > eva)
2458270920Skib			va_next = eva;
2459270920Skib		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
2460270920Skib		    sva += PAGE_SIZE) {
2461270920Skib			if (!pte_test(pte, PTE_V))
2462270920Skib				continue;
2463270920Skib			if (!pte_test(pte, PTE_W))
2464270920Skib				panic("pmap_unwire: pte %#jx is missing PTE_W",
2465270920Skib				    (uintmax_t)*pte);
2466270920Skib			pte_clear(pte, PTE_W);
2467270920Skib			pmap->pm_stats.wired_count--;
2468270920Skib		}
2469270920Skib	}
2470178172Simp	PMAP_UNLOCK(pmap);
2471178172Simp}
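
/*
 * Illustrative sketch (excluded from the build): why pmap_unwire() needs
 * no TLB invalidation.  The wired bit lives only in the software pte and
 * is never loaded into the TLB, so clearing it is an ordinary memory
 * update.  ex_pte_test()/ex_pte_clear() mimic the pte_test()/pte_clear()
 * accessors; the bit value is an assumption for the example.
 */
#if 0
#define	EX_PTE_W	0x10UL	/* software-only: never enters the TLB */

static int
ex_pte_test(const unsigned long *pte, unsigned long bit)
{

	return ((*pte & bit) != 0);
}

static void
ex_pte_clear(unsigned long *pte, unsigned long bit)
{

	*pte &= ~bit;
}
#endif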
2472178172Simp
2473178172Simp/*
2474178172Simp *	Copy the range specified by src_addr/len
2475178172Simp *	from the source map to the range dst_addr/len
2476178172Simp *	in the destination map.
2477178172Simp *
2478178172Simp *	This routine is only advisory and need not do anything.
2479178172Simp */
2480178172Simp
2481178172Simpvoid
2482178172Simppmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
2483178172Simp    vm_size_t len, vm_offset_t src_addr)
2484178172Simp{
2485178172Simp}
2486178172Simp
2487178172Simp/*
2488178172Simp *	pmap_zero_page zeros the specified hardware page by mapping
2489178172Simp *	the page into KVM and using bzero to clear its contents.
2490209930Sjchandra *
2491209930Sjchandra * 	Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
2492178172Simp */
2493178172Simpvoid
2494178172Simppmap_zero_page(vm_page_t m)
2495178172Simp{
2496178172Simp	vm_offset_t va;
2497178172Simp	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
2498209930Sjchandra
2499211453Sjchandra	if (MIPS_DIRECT_MAPPABLE(phys)) {
2500211453Sjchandra		va = MIPS_PHYS_TO_DIRECT(phys);
2501178172Simp		bzero((caddr_t)va, PAGE_SIZE);
2502187301Sgonzo		mips_dcache_wbinv_range(va, PAGE_SIZE);
2503178172Simp	} else {
2504211453Sjchandra		va = pmap_lmem_map1(phys);
2505206717Sjmallett		bzero((caddr_t)va, PAGE_SIZE);
2506206717Sjmallett		mips_dcache_wbinv_range(va, PAGE_SIZE);
2507211453Sjchandra		pmap_lmem_unmap();
2508178172Simp	}
2509178172Simp}
2510211453Sjchandra
2511178172Simp/*
2512178172Simp *	pmap_zero_page_area zeros the specified hardware page by mapping
2513178172Simp *	the page into KVM and using bzero to clear its contents.
2514178172Simp *
2515178172Simp *	off and size may not cover an area beyond a single hardware page.
2516178172Simp */
2517178172Simpvoid
2518178172Simppmap_zero_page_area(vm_page_t m, int off, int size)
2519178172Simp{
2520178172Simp	vm_offset_t va;
2521178172Simp	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
2522209930Sjchandra
2523211453Sjchandra	if (MIPS_DIRECT_MAPPABLE(phys)) {
2524211453Sjchandra		va = MIPS_PHYS_TO_DIRECT(phys);
2525178172Simp		bzero((char *)(caddr_t)va + off, size);
2526187301Sgonzo		mips_dcache_wbinv_range(va + off, size);
2527178172Simp	} else {
2528211453Sjchandra		va = pmap_lmem_map1(phys);
2529206717Sjmallett		bzero((char *)va + off, size);
2530206717Sjmallett		mips_dcache_wbinv_range(va + off, size);
2531211453Sjchandra		pmap_lmem_unmap();
2532178172Simp	}
2533178172Simp}
2534178172Simp
2535178172Simpvoid
2536178172Simppmap_zero_page_idle(vm_page_t m)
2537178172Simp{
2538178172Simp	vm_offset_t va;
2539178172Simp	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
2540209930Sjchandra
2541211453Sjchandra	if (MIPS_DIRECT_MAPPABLE(phys)) {
2542211453Sjchandra		va = MIPS_PHYS_TO_DIRECT(phys);
2543178172Simp		bzero((caddr_t)va, PAGE_SIZE);
2544187301Sgonzo		mips_dcache_wbinv_range(va, PAGE_SIZE);
2545178172Simp	} else {
2546211453Sjchandra		va = pmap_lmem_map1(phys);
2547206717Sjmallett		bzero((caddr_t)va, PAGE_SIZE);
2548206717Sjmallett		mips_dcache_wbinv_range(va, PAGE_SIZE);
2549211453Sjchandra		pmap_lmem_unmap();
2550178172Simp	}
2551178172Simp}
2552178172Simp
2553178172Simp/*
2554178172Simp *	pmap_copy_page copies the specified (machine independent)
2555178172Simp *	page by mapping the page into virtual memory and using
2556178172Simp *	bcopy to copy the page, one machine dependent page at a
2557178172Simp *	time.
2558209930Sjchandra *
2559209930Sjchandra * 	Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
2560178172Simp */
2561178172Simpvoid
2562178172Simppmap_copy_page(vm_page_t src, vm_page_t dst)
2563178172Simp{
2564178172Simp	vm_offset_t va_src, va_dst;
2565211453Sjchandra	vm_paddr_t phys_src = VM_PAGE_TO_PHYS(src);
2566211453Sjchandra	vm_paddr_t phys_dst = VM_PAGE_TO_PHYS(dst);
2567209930Sjchandra
2568211453Sjchandra	if (MIPS_DIRECT_MAPPABLE(phys_src) && MIPS_DIRECT_MAPPABLE(phys_dst)) {
2569206716Sjmallett		/* easy case, all can be accessed via KSEG0 */
2570206716Sjmallett		/*
2571206716Sjmallett		 * Flush all caches for VA that are mapped to this page
2572206716Sjmallett		 * to make sure that data in SDRAM is up to date
2573206716Sjmallett		 */
2574206716Sjmallett		pmap_flush_pvcache(src);
2575206716Sjmallett		mips_dcache_wbinv_range_index(
2576211453Sjchandra		    MIPS_PHYS_TO_DIRECT(phys_dst), PAGE_SIZE);
2577211453Sjchandra		va_src = MIPS_PHYS_TO_DIRECT(phys_src);
2578211453Sjchandra		va_dst = MIPS_PHYS_TO_DIRECT(phys_dst);
2579178172Simp		bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE);
2580206716Sjmallett		mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
2581206716Sjmallett	} else {
2582211453Sjchandra		va_src = pmap_lmem_map2(phys_src, phys_dst);
2583211453Sjchandra		va_dst = va_src + PAGE_SIZE;
2584206716Sjmallett		bcopy((void *)va_src, (void *)va_dst, PAGE_SIZE);
2585206717Sjmallett		mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
2586211453Sjchandra		pmap_lmem_unmap();
2587178172Simp	}
2588178172Simp}
2589178172Simp
2590248508Skibint unmapped_buf_allowed;
2591248508Skib
2592248280Skibvoid
2593248280Skibpmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
2594248280Skib    vm_offset_t b_offset, int xfersize)
2595248280Skib{
2596248280Skib	char *a_cp, *b_cp;
2597248280Skib	vm_page_t a_m, b_m;
2598248280Skib	vm_offset_t a_pg_offset, b_pg_offset;
2599248280Skib	vm_paddr_t a_phys, b_phys;
2600248280Skib	int cnt;
2601248280Skib
2602248280Skib	while (xfersize > 0) {
2603248280Skib		a_pg_offset = a_offset & PAGE_MASK;
2604248280Skib		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
2605248280Skib		a_m = ma[a_offset >> PAGE_SHIFT];
2606248280Skib		a_phys = VM_PAGE_TO_PHYS(a_m);
2607248280Skib		b_pg_offset = b_offset & PAGE_MASK;
2608248280Skib		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
2609248280Skib		b_m = mb[b_offset >> PAGE_SHIFT];
2610248280Skib		b_phys = VM_PAGE_TO_PHYS(b_m);
2611248280Skib		if (MIPS_DIRECT_MAPPABLE(a_phys) &&
2612248280Skib		    MIPS_DIRECT_MAPPABLE(b_phys)) {
2613248280Skib			pmap_flush_pvcache(a_m);
2614248280Skib			mips_dcache_wbinv_range_index(
2615248280Skib			    MIPS_PHYS_TO_DIRECT(b_phys), PAGE_SIZE);
2616248280Skib			a_cp = (char *)MIPS_PHYS_TO_DIRECT(a_phys) +
2617248280Skib			    a_pg_offset;
2618248280Skib			b_cp = (char *)MIPS_PHYS_TO_DIRECT(b_phys) +
2619248280Skib			    b_pg_offset;
2620248280Skib			bcopy(a_cp, b_cp, cnt);
2621248280Skib			mips_dcache_wbinv_range((vm_offset_t)b_cp, cnt);
2622248280Skib		} else {
2623248280Skib			a_cp = (char *)pmap_lmem_map2(a_phys, b_phys);
2624248280Skib			b_cp = (char *)a_cp + PAGE_SIZE;
2625248280Skib			a_cp += a_pg_offset;
2626248280Skib			b_cp += b_pg_offset;
2627248280Skib			bcopy(a_cp, b_cp, cnt);
2628248280Skib			mips_dcache_wbinv_range((vm_offset_t)b_cp, cnt);
2629248280Skib			pmap_lmem_unmap();
2630248280Skib		}
2631248280Skib		a_offset += cnt;
2632248280Skib		b_offset += cnt;
2633248280Skib		xfersize -= cnt;
2634248280Skib	}
2635248280Skib}
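
/*
 * Illustrative sketch (excluded from the build): the chunking rule used
 * by pmap_copy_pages() above.  Each iteration may copy only up to the end
 * of the current source page and the current destination page, whichever
 * comes first, then both offsets advance by that amount.
 */
#if 0
#define	EX_PAGE_SIZE	4096
#define	EX_PAGE_MASK	(EX_PAGE_SIZE - 1)

static int
ex_min(int a, int b)
{

	return (a < b ? a : b);
}

/* Largest safe copy length for one step of the transfer. */
static int
copy_chunk_len(unsigned long a_off, unsigned long b_off, int resid)
{
	int cnt;

	cnt = ex_min(resid, EX_PAGE_SIZE - (int)(a_off & EX_PAGE_MASK));
	cnt = ex_min(cnt, EX_PAGE_SIZE - (int)(b_off & EX_PAGE_MASK));
	return (cnt);
}
#endif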
2636248280Skib
2637178172Simp/*
2638178172Simp * Returns true if the pmap's pv is one of the first
2639178172Simp * 16 pvs linked to from this page.  This count may
2640178172Simp * be changed upwards or downwards in the future; it
2641178172Simp * is only necessary that true be returned for a small
2642178172Simp * subset of pmaps for proper page aging.
2643178172Simp */
2644178172Simpboolean_t
2645178172Simppmap_page_exists_quick(pmap_t pmap, vm_page_t m)
2646178172Simp{
2647178172Simp	pv_entry_t pv;
2648178172Simp	int loops = 0;
2649208990Salc	boolean_t rv;
2650178172Simp
2651224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2652208990Salc	    ("pmap_page_exists_quick: page %p is not managed", m));
2653208990Salc	rv = FALSE;
2654239317Salc	rw_wlock(&pvh_global_lock);
2655178172Simp	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2656239236Salc		if (PV_PMAP(pv) == pmap) {
2657208990Salc			rv = TRUE;
2658208990Salc			break;
2659178172Simp		}
2660178172Simp		loops++;
2661178172Simp		if (loops >= 16)
2662178172Simp			break;
2663178172Simp	}
2664239317Salc	rw_wunlock(&pvh_global_lock);
2665208990Salc	return (rv);
2666178172Simp}
2667178172Simp
2668178172Simp/*
2669178172Simp * Remove all pages from the specified address space;
2670178172Simp * this aids process exit speeds.  Also, this code
2671178172Simp * is special-cased for the current process only, but
2672178172Simp * can have the more generic (and slightly slower)
2673178172Simp * mode enabled.  This is much faster than pmap_remove
2674178172Simp * in the case of running down an entire address space.
2675178172Simp */
2676178172Simpvoid
2677178172Simppmap_remove_pages(pmap_t pmap)
2678178172Simp{
2679239152Salc	pd_entry_t *pde;
2680178172Simp	pt_entry_t *pte, tpte;
2681239236Salc	pv_entry_t pv;
2682178172Simp	vm_page_t m;
2683239236Salc	struct pv_chunk *pc, *npc;
2684239236Salc	u_long inuse, bitmask;
2685239236Salc	int allfree, bit, field, idx;
2686178172Simp
2687178172Simp	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
2688178172Simp		printf("warning: pmap_remove_pages called with non-current pmap\n");
2689178172Simp		return;
2690178172Simp	}
2691239317Salc	rw_wlock(&pvh_global_lock);
2692178172Simp	PMAP_LOCK(pmap);
2693239236Salc	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
2694239236Salc		allfree = 1;
2695239236Salc		for (field = 0; field < _NPCM; field++) {
2696239236Salc			inuse = ~pc->pc_map[field] & pc_freemask[field];
2697239236Salc			while (inuse != 0) {
2698239236Salc				bit = ffsl(inuse) - 1;
2699239236Salc				bitmask = 1UL << bit;
2700239236Salc				idx = field * sizeof(inuse) * NBBY + bit;
2701239236Salc				pv = &pc->pc_pventry[idx];
2702239236Salc				inuse &= ~bitmask;
2703178172Simp
2704239236Salc				pde = pmap_pde(pmap, pv->pv_va);
2705239236Salc				KASSERT(pde != NULL && *pde != 0,
2706239236Salc				    ("pmap_remove_pages: pde"));
2707239236Salc				pte = pmap_pde_to_pte(pde, pv->pv_va);
2708239236Salc				if (!pte_test(pte, PTE_V))
2709239236Salc					panic("pmap_remove_pages: bad pte");
2710239236Salc				tpte = *pte;
2711178172Simp
2712178172Simp/*
2713178172Simp * We cannot remove wired pages from a process' mapping at this time
2714178172Simp */
2715239236Salc				if (pte_test(&tpte, PTE_W)) {
2716239236Salc					allfree = 0;
2717239236Salc					continue;
2718239236Salc				}
2719239236Salc				*pte = is_kernel_pmap(pmap) ? PTE_G : 0;
2720178172Simp
2721239236Salc				m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(tpte));
2722239236Salc				KASSERT(m != NULL,
2723239236Salc				    ("pmap_remove_pages: bad tpte %#jx",
2724239236Salc				    (uintmax_t)tpte));
2725178172Simp
2726239236Salc				/*
2727239236Salc				 * Update the vm_page_t clean and reference bits.
2728239236Salc				 */
2729239236Salc				if (pte_test(&tpte, PTE_D))
2730239236Salc					vm_page_dirty(m);
2731178172Simp
2732239236Salc				/* Mark free */
2733239236Salc				PV_STAT(pv_entry_frees++);
2734239236Salc				PV_STAT(pv_entry_spare++);
2735239236Salc				pv_entry_count--;
2736239236Salc				pc->pc_map[field] |= bitmask;
2737239236Salc				pmap->pm_stats.resident_count--;
2738239236Salc				TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2739239236Salc				if (TAILQ_EMPTY(&m->md.pv_list))
2740239236Salc					vm_page_aflag_clear(m, PGA_WRITEABLE);
2741239236Salc				pmap_unuse_pt(pmap, pv->pv_va, *pde);
2742239236Salc			}
2743178172Simp		}
2744239236Salc		if (allfree) {
2745239236Salc			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2746239236Salc			free_pv_chunk(pc);
2747178172Simp		}
2748178172Simp	}
2749178172Simp	pmap_invalidate_all(pmap);
2750178172Simp	PMAP_UNLOCK(pmap);
2751239317Salc	rw_wunlock(&pvh_global_lock);
2752178172Simp}
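
/*
 * Illustrative sketch (excluded from the build): visiting every allocated
 * entry in a chunk, as the loop in pmap_remove_pages() does above.
 * Allocated entries are the clear bits of the free mask, so invert, mask
 * to the valid range, and peel off set bits with ffsl().
 */
#if 0
#include <strings.h>

static void
visit_allocated(unsigned long freemask, unsigned long validmask,
    void (*visit)(int idx))
{
	unsigned long inuse = ~freemask & validmask;
	int bit;

	while (inuse != 0) {
		bit = ffsl((long)inuse) - 1;
		visit(bit);
		inuse &= ~(1UL << bit);
	}
}
#endif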
2753178172Simp
2754178172Simp/*
2755178172Simp * pmap_testbit tests bits in ptes
2756178172Simp */
2757178172Simpstatic boolean_t
2758178172Simppmap_testbit(vm_page_t m, int bit)
2759178172Simp{
2760178172Simp	pv_entry_t pv;
2761239236Salc	pmap_t pmap;
2762178172Simp	pt_entry_t *pte;
2763178172Simp	boolean_t rv = FALSE;
2764178172Simp
2765224746Skib	if (m->oflags & VPO_UNMANAGED)
2766211445Sjchandra		return (rv);
2767178172Simp
2768239317Salc	rw_assert(&pvh_global_lock, RA_WLOCKED);
2769178172Simp	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2770239236Salc		pmap = PV_PMAP(pv);
2771239236Salc		PMAP_LOCK(pmap);
2772239236Salc		pte = pmap_pte(pmap, pv->pv_va);
2773209482Sjchandra		rv = pte_test(pte, bit);
2774239236Salc		PMAP_UNLOCK(pmap);
2775178172Simp		if (rv)
2776178172Simp			break;
2777178172Simp	}
2778178172Simp	return (rv);
2779178172Simp}
2780178172Simp
2781178172Simp/*
2782178172Simp *	pmap_page_wired_mappings:
2783178172Simp *
2784178172Simp *	Return the number of managed mappings to the given physical page
2785178172Simp *	that are wired.
2786178172Simp */
2787178172Simpint
2788178172Simppmap_page_wired_mappings(vm_page_t m)
2789178172Simp{
2790178172Simp	pv_entry_t pv;
2791210914Sjchandra	pmap_t pmap;
2792210914Sjchandra	pt_entry_t *pte;
2793178172Simp	int count;
2794178172Simp
2795178172Simp	count = 0;
2796224746Skib	if ((m->oflags & VPO_UNMANAGED) != 0)
2797178172Simp		return (count);
2798239317Salc	rw_wlock(&pvh_global_lock);
2799210914Sjchandra	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2800239236Salc		pmap = PV_PMAP(pv);
2801210914Sjchandra		PMAP_LOCK(pmap);
2802210914Sjchandra		pte = pmap_pte(pmap, pv->pv_va);
2803210914Sjchandra		if (pte_test(pte, PTE_W))
2804210914Sjchandra			count++;
2805210914Sjchandra		PMAP_UNLOCK(pmap);
2806210914Sjchandra	}
2807239317Salc	rw_wunlock(&pvh_global_lock);
2808178172Simp	return (count);
2809178172Simp}
2810178172Simp
2811178172Simp/*
2812178172Simp * Clear the write and modified bits in each of the given page's mappings.
2813178172Simp */
2814178172Simpvoid
2815178172Simppmap_remove_write(vm_page_t m)
2816178172Simp{
2817239236Salc	pmap_t pmap;
2818239236Salc	pt_entry_t pbits, *pte;
2819239236Salc	pv_entry_t pv;
2820178172Simp
2821224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2822208175Salc	    ("pmap_remove_write: page %p is not managed", m));
2823208175Salc
2824208175Salc	/*
2825254138Sattilio	 * If the page is not exclusively busied, then PGA_WRITEABLE cannot be
2826254138Sattilio	 * set by another thread while the object is locked.  Thus,
2827254138Sattilio	 * if PGA_WRITEABLE is clear, no page table entries need updating.
2828208175Salc	 */
2829248084Sattilio	VM_OBJECT_ASSERT_WLOCKED(m->object);
2830254138Sattilio	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
2831178172Simp		return;
2832239317Salc	rw_wlock(&pvh_global_lock);
2833239236Salc	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2834239236Salc		pmap = PV_PMAP(pv);
2835239236Salc		PMAP_LOCK(pmap);
2836239236Salc		pte = pmap_pte(pmap, pv->pv_va);
2837239236Salc		KASSERT(pte != NULL && pte_test(pte, PTE_V),
2838239236Salc		    ("page on pv_list has no pte"));
2839239236Salc		pbits = *pte;
2840239236Salc		if (pte_test(&pbits, PTE_D)) {
2841239236Salc			pte_clear(&pbits, PTE_D);
2842239236Salc			vm_page_dirty(m);
2843239236Salc		}
2844239236Salc		pte_set(&pbits, PTE_RO);
2845239236Salc		if (pbits != *pte) {
2846239236Salc			*pte = pbits;
2847239236Salc			pmap_update_page(pmap, pv->pv_va, pbits);
2848239236Salc		}
2849239236Salc		PMAP_UNLOCK(pmap);
2850178172Simp	}
2851225418Skib	vm_page_aflag_clear(m, PGA_WRITEABLE);
2852239317Salc	rw_wunlock(&pvh_global_lock);
2853178172Simp}
2854178172Simp
2855178172Simp/*
2856178172Simp *	pmap_ts_referenced:
2857178172Simp *
2858178172Simp *	Return the count of reference bits for a page, clearing all of them.
2859178172Simp */
2860178172Simpint
2861178172Simppmap_ts_referenced(vm_page_t m)
2862178172Simp{
2863178172Simp
2864224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2865208990Salc	    ("pmap_ts_referenced: page %p is not managed", m));
2866178172Simp	if (m->md.pv_flags & PV_TABLE_REF) {
2867239317Salc		rw_wlock(&pvh_global_lock);
2868178172Simp		m->md.pv_flags &= ~PV_TABLE_REF;
2869239317Salc		rw_wunlock(&pvh_global_lock);
2870208990Salc		return (1);
2871178172Simp	}
2872208990Salc	return (0);
2873178172Simp}
2874178172Simp
2875178172Simp/*
2876178172Simp *	pmap_is_modified:
2877178172Simp *
2878178172Simp *	Return whether or not the specified physical page was modified
2879178172Simp *	in any physical maps.
2880178172Simp */
2881178172Simpboolean_t
2882178172Simppmap_is_modified(vm_page_t m)
2883178172Simp{
2884208504Salc	boolean_t rv;
2885178172Simp
2886224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2887208504Salc	    ("pmap_is_modified: page %p is not managed", m));
2888208504Salc
2889208504Salc	/*
2890254138Sattilio	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
2891225418Skib	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
2892209482Sjchandra	 * is clear, no PTEs can have PTE_D set.
2893208504Salc	 */
2894248084Sattilio	VM_OBJECT_ASSERT_WLOCKED(m->object);
2895254138Sattilio	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
2896208504Salc		return (FALSE);
2897239317Salc	rw_wlock(&pvh_global_lock);
2898239681Salc	rv = pmap_testbit(m, PTE_D);
2899239317Salc	rw_wunlock(&pvh_global_lock);
2900208504Salc	return (rv);
2901178172Simp}
2902178172Simp
2903178172Simp/* N/C */
2904178172Simp
2905178172Simp/*
2906178172Simp *	pmap_is_prefaultable:
2907178172Simp *
2908178172Simp *	Return whether or not the specified virtual address is eligible
2909178172Simp *	for prefault.
2910178172Simp */
2911178172Simpboolean_t
2912178172Simppmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
2913178172Simp{
2914210846Sjchandra	pd_entry_t *pde;
2915178172Simp	pt_entry_t *pte;
2916178172Simp	boolean_t rv;
2917178172Simp
2918178172Simp	rv = FALSE;
2919178172Simp	PMAP_LOCK(pmap);
2920210846Sjchandra	pde = pmap_pde(pmap, addr);
2921210846Sjchandra	if (pde != NULL && *pde != 0) {
2922210846Sjchandra		pte = pmap_pde_to_pte(pde, addr);
2923178172Simp		rv = (*pte == 0);
2924178172Simp	}
2925178172Simp	PMAP_UNLOCK(pmap);
2926178172Simp	return (rv);
2927178172Simp}
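
/*
 * For example, an address covered by an allocated but still empty
 * page table page is prefaultable (its PTE is zero), while an address
 * whose segment map entry is missing is not, since prefaulting it
 * would first require allocating a page table page.
 */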
2928178172Simp
2929178172Simp/*
2930255098Salc *	Apply the given advice to the specified range of addresses within the
2931255098Salc *	given pmap.  Depending on the advice, clear the referenced and/or
2932255098Salc *	modified flags in each mapping and set the mapped page's dirty field.
2933255028Salc */
2934255028Salcvoid
2935255028Salcpmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
2936255028Salc{
2937255098Salc	pd_entry_t *pde, *pdpe;
2938255098Salc	pt_entry_t *pte;
2939255098Salc	vm_offset_t va, va_next;
2940255098Salc	vm_paddr_t pa;
2941255098Salc	vm_page_t m;
2942255098Salc
2943255098Salc	if (advice != MADV_DONTNEED && advice != MADV_FREE)
2944255098Salc		return;
2945255098Salc	rw_wlock(&pvh_global_lock);
2946255098Salc	PMAP_LOCK(pmap);
2947255098Salc	for (; sva < eva; sva = va_next) {
2948255098Salc		pdpe = pmap_segmap(pmap, sva);
2949255098Salc#ifdef __mips_n64
2950255098Salc		if (*pdpe == 0) {
2951255098Salc			va_next = (sva + NBSEG) & ~SEGMASK;
2952255098Salc			if (va_next < sva)
2953255098Salc				va_next = eva;
2954255098Salc			continue;
2955255098Salc		}
2956255098Salc#endif
2957255098Salc		va_next = (sva + NBPDR) & ~PDRMASK;
2958255098Salc		if (va_next < sva)
2959255098Salc			va_next = eva;
2960255098Salc
2961255098Salc		pde = pmap_pdpe_to_pde(pdpe, sva);
2962255098Salc		if (*pde == NULL)
2963255098Salc			continue;
2964255098Salc
2965255098Salc		/*
2966255098Salc		 * Limit our scan to either the end of the va represented
2967255098Salc		 * by the current page table page, or to the end of the
2968255098Salc		 * range being advised.
2969255098Salc		 */
2970255098Salc		if (va_next > eva)
2971255098Salc			va_next = eva;
2972255098Salc
2973255098Salc		va = va_next;
2974255098Salc		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
2975255098Salc		    sva += PAGE_SIZE) {
2976255098Salc			if (!pte_test(pte, PTE_MANAGED | PTE_V)) {
2977255098Salc				if (va != va_next) {
2978255098Salc					pmap_invalidate_range(pmap, va, sva);
2979255098Salc					va = va_next;
2980255098Salc				}
2981255098Salc				continue;
2982255098Salc			}
2983255098Salc			pa = TLBLO_PTE_TO_PA(*pte);
2984255098Salc			m = PHYS_TO_VM_PAGE(pa);
2985255098Salc			m->md.pv_flags &= ~PV_TABLE_REF;
2986255098Salc			if (pte_test(pte, PTE_D)) {
2987255098Salc				if (advice == MADV_DONTNEED) {
2988255098Salc					/*
2989255098Salc					 * Future calls to pmap_is_modified()
2990255098Salc					 * can be avoided by making the page
2991255098Salc					 * dirty now.
2992255098Salc					 */
2993255098Salc					vm_page_dirty(m);
2994255098Salc				} else {
2995255098Salc					pte_clear(pte, PTE_D);
2996255098Salc					if (va == va_next)
2997255098Salc						va = sva;
2998255098Salc				}
2999255098Salc			} else {
3000255098Salc				/*
3001255098Salc				 * Unless PTE_D is set, any TLB entries
3002255098Salc				 * mapping "sva" don't allow write access, so
3003255098Salc				 * they needn't be invalidated.
3004255098Salc				 */
3005255098Salc				if (va != va_next) {
3006255098Salc					pmap_invalidate_range(pmap, va, sva);
3007255098Salc					va = va_next;
3008255098Salc				}
3009255098Salc			}
3010255098Salc		}
3011255098Salc		if (va != va_next)
3012255098Salc			pmap_invalidate_range(pmap, va, sva);
3013255098Salc	}
3014255098Salc	rw_wunlock(&pvh_global_lock);
3015255098Salc	PMAP_UNLOCK(pmap);
3016255028Salc}
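
/*
 * Illustrative note: this is the pmap side of madvise(2).  An
 * MADV_FREE call covering a dirty mapping clears PTE_D and
 * invalidates the stale TLB entries, so the page then appears clean
 * and may be reclaimed without being written back; MADV_DONTNEED
 * instead records the dirty state in the vm_page via vm_page_dirty()
 * so that later pmap_is_modified() calls can be avoided.
 */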
3017255028Salc
3018255028Salc/*
3019178172Simp *	Clear the modify bits on the specified physical page.
3020178172Simp */
3021178172Simpvoid
3022178172Simppmap_clear_modify(vm_page_t m)
3023178172Simp{
3024239352Salc	pmap_t pmap;
3025239352Salc	pt_entry_t *pte;
3026239352Salc	pv_entry_t pv;
3027208504Salc
3028224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3029208504Salc	    ("pmap_clear_modify: page %p is not managed", m));
3030248084Sattilio	VM_OBJECT_ASSERT_WLOCKED(m->object);
3031254138Sattilio	KASSERT(!vm_page_xbusied(m),
3032254138Sattilio	    ("pmap_clear_modify: page %p is exclusive busied", m));
3033208504Salc
3034208504Salc	/*
3035225418Skib	 * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_D set.
3036208504Salc	 * If the object containing the page is locked and the page is not
3037254138Sattilio	 * write busied, then PGA_WRITEABLE cannot be concurrently set.
3038208504Salc	 */
3039225418Skib	if ((m->aflags & PGA_WRITEABLE) == 0)
3040178172Simp		return;
3041239317Salc	rw_wlock(&pvh_global_lock);
3042239352Salc	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
3043239352Salc		pmap = PV_PMAP(pv);
3044239352Salc		PMAP_LOCK(pmap);
3045239352Salc		pte = pmap_pte(pmap, pv->pv_va);
3046239352Salc		if (pte_test(pte, PTE_D)) {
3047239352Salc			pte_clear(pte, PTE_D);
3048239352Salc			pmap_update_page(pmap, pv->pv_va, *pte);
3049239352Salc		}
3050239352Salc		PMAP_UNLOCK(pmap);
3051178172Simp	}
3052239317Salc	rw_wunlock(&pvh_global_lock);
3053178172Simp}
3054178172Simp
3055178172Simp/*
3056207155Salc *	pmap_is_referenced:
3057207155Salc *
3058207155Salc *	Return whether or not the specified physical page was referenced
3059207155Salc *	in any physical maps.
3060207155Salc */
3061207155Salcboolean_t
3062207155Salcpmap_is_referenced(vm_page_t m)
3063207155Salc{
3064207155Salc
3065224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3066208574Salc	    ("pmap_is_referenced: page %p is not managed", m));
3067208574Salc	return ((m->md.pv_flags & PV_TABLE_REF) != 0);
3068207155Salc}
3069207155Salc
3070207155Salc/*
3071178172Simp * Miscellaneous support routines follow
3072178172Simp */
3073178172Simp
3074178172Simp/*
3075178172Simp * Map a set of physical memory pages into the kernel virtual
3076178172Simp * address space. Return a pointer to where it is mapped. This
3077178172Simp * routine is intended to be used for mapping device memory,
3078178172Simp * NOT real memory.
3079209930Sjchandra *
3080209930Sjchandra * Use XKPHYS uncached for 64 bit, and KSEG1 where possible for 32 bit.
3081178172Simp */
3082178172Simpvoid *
3083217345Sjchandrapmap_mapdev(vm_paddr_t pa, vm_size_t size)
3084178172Simp{
3085178172Simp	vm_offset_t va, tmpva, offset;
3086178172Simp
3087178172Simp	/*
3088178172Simp	 * KSEG1 maps only the first 512MB of the physical address space.
3089178172Simp	 * For pa > 0x20000000 we must create a proper mapping using
	 * pmap_kenter_attr().
3090178172Simp	 */
3091211453Sjchandra	if (MIPS_DIRECT_MAPPABLE(pa + size - 1))
3092211453Sjchandra		return ((void *)MIPS_PHYS_TO_DIRECT_UNCACHED(pa));
3093178172Simp	else {
3094178172Simp		offset = pa & PAGE_MASK;
3095202046Simp		size = roundup(size + offset, PAGE_SIZE);
3096178172Simp
3097254025Sjeff		va = kva_alloc(size);
3098178172Simp		if (!va)
3099178172Simp			panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
3100202046Simp		pa = trunc_page(pa);
3101178172Simp		for (tmpva = va; size > 0;) {
3102212589Sneel			pmap_kenter_attr(tmpva, pa, PTE_C_UNCACHED);
3103178172Simp			size -= PAGE_SIZE;
3104178172Simp			tmpva += PAGE_SIZE;
3105178172Simp			pa += PAGE_SIZE;
3106178172Simp		}
3107178172Simp	}
3108178172Simp
3109178172Simp	return ((void *)(va + offset));
3110178172Simp}
3111178172Simp
3112178172Simpvoid
3113178172Simppmap_unmapdev(vm_offset_t va, vm_size_t size)
3114178172Simp{
3115211453Sjchandra#ifndef __mips_n64
3116240317Salc	vm_offset_t base, offset;
3117202046Simp
3118202046Simp	/* If the address is within KSEG1 then there is nothing to do */
3119202046Simp	if (va >= MIPS_KSEG1_START && va <= MIPS_KSEG1_END)
3120202046Simp		return;
3121202046Simp
3122202046Simp	base = trunc_page(va);
3123202046Simp	offset = va & PAGE_MASK;
3124202046Simp	size = roundup(size + offset, PAGE_SIZE);
3125254025Sjeff	kva_free(base, size);
3126211453Sjchandra#endif
3127178172Simp}
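
/*
 * Hypothetical usage sketch (the physical address and size below are
 * made up for illustration):
 *
 *	void *regs;
 *
 *	regs = pmap_mapdev((vm_paddr_t)0x1f000000, PAGE_SIZE);
 *	... device register accesses through "regs" ...
 *	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
 *
 * On 32-bit, a range that fits below 512MB is returned directly out
 * of KSEG1 and the matching pmap_unmapdev() is a no-op; only mappings
 * built with pmap_kenter_attr() consume kernel virtual address space.
 */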
3128178172Simp
3129178172Simp/*
3130178172Simp * Perform the pmap work for mincore(2).
3131178172Simp */
3132178172Simpint
3133208504Salcpmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
3134178172Simp{
3135178172Simp	pt_entry_t *ptep, pte;
3136217345Sjchandra	vm_paddr_t pa;
3137208532Sneel	vm_page_t m;
3138208504Salc	int val;
3139178172Simp
3140178172Simp	PMAP_LOCK(pmap);
3141208504Salcretry:
3142178172Simp	ptep = pmap_pte(pmap, addr);
3143178172Simp	pte = (ptep != NULL) ? *ptep : 0;
3144209482Sjchandra	if (!pte_test(&pte, PTE_V)) {
3145208504Salc		val = 0;
3146208504Salc		goto out;
3147208504Salc	}
3148208504Salc	val = MINCORE_INCORE;
3149209482Sjchandra	if (pte_test(&pte, PTE_D))
3150208504Salc		val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
3151209243Sjchandra	pa = TLBLO_PTE_TO_PA(pte);
3152239964Salc	if (pte_test(&pte, PTE_MANAGED)) {
3153178172Simp		/*
3154208504Salc		 * This may falsely report the given address as
3155208504Salc		 * MINCORE_REFERENCED.  Unfortunately, due to the lack of
3156208504Salc		 * per-PTE reference information, it is impossible to
3157208504Salc		 * determine if the address is MINCORE_REFERENCED.
3158178172Simp		 */
3159208504Salc		m = PHYS_TO_VM_PAGE(pa);
3160225418Skib		if ((m->aflags & PGA_REFERENCED) != 0)
3161178172Simp			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
3162178172Simp	}
3163208504Salc	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
3164239964Salc	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
3165239964Salc	    pte_test(&pte, PTE_MANAGED)) {
3166208504Salc		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
3167208504Salc		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
3168208504Salc			goto retry;
3169208504Salc	} else
3170208504Salcout:
3171208504Salc		PA_UNLOCK_COND(*locked_pa);
3172208504Salc	PMAP_UNLOCK(pmap);
3173208504Salc	return (val);
3174178172Simp}
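
/*
 * For example, a resident page dirtied through this pmap has PTE_V
 * and PTE_D set, so the value returned for it includes MINCORE_INCORE
 * | MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER, which mincore(2) folds
 * into the per-page vector handed back to userland.
 */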
3175178172Simp
3176178172Simpvoid
3177178172Simppmap_activate(struct thread *td)
3178178172Simp{
3179178172Simp	pmap_t pmap, oldpmap;
3180178172Simp	struct proc *p = td->td_proc;
3181223758Sattilio	u_int cpuid;
3182178172Simp
3183178172Simp	critical_enter();
3184178172Simp
3185178172Simp	pmap = vmspace_pmap(p->p_vmspace);
3186178172Simp	oldpmap = PCPU_GET(curpmap);
3187223758Sattilio	cpuid = PCPU_GET(cpuid);
3188178172Simp
3189178172Simp	if (oldpmap)
3190223758Sattilio		CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
3191223758Sattilio	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
3192178172Simp	pmap_asid_alloc(pmap);
3193178172Simp	if (td == curthread) {
3194178172Simp		PCPU_SET(segbase, pmap->pm_segtab);
3195223758Sattilio		mips_wr_entryhi(pmap->pm_asid[cpuid].asid);
3196178172Simp	}
3197202046Simp
3198178172Simp	PCPU_SET(curpmap, pmap);
3199178172Simp	critical_exit();
3200178172Simp}
3201178172Simp
3202310133Sjhbstatic void
3203310133Sjhbpmap_sync_icache_one(void *arg __unused)
3204310133Sjhb{
3205310133Sjhb
3206310133Sjhb	mips_icache_sync_all();
3207310133Sjhb	mips_dcache_wbinv_all();
3208310133Sjhb}
3209310133Sjhb
3210198341Smarcelvoid
3211198341Smarcelpmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
3212198341Smarcel{
3213310133Sjhb
3214310133Sjhb	smp_rendezvous(NULL, pmap_sync_icache_one, NULL, NULL);
3215198341Smarcel}
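
/*
 * Note that the va/sz arguments are ignored: the rendezvous runs
 * pmap_sync_icache_one() on every CPU, which synchronizes the whole
 * I-cache and writes back and invalidates the whole D-cache rather
 * than just the requested range.
 */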
3216198341Smarcel
3217178893Salc/*
3218178893Salc *	Increase the starting virtual address of the given mapping if a
3219178893Salc *	different alignment might result in more superpage mappings.
3220178893Salc */
3221178893Salcvoid
3222178893Salcpmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
3223178893Salc    vm_offset_t *addr, vm_size_t size)
3224178893Salc{
3225179081Salc	vm_offset_t superpage_offset;
3226179081Salc
3227179081Salc	if (size < NBSEG)
3228179081Salc		return;
3229179081Salc	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
3230179081Salc		offset += ptoa(object->pg_color);
3231210627Sjchandra	superpage_offset = offset & SEGMASK;
3232210627Sjchandra	if (size - ((NBSEG - superpage_offset) & SEGMASK) < NBSEG ||
3233210627Sjchandra	    (*addr & SEGMASK) == superpage_offset)
3234179081Salc		return;
3235210627Sjchandra	if ((*addr & SEGMASK) < superpage_offset)
3236210627Sjchandra		*addr = (*addr & ~SEGMASK) + superpage_offset;
3237179081Salc	else
3238210627Sjchandra		*addr = ((*addr + SEGMASK) & ~SEGMASK) + superpage_offset;
3239178893Salc}
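
/*
 * Worked example (assuming 4KB pages, so one segment spans NBSEG ==
 * 4MB): for offset 0x523000, superpage_offset is 0x123000.  With a
 * sufficiently large request and the hint *addr == 0x40000000, whose
 * offset within its segment is 0, the address is advanced to
 * 0x40123000 so that the mapping's alignment within a segment matches
 * the object's, which is what makes superpage mappings possible.
 */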
3240178893Salc
3241211167Sjchandra#ifdef DDB
3242210846SjchandraDB_SHOW_COMMAND(ptable, ddb_pid_dump)
3243178172Simp{
3244178172Simp	pmap_t pmap;
3245210846Sjchandra	struct thread *td = NULL;
3246178172Simp	struct proc *p;
3247210846Sjchandra	int i, j, k;
3248210846Sjchandra	vm_paddr_t pa;
3249210846Sjchandra	vm_offset_t va;
3250178172Simp
3251210846Sjchandra	if (have_addr) {
3252210846Sjchandra		td = db_lookup_thread(addr, TRUE);
3253210846Sjchandra		if (td == NULL) {
3254210846Sjchandra			db_printf("Invalid pid or tid");
3255210846Sjchandra			return;
3256210846Sjchandra		}
3257210846Sjchandra		p = td->td_proc;
3258210846Sjchandra		if (p->p_vmspace == NULL) {
3259210846Sjchandra			db_printf("No vmspace for process");
3260210846Sjchandra			return;
3261210846Sjchandra		}
3262210846Sjchandra		pmap = vmspace_pmap(p->p_vmspace);
3263210846Sjchandra	} else
3264210846Sjchandra		pmap = kernel_pmap;
3265178172Simp
3266210846Sjchandra	db_printf("pmap:%p segtab:%p asid:%x generation:%x\n",
3267211167Sjchandra	    pmap, pmap->pm_segtab, pmap->pm_asid[0].asid,
3268211167Sjchandra	    pmap->pm_asid[0].gen);
3269210846Sjchandra	for (i = 0; i < NPDEPG; i++) {
3270210846Sjchandra		pd_entry_t *pdpe;
3271210846Sjchandra		pt_entry_t *pde;
3272210846Sjchandra		pt_entry_t pte;
3273178172Simp
3274210846Sjchandra		pdpe = (pd_entry_t *)pmap->pm_segtab[i];
3275210846Sjchandra		if (pdpe == NULL)
3276210846Sjchandra			continue;
3277210846Sjchandra		db_printf("[%4d] %p\n", i, pdpe);
3278210846Sjchandra#ifdef __mips_n64
3279210846Sjchandra		for (j = 0; j < NPDEPG; j++) {
3280210846Sjchandra			pde = (pt_entry_t *)pdpe[j];
3281210846Sjchandra			if (pde == NULL)
3282210846Sjchandra				continue;
3283210846Sjchandra			db_printf("\t[%4d] %p\n", j, pde);
3284210846Sjchandra#else
3285210846Sjchandra		{
3286210846Sjchandra			j = 0;
3287210846Sjchandra			pde =  (pt_entry_t *)pdpe;
3288210846Sjchandra#endif
3289210846Sjchandra			for (k = 0; k < NPTEPG; k++) {
3290210846Sjchandra				pte = pde[k];
3291210846Sjchandra				if (pte == 0 || !pte_test(&pte, PTE_V))
3292210846Sjchandra					continue;
3293210846Sjchandra				pa = TLBLO_PTE_TO_PA(pte);
3294210846Sjchandra				va = ((u_long)i << SEGSHIFT) | (j << PDRSHIFT) | (k << PAGE_SHIFT);
3295217345Sjchandra				db_printf("\t\t[%04d] va: %p pte: %8jx pa: %jx\n",
3296217345Sjchandra				       k, (void *)va, (uintmax_t)pte, (uintmax_t)pa);
3297178172Simp			}
3298178172Simp		}
3299178172Simp	}
3300178172Simp}
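
/*
 * Example usage from the DDB prompt: "show ptable" dumps the kernel
 * pmap's page tables, while "show ptable <pid or tid>" looks up that
 * thread and dumps the page tables of its process's pmap instead.
 */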
3301211167Sjchandra#endif
3302178172Simp
3303178172Simp#if defined(DEBUG)
3304178172Simp
3305178172Simpstatic void pads(pmap_t pm);
3306178172Simpvoid pmap_pvdump(vm_offset_t pa);
3307178172Simp
3308178172Simp/* print address space of pmap*/
3309178172Simpstatic void
3310178172Simppads(pmap_t pm)
3311178172Simp{
3312178172Simp	unsigned va, i, j;
3313178172Simp	pt_entry_t *ptep;
3314178172Simp
3315178172Simp	if (pm == kernel_pmap)
3316178172Simp		return;
3317178172Simp	for (i = 0; i < NPTEPG; i++)
3318178172Simp		if (pm->pm_segtab[i])
3319178172Simp			for (j = 0; j < NPTEPG; j++) {
3320178172Simp				va = (i << SEGSHIFT) + (j << PAGE_SHIFT);
3321178172Simp				if (pm == kernel_pmap && va < KERNBASE)
3322178172Simp					continue;
3323178172Simp				if (pm != kernel_pmap &&
3324178172Simp				    va >= VM_MAXUSER_ADDRESS)
3325178172Simp					continue;
3326178172Simp				ptep = pmap_pte(pm, va);
3327216324Sjchandra				if (pte_test(ptep, PTE_V))
3328178172Simp					printf("%x:%x ", va, *(int *)ptep);
3329178172Simp			}
3330178172Simp
3331178172Simp}
3332178172Simp
3333178172Simpvoid
3334178172Simppmap_pvdump(vm_offset_t pa)
3335178172Simp{
3336178172Simp	pv_entry_t pv;
3337178172Simp	vm_page_t m;
3338178172Simp
3339178172Simp	printf("pa %x", pa);
3340178172Simp	m = PHYS_TO_VM_PAGE(pa);
3341178172Simp	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
3343178172Simp		printf(" -> pmap %p, va %x", (void *)pv->pv_pmap, pv->pv_va);
3344178172Simp		pads(pv->pv_pmap);
3345178172Simp	}
3346178172Simp	printf(" ");
3347178172Simp}
3348178172Simp
3349178172Simp/* N/C */
3350178172Simp#endif
3351178172Simp
3353178172Simp/*
3354178172Simp * Allocate TLB address space tag (called ASID or TLBPID) and return it.
3355178172Simp * It takes almost as much or more time to search the TLB for a
3356178172Simp * specific ASID and flush those entries as it does to flush the entire TLB.
3357178172Simp * Therefore, when we allocate a new ASID, we just take the next number. When
3358178172Simp * we run out of numbers, we flush the TLB, increment the generation count
3359178172Simp * and start over. ASID zero is reserved for kernel use.
3360178172Simp */
3361178172Simpstatic void
3362178172Simppmap_asid_alloc(pmap_t pmap)
3364178172Simp{
3365178172Simp	if (pmap->pm_asid[PCPU_GET(cpuid)].asid == PMAP_ASID_RESERVED ||
3366178172Simp	    pmap->pm_asid[PCPU_GET(cpuid)].gen != PCPU_GET(asid_generation)) {
3368178172Simp		if (PCPU_GET(next_asid) == pmap_max_asid) {
3369209243Sjchandra			tlb_invalidate_all_user(NULL);
3370178172Simp			PCPU_SET(asid_generation,
3371178172Simp			    (PCPU_GET(asid_generation) + 1) & ASIDGEN_MASK);
3372178172Simp			if (PCPU_GET(asid_generation) == 0) {
3373178172Simp				PCPU_SET(asid_generation, 1);
3374178172Simp			}
3375178172Simp			PCPU_SET(next_asid, 1);	/* 0 means invalid */
3376178172Simp		}
3377178172Simp		pmap->pm_asid[PCPU_GET(cpuid)].asid = PCPU_GET(next_asid);
3378178172Simp		pmap->pm_asid[PCPU_GET(cpuid)].gen = PCPU_GET(asid_generation);
3379178172Simp		PCPU_SET(next_asid, PCPU_GET(next_asid) + 1);
3380178172Simp	}
3381178172Simp}
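
/*
 * For example, with pmap_max_asid == 256 (an 8-bit ASID field), IDs 1
 * through 255 are handed out sequentially; the allocation after that
 * flushes the user TLB entries, advances the per-CPU generation, and
 * restarts numbering at 1, implicitly invalidating every (asid, gen)
 * pair previously cached in other pmaps.
 */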
3382178172Simp
3383217345Sjchandrastatic pt_entry_t
3384239681Salcinit_pte_prot(vm_page_t m, vm_prot_t access, vm_prot_t prot)
3385178172Simp{
3386217345Sjchandra	pt_entry_t rw;
3387178172Simp
3388178172Simp	if (!(prot & VM_PROT_WRITE))
3389239321Salc		rw = PTE_V | PTE_RO;
3390224746Skib	else if ((m->oflags & VPO_UNMANAGED) == 0) {
3391239681Salc		if ((access & VM_PROT_WRITE) != 0)
3392239321Salc			rw = PTE_V | PTE_D;
3393178172Simp		else
3394238861Srwatson			rw = PTE_V;
3395208866Salc	} else
3396208866Salc		/* Needn't emulate a modified bit for unmanaged pages. */
3397239321Salc		rw = PTE_V | PTE_D;
3398208866Salc	return (rw);
3399178172Simp}
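
/*
 * The resulting protection bits, summarized from the cases above:
 *
 *	prot lacks VM_PROT_WRITE            PTE_V | PTE_RO
 *	managed, access includes write      PTE_V | PTE_D
 *	managed, access lacks write         PTE_V (PTE_D emulated on the
 *	                                    first store)
 *	unmanaged, writable                 PTE_V | PTE_D
 */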
3400178172Simp
3401178172Simp/*
3402211217Sjchandra * pmap_emulate_modified: do dirty-bit emulation.
3403211217Sjchandra *
3404211217Sjchandra * On SMP, update just the local TLB; other CPUs will update their
3405211217Sjchandra * TLBs from the PTE lazily, if they take the exception.
3406211217Sjchandra * Returns 0 in case of success, 1 if the page is read-only and we
3407211217Sjchandra * need to fault.
3408178172Simp */
3409211217Sjchandraint
3410211217Sjchandrapmap_emulate_modified(pmap_t pmap, vm_offset_t va)
3411178172Simp{
3412211217Sjchandra	pt_entry_t *pte;
3413178172Simp
3414211217Sjchandra	PMAP_LOCK(pmap);
3415211217Sjchandra	pte = pmap_pte(pmap, va);
3416211217Sjchandra	if (pte == NULL)
3417211217Sjchandra		panic("pmap_emulate_modified: can't find PTE");
3418211217Sjchandra#ifdef SMP
3419211217Sjchandra	/* It is possible that some other CPU has changed the modified bit. */
3420211217Sjchandra	if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D)) {
3421227623Sjchandra		tlb_update(pmap, va, *pte);
3422211217Sjchandra		PMAP_UNLOCK(pmap);
3423211217Sjchandra		return (0);
3424211217Sjchandra	}
3425211217Sjchandra#else
3426211217Sjchandra	if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D))
3427211217Sjchandra		panic("pmap_emulate_modified: invalid pte");
3428211217Sjchandra#endif
3429211217Sjchandra	if (pte_test(pte, PTE_RO)) {
3430211217Sjchandra		PMAP_UNLOCK(pmap);
3431211217Sjchandra		return (1);
3432211217Sjchandra	}
3433211217Sjchandra	pte_set(pte, PTE_D);
3434227623Sjchandra	tlb_update(pmap, va, *pte);
3435239964Salc	if (!pte_test(pte, PTE_MANAGED))
3436239964Salc		panic("pmap_emulate_modified: unmanaged page");
3437211217Sjchandra	PMAP_UNLOCK(pmap);
3438211217Sjchandra	return (0);
3439178172Simp}
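
/*
 * For example, the first store to a clean, writable, managed page
 * traps because its TLB entry was loaded without the dirty bit; the
 * trap handler calls this function, PTE_D is set and the TLB entry
 * refreshed, and the retried store succeeds.  A store through a
 * PTE_RO mapping instead returns 1 so the caller can raise a real
 * fault.
 */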
3440178172Simp
3441178172Simp/*
3442178172Simp *	Routine:	pmap_kextract
3443178172Simp *	Function:
3444178172Simp *		Extract the physical page address associated with the
3445178172Simp *		given virtual address.
3446178172Simp */
3447233308Sjchandravm_paddr_t
3448178172Simppmap_kextract(vm_offset_t va)
3449178172Simp{
3450209930Sjchandra	int mapped;
3451178172Simp
3452209930Sjchandra	/*
3453209930Sjchandra	 * First, the direct-mapped regions.
3454209930Sjchandra	 */
3455209930Sjchandra#if defined(__mips_n64)
3456209930Sjchandra	if (va >= MIPS_XKPHYS_START && va < MIPS_XKPHYS_END)
3457209930Sjchandra		return (MIPS_XKPHYS_TO_PHYS(va));
3458209930Sjchandra#endif
3459209930Sjchandra	if (va >= MIPS_KSEG0_START && va < MIPS_KSEG0_END)
3460209930Sjchandra		return (MIPS_KSEG0_TO_PHYS(va));
3461209930Sjchandra
3462209930Sjchandra	if (va >= MIPS_KSEG1_START && va < MIPS_KSEG1_END)
3463209930Sjchandra		return (MIPS_KSEG1_TO_PHYS(va));
3464209930Sjchandra
3465209930Sjchandra	/*
3466209930Sjchandra	 * User virtual addresses.
3467209930Sjchandra	 */
3468209930Sjchandra	if (va < VM_MAXUSER_ADDRESS) {
3469178172Simp		pt_entry_t *ptep;
3470178172Simp
3471178172Simp		if (curproc && curproc->p_vmspace) {
3472178172Simp			ptep = pmap_pte(&curproc->p_vmspace->vm_pmap, va);
3473209930Sjchandra			if (ptep) {
3474209930Sjchandra				return (TLBLO_PTE_TO_PA(*ptep) |
3475209930Sjchandra				    (va & PAGE_MASK));
3476209930Sjchandra			}
3477209930Sjchandra			return (0);
3478178172Simp		}
3479209930Sjchandra	}
3480209930Sjchandra
3481209930Sjchandra	/*
3482209930Sjchandra	 * The address should be a kernel virtual address here; otherwise fail.
3483209930Sjchandra	 */
3484209930Sjchandra	mapped = (va >= MIPS_KSEG2_START && va < MIPS_KSEG2_END);
3485209930Sjchandra#if defined(__mips_n64)
3486209930Sjchandra	mapped = mapped || (va >= MIPS_XKSEG_START && va < MIPS_XKSEG_END);
3487209930Sjchandra#endif
3488209930Sjchandra	/*
3489209930Sjchandra	 * Kernel virtual.
3490209930Sjchandra	 */
3491209930Sjchandra
3492209930Sjchandra	if (mapped) {
3493178172Simp		pt_entry_t *ptep;
3494178172Simp
3495191735Salc		/* Is the kernel pmap initialized? */
3496222813Sattilio		if (!CPU_EMPTY(&kernel_pmap->pm_active)) {
3497209930Sjchandra			/* It's inside the virtual address range */
3498206717Sjmallett			ptep = pmap_pte(kernel_pmap, va);
3499209243Sjchandra			if (ptep) {
3500209243Sjchandra				return (TLBLO_PTE_TO_PA(*ptep) |
3501209243Sjchandra				    (va & PAGE_MASK));
3502209243Sjchandra			}
3503178172Simp		}
3504209930Sjchandra		return (0);
3505178172Simp	}
3506209930Sjchandra
3507209930Sjchandra	panic("%s for unknown address space %p.", __func__, (void *)va);
3508178172Simp}
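
/*
 * For example, the KSEG0 address 0x80001000 is converted purely
 * arithmetically to physical address 0x1000, with no page table walk;
 * only mapped kernel (KSEG2/XKSEG) and user addresses require the
 * pmap_pte() lookup above.
 */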
3509202046Simp
3511202046Simpvoid
3512202046Simppmap_flush_pvcache(vm_page_t m)
3513202046Simp{
3514202046Simp	pv_entry_t pv;
3515202046Simp
3516202046Simp	if (m != NULL) {
3517202046Simp		TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
3519206746Sjmallett			mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
3521202046Simp	}
3522202046Simp}
3523