/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 *	from: src/sys/i386/i386/pmap.c,v 1.250.2.8 2000/11/21 00:09:14 ps
 *	JNPR: pmap.c,v 1.11.2.1 2007/08/16 11:51:06 girish
 */

/*
 *	Manages physical address maps.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidation or protection-reduction
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and as to when physical maps must be made correct.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.3/sys/mips/mips/pmap.c 270920 2014-09-01 07:58:15Z kib $");

#include "opt_ddb.h"
#include "opt_pmap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#ifdef SMP
#include <sys/smp.h>
#else
#include <sys/cpuset.h>
#endif
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cache.h>
#include <machine/md_var.h>
#include <machine/tlb.h>

#undef PMAP_DEBUG

#if !defined(DIAGNOSTIC)
#define	PMAP_INLINE __inline
#else
#define	PMAP_INLINE
#endif

#ifdef PV_STATS
#define PV_STAT(x)	do { x ; } while (0)
#else
#define PV_STAT(x)	do { } while (0)
#endif
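
/*
 * PV_STAT(x) compiles the statement 'x' in place when the kernel is built
 * with "options PV_STATS" (e.g., PV_STAT(pc_chunk_allocs++) becomes a plain
 * increment); otherwise the statement disappears entirely.
 */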

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define	pmap_seg_index(v)	(((v) >> SEGSHIFT) & (NPDEPG - 1))
#define	pmap_pde_index(v)	(((v) >> PDRSHIFT) & (NPDEPG - 1))
#define	pmap_pte_index(v)	(((v) >> PAGE_SHIFT) & (NPTEPG - 1))
#define	pmap_pde_pindex(v)	((v) >> PDRSHIFT)
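
/*
 * Worked example, assuming the usual 4K pages (PAGE_SHIFT == 12):
 * pmap_pte_index() extracts the VA bits immediately above the page offset
 * (NPTEPG entries worth), pmap_pde_index() the next group above PDRSHIFT,
 * and pmap_seg_index() the top-level group above SEGSHIFT; together the
 * three macros decompose a virtual address into its page-table-walk
 * indices.
 */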

#ifdef __mips_n64
#define	NUPDE			(NPDEPG * NPDEPG)
#define	NUSERPGTBLS		(NUPDE + NPDEPG)
#else
#define	NUPDE			(NPDEPG)
#define	NUSERPGTBLS		(NUPDE)
#endif

#define	is_kernel_pmap(x)	((x) == kernel_pmap)

struct pmap kernel_pmap_store;
pd_entry_t *kernel_segmap;

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */

static int nkpt;
unsigned pmap_max_asid;		/* max ASID supported by the system */

#define	PMAP_ASID_RESERVED	0

vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;

static void pmap_asid_alloc(pmap_t pmap);

static struct rwlock_padalign pvh_global_lock;

/*
 * Data for the pv entry allocation mechanism
 */
static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
static int pv_entry_count;

static void free_pv_chunk(struct pv_chunk *pc);
static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try);
static vm_page_t pmap_pv_reclaim(pmap_t locked_pmap);
static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
    vm_offset_t va);
static vm_page_t pmap_alloc_direct_page(unsigned int index, int req);
static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
    vm_page_t m, vm_prot_t prot, vm_page_t mpte);
static int pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va,
    pd_entry_t pde);
static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte,
    vm_offset_t va, vm_page_t m);
static void pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte);
static void pmap_invalidate_all(pmap_t pmap);
static void pmap_invalidate_page(pmap_t pmap, vm_offset_t va);
static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m);

static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags);
static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, u_int flags);
static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t);
static pt_entry_t init_pte_prot(vm_page_t m, vm_prot_t access, vm_prot_t prot);

static void pmap_invalidate_page_action(void *arg);
static void pmap_invalidate_range_action(void *arg);
static void pmap_update_page_action(void *arg);
#ifndef __mips_n64
/*
 * This structure is for high memory (memory above 512MB on 32-bit systems)
 * support.  The highmem area does not have a KSEG0 mapping, so we need a
 * mechanism to do temporary per-CPU mappings for pmap_zero_page,
 * pmap_copy_page etc.
 *
 * At bootup, we reserve 2 virtual pages per CPU for mapping highmem pages. To
 * access a highmem physical address on a CPU, we map the physical address to
 * the reserved virtual address for the CPU in the kernel pagetable.  This is
 * done with interrupts disabled (although a spinlock and sched_pin would be
 * sufficient).
 */
struct local_sysmaps {
	vm_offset_t	base;
	uint32_t	saved_intr;
	uint16_t	valid1, valid2;
};
static struct local_sysmaps sysmap_lmem[MAXCPU];

static __inline void
pmap_alloc_lmem_map(void)
{
	int i;

	for (i = 0; i < MAXCPU; i++) {
		sysmap_lmem[i].base = virtual_avail;
		virtual_avail += PAGE_SIZE * 2;
		sysmap_lmem[i].valid1 = sysmap_lmem[i].valid2 = 0;
	}
}

static __inline vm_offset_t
pmap_lmem_map1(vm_paddr_t phys)
{
	struct local_sysmaps *sysm;
	pt_entry_t *pte, npte;
	vm_offset_t va;
	uint32_t intr;
	int cpu;

	intr = intr_disable();
	cpu = PCPU_GET(cpuid);
	sysm = &sysmap_lmem[cpu];
	sysm->saved_intr = intr;
	va = sysm->base;
	npte = TLBLO_PA_TO_PFN(phys) | PTE_C_CACHE | PTE_D | PTE_V | PTE_G;
	pte = pmap_pte(kernel_pmap, va);
	*pte = npte;
	sysm->valid1 = 1;
	return (va);
}
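
/*
 * Typical usage sketch (pmap_zero_page() and friends are the real callers,
 * per the comment above): va = pmap_lmem_map1(pa); touch the page through
 * va; pmap_lmem_unmap().  Interrupts stay disabled from map to unmap, so
 * the per-CPU window cannot be re-entered.
 */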

static __inline vm_offset_t
pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
{
	struct local_sysmaps *sysm;
	pt_entry_t *pte, npte;
	vm_offset_t va1, va2;
	uint32_t intr;
	int cpu;

	intr = intr_disable();
	cpu = PCPU_GET(cpuid);
	sysm = &sysmap_lmem[cpu];
	sysm->saved_intr = intr;
	va1 = sysm->base;
	va2 = sysm->base + PAGE_SIZE;
	npte = TLBLO_PA_TO_PFN(phys1) | PTE_C_CACHE | PTE_D | PTE_V | PTE_G;
	pte = pmap_pte(kernel_pmap, va1);
	*pte = npte;
	npte = TLBLO_PA_TO_PFN(phys2) | PTE_C_CACHE | PTE_D | PTE_V | PTE_G;
	pte = pmap_pte(kernel_pmap, va2);
	*pte = npte;
	sysm->valid1 = 1;
	sysm->valid2 = 1;
	return (va1);
}

static __inline void
pmap_lmem_unmap(void)
{
	struct local_sysmaps *sysm;
	pt_entry_t *pte;
	int cpu;

	cpu = PCPU_GET(cpuid);
	sysm = &sysmap_lmem[cpu];
	pte = pmap_pte(kernel_pmap, sysm->base);
	*pte = PTE_G;
	tlb_invalidate_address(kernel_pmap, sysm->base);
	sysm->valid1 = 0;
	if (sysm->valid2) {
		pte = pmap_pte(kernel_pmap, sysm->base + PAGE_SIZE);
		*pte = PTE_G;
		tlb_invalidate_address(kernel_pmap, sysm->base + PAGE_SIZE);
		sysm->valid2 = 0;
	}
	intr_restore(sysm->saved_intr);
}
#else  /* __mips_n64 */

static __inline void
pmap_alloc_lmem_map(void)
{
}

static __inline vm_offset_t
pmap_lmem_map1(vm_paddr_t phys)
{

	return (0);
}

static __inline vm_offset_t
pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
{

	return (0);
}

static __inline void
pmap_lmem_unmap(void)
{
}
#endif /* !__mips_n64 */

/*
 * Page table entry lookup routines.
 */
static __inline pd_entry_t *
pmap_segmap(pmap_t pmap, vm_offset_t va)
{

	return (&pmap->pm_segtab[pmap_seg_index(va)]);
}

#ifdef __mips_n64
static __inline pd_entry_t *
pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va)
{
	pd_entry_t *pde;

	pde = (pd_entry_t *)*pdpe;
	return (&pde[pmap_pde_index(va)]);
}

static __inline pd_entry_t *
pmap_pde(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *pdpe;

	pdpe = pmap_segmap(pmap, va);
	if (*pdpe == NULL)
		return (NULL);

	return (pmap_pdpe_to_pde(pdpe, va));
}
#else
static __inline pd_entry_t *
pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va)
{

	return (pdpe);
}

static __inline pd_entry_t *
pmap_pde(pmap_t pmap, vm_offset_t va)
{

	return (pmap_segmap(pmap, va));
}
#endif

static __inline pt_entry_t *
pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)
{
	pt_entry_t *pte;

	pte = (pt_entry_t *)*pde;
	return (&pte[pmap_pte_index(va)]);
}

pt_entry_t *
pmap_pte(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *pde;

	pde = pmap_pde(pmap, va);
	if (pde == NULL || *pde == NULL)
		return (NULL);

	return (pmap_pde_to_pte(pde, va));
}
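
/*
 * pmap_pte() performs the complete table walk: segment table, then (on
 * n64 only) page directory, then page table, returning NULL as soon as
 * an intermediate level is missing.
 */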

vm_offset_t
pmap_steal_memory(vm_size_t size)
{
	vm_paddr_t bank_size, pa;
	vm_offset_t va;

	size = round_page(size);
	bank_size = phys_avail[1] - phys_avail[0];
	while (size > bank_size) {
		int i;

		for (i = 0; phys_avail[i + 2]; i += 2) {
			phys_avail[i] = phys_avail[i + 2];
			phys_avail[i + 1] = phys_avail[i + 3];
		}
		phys_avail[i] = 0;
		phys_avail[i + 1] = 0;
		if (!phys_avail[0])
			panic("pmap_steal_memory: out of memory");
		bank_size = phys_avail[1] - phys_avail[0];
	}

	pa = phys_avail[0];
	phys_avail[0] += size;
	if (MIPS_DIRECT_MAPPABLE(pa) == 0)
		panic("Out of memory below 512Meg?");
	va = MIPS_PHYS_TO_DIRECT(pa);
	bzero((caddr_t)va, size);
	return (va);
}
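
/*
 * Memory stolen above is carved out of phys_avail[] before the VM system
 * is initialized, so it is never backed by vm_page structures and can
 * never be freed.  It must also be directly mappable (KSEG0/XKPHYS),
 * since no kernel page-table mappings exist yet; hence the panic.
 */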

/*
 * Bootstrap the system enough to run with virtual memory.  This
 * assumes that the phys_avail array has been initialized.
 */
static void
pmap_create_kernel_pagetable(void)
{
	int i, j;
	vm_offset_t ptaddr;
	pt_entry_t *pte;
#ifdef __mips_n64
	pd_entry_t *pde;
	vm_offset_t pdaddr;
	int npt, npde;
#endif

	/*
	 * Allocate segment table for the kernel
	 */
	kernel_segmap = (pd_entry_t *)pmap_steal_memory(PAGE_SIZE);

	/*
	 * Allocate second level page tables for the kernel
	 */
#ifdef __mips_n64
	npde = howmany(NKPT, NPDEPG);
	pdaddr = pmap_steal_memory(PAGE_SIZE * npde);
#endif
	nkpt = NKPT;
	ptaddr = pmap_steal_memory(PAGE_SIZE * nkpt);

	/*
	 * The R[4-7]?00 stores only one copy of the Global bit in the
	 * translation lookaside buffer for each 2 page entry. Thus invalid
	 * entries must have the Global bit set so when Entry LO and Entry HI
	 * G bits are ANDed together they will produce a global bit to store
	 * in the tlb.
	 */
	for (i = 0, pte = (pt_entry_t *)ptaddr; i < (nkpt * NPTEPG); i++, pte++)
		*pte = PTE_G;

#ifdef __mips_n64
	for (i = 0, npt = nkpt; npt > 0; i++) {
		kernel_segmap[i] = (pd_entry_t)(pdaddr + i * PAGE_SIZE);
		pde = (pd_entry_t *)kernel_segmap[i];

		for (j = 0; j < NPDEPG && npt > 0; j++, npt--)
			pde[j] = (pd_entry_t)(ptaddr + (i * NPDEPG + j) * PAGE_SIZE);
	}
#else
	for (i = 0, j = pmap_seg_index(VM_MIN_KERNEL_ADDRESS); i < nkpt; i++, j++)
		kernel_segmap[j] = (pd_entry_t)(ptaddr + (i * PAGE_SIZE));
#endif

	PMAP_LOCK_INIT(kernel_pmap);
	kernel_pmap->pm_segtab = kernel_segmap;
	CPU_FILL(&kernel_pmap->pm_active);
	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
	kernel_pmap->pm_asid[0].asid = PMAP_ASID_RESERVED;
	kernel_pmap->pm_asid[0].gen = 0;
	kernel_vm_end += nkpt * NPTEPG * PAGE_SIZE;
}

void
pmap_bootstrap(void)
{
	int i;
	int need_local_mappings = 0;

	/* Sort. */
again:
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		/*
		 * Keep the memory aligned on page boundary.
		 */
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);

		if (i < 2)
			continue;
		if (phys_avail[i - 2] > phys_avail[i]) {
			vm_paddr_t ptemp[2];

			ptemp[0] = phys_avail[i + 0];
			ptemp[1] = phys_avail[i + 1];

			phys_avail[i + 0] = phys_avail[i - 2];
			phys_avail[i + 1] = phys_avail[i - 1];

			phys_avail[i - 2] = ptemp[0];
			phys_avail[i - 1] = ptemp[1];
			goto again;
		}
	}

	/*
	 * On 32-bit systems, we may have memory which cannot be mapped
	 * directly.  This memory will need a temporary mapping before it
	 * can be accessed.
	 */
	if (!MIPS_DIRECT_MAPPABLE(phys_avail[i - 1] - 1))
		need_local_mappings = 1;

	/*
	 * Copy the phys_avail[] array before we start stealing memory from it.
	 */
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		physmem_desc[i] = phys_avail[i];
		physmem_desc[i + 1] = phys_avail[i + 1];
	}

	Maxmem = atop(phys_avail[i - 1]);

	if (bootverbose) {
		printf("Physical memory chunk(s):\n");
		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
			vm_paddr_t size;

			size = phys_avail[i + 1] - phys_avail[i];
			printf("%#08jx - %#08jx, %ju bytes (%ju pages)\n",
			    (uintmax_t) phys_avail[i],
			    (uintmax_t) phys_avail[i + 1] - 1,
			    (uintmax_t) size, (uintmax_t) size / PAGE_SIZE);
		}
		printf("Maxmem is 0x%0jx\n", ptoa((uintmax_t)Maxmem));
	}
	/*
	 * Steal the message buffer from the beginning of memory.
	 */
	msgbufp = (struct msgbuf *)pmap_steal_memory(msgbufsize);
	msgbufinit(msgbufp, msgbufsize);

	/*
	 * Steal thread0 kstack.
	 */
	kstack0 = pmap_steal_memory(KSTACK_PAGES << PAGE_SHIFT);

	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_KERNEL_ADDRESS;

#ifdef SMP
	/*
	 * Steal some virtual address space to map the pcpu area.
	 */
	virtual_avail = roundup2(virtual_avail, PAGE_SIZE * 2);
	pcpup = (struct pcpu *)virtual_avail;
	virtual_avail += PAGE_SIZE * 2;

	/*
	 * Initialize the wired TLB entry mapping the pcpu region for
	 * the BSP at 'pcpup'. Up until this point we were operating
	 * with the 'pcpup' for the BSP pointing to a virtual address
	 * in KSEG0 so there was no need for a TLB mapping.
	 */
	mips_pcpu_tlb_init(PCPU_ADDR(0));

	if (bootverbose)
		printf("pcpu is available at virtual address %p.\n", pcpup);
#endif

	if (need_local_mappings)
		pmap_alloc_lmem_map();
	pmap_create_kernel_pagetable();
	pmap_max_asid = VMNUM_PIDS;
	mips_wr_entryhi(0);
	mips_wr_pagemask(0);

	/*
	 * Initialize the global pv list lock.
	 */
	rw_init(&pvh_global_lock, "pmap pv global");
}

/*
 * Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
	m->md.pv_flags = 0;
}

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(void)
{
}

/***************************************************
 * Low level helper routines.....
 ***************************************************/

#ifdef	SMP
static __inline void
pmap_call_on_active_cpus(pmap_t pmap, void (*fn)(void *), void *arg)
{
	int	cpuid, cpu, self;
	cpuset_t active_cpus;

	sched_pin();
	if (is_kernel_pmap(pmap)) {
		smp_rendezvous(NULL, fn, NULL, arg);
		goto out;
	}
	/* Force ASID update on inactive CPUs */
	CPU_FOREACH(cpu) {
		if (!CPU_ISSET(cpu, &pmap->pm_active))
			pmap->pm_asid[cpu].gen = 0;
	}
	cpuid = PCPU_GET(cpuid);
	/*
	 * XXX: barrier/locking for active?
	 *
	 * Take a snapshot of active here, any further changes are ignored.
	 * tlb update/invalidate should be harmless on inactive CPUs
	 */
	active_cpus = pmap->pm_active;
	self = CPU_ISSET(cpuid, &active_cpus);
	CPU_CLR(cpuid, &active_cpus);
	/* Optimize for the case where this cpu is the only active one */
	if (CPU_EMPTY(&active_cpus)) {
		if (self)
			fn(arg);
	} else {
		if (self)
			CPU_SET(cpuid, &active_cpus);
		smp_rendezvous_cpus(active_cpus, NULL, fn, NULL, arg);
	}
out:
	sched_unpin();
}
#else /* !SMP */
static __inline void
pmap_call_on_active_cpus(pmap_t pmap, void (*fn)(void *), void *arg)
{
	int	cpuid;

	if (is_kernel_pmap(pmap)) {
		fn(arg);
		return;
	}
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &pmap->pm_active))
		pmap->pm_asid[cpuid].gen = 0;
	else
		fn(arg);
}
#endif /* SMP */
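
/*
 * All of the TLB shootdown helpers below funnel through
 * pmap_call_on_active_cpus(): e.g. pmap_invalidate_page() runs
 * tlb_invalidate_address() on every CPU that currently has the pmap
 * active, while inactive CPUs merely have their ASID generation reset.
 */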

static void
pmap_invalidate_all(pmap_t pmap)
{

	pmap_call_on_active_cpus(pmap,
	    (void (*)(void *))tlb_invalidate_all_user, pmap);
}

struct pmap_invalidate_page_arg {
	pmap_t pmap;
	vm_offset_t va;
};

static void
pmap_invalidate_page_action(void *arg)
{
	struct pmap_invalidate_page_arg *p = arg;

	tlb_invalidate_address(p->pmap, p->va);
}

static void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
	struct pmap_invalidate_page_arg arg;

	arg.pmap = pmap;
	arg.va = va;
	pmap_call_on_active_cpus(pmap, pmap_invalidate_page_action, &arg);
}

struct pmap_invalidate_range_arg {
	pmap_t pmap;
	vm_offset_t sva;
	vm_offset_t eva;
};

static void
pmap_invalidate_range_action(void *arg)
{
	struct pmap_invalidate_range_arg *p = arg;

	tlb_invalidate_range(p->pmap, p->sva, p->eva);
}

static void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	struct pmap_invalidate_range_arg arg;

	arg.pmap = pmap;
	arg.sva = sva;
	arg.eva = eva;
	pmap_call_on_active_cpus(pmap, pmap_invalidate_range_action, &arg);
}

struct pmap_update_page_arg {
	pmap_t pmap;
	vm_offset_t va;
	pt_entry_t pte;
};

static void
pmap_update_page_action(void *arg)
{
	struct pmap_update_page_arg *p = arg;

	tlb_update(p->pmap, p->va, p->pte);
}

static void
pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte)
{
	struct pmap_update_page_arg arg;

	arg.pmap = pmap;
	arg.va = va;
	arg.pte = pte;
	pmap_call_on_active_cpus(pmap, pmap_update_page_action, &arg);
}

/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
	pt_entry_t *pte;
	vm_offset_t retval = 0;

	PMAP_LOCK(pmap);
	pte = pmap_pte(pmap, va);
	if (pte) {
		retval = TLBLO_PTE_TO_PA(*pte) | (va & PAGE_MASK);
	}
	PMAP_UNLOCK(pmap);
	return (retval);
}

/*
 *	Routine:	pmap_extract_and_hold
 *	Function:
 *		Atomically extract and hold the physical page
 *		with the given pmap and virtual address pair
 *		if that mapping permits the given protection.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	pt_entry_t pte, *ptep;
	vm_paddr_t pa, pte_pa;
	vm_page_t m;

	m = NULL;
	pa = 0;
	PMAP_LOCK(pmap);
retry:
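	/*
	 * vm_page_pa_tryrelock() below may drop and reacquire the pmap lock
	 * while acquiring the page lock; if it did, the mapping may have
	 * changed, so the PTE must be re-read from here.
	 */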
	ptep = pmap_pte(pmap, va);
	if (ptep != NULL) {
		pte = *ptep;
		if (pte_test(&pte, PTE_V) && (!pte_test(&pte, PTE_RO) ||
		    (prot & VM_PROT_WRITE) == 0)) {
			pte_pa = TLBLO_PTE_TO_PA(pte);
			if (vm_page_pa_tryrelock(pmap, pte_pa, &pa))
				goto retry;
			m = PHYS_TO_VM_PAGE(pte_pa);
			vm_page_hold(m);
		}
	}
	PA_UNLOCK_COND(pa);
	PMAP_UNLOCK(pmap);
	return (m);
}

/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * add a wired page to the kva
 */
void
pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int attr)
{
	pt_entry_t *pte;
	pt_entry_t opte, npte;

#ifdef PMAP_DEBUG
	printf("pmap_kenter:  va: %p -> pa: %p\n", (void *)va, (void *)pa);
#endif

	pte = pmap_pte(kernel_pmap, va);
	opte = *pte;
	npte = TLBLO_PA_TO_PFN(pa) | attr | PTE_D | PTE_V | PTE_G;
	*pte = npte;
	if (pte_test(&opte, PTE_V) && opte != npte)
		pmap_update_page(kernel_pmap, va, npte);
}

void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

	KASSERT(is_cacheable_mem(pa),
		("pmap_kenter: memory at 0x%lx is not cacheable", (u_long)pa));

	pmap_kenter_attr(va, pa, PTE_C_CACHE);
}

/*
 * remove a page from the kernel pagetables
 */
 /* PMAP_INLINE */ void
pmap_kremove(vm_offset_t va)
{
	pt_entry_t *pte;

	/*
	 * Write back all caches from the page being destroyed
	 */
	mips_dcache_wbinv_range_index(va, PAGE_SIZE);

	pte = pmap_pte(kernel_pmap, va);
	*pte = PTE_G;
	pmap_invalidate_page(kernel_pmap, va);
}

/*
 *	Used to map a range of physical addresses into kernel
 *	virtual address space.
 *
 *	The value passed in '*virt' is a suggested virtual address for
 *	the mapping. Architectures which can support a direct-mapped
 *	physical to virtual region can return the appropriate address
 *	within that region, leaving '*virt' unchanged. Other
 *	architectures should map the pages starting at '*virt' and
 *	update '*virt' with the first usable address after the mapped
 *	region.
 *
 *	Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{
	vm_offset_t va, sva;

	if (MIPS_DIRECT_MAPPABLE(end - 1))
		return (MIPS_PHYS_TO_DIRECT(start));

	va = sva = *virt;
	while (start < end) {
		pmap_kenter(va, start);
		va += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	*virt = va;
	return (sva);
}

/*
 * Add a list of wired pages to the kva
 * this routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
 */
void
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
{
	int i;
	vm_offset_t origva = va;

	for (i = 0; i < count; i++) {
		pmap_flush_pvcache(m[i]);
		pmap_kenter(va, VM_PAGE_TO_PHYS(m[i]));
		va += PAGE_SIZE;
	}

	mips_dcache_wbinv_range_index(origva, PAGE_SIZE*count);
}

/*
 * this routine jerks page mappings from the
 * kernel -- it is meant only for temporary mappings.
 */
void
pmap_qremove(vm_offset_t va, int count)
{
	pt_entry_t *pte;
	vm_offset_t origva;

	if (count < 1)
		return;
	mips_dcache_wbinv_range_index(va, PAGE_SIZE * count);
	origva = va;
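	/*
	 * Write each PTE individually, then invalidate the whole VA range
	 * with a single ranged shootdown rather than one per page.
	 */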
	do {
		pte = pmap_pte(kernel_pmap, va);
		*pte = PTE_G;
		va += PAGE_SIZE;
	} while (--count > 0);
	pmap_invalidate_range(kernel_pmap, origva, va);
}

/***************************************************
 * Page table page management routines.....
 ***************************************************/

/*
 * Decrements a page table page's wire count, which is used to record the
 * number of valid page table entries within the page.  If the wire count
 * drops to zero, then the page table page is unmapped.  Returns TRUE if the
 * page table page was unmapped and FALSE otherwise.
 */
static PMAP_INLINE boolean_t
pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m)
{

	--m->wire_count;
	if (m->wire_count == 0) {
		_pmap_unwire_ptp(pmap, va, m);
		return (TRUE);
	} else
		return (FALSE);
}

static void
_pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pd_entry_t *pde;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	/*
	 * unmap the page table page
	 */
#ifdef __mips_n64
	if (m->pindex < NUPDE)
		pde = pmap_pde(pmap, va);
	else
		pde = pmap_segmap(pmap, va);
#else
	pde = pmap_pde(pmap, va);
#endif
	*pde = 0;
	pmap->pm_stats.resident_count--;

#ifdef __mips_n64
	if (m->pindex < NUPDE) {
		pd_entry_t *pdp;
		vm_page_t pdpg;

		/*
		 * Recursively decrement next level pagetable refcount
		 */
		pdp = (pd_entry_t *)*pmap_segmap(pmap, va);
		pdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pdp));
		pmap_unwire_ptp(pmap, va, pdpg);
	}
#endif

	/*
	 * If the page is finally unwired, simply free it.
	 */
	vm_page_free_zero(m);
	atomic_subtract_int(&cnt.v_wire_count, 1);
}

/*
 * After removing a page table entry, this routine is used to
 * conditionally free the page, and manage the hold/wire counts.
 */
static int
pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
{
	vm_page_t mpte;

	if (va >= VM_MAXUSER_ADDRESS)
		return (0);
	KASSERT(pde != 0, ("pmap_unuse_pt: pde != 0"));
	mpte = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pde));
	return (pmap_unwire_ptp(pmap, va, mpte));
}

void
pmap_pinit0(pmap_t pmap)
{
	int i;

	PMAP_LOCK_INIT(pmap);
	pmap->pm_segtab = kernel_segmap;
	CPU_ZERO(&pmap->pm_active);
	for (i = 0; i < MAXCPU; i++) {
		pmap->pm_asid[i].asid = PMAP_ASID_RESERVED;
		pmap->pm_asid[i].gen = 0;
	}
	PCPU_SET(curpmap, pmap);
	TAILQ_INIT(&pmap->pm_pvchunk);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
}

void
pmap_grow_direct_page_cache(void)
{

#ifdef __mips_n64
	vm_pageout_grow_cache(3, 0, MIPS_XKPHYS_LARGEST_PHYS);
#else
	vm_pageout_grow_cache(3, 0, MIPS_KSEG0_LARGEST_PHYS);
#endif
}

static vm_page_t
pmap_alloc_direct_page(unsigned int index, int req)
{
	vm_page_t m;

	m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, req | VM_ALLOC_WIRED |
	    VM_ALLOC_ZERO);
	if (m == NULL)
		return (NULL);

	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);

	m->pindex = index;
	return (m);
}
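
/*
 * Page table pages are taken from VM_FREELIST_DIRECT so that they are
 * always directly mappable (KSEG0/XKPHYS) and can be walked without being
 * mapped first; they come back wired and zero-filled.
 */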

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
int
pmap_pinit(pmap_t pmap)
{
	vm_offset_t ptdva;
	vm_page_t ptdpg;
	int i;

	/*
	 * allocate the page directory page
	 */
	while ((ptdpg = pmap_alloc_direct_page(NUSERPGTBLS, VM_ALLOC_NORMAL)) == NULL)
		pmap_grow_direct_page_cache();

	ptdva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(ptdpg));
	pmap->pm_segtab = (pd_entry_t *)ptdva;
	CPU_ZERO(&pmap->pm_active);
	for (i = 0; i < MAXCPU; i++) {
		pmap->pm_asid[i].asid = PMAP_ASID_RESERVED;
		pmap->pm_asid[i].gen = 0;
	}
	TAILQ_INIT(&pmap->pm_pvchunk);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);

	return (1);
}

/*
 * this routine is called if the page table page is not
 * mapped correctly.
 */
static vm_page_t
_pmap_allocpte(pmap_t pmap, unsigned ptepindex, u_int flags)
{
	vm_offset_t pageva;
	vm_page_t m;

	/*
	 * Find or fabricate a new pagetable page
	 */
	if ((m = pmap_alloc_direct_page(ptepindex, VM_ALLOC_NORMAL)) == NULL) {
		if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
			PMAP_UNLOCK(pmap);
			rw_wunlock(&pvh_global_lock);
			pmap_grow_direct_page_cache();
			rw_wlock(&pvh_global_lock);
			PMAP_LOCK(pmap);
		}

		/*
		 * Indicate the need to retry.	While waiting, the page
		 * table page may have been allocated.
		 */
		return (NULL);
	}

	/*
	 * Map the pagetable page into the process address space, if it
	 * isn't already there.
	 */
	pageva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));

#ifdef __mips_n64
	if (ptepindex >= NUPDE) {
		pmap->pm_segtab[ptepindex - NUPDE] = (pd_entry_t)pageva;
	} else {
		pd_entry_t *pdep, *pde;
		int segindex = ptepindex >> (SEGSHIFT - PDRSHIFT);
		int pdeindex = ptepindex & (NPDEPG - 1);
		vm_page_t pg;

		pdep = &pmap->pm_segtab[segindex];
		if (*pdep == NULL) {
			/* recurse for allocating page dir */
			if (_pmap_allocpte(pmap, NUPDE + segindex,
			    flags) == NULL) {
				/* alloc failed, release current */
				--m->wire_count;
				atomic_subtract_int(&cnt.v_wire_count, 1);
				vm_page_free_zero(m);
				return (NULL);
			}
		} else {
			pg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pdep));
			pg->wire_count++;
		}
		/* Next level entry */
		pde = (pd_entry_t *)*pdep;
		pde[pdeindex] = (pd_entry_t)pageva;
	}
#else
	pmap->pm_segtab[ptepindex] = (pd_entry_t)pageva;
#endif
	pmap->pm_stats.resident_count++;
	return (m);
}

static vm_page_t
pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags)
{
	unsigned ptepindex;
	pd_entry_t *pde;
	vm_page_t m;

	/*
	 * Calculate pagetable page index
	 */
	ptepindex = pmap_pde_pindex(va);
retry:
	/*
	 * Get the page directory entry
	 */
	pde = pmap_pde(pmap, va);

	/*
	 * If the page table page is mapped, we just increment the hold
	 * count, and activate it.
	 */
	if (pde != NULL && *pde != NULL) {
		m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pde));
		m->wire_count++;
	} else {
		/*
		 * Here if the pte page isn't mapped, or if it has been
		 * deallocated.
		 */
		m = _pmap_allocpte(pmap, ptepindex, flags);
		if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0)
			goto retry;
	}
	return (m);
}


/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap_t pmap)
{
	vm_offset_t ptdva;
	vm_page_t ptdpg;

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));

	ptdva = (vm_offset_t)pmap->pm_segtab;
	ptdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(ptdva));

	ptdpg->wire_count--;
	atomic_subtract_int(&cnt.v_wire_count, 1);
	vm_page_free_zero(ptdpg);
}

/*
 * grow the number of kernel page table entries, if needed
 */
void
pmap_growkernel(vm_offset_t addr)
{
	vm_page_t nkpg;
	pd_entry_t *pde, *pdpe;
	pt_entry_t *pte;
	int i;

	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
	addr = roundup2(addr, NBSEG);
	if (addr - 1 >= kernel_map->max_offset)
		addr = kernel_map->max_offset;
	while (kernel_vm_end < addr) {
		pdpe = pmap_segmap(kernel_pmap, kernel_vm_end);
#ifdef __mips_n64
		if (*pdpe == 0) {
			/* new intermediate page table entry */
			nkpg = pmap_alloc_direct_page(nkpt, VM_ALLOC_INTERRUPT);
			if (nkpg == NULL)
				panic("pmap_growkernel: no memory to grow kernel");
			*pdpe = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg));
			continue; /* try again */
		}
#endif
		pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
		if (*pde != 0) {
			kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
			if (kernel_vm_end - 1 >= kernel_map->max_offset) {
				kernel_vm_end = kernel_map->max_offset;
				break;
			}
			continue;
		}

		/*
		 * This index is bogus, but out of the way
		 */
		nkpg = pmap_alloc_direct_page(nkpt, VM_ALLOC_INTERRUPT);
		if (!nkpg)
			panic("pmap_growkernel: no memory to grow kernel");
		nkpt++;
		*pde = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg));

		/*
		 * The R[4-7]?00 stores only one copy of the Global bit in
		 * the translation lookaside buffer for each 2 page entry.
		 * Thus invalid entries must have the Global bit set so when
		 * Entry LO and Entry HI G bits are ANDed together they will
		 * produce a global bit to store in the tlb.
		 */
		pte = (pt_entry_t *)*pde;
		for (i = 0; i < NPTEPG; i++)
			pte[i] = PTE_G;

		kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
		if (kernel_vm_end - 1 >= kernel_map->max_offset) {
			kernel_vm_end = kernel_map->max_offset;
			break;
		}
	}
}

/***************************************************
 * page management routines.
 ***************************************************/

CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
#ifdef __mips_n64
CTASSERT(_NPCM == 3);
CTASSERT(_NPCPV == 168);
#else
CTASSERT(_NPCM == 11);
CTASSERT(_NPCPV == 336);
#endif

static __inline struct pv_chunk *
pv_to_chunk(pv_entry_t pv)
{

	return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
}

#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)

#ifdef __mips_n64
#define	PC_FREE0_1	0xfffffffffffffffful
#define	PC_FREE2	0x000000fffffffffful
#else
#define	PC_FREE0_9	0xfffffffful	/* Free values for index 0 through 9 */
#define	PC_FREE10	0x0000fffful	/* Free values for index 10 */
#endif

static const u_long pc_freemask[_NPCM] = {
#ifdef __mips_n64
	PC_FREE0_1, PC_FREE0_1, PC_FREE2
#else
	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
	PC_FREE0_9, PC_FREE10
#endif
};
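
/*
 * The freemask arithmetic agrees with the CTASSERTs above: on n64, two
 * full 64-bit map words plus PC_FREE2's low 40 bits cover 2 * 64 + 40 =
 * 168 pv entries per chunk; on 32-bit, ten full 32-bit words plus
 * PC_FREE10's low 16 bits cover 10 * 32 + 16 = 336.  A set bit in
 * pc_map[] means the corresponding entry is free.
 */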

static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");

SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
    "Current number of pv entries");

#ifdef PV_STATS
static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;

SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
    "Current number of pv entry chunks");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
    "Total number of pv entry chunks allocated");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
    "Total number of pv entry chunks freed");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
    "Number of times a chunk page allocation failed");

static long pv_entry_frees, pv_entry_allocs;
static int pv_entry_spare;

SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
    "Total number of pv entries freed");
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
    "Total number of pv entries allocated");
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
    "Current number of spare pv entries");
#endif
1359239236Salc
1360178172Simp/*
1361239236Salc * We are in a serious low memory condition.  Resort to
1362239236Salc * drastic measures to free some pages so we can allocate
1363239236Salc * another pv entry chunk.
1364239236Salc */
1365239236Salcstatic vm_page_t
1366239236Salcpmap_pv_reclaim(pmap_t locked_pmap)
1367239236Salc{
1368239236Salc	struct pch newtail;
1369239236Salc	struct pv_chunk *pc;
1370239236Salc	pd_entry_t *pde;
1371239236Salc	pmap_t pmap;
1372239236Salc	pt_entry_t *pte, oldpte;
1373239236Salc	pv_entry_t pv;
1374239236Salc	vm_offset_t va;
1375239236Salc	vm_page_t m, m_pc;
1376239236Salc	u_long inuse;
1377239236Salc	int bit, field, freed, idx;
1378239236Salc
1379239236Salc	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
1380239236Salc	pmap = NULL;
1381239236Salc	m_pc = NULL;
1382239236Salc	TAILQ_INIT(&newtail);
1383239236Salc	while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL) {
1384239236Salc		TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1385239236Salc		if (pmap != pc->pc_pmap) {
1386239236Salc			if (pmap != NULL) {
1387239236Salc				pmap_invalidate_all(pmap);
1388239236Salc				if (pmap != locked_pmap)
1389239236Salc					PMAP_UNLOCK(pmap);
1390239236Salc			}
1391239236Salc			pmap = pc->pc_pmap;
1392239236Salc			/* Avoid deadlock and lock recursion. */
1393239236Salc			if (pmap > locked_pmap)
1394239236Salc				PMAP_LOCK(pmap);
1395239236Salc			else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) {
1396239236Salc				pmap = NULL;
1397239236Salc				TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
1398239236Salc				continue;
1399239236Salc			}
1400239236Salc		}
1401239236Salc
1402239236Salc		/*
1403239236Salc		 * Destroy every non-wired, 4 KB page mapping in the chunk.
1404239236Salc		 */
1405239236Salc		freed = 0;
1406239236Salc		for (field = 0; field < _NPCM; field++) {
1407239236Salc			for (inuse = ~pc->pc_map[field] & pc_freemask[field];
1408239236Salc			    inuse != 0; inuse &= ~(1UL << bit)) {
1409239236Salc				bit = ffsl(inuse) - 1;
1410239236Salc				idx = field * sizeof(inuse) * NBBY + bit;
1411239236Salc				pv = &pc->pc_pventry[idx];
1412239236Salc				va = pv->pv_va;
1413239236Salc				pde = pmap_pde(pmap, va);
1414239236Salc				KASSERT(pde != NULL && *pde != 0,
1415239236Salc				    ("pmap_pv_reclaim: pde"));
1416239236Salc				pte = pmap_pde_to_pte(pde, va);
1417239236Salc				oldpte = *pte;
1418241520Salc				if (pte_test(&oldpte, PTE_W))
1419241520Salc					continue;
1420239236Salc				if (is_kernel_pmap(pmap))
1421239236Salc					*pte = PTE_G;
1422239236Salc				else
1423239236Salc					*pte = 0;
1424239236Salc				m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(oldpte));
1425239236Salc				if (pte_test(&oldpte, PTE_D))
1426239236Salc					vm_page_dirty(m);
1427239236Salc				if (m->md.pv_flags & PV_TABLE_REF)
1428239236Salc					vm_page_aflag_set(m, PGA_REFERENCED);
1429239681Salc				m->md.pv_flags &= ~PV_TABLE_REF;
1430239236Salc				TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1431239681Salc				if (TAILQ_EMPTY(&m->md.pv_list))
1432239236Salc					vm_page_aflag_clear(m, PGA_WRITEABLE);
1433239236Salc				pc->pc_map[field] |= 1UL << bit;
1434239236Salc				pmap_unuse_pt(pmap, va, *pde);
1435239236Salc				freed++;
1436239236Salc			}
1437239236Salc		}
1438239236Salc		if (freed == 0) {
1439239236Salc			TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
1440239236Salc			continue;
1441239236Salc		}
1442239236Salc		/* Every freed mapping is for a 4 KB page. */
1443239236Salc		pmap->pm_stats.resident_count -= freed;
1444239236Salc		PV_STAT(pv_entry_frees += freed);
1445239236Salc		PV_STAT(pv_entry_spare += freed);
1446239236Salc		pv_entry_count -= freed;
1447239236Salc		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1448239236Salc		for (field = 0; field < _NPCM; field++)
1449239236Salc			if (pc->pc_map[field] != pc_freemask[field]) {
1450239236Salc				TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
1451239236Salc				    pc_list);
1452239236Salc				TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
1453239236Salc
1454239236Salc				/*
1455239236Salc				 * One freed pv entry in locked_pmap is
1456239236Salc				 * sufficient.
1457239236Salc				 */
1458239236Salc				if (pmap == locked_pmap)
1459239236Salc					goto out;
1460239236Salc				break;
1461239236Salc			}
1462239236Salc		if (field == _NPCM) {
1463239236Salc			PV_STAT(pv_entry_spare -= _NPCPV);
1464239236Salc			PV_STAT(pc_chunk_count--);
1465239236Salc			PV_STAT(pc_chunk_frees++);
1466239236Salc			/* Entire chunk is free; return it. */
1467239236Salc			m_pc = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(
1468239236Salc			    (vm_offset_t)pc));
1469239236Salc			break;
1470239236Salc		}
1471239236Salc	}
1472239236Salcout:
1473239236Salc	TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru);
1474239236Salc	if (pmap != NULL) {
1475239236Salc		pmap_invalidate_all(pmap);
1476239236Salc		if (pmap != locked_pmap)
1477239236Salc			PMAP_UNLOCK(pmap);
1478239236Salc	}
1479239236Salc	return (m_pc);
1480239236Salc}
1481239236Salc
1482239236Salc/*
1483178172Simp * Free the pv entry back to its pv chunk's free list.
1484178172Simp */
1485239236Salcstatic void
1486239236Salcfree_pv_entry(pmap_t pmap, pv_entry_t pv)
1487178172Simp{
1488239236Salc	struct pv_chunk *pc;
1489239236Salc	int bit, field, idx;
1490178172Simp
1491239317Salc	rw_assert(&pvh_global_lock, RA_WLOCKED);
1492239236Salc	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1493239236Salc	PV_STAT(pv_entry_frees++);
1494239236Salc	PV_STAT(pv_entry_spare++);
1495178172Simp	pv_entry_count--;
1496239236Salc	pc = pv_to_chunk(pv);
1497239236Salc	idx = pv - &pc->pc_pventry[0];
1498239236Salc	field = idx / (sizeof(u_long) * NBBY);
1499239236Salc	bit = idx % (sizeof(u_long) * NBBY);
1500239236Salc	pc->pc_map[field] |= 1ul << bit;
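	/*
	 * The index arithmetic above, as a worked example (values are
	 * hypothetical; assume 64-bit u_long, so sizeof(u_long) * NBBY
	 * is 64):
	 *
	 *	idx   = pv - &pc->pc_pventry[0];   e.g. 68
	 *	field = 68 / 64;                   -> 1
	 *	bit   = 68 % 64;                   -> 4
	 *	pc->pc_map[1] |= 1ul << 4;         entry 68 is free again
	 */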
1501239236Salc	for (idx = 0; idx < _NPCM; idx++)
1502239236Salc		if (pc->pc_map[idx] != pc_freemask[idx]) {
1503239236Salc			/*
1504239236Salc			 * 98% of the time, pc is already at the head of the
1505239236Salc			 * list.  If it isn't already, move it to the head.
1506239236Salc			 */
1507239236Salc			if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) !=
1508239236Salc			    pc)) {
1509239236Salc				TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1510239236Salc				TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
1511239236Salc				    pc_list);
1512239236Salc			}
1513239236Salc			return;
1514239236Salc		}
1515239236Salc	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1516239236Salc	free_pv_chunk(pc);
1517178172Simp}
1518178172Simp
1519239236Salcstatic void
1520239236Salcfree_pv_chunk(struct pv_chunk *pc)
1521239236Salc{
1522239236Salc	vm_page_t m;
1523239236Salc
1524239236Salc 	TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1525239236Salc	PV_STAT(pv_entry_spare -= _NPCPV);
1526239236Salc	PV_STAT(pc_chunk_count--);
1527239236Salc	PV_STAT(pc_chunk_frees++);
1528239236Salc	/* Entire chunk is free; return it. */
1529239236Salc	m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS((vm_offset_t)pc));
1530239236Salc	vm_page_unwire(m, 0);
1531239236Salc	vm_page_free(m);
1532239236Salc}
1533239236Salc
1534178172Simp/*
1535178172Simp * get a new pv_entry, allocating a block from the system
1536178172Simp * when needed.
1537178172Simp */
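/*
 * A minimal caller sketch (hypothetical; not code from this file).
 * The "try" argument selects between failing fast and falling back
 * to pmap_pv_reclaim() when no chunk page is available:
 *
 *	pv = get_pv_entry(pmap, TRUE);
 *	if (pv == NULL)
 *		return (FALSE);		the caller must cope, as in
 *					pmap_try_insert_pv_entry()
 *
 * With try == FALSE the routine instead loops, reclaiming pv chunks,
 * until the allocation succeeds.
 */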
1538178172Simpstatic pv_entry_t
1539239236Salcget_pv_entry(pmap_t pmap, boolean_t try)
1540178172Simp{
1541239236Salc	struct pv_chunk *pc;
1542239236Salc	pv_entry_t pv;
1543188507Simp	vm_page_t m;
1544239236Salc	int bit, field, idx;
1545178172Simp
1546239317Salc	rw_assert(&pvh_global_lock, RA_WLOCKED);
1547239236Salc	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1548239236Salc	PV_STAT(pv_entry_allocs++);
1549239236Salc	pv_entry_count++;
1550188507Simpretry:
1551239236Salc	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
1552239236Salc	if (pc != NULL) {
1553239236Salc		for (field = 0; field < _NPCM; field++) {
1554239236Salc			if (pc->pc_map[field]) {
1555239236Salc				bit = ffsl(pc->pc_map[field]) - 1;
1556239236Salc				break;
1557239236Salc			}
1558188507Simp		}
1559239236Salc		if (field < _NPCM) {
1560239236Salc			idx = field * sizeof(pc->pc_map[field]) * NBBY + bit;
1561239236Salc			pv = &pc->pc_pventry[idx];
1562239236Salc			pc->pc_map[field] &= ~(1ul << bit);
1563239236Salc			/* If this was the last free item, move the chunk to the tail. */
1564239236Salc			for (field = 0; field < _NPCM; field++)
1565239236Salc				if (pc->pc_map[field] != 0) {
1566239236Salc					PV_STAT(pv_entry_spare--);
1567239236Salc					return (pv);	/* not full, return */
1568239236Salc				}
1569239236Salc			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1570239236Salc			TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
1571239236Salc			PV_STAT(pv_entry_spare--);
1572239236Salc			return (pv);
1573208659Salc		}
1574188507Simp	}
1575239236Salc	/* No free items, allocate another chunk */
1576239236Salc	m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, VM_ALLOC_NORMAL |
1577239236Salc	    VM_ALLOC_WIRED);
1578239236Salc	if (m == NULL) {
1579239236Salc		if (try) {
1580239236Salc			pv_entry_count--;
1581239236Salc			PV_STAT(pc_chunk_tryfail++);
1582239236Salc			return (NULL);
1583239236Salc		}
1584239236Salc		m = pmap_pv_reclaim(pmap);
1585239236Salc		if (m == NULL)
1586188507Simp			goto retry;
1587188507Simp	}
1588239236Salc	PV_STAT(pc_chunk_count++);
1589239236Salc	PV_STAT(pc_chunk_allocs++);
1590239236Salc	pc = (struct pv_chunk *)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
1591239236Salc	pc->pc_pmap = pmap;
1592239236Salc	pc->pc_map[0] = pc_freemask[0] & ~1ul;	/* preallocated bit 0 */
1593239236Salc	for (field = 1; field < _NPCM; field++)
1594239236Salc		pc->pc_map[field] = pc_freemask[field];
1595239236Salc	TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
1596239236Salc	pv = &pc->pc_pventry[0];
1597239236Salc	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1598239236Salc	PV_STAT(pv_entry_spare += _NPCPV - 1);
1599239236Salc	return (pv);
1600178172Simp}
1601178172Simp
1602208665Salcstatic pv_entry_t
1603208665Salcpmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1604178172Simp{
1605178172Simp	pv_entry_t pv;
1606178172Simp
1607239317Salc	rw_assert(&pvh_global_lock, RA_WLOCKED);
1608239236Salc	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
1609239236Salc		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
1610239236Salc			TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
1611239236Salc			break;
1612178172Simp		}
1613178172Simp	}
1614208665Salc	return (pv);
1615208665Salc}
1616178172Simp
1617208665Salcstatic void
1618208665Salcpmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1619208665Salc{
1620208665Salc	pv_entry_t pv;
1621208665Salc
1622208665Salc	pv = pmap_pvh_remove(pvh, pmap, va);
1623208665Salc	KASSERT(pv != NULL, ("pmap_pvh_free: pv not found, pa %lx va %lx",
1624240539Sed	     (u_long)VM_PAGE_TO_PHYS(__containerof(pvh, struct vm_page, md)),
1625208686Salc	     (u_long)va));
1626239236Salc	free_pv_entry(pmap, pv);
1627178172Simp}
1628178172Simp
1629178172Simpstatic void
1630208665Salcpmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
1631178172Simp{
1632178172Simp
1633239317Salc	rw_assert(&pvh_global_lock, RA_WLOCKED);
1634208665Salc	pmap_pvh_free(&m->md, pmap, va);
1635208665Salc	if (TAILQ_EMPTY(&m->md.pv_list))
1636225418Skib		vm_page_aflag_clear(m, PGA_WRITEABLE);
1637178172Simp}
1638178172Simp
1639178172Simp/*
1640191300Salc * Conditionally create a pv entry.
1641191300Salc */
1642191300Salcstatic boolean_t
1643191300Salcpmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte, vm_offset_t va,
1644191300Salc    vm_page_t m)
1645191300Salc{
1646191300Salc	pv_entry_t pv;
1647191300Salc
1648239317Salc	rw_assert(&pvh_global_lock, RA_WLOCKED);
1649191300Salc	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1650239236Salc	if ((pv = get_pv_entry(pmap, TRUE)) != NULL) {
1651191300Salc		pv->pv_va = va;
1652191300Salc		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
1653191300Salc		return (TRUE);
1654191300Salc	} else
1655191300Salc		return (FALSE);
1656191300Salc}
1657191300Salc
1658191300Salc/*
1659178172Simp * pmap_remove_pte: do the actual work of unmapping a page in a process.
1660178172Simp */
1661178172Simpstatic int
1662239152Salcpmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va,
1663239152Salc    pd_entry_t pde)
1664178172Simp{
1665178172Simp	pt_entry_t oldpte;
1666178172Simp	vm_page_t m;
1667217345Sjchandra	vm_paddr_t pa;
1668178172Simp
1669239317Salc	rw_assert(&pvh_global_lock, RA_WLOCKED);
1670178172Simp	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1671178172Simp
1672240241Salc	/*
1673240241Salc	 * Write back all cache lines from the page being unmapped.
1674240241Salc	 */
1675240241Salc	mips_dcache_wbinv_range_index(va, PAGE_SIZE);
1676240241Salc
1677211068Sjchandra	oldpte = *ptq;
1678178172Simp	if (is_kernel_pmap(pmap))
1679178172Simp		*ptq = PTE_G;
1680211068Sjchandra	else
1681211068Sjchandra		*ptq = 0;
1682178172Simp
1683209482Sjchandra	if (pte_test(&oldpte, PTE_W))
1684178172Simp		pmap->pm_stats.wired_count -= 1;
1685178172Simp
1686178172Simp	pmap->pm_stats.resident_count -= 1;
1687178172Simp
1688239964Salc	if (pte_test(&oldpte, PTE_MANAGED)) {
1689239964Salc		pa = TLBLO_PTE_TO_PA(oldpte);
1690178172Simp		m = PHYS_TO_VM_PAGE(pa);
1691209482Sjchandra		if (pte_test(&oldpte, PTE_D)) {
1692211958Sjchandra			KASSERT(!pte_test(&oldpte, PTE_RO),
1693217345Sjchandra			    ("%s: modified page not writable: va: %p, pte: %#jx",
1694217345Sjchandra			    __func__, (void *)va, (uintmax_t)oldpte));
1695187319Sgonzo			vm_page_dirty(m);
1696178172Simp		}
1697178172Simp		if (m->md.pv_flags & PV_TABLE_REF)
1698225418Skib			vm_page_aflag_set(m, PGA_REFERENCED);
1699239681Salc		m->md.pv_flags &= ~PV_TABLE_REF;
1700178172Simp
1701178172Simp		pmap_remove_entry(pmap, m, va);
1702178172Simp	}
1703239152Salc	return (pmap_unuse_pt(pmap, va, pde));
1704178172Simp}
1705178172Simp
1706178172Simp/*
1707178172Simp * Remove a single page from a process address space
1708178172Simp */
1709178172Simpstatic void
1710178172Simppmap_remove_page(struct pmap *pmap, vm_offset_t va)
1711178172Simp{
1712239152Salc	pd_entry_t *pde;
1713209482Sjchandra	pt_entry_t *ptq;
1714178172Simp
1715239317Salc	rw_assert(&pvh_global_lock, RA_WLOCKED);
1716178172Simp	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1717239152Salc	pde = pmap_pde(pmap, va);
1718239152Salc	if (pde == NULL || *pde == 0)
1719239152Salc		return;
1720239152Salc	ptq = pmap_pde_to_pte(pde, va);
1721178172Simp
1722178172Simp	/*
1723240241Salc	 * If there is no valid pte for this address, just skip it.
1724178172Simp	 */
1725240241Salc	if (!pte_test(ptq, PTE_V))
1726178172Simp		return;
1727202046Simp
1728239152Salc	(void)pmap_remove_pte(pmap, ptq, va, *pde);
1729178172Simp	pmap_invalidate_page(pmap, va);
1730178172Simp}
1731178172Simp
1732178172Simp/*
1733178172Simp *	Remove the given range of addresses from the specified map.
1734178172Simp *
1735178172Simp *	It is assumed that the start and end are properly
1736178172Simp *	rounded to the page size.
1737178172Simp */
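/*
 * A worked example of the per-iteration stride computed below
 * (illustrative values only): va_next is rounded up to the next
 * page-directory boundary and then clamped to eva.  If NBPDR were
 * 4 MB, then for sva == 0x00401000
 *
 *	va_next = (sva + NBPDR) & ~PDRMASK;	-> 0x00800000
 *
 * and the wrap-around check (va_next < sva) catches overflow in the
 * last segment of the address space.
 */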
1738178172Simpvoid
1739241123Salcpmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1740178172Simp{
1741210846Sjchandra	pd_entry_t *pde, *pdpe;
1742210846Sjchandra	pt_entry_t *pte;
1743241123Salc	vm_offset_t va, va_next;
1744178172Simp
1745241123Salc	/*
1746241123Salc	 * Perform an unsynchronized read.  This is, however, safe: the
	 * callers serialize changes to this range, so a zero count, even
	 * if stale, means that there are no mappings here to remove.
1747241123Salc	 */
1748178172Simp	if (pmap->pm_stats.resident_count == 0)
1749178172Simp		return;
1750178172Simp
1751239317Salc	rw_wlock(&pvh_global_lock);
1752178172Simp	PMAP_LOCK(pmap);
1753178172Simp
1754178172Simp	/*
1755178172Simp	 * Special handling for removing a single page: it is a very common
1756178172Simp	 * operation, and short-circuiting it here avoids the loop below.
1757178172Simp	 */
1758178172Simp	if ((sva + PAGE_SIZE) == eva) {
1759178172Simp		pmap_remove_page(pmap, sva);
1760178172Simp		goto out;
1761178172Simp	}
1762210846Sjchandra	for (; sva < eva; sva = va_next) {
1763210846Sjchandra		pdpe = pmap_segmap(pmap, sva);
1764210846Sjchandra#ifdef __mips_n64
1765210846Sjchandra		if (*pdpe == 0) {
1766210846Sjchandra			va_next = (sva + NBSEG) & ~SEGMASK;
1767210846Sjchandra			if (va_next < sva)
1768210846Sjchandra				va_next = eva;
1769178172Simp			continue;
1770178172Simp		}
1771210846Sjchandra#endif
1772210846Sjchandra		va_next = (sva + NBPDR) & ~PDRMASK;
1773210846Sjchandra		if (va_next < sva)
1774210846Sjchandra			va_next = eva;
1775210846Sjchandra
1776210846Sjchandra		pde = pmap_pdpe_to_pde(pdpe, sva);
1777241123Salc		if (*pde == 0)
1778210846Sjchandra			continue;
1779241123Salc
1780241123Salc		/*
1781241123Salc		 * Limit our scan to either the end of the va represented
1782241123Salc		 * by the current page table page, or to the end of the
1783241123Salc		 * range being removed.
1784241123Salc		 */
1785210846Sjchandra		if (va_next > eva)
1786210846Sjchandra			va_next = eva;
1787241123Salc
1788241123Salc		va = va_next;
1789240241Salc		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
1790240241Salc		    sva += PAGE_SIZE) {
1791241123Salc			if (!pte_test(pte, PTE_V)) {
1792241123Salc				if (va != va_next) {
1793241123Salc					pmap_invalidate_range(pmap, va, sva);
1794241123Salc					va = va_next;
1795241123Salc				}
1796240241Salc				continue;
1797241123Salc			}
1798241123Salc			if (va == va_next)
1799241123Salc				va = sva;
1800241123Salc			if (pmap_remove_pte(pmap, pte, sva, *pde)) {
1801241123Salc				sva += PAGE_SIZE;
1802241123Salc				break;
1803241123Salc			}
1804210846Sjchandra		}
1805241123Salc		if (va != va_next)
1806241123Salc			pmap_invalidate_range(pmap, va, sva);
1807178172Simp	}
1808178172Simpout:
1809239317Salc	rw_wunlock(&pvh_global_lock);
1810178172Simp	PMAP_UNLOCK(pmap);
1811178172Simp}
1812178172Simp
1813178172Simp/*
1814178172Simp *	Routine:	pmap_remove_all
1815178172Simp *	Function:
1816178172Simp *		Removes this physical page from
1817178172Simp *		all physical maps in which it resides.
1818178172Simp *		Reflects back modify bits to the pager.
1819178172Simp *
1820178172Simp *	Notes:
1821178172Simp *		Original versions of this routine were very
1822178172Simp *		inefficient because they iteratively called
1823178172Simp *		pmap_remove (slow...)
1824178172Simp */
1825178172Simp
1826178172Simpvoid
1827178172Simppmap_remove_all(vm_page_t m)
1828178172Simp{
1829209482Sjchandra	pv_entry_t pv;
1830239236Salc	pmap_t pmap;
1831239152Salc	pd_entry_t *pde;
1832209482Sjchandra	pt_entry_t *pte, tpte;
1833178172Simp
1834224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1835223677Salc	    ("pmap_remove_all: page %p is not managed", m));
1836239317Salc	rw_wlock(&pvh_global_lock);
1837178172Simp
1838178172Simp	if (m->md.pv_flags & PV_TABLE_REF)
1839225418Skib		vm_page_aflag_set(m, PGA_REFERENCED);
1840178172Simp
1841178172Simp	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
1842239236Salc		pmap = PV_PMAP(pv);
1843239236Salc		PMAP_LOCK(pmap);
1844202046Simp
1845202046Simp		/*
1846202046Simp		 * If this is the last mapping, write back all caches for
1847202046Simp		 * the page being destroyed.
1848202046Simp		 */
1849239236Salc		if (TAILQ_NEXT(pv, pv_list) == NULL)
1850206746Sjmallett			mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
1851202046Simp
1852239236Salc		pmap->pm_stats.resident_count--;
1853178172Simp
1854239236Salc		pde = pmap_pde(pmap, pv->pv_va);
1855239152Salc		KASSERT(pde != NULL && *pde != 0, ("pmap_remove_all: pde"));
1856239152Salc		pte = pmap_pde_to_pte(pde, pv->pv_va);
1857178172Simp
1858211068Sjchandra		tpte = *pte;
1859239236Salc		if (is_kernel_pmap(pmap))
1860178172Simp			*pte = PTE_G;
1861211068Sjchandra		else
1862211068Sjchandra			*pte = 0;
1863178172Simp
1864209482Sjchandra		if (pte_test(&tpte, PTE_W))
1865239236Salc			pmap->pm_stats.wired_count--;
1866178172Simp
1867178172Simp		/*
1868178172Simp		 * Update the vm_page_t clean and reference bits.
1869178172Simp		 */
1870209482Sjchandra		if (pte_test(&tpte, PTE_D)) {
1871211958Sjchandra			KASSERT(!pte_test(&tpte, PTE_RO),
1872217345Sjchandra			    ("%s: modified page not writable: va: %p, pte: %#jx",
1873217345Sjchandra			    __func__, (void *)pv->pv_va, (uintmax_t)tpte));
1874178606Salc			vm_page_dirty(m);
1875178172Simp		}
1876239236Salc		pmap_invalidate_page(pmap, pv->pv_va);
1877178172Simp
1878178172Simp		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1879239236Salc		pmap_unuse_pt(pmap, pv->pv_va, *pde);
1880239236Salc		free_pv_entry(pmap, pv);
1881239236Salc		PMAP_UNLOCK(pmap);
1882178172Simp	}
1883178172Simp
1884225418Skib	vm_page_aflag_clear(m, PGA_WRITEABLE);
1885239681Salc	m->md.pv_flags &= ~PV_TABLE_REF;
1886239317Salc	rw_wunlock(&pvh_global_lock);
1887178172Simp}
1888178172Simp
1889178172Simp/*
1890178172Simp *	Set the physical protection on the
1891178172Simp *	specified range of this map as requested.
1892178172Simp */
1893178172Simpvoid
1894178172Simppmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
1895178172Simp{
1896241313Salc	pt_entry_t pbits, *pte;
1897210846Sjchandra	pd_entry_t *pde, *pdpe;
1898241313Salc	vm_offset_t va, va_next;
1899241313Salc	vm_paddr_t pa;
1900241313Salc	vm_page_t m;
1901178172Simp
1902178172Simp	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1903178172Simp		pmap_remove(pmap, sva, eva);
1904178172Simp		return;
1905178172Simp	}
1906178172Simp	if (prot & VM_PROT_WRITE)
1907178172Simp		return;
1908178172Simp
1909178172Simp	PMAP_LOCK(pmap);
1910210846Sjchandra	for (; sva < eva; sva = va_next) {
1911210846Sjchandra		pdpe = pmap_segmap(pmap, sva);
1912210846Sjchandra#ifdef __mips_n64
1913210846Sjchandra		if (*pdpe == 0) {
1914210846Sjchandra			va_next = (sva + NBSEG) & ~SEGMASK;
1915210846Sjchandra			if (va_next < sva)
1916210846Sjchandra				va_next = eva;
1917178172Simp			continue;
1918178172Simp		}
1919210846Sjchandra#endif
1920210846Sjchandra		va_next = (sva + NBPDR) & ~PDRMASK;
1921210846Sjchandra		if (va_next < sva)
1922210846Sjchandra			va_next = eva;
1923210846Sjchandra
1924210846Sjchandra		pde = pmap_pdpe_to_pde(pdpe, sva);
1925240185Salc		if (*pde == 0)
1926178172Simp			continue;
1927241313Salc
1928241313Salc		/*
1929241313Salc		 * Limit our scan to either the end of the va represented
1930241313Salc		 * by the current page table page, or to the end of the
1931241313Salc		 * range being write protected.
1932241313Salc		 */
1933210846Sjchandra		if (va_next > eva)
1934210846Sjchandra			va_next = eva;
1935178172Simp
1936241313Salc		va = va_next;
1937210846Sjchandra		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
1938241313Salc		    sva += PAGE_SIZE) {
1939241313Salc			pbits = *pte;
1940241313Salc			if (!pte_test(&pbits, PTE_V) || pte_test(&pbits,
1941241313Salc			    PTE_RO)) {
1942241313Salc				if (va != va_next) {
1943241313Salc					pmap_invalidate_range(pmap, va, sva);
1944241313Salc					va = va_next;
1945241313Salc				}
1946210846Sjchandra				continue;
1947210846Sjchandra			}
1948210846Sjchandra			pte_set(&pbits, PTE_RO);
1949241313Salc			if (pte_test(&pbits, PTE_D)) {
1950241313Salc				pte_clear(&pbits, PTE_D);
1951241313Salc				if (pte_test(&pbits, PTE_MANAGED)) {
1952241313Salc					pa = TLBLO_PTE_TO_PA(pbits);
1953241313Salc					m = PHYS_TO_VM_PAGE(pa);
1954241313Salc					vm_page_dirty(m);
1955241313Salc				}
1956241313Salc				if (va == va_next)
1957241313Salc					va = sva;
1958241313Salc			} else {
1959241313Salc				/*
1960241313Salc				 * Unless PTE_D is set, any TLB entries
1961241313Salc				 * mapping "sva" don't allow write access, so
1962241313Salc				 * they needn't be invalidated.
1963241313Salc				 */
1964241313Salc				if (va != va_next) {
1965241313Salc					pmap_invalidate_range(pmap, va, sva);
1966241313Salc					va = va_next;
1967241313Salc				}
1968210846Sjchandra			}
1969241313Salc			*pte = pbits;
1970178172Simp		}
1971241313Salc		if (va != va_next)
1972241313Salc			pmap_invalidate_range(pmap, va, sva);
1973178172Simp	}
1974178172Simp	PMAP_UNLOCK(pmap);
1975178172Simp}
1976178172Simp
1977178172Simp/*
1978178172Simp *	Insert the given physical page (p) at
1979178172Simp *	the specified virtual address (v) in the
1980178172Simp *	target physical map with the protection requested.
1981178172Simp *
1982178172Simp *	If specified, the page will be wired down, meaning
1983178172Simp *	that the related pte can not be reclaimed.
1984178172Simp *
1985178172Simp *	NB:  This is the only routine which MAY NOT lazy-evaluate
1986178172Simp *	or lose information.  That is, this routine must actually
1987178172Simp *	insert this page into the given map NOW.
1988178172Simp */
1989270439Skibint
1990270439Skibpmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
1991270439Skib    u_int flags, int8_t psind __unused)
1992178172Simp{
1993217345Sjchandra	vm_paddr_t pa, opa;
1994209482Sjchandra	pt_entry_t *pte;
1995178172Simp	pt_entry_t origpte, newpte;
1996208665Salc	pv_entry_t pv;
1997178172Simp	vm_page_t mpte, om;
1998178172Simp
1999178172Simp	va &= ~PAGE_MASK;
2000208175Salc 	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
2001240000Salc	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
2002240000Salc	    va >= kmi.clean_eva,
2003240000Salc	    ("pmap_enter: managed mapping within the clean submap"));
2004270439Skib	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
2005270439Skib		VM_OBJECT_ASSERT_LOCKED(m->object);
2006239964Salc	pa = VM_PAGE_TO_PHYS(m);
2007270439Skib	newpte = TLBLO_PA_TO_PFN(pa) | init_pte_prot(m, flags, prot);
2008270439Skib	if ((flags & PMAP_ENTER_WIRED) != 0)
2009240000Salc		newpte |= PTE_W;
2010240000Salc	if (is_kernel_pmap(pmap))
2011240000Salc		newpte |= PTE_G;
2012240000Salc	if (is_cacheable_mem(pa))
2013240000Salc		newpte |= PTE_C_CACHE;
2014240000Salc	else
2015240000Salc		newpte |= PTE_C_UNCACHED;
2016178172Simp
2017178172Simp	mpte = NULL;
2018178172Simp
2019239317Salc	rw_wlock(&pvh_global_lock);
2020178172Simp	PMAP_LOCK(pmap);
2021178172Simp
2022178172Simp	/*
2023178172Simp	 * In the case that a page table page is not resident, we are
2024178172Simp	 * creating it here.
2025178172Simp	 */
2026178172Simp	if (va < VM_MAXUSER_ADDRESS) {
2027270439Skib		mpte = pmap_allocpte(pmap, va, flags);
2028270439Skib		if (mpte == NULL) {
2029270439Skib			KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0,
2030270439Skib			    ("pmap_allocpte failed with sleep allowed"));
2031270439Skib			rw_wunlock(&pvh_global_lock);
2032270439Skib			PMAP_UNLOCK(pmap);
2033270439Skib			return (KERN_RESOURCE_SHORTAGE);
2034270439Skib		}
2035178172Simp	}
2036178172Simp	pte = pmap_pte(pmap, va);
2037178172Simp
2038178172Simp	/*
2039178172Simp	 * A page table page should exist by now; a missing page directory
	 * entry here is a bug, so panic.
2040178172Simp	 */
2041178172Simp	if (pte == NULL) {
2042211958Sjchandra		panic("pmap_enter: invalid page directory, pdir=%p, va=%p",
2043202046Simp		    (void *)pmap->pm_segtab, (void *)va);
2044178172Simp	}
2045178172Simp	om = NULL;
2046178172Simp	origpte = *pte;
2047209243Sjchandra	opa = TLBLO_PTE_TO_PA(origpte);
2048178172Simp
2049178172Simp	/*
2050178172Simp	 * Mapping has not changed, must be protection or wiring change.
2051178172Simp	 */
2052209482Sjchandra	if (pte_test(&origpte, PTE_V) && opa == pa) {
2053178172Simp		/*
2054178172Simp		 * Wiring change, just update stats. We don't worry about
2055178172Simp		 * wiring PT pages as they remain resident as long as there
2056178172Simp		 * are valid mappings in them. Hence, if a user page is
2057178172Simp		 * wired, the PT page will be also.
2058178172Simp		 */
2059270439Skib		if (pte_test(&newpte, PTE_W) && !pte_test(&origpte, PTE_W))
2060178172Simp			pmap->pm_stats.wired_count++;
2061270439Skib		else if (!pte_test(&newpte, PTE_W) && pte_test(&origpte,
2062270439Skib		    PTE_W))
2063178172Simp			pmap->pm_stats.wired_count--;
2064178172Simp
2065211958Sjchandra		KASSERT(!pte_test(&origpte, PTE_D | PTE_RO),
2066217345Sjchandra		    ("%s: modified page not writable: va: %p, pte: %#jx",
2067217345Sjchandra		    __func__, (void *)va, (uintmax_t)origpte));
2068178172Simp
2069178172Simp		/*
2070178172Simp		 * Remove extra pte reference
2071178172Simp		 */
2072178172Simp		if (mpte)
2073178172Simp			mpte->wire_count--;
2074178172Simp
2075239964Salc		if (pte_test(&origpte, PTE_MANAGED)) {
2076240241Salc			m->md.pv_flags |= PV_TABLE_REF;
2077178172Simp			om = m;
2078239964Salc			newpte |= PTE_MANAGED;
2079240000Salc			if (!pte_test(&newpte, PTE_RO))
2080240000Salc				vm_page_aflag_set(m, PGA_WRITEABLE);
2081178172Simp		}
2082178172Simp		goto validate;
2083178172Simp	}
2084208665Salc
2085208665Salc	pv = NULL;
2086208665Salc
2087178172Simp	/*
2088178172Simp	 * Mapping has changed, invalidate old range and fall through to
2089178172Simp	 * handle validating new mapping.
2090178172Simp	 */
2091178172Simp	if (opa) {
2092209482Sjchandra		if (pte_test(&origpte, PTE_W))
2093178172Simp			pmap->pm_stats.wired_count--;
2094178172Simp
2095239964Salc		if (pte_test(&origpte, PTE_MANAGED)) {
2096178172Simp			om = PHYS_TO_VM_PAGE(opa);
2097208665Salc			pv = pmap_pvh_remove(&om->md, pmap, va);
2098178172Simp		}
2099178172Simp		if (mpte != NULL) {
2100178172Simp			mpte->wire_count--;
2101178172Simp			KASSERT(mpte->wire_count > 0,
2102178172Simp			    ("pmap_enter: missing reference to page table page,"
2103202046Simp			    " va: %p", (void *)va));
2104178172Simp		}
2105178172Simp	} else
2106178172Simp		pmap->pm_stats.resident_count++;
2107178172Simp
2108178172Simp	/*
2109240000Salc	 * Enter on the PV list if part of our managed memory.
2110178172Simp	 */
2111224746Skib	if ((m->oflags & VPO_UNMANAGED) == 0) {
2112240241Salc		m->md.pv_flags |= PV_TABLE_REF;
2113208665Salc		if (pv == NULL)
2114239236Salc			pv = get_pv_entry(pmap, FALSE);
2115208665Salc		pv->pv_va = va;
2116208665Salc		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
2117239964Salc		newpte |= PTE_MANAGED;
2118240000Salc		if (!pte_test(&newpte, PTE_RO))
2119240000Salc			vm_page_aflag_set(m, PGA_WRITEABLE);
2120208665Salc	} else if (pv != NULL)
2121239236Salc		free_pv_entry(pmap, pv);
2122208665Salc
2123178172Simp	/*
2124178172Simp	 * Increment counters
2125178172Simp	 */
2126270439Skib	if (pte_test(&newpte, PTE_W))
2127178172Simp		pmap->pm_stats.wired_count++;
2128178172Simp
2129178172Simpvalidate:
2130178172Simp
2131187301Sgonzo#ifdef PMAP_DEBUG
2132209482Sjchandra	printf("pmap_enter:  va: %p -> pa: %p\n", (void *)va, (void *)pa);
2133187301Sgonzo#endif
2134178172Simp
2135178172Simp	/*
2136178172Simp	 * if the mapping or permission bits are different, we need to
2137178172Simp	 * update the pte.
2138178172Simp	 */
2139178172Simp	if (origpte != newpte) {
2140240241Salc		*pte = newpte;
2141209482Sjchandra		if (pte_test(&origpte, PTE_V)) {
2142239964Salc			if (pte_test(&origpte, PTE_MANAGED) && opa != pa) {
2143178172Simp				if (om->md.pv_flags & PV_TABLE_REF)
2144225418Skib					vm_page_aflag_set(om, PGA_REFERENCED);
2145239681Salc				om->md.pv_flags &= ~PV_TABLE_REF;
2146178172Simp			}
2147209482Sjchandra			if (pte_test(&origpte, PTE_D)) {
2148209482Sjchandra				KASSERT(!pte_test(&origpte, PTE_RO),
2149178172Simp				    ("pmap_enter: modified page not writable:"
2150217345Sjchandra				    " va: %p, pte: %#jx", (void *)va, (uintmax_t)origpte));
2151239964Salc				if (pte_test(&origpte, PTE_MANAGED))
2152178172Simp					vm_page_dirty(om);
2153178172Simp			}
2154239964Salc			if (pte_test(&origpte, PTE_MANAGED) &&
2155208665Salc			    TAILQ_EMPTY(&om->md.pv_list))
2156225418Skib				vm_page_aflag_clear(om, PGA_WRITEABLE);
2157240241Salc			pmap_update_page(pmap, va, newpte);
2158178172Simp		}
2159178172Simp	}
2160178172Simp
2161178172Simp	/*
2162218909Sbrucec	 * Sync I & D caches for executable pages.  Do this only if the
2163178172Simp	 * target pmap belongs to the current process.  Otherwise, an
2164178172Simp	 * unresolvable TLB miss may occur.
2165178172Simp	 */
2166178172Simp	if (!is_kernel_pmap(pmap) && (pmap == &curproc->p_vmspace->vm_pmap) &&
2167178172Simp	    (prot & VM_PROT_EXECUTE)) {
2168206746Sjmallett		mips_icache_sync_range(va, PAGE_SIZE);
2169206746Sjmallett		mips_dcache_wbinv_range(va, PAGE_SIZE);
2170178172Simp	}
2171239317Salc	rw_wunlock(&pvh_global_lock);
2172178172Simp	PMAP_UNLOCK(pmap);
2173270439Skib	return (KERN_SUCCESS);
2174178172Simp}
2175178172Simp
2176178172Simp/*
2177178172Simp * this code makes some *MAJOR* assumptions:
2178178172Simp * 1. The current pmap and the given pmap exist.
2179178172Simp * 2. Not wired.
2180178172Simp * 3. Read access.
2181178172Simp * 4. No page table pages.
2182178172Simp * but is *MUCH* faster than pmap_enter...
2183178172Simp */
2184178172Simp
2185178172Simpvoid
2186178172Simppmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
2187178172Simp{
2188191300Salc
2189239317Salc	rw_wlock(&pvh_global_lock);
2190191300Salc	PMAP_LOCK(pmap);
2191191300Salc	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
2192239317Salc	rw_wunlock(&pvh_global_lock);
2193191300Salc	PMAP_UNLOCK(pmap);
2194191300Salc}
2195191300Salc
2196191300Salcstatic vm_page_t
2197191300Salcpmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
2198191300Salc    vm_prot_t prot, vm_page_t mpte)
2199191300Salc{
2200178172Simp	pt_entry_t *pte;
2201217345Sjchandra	vm_paddr_t pa;
2202178172Simp
2203178606Salc	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
2204224746Skib	    (m->oflags & VPO_UNMANAGED) != 0,
2205191300Salc	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
2206239317Salc	rw_assert(&pvh_global_lock, RA_WLOCKED);
2207191300Salc	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2208191300Salc
2209178172Simp	/*
2210178172Simp	 * In the case that a page table page is not resident, we are
2211178172Simp	 * creating it here.
2212178172Simp	 */
2213178172Simp	if (va < VM_MAXUSER_ADDRESS) {
2214210846Sjchandra		pd_entry_t *pde;
2215178172Simp		unsigned ptepindex;
2216178172Simp
2217178172Simp		/*
2218178172Simp		 * Calculate pagetable page index
2219178172Simp		 * Calculate the page table page index.
2220210846Sjchandra		ptepindex = pmap_pde_pindex(va);
2221178172Simp		if (mpte && (mpte->pindex == ptepindex)) {
2222178172Simp			mpte->wire_count++;
2223178172Simp		} else {
2224178172Simp			/*
2225178172Simp			 * Get the page directory entry
2226178172Simp			 */
2227210846Sjchandra			pde = pmap_pde(pmap, va);
2228178172Simp
2229178172Simp			/*
2230178172Simp			 * If the page table page is mapped, we just
2231178172Simp			 * increment the hold count, and activate it.
2232178172Simp			 * increment the wire count, and activate it.
2233210846Sjchandra			if (pde && *pde != 0) {
2234239152Salc				mpte = PHYS_TO_VM_PAGE(
2235239152Salc				    MIPS_DIRECT_TO_PHYS(*pde));
2236178172Simp				mpte->wire_count++;
2237178172Simp			} else {
2238191300Salc				mpte = _pmap_allocpte(pmap, ptepindex,
2239270439Skib				    PMAP_ENTER_NOSLEEP);
2240191300Salc				if (mpte == NULL)
2241191300Salc					return (mpte);
2242178172Simp			}
2243178172Simp		}
2244178172Simp	} else {
2245178172Simp		mpte = NULL;
2246178172Simp	}
2247178172Simp
2248178172Simp	pte = pmap_pte(pmap, va);
2249209482Sjchandra	if (pte_test(pte, PTE_V)) {
2250191300Salc		if (mpte != NULL) {
2251191300Salc			mpte->wire_count--;
2252191300Salc			mpte = NULL;
2253191300Salc		}
2254191300Salc		return (mpte);
2255178172Simp	}
2256191300Salc
2257178172Simp	/*
2258191300Salc	 * Enter on the PV list if part of our managed memory.
2259178172Simp	 */
2260224746Skib	if ((m->oflags & VPO_UNMANAGED) == 0 &&
2261191300Salc	    !pmap_try_insert_pv_entry(pmap, mpte, va, m)) {
2262191300Salc		if (mpte != NULL) {
2263240126Salc			pmap_unwire_ptp(pmap, va, mpte);
2264191300Salc			mpte = NULL;
2265191300Salc		}
2266191300Salc		return (mpte);
2267191300Salc	}
2268178172Simp
2269178172Simp	/*
2270178172Simp	 * Increment counters
2271178172Simp	 */
2272178172Simp	pmap->pm_stats.resident_count++;
2273178172Simp
2274178172Simp	pa = VM_PAGE_TO_PHYS(m);
2275178172Simp
2276178172Simp	/*
2277178172Simp	 * Now validate mapping with RO protection
2278178172Simp	 */
2279240241Salc	*pte = PTE_RO | TLBLO_PA_TO_PFN(pa) | PTE_V;
2280239964Salc	if ((m->oflags & VPO_UNMANAGED) == 0)
2281239964Salc		*pte |= PTE_MANAGED;
2282178172Simp
2283178172Simp	if (is_cacheable_mem(pa))
2284209482Sjchandra		*pte |= PTE_C_CACHE;
2285178172Simp	else
2286209482Sjchandra		*pte |= PTE_C_UNCACHED;
2287178172Simp
2288178172Simp	if (is_kernel_pmap(pmap))
2289178172Simp		*pte |= PTE_G;
2290178172Simp	else {
2291178172Simp		/*
2292218909Sbrucec		 * Sync I & D caches.  Do this only if the target pmap
2293178172Simp		 * belongs to the current process.  Otherwise, an
2294178172Simp		 * unresolvable TLB miss may occur.
		 */
2295178172Simp		if (pmap == &curproc->p_vmspace->vm_pmap) {
2296178172Simp			va &= ~PAGE_MASK;
2297206746Sjmallett			mips_icache_sync_range(va, PAGE_SIZE);
2298206746Sjmallett			mips_dcache_wbinv_range(va, PAGE_SIZE);
2299178172Simp		}
2300178172Simp	}
2301191300Salc	return (mpte);
2302178172Simp}
2303178172Simp
2304178172Simp/*
2305178172Simp * Make a temporary mapping for a physical address.  This is only intended
2306178172Simp * to be used for panic dumps.
2307209930Sjchandra *
2308209930Sjchandra * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
2309178172Simp */
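/*
 * For reference, the direct-map translation is pure arithmetic.  On
 * 32-bit kernels KSEG0 is a fixed, cached window onto the low 512 MB
 * of physical memory, so for an illustrative address
 *
 *	MIPS_PHYS_TO_DIRECT(0x01234000)		-> 0x81234000
 *
 * while 64-bit kernels use an XKPHYS window that covers all of
 * physical memory.
 */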
2310178172Simpvoid *
2311178172Simppmap_kenter_temporary(vm_paddr_t pa, int i)
2312178172Simp{
2313178172Simp	vm_offset_t va;
2314211453Sjchandra
2315178172Simp	if (i != 0)
2316178172Simp		printf("%s: ERROR!!! More than one page of virtual address mapping not supported\n",
2317178172Simp		    __func__);
2318178172Simp
2319211453Sjchandra	if (MIPS_DIRECT_MAPPABLE(pa)) {
2320211453Sjchandra		va = MIPS_PHYS_TO_DIRECT(pa);
2321178172Simp	} else {
2322211453Sjchandra#ifndef __mips_n64    /* XXX : to be converted to new style */
2323178172Simp		int cpu;
2324211453Sjchandra		register_t intr;
2325178172Simp		struct local_sysmaps *sysm;
2326206717Sjmallett		pt_entry_t *pte, npte;
2327206717Sjmallett
2328203151Srrs		/* If this is used other than for dumps, we may need to leave
2329203151Srrs		 * interrupts disabled on return. If crash dumps don't work when
2330203151Srrs		 * we get to this point, we might want to consider this (leaving things
2331203151Srrs		 * disabled as a starting point ;-)
2332203151Srrs	 	 */
2333206717Sjmallett		intr = intr_disable();
2334178172Simp		cpu = PCPU_GET(cpuid);
2335178172Simp		sysm = &sysmap_lmem[cpu];
2336178172Simp		/* Since this is for the debugger, no locks or any other fun */
2337241287Salc		npte = TLBLO_PA_TO_PFN(pa) | PTE_C_CACHE | PTE_D | PTE_V |
2338241287Salc		    PTE_G;
2339206717Sjmallett		pte = pmap_pte(kernel_pmap, sysm->base);
2340206717Sjmallett		*pte = npte;
2341203151Srrs		sysm->valid1 = 1;
2342206717Sjmallett		pmap_update_page(kernel_pmap, sysm->base, npte);
2343206717Sjmallett		va = sysm->base;
2344206717Sjmallett		intr_restore(intr);
2345211453Sjchandra#endif
2346178172Simp	}
2347178172Simp	return ((void *)va);
2348178172Simp}
2349178172Simp
2350178172Simpvoid
2351178172Simppmap_kenter_temporary_free(vm_paddr_t pa)
2352178172Simp{
2353211453Sjchandra#ifndef __mips_n64    /* XXX : to be converted to new style */
2354178172Simp	int cpu;
2355206717Sjmallett	register_t intr;
2356178172Simp	struct local_sysmaps *sysm;
2357211453Sjchandra#endif
2358178172Simp
2359211453Sjchandra	if (MIPS_DIRECT_MAPPABLE(pa)) {
2360178172Simp		/* nothing to do for this case */
2361178172Simp		return;
2362178172Simp	}
2363211453Sjchandra#ifndef __mips_n64    /* XXX : to be converted to new style */
2364178172Simp	cpu = PCPU_GET(cpuid);
2365178172Simp	sysm = &sysmap_lmem[cpu];
2366178172Simp	if (sysm->valid1) {
2367206717Sjmallett		pt_entry_t *pte;
2368206717Sjmallett
2369206717Sjmallett		intr = intr_disable();
2370206717Sjmallett		pte = pmap_pte(kernel_pmap, sysm->base);
2371206717Sjmallett		*pte = PTE_G;
2372206717Sjmallett		pmap_invalidate_page(kernel_pmap, sysm->base);
2373206717Sjmallett		intr_restore(intr);
2374178172Simp		sysm->valid1 = 0;
2375178172Simp	}
2376211453Sjchandra#endif
2377178172Simp}
2378178172Simp
2379178172Simp/*
2380178172Simp * Maps a sequence of resident pages belonging to the same object.
2381178172Simp * The sequence begins with the given page m_start.  This page is
2382178172Simp * mapped at the given virtual address start.  Each subsequent page is
2383178172Simp * mapped at a virtual address that is offset from start by the same
2384178172Simp * amount as the page is offset from m_start within the object.  The
2385178172Simp * last page in the sequence is the page with the largest offset from
2386178172Simp * m_start that can be mapped at a virtual address less than the given
2387178172Simp * virtual address end.  Not every virtual page between start and end
2388178172Simp * is mapped; only those for which a resident page exists with the
2389178172Simp * corresponding offset from m_start are mapped.
2390178172Simp */
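/*
 * A small worked example (hypothetical values): with start ==
 * 0x20000000 and m_start->pindex == 5, a resident page with pindex 7
 * has diff == 2 and is mapped at start + ptoa(2), i.e. 0x20002000
 * with 4 KB pages.  Pages whose diff reaches psize are not mapped.
 */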
2391178172Simpvoid
2392178172Simppmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
2393178172Simp    vm_page_t m_start, vm_prot_t prot)
2394178172Simp{
2395191300Salc	vm_page_t m, mpte;
2396178172Simp	vm_pindex_t diff, psize;
2397178172Simp
2398250884Sattilio	VM_OBJECT_ASSERT_LOCKED(m_start->object);
2399250884Sattilio
2400178172Simp	psize = atop(end - start);
2401191300Salc	mpte = NULL;
2402178172Simp	m = m_start;
2403239317Salc	rw_wlock(&pvh_global_lock);
2404191300Salc	PMAP_LOCK(pmap);
2405178172Simp	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
2406191300Salc		mpte = pmap_enter_quick_locked(pmap, start + ptoa(diff), m,
2407191300Salc		    prot, mpte);
2408178172Simp		m = TAILQ_NEXT(m, listq);
2409178172Simp	}
2410239317Salc	rw_wunlock(&pvh_global_lock);
2411191300Salc 	PMAP_UNLOCK(pmap);
2412178172Simp}
2413178172Simp
2414178172Simp/*
2415178172Simp * pmap_object_init_pt preloads the ptes for a given object
2416178172Simp * into the specified pmap.  This eliminates the blast of soft
2417178172Simp * faults on process startup and immediately after an mmap.
2418178172Simp */
2419178172Simpvoid
2420178172Simppmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
2421178172Simp    vm_object_t object, vm_pindex_t pindex, vm_size_t size)
2422178172Simp{
2423248084Sattilio	VM_OBJECT_ASSERT_WLOCKED(object);
2424195840Sjhb	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
2425178172Simp	    ("pmap_object_init_pt: non-device object"));
2426178172Simp}
2427178172Simp
2428178172Simp/*
2429270920Skib *	Clear the wired attribute from the mappings for the specified range of
2430270920Skib *	addresses in the given pmap.  Every valid mapping within that range
2431270920Skib *	must have the wired attribute set.  In contrast, invalid mappings
2432270920Skib *	cannot have the wired attribute set, so they are ignored.
2433270920Skib *
2434270920Skib *	The wired attribute of the page table entry is not a hardware feature,
2435270920Skib *	so there is no need to invalidate any TLB entries.
2436178172Simp */
2437178172Simpvoid
2438270920Skibpmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2439178172Simp{
2440270920Skib	pd_entry_t *pde, *pdpe;
2441209482Sjchandra	pt_entry_t *pte;
2442270920Skib	vm_offset_t va_next;
2443178172Simp
2444178172Simp	PMAP_LOCK(pmap);
2445270920Skib	for (; sva < eva; sva = va_next) {
2446270920Skib		pdpe = pmap_segmap(pmap, sva);
2447270920Skib#ifdef __mips_n64
2448270920Skib		if (*pdpe == 0) {
2449270920Skib			va_next = (sva + NBSEG) & ~SEGMASK;
2450270920Skib			if (va_next < sva)
2451270920Skib				va_next = eva;
2452270920Skib			continue;
2453270920Skib		}
2454270920Skib#endif
2455270920Skib		va_next = (sva + NBPDR) & ~PDRMASK;
2456270920Skib		if (va_next < sva)
2457270920Skib			va_next = eva;
2458270920Skib		pde = pmap_pdpe_to_pde(pdpe, sva);
2459270920Skib		if (*pde == 0)
2460270920Skib			continue;
2461270920Skib		if (va_next > eva)
2462270920Skib			va_next = eva;
2463270920Skib		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
2464270920Skib		    sva += PAGE_SIZE) {
2465270920Skib			if (!pte_test(pte, PTE_V))
2466270920Skib				continue;
2467270920Skib			if (!pte_test(pte, PTE_W))
2468270920Skib				panic("pmap_unwire: pte %#jx is missing PTE_W",
2469270920Skib				    (uintmax_t)*pte);
2470270920Skib			pte_clear(pte, PTE_W);
2471270920Skib			pmap->pm_stats.wired_count--;
2472270920Skib		}
2473270920Skib	}
2474178172Simp	PMAP_UNLOCK(pmap);
2475178172Simp}
2476178172Simp
2477178172Simp/*
2478178172Simp *	Copy the range specified by src_addr/len
2479178172Simp *	from the source map to the range dst_addr/len
2480178172Simp *	in the destination map.
2481178172Simp *
2482178172Simp *	This routine is only advisory and need not do anything.
2483178172Simp */
2484178172Simp
2485178172Simpvoid
2486178172Simppmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
2487178172Simp    vm_size_t len, vm_offset_t src_addr)
2488178172Simp{
2489178172Simp}
2490178172Simp
2491178172Simp/*
2492178172Simp *	pmap_zero_page zeros the specified hardware page by mapping
2493178172Simp *	the page into KVM and using bzero to clear its contents.
2494209930Sjchandra *
2495209930Sjchandra * 	Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
2496178172Simp */
2497178172Simpvoid
2498178172Simppmap_zero_page(vm_page_t m)
2499178172Simp{
2500178172Simp	vm_offset_t va;
2501178172Simp	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
2502209930Sjchandra
2503211453Sjchandra	if (MIPS_DIRECT_MAPPABLE(phys)) {
2504211453Sjchandra		va = MIPS_PHYS_TO_DIRECT(phys);
2505178172Simp		bzero((caddr_t)va, PAGE_SIZE);
2506187301Sgonzo		mips_dcache_wbinv_range(va, PAGE_SIZE);
2507178172Simp	} else {
2508211453Sjchandra		va = pmap_lmem_map1(phys);
2509206717Sjmallett		bzero((caddr_t)va, PAGE_SIZE);
2510206717Sjmallett		mips_dcache_wbinv_range(va, PAGE_SIZE);
2511211453Sjchandra		pmap_lmem_unmap();
2512178172Simp	}
2513178172Simp}
2514211453Sjchandra
2515178172Simp/*
2516178172Simp *	pmap_zero_page_area zeros the specified hardware page by mapping
2517178172Simp *	the page into KVM and using bzero to clear its contents.
2518178172Simp *
2519178172Simp *	off and size may not cover an area beyond a single hardware page.
2520178172Simp */
2521178172Simpvoid
2522178172Simppmap_zero_page_area(vm_page_t m, int off, int size)
2523178172Simp{
2524178172Simp	vm_offset_t va;
2525178172Simp	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
2526209930Sjchandra
2527211453Sjchandra	if (MIPS_DIRECT_MAPPABLE(phys)) {
2528211453Sjchandra		va = MIPS_PHYS_TO_DIRECT(phys);
2529178172Simp		bzero((char *)(caddr_t)va + off, size);
2530187301Sgonzo		mips_dcache_wbinv_range(va + off, size);
2531178172Simp	} else {
2532211453Sjchandra		va = pmap_lmem_map1(phys);
2533206717Sjmallett		bzero((char *)va + off, size);
2534206717Sjmallett		mips_dcache_wbinv_range(va + off, size);
2535211453Sjchandra		pmap_lmem_unmap();
2536178172Simp	}
2537178172Simp}
2538178172Simp
2539178172Simpvoid
2540178172Simppmap_zero_page_idle(vm_page_t m)
2541178172Simp{
2542178172Simp	vm_offset_t va;
2543178172Simp	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
2544209930Sjchandra
2545211453Sjchandra	if (MIPS_DIRECT_MAPPABLE(phys)) {
2546211453Sjchandra		va = MIPS_PHYS_TO_DIRECT(phys);
2547178172Simp		bzero((caddr_t)va, PAGE_SIZE);
2548187301Sgonzo		mips_dcache_wbinv_range(va, PAGE_SIZE);
2549178172Simp	} else {
2550211453Sjchandra		va = pmap_lmem_map1(phys);
2551206717Sjmallett		bzero((caddr_t)va, PAGE_SIZE);
2552206717Sjmallett		mips_dcache_wbinv_range(va, PAGE_SIZE);
2553211453Sjchandra		pmap_lmem_unmap();
2554178172Simp	}
2555178172Simp}
2556178172Simp
2557178172Simp/*
2558178172Simp *	pmap_copy_page copies the specified (machine independent)
2559178172Simp *	page by mapping the page into virtual memory and using
2560178172Simp *	bcopy to copy the page, one machine dependent page at a
2561178172Simp *	time.
2562209930Sjchandra *
2563209930Sjchandra * 	Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
2564178172Simp */
2565178172Simpvoid
2566178172Simppmap_copy_page(vm_page_t src, vm_page_t dst)
2567178172Simp{
2568178172Simp	vm_offset_t va_src, va_dst;
2569211453Sjchandra	vm_paddr_t phys_src = VM_PAGE_TO_PHYS(src);
2570211453Sjchandra	vm_paddr_t phys_dst = VM_PAGE_TO_PHYS(dst);
2571209930Sjchandra
2572211453Sjchandra	if (MIPS_DIRECT_MAPPABLE(phys_src) && MIPS_DIRECT_MAPPABLE(phys_dst)) {
2573206716Sjmallett		/* easy case, all can be accessed via KSEG0 */
2574206716Sjmallett		/*
2575206716Sjmallett		 * Flush all caches for VA that are mapped to this page
2576206716Sjmallett		 * to make sure that data in SDRAM is up to date
2577206716Sjmallett		 */
2578206716Sjmallett		pmap_flush_pvcache(src);
2579206716Sjmallett		mips_dcache_wbinv_range_index(
2580211453Sjchandra		    MIPS_PHYS_TO_DIRECT(phys_dst), PAGE_SIZE);
2581211453Sjchandra		va_src = MIPS_PHYS_TO_DIRECT(phys_src);
2582211453Sjchandra		va_dst = MIPS_PHYS_TO_DIRECT(phys_dst);
2583178172Simp		bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE);
2584206716Sjmallett		mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
2585206716Sjmallett	} else {
2586211453Sjchandra		va_src = pmap_lmem_map2(phys_src, phys_dst);
2587211453Sjchandra		va_dst = va_src + PAGE_SIZE;
2588206716Sjmallett		bcopy((void *)va_src, (void *)va_dst, PAGE_SIZE);
2589206717Sjmallett		mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
2590211453Sjchandra		pmap_lmem_unmap();
2591178172Simp	}
2592178172Simp}
2593178172Simp
2594248508Skibint unmapped_buf_allowed;
2595248508Skib
2596248280Skibvoid
2597248280Skibpmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
2598248280Skib    vm_offset_t b_offset, int xfersize)
2599248280Skib{
2600248280Skib	char *a_cp, *b_cp;
2601248280Skib	vm_page_t a_m, b_m;
2602248280Skib	vm_offset_t a_pg_offset, b_pg_offset;
2603248280Skib	vm_paddr_t a_phys, b_phys;
2604248280Skib	int cnt;
2605248280Skib
2606248280Skib	while (xfersize > 0) {
2607248280Skib		a_pg_offset = a_offset & PAGE_MASK;
2608248280Skib		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
2609248280Skib		a_m = ma[a_offset >> PAGE_SHIFT];
2610248280Skib		a_phys = VM_PAGE_TO_PHYS(a_m);
2611248280Skib		b_pg_offset = b_offset & PAGE_MASK;
2612248280Skib		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
2613248280Skib		b_m = mb[b_offset >> PAGE_SHIFT];
2614248280Skib		b_phys = VM_PAGE_TO_PHYS(b_m);
2615248280Skib		if (MIPS_DIRECT_MAPPABLE(a_phys) &&
2616248280Skib		    MIPS_DIRECT_MAPPABLE(b_phys)) {
2617248280Skib			pmap_flush_pvcache(a_m);
2618248280Skib			mips_dcache_wbinv_range_index(
2619248280Skib			    MIPS_PHYS_TO_DIRECT(b_phys), PAGE_SIZE);
2620248280Skib			a_cp = (char *)MIPS_PHYS_TO_DIRECT(a_phys) +
2621248280Skib			    a_pg_offset;
2622248280Skib			b_cp = (char *)MIPS_PHYS_TO_DIRECT(b_phys) +
2623248280Skib			    b_pg_offset;
2624248280Skib			bcopy(a_cp, b_cp, cnt);
2625248280Skib			mips_dcache_wbinv_range((vm_offset_t)b_cp, cnt);
2626248280Skib		} else {
2627248280Skib			a_cp = (char *)pmap_lmem_map2(a_phys, b_phys);
2628248280Skib			b_cp = (char *)a_cp + PAGE_SIZE;
2629248280Skib			a_cp += a_pg_offset;
2630248280Skib			b_cp += b_pg_offset;
2631248280Skib			bcopy(a_cp, b_cp, cnt);
2632248280Skib			mips_dcache_wbinv_range((vm_offset_t)b_cp, cnt);
2633248280Skib			pmap_lmem_unmap();
2634248280Skib		}
2635248280Skib		a_offset += cnt;
2636248280Skib		b_offset += cnt;
2637248280Skib		xfersize -= cnt;
2638248280Skib	}
2639248280Skib}
2640248280Skib
2641178172Simp/*
2642178172Simp * Returns true if the pmap's pv is one of the first
2643178172Simp * 16 pvs linked to from this page.  This count may
2644178172Simp * be changed upwards or downwards in the future; it
2645178172Simp * is only necessary that true be returned for a small
2646178172Simp * subset of pmaps for proper page aging.
2647178172Simp */
2648178172Simpboolean_t
2649178172Simppmap_page_exists_quick(pmap_t pmap, vm_page_t m)
2650178172Simp{
2651178172Simp	pv_entry_t pv;
2652178172Simp	int loops = 0;
2653208990Salc	boolean_t rv;
2654178172Simp
2655224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2656208990Salc	    ("pmap_page_exists_quick: page %p is not managed", m));
2657208990Salc	rv = FALSE;
2658239317Salc	rw_wlock(&pvh_global_lock);
2659178172Simp	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2660239236Salc		if (PV_PMAP(pv) == pmap) {
2661208990Salc			rv = TRUE;
2662208990Salc			break;
2663178172Simp		}
2664178172Simp		loops++;
2665178172Simp		if (loops >= 16)
2666178172Simp			break;
2667178172Simp	}
2668239317Salc	rw_wunlock(&pvh_global_lock);
2669208990Salc	return (rv);
2670178172Simp}
2671178172Simp
2672178172Simp/*
2673178172Simp * Remove all pages from the specified address space;
2674178172Simp * this aids process exit speeds.  Also, this code
2675178172Simp * is special cased for the current process only, but
2676178172Simp * can have the more generic (and slightly slower)
2677178172Simp * mode enabled.  This is much faster than pmap_remove
2678178172Simp * in the case of running down an entire address space.
2679178172Simp */
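/*
 * A note on cost (an observation about the loop below, not added
 * behavior): the walk is over the pmap's pv chunks, in sketch form
 *
 *	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc)
 *		for each in-use bit in pc->pc_map: remove one mapping
 *
 * so the cost scales with the number of managed mappings rather than
 * with the size of the address space.
 */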
2680178172Simpvoid
2681178172Simppmap_remove_pages(pmap_t pmap)
2682178172Simp{
2683239152Salc	pd_entry_t *pde;
2684178172Simp	pt_entry_t *pte, tpte;
2685239236Salc	pv_entry_t pv;
2686178172Simp	vm_page_t m;
2687239236Salc	struct pv_chunk *pc, *npc;
2688239236Salc	u_long inuse, bitmask;
2689239236Salc	int allfree, bit, field, idx;
2690178172Simp
2691178172Simp	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
2692178172Simp		printf("warning: pmap_remove_pages called with non-current pmap\n");
2693178172Simp		return;
2694178172Simp	}
2695239317Salc	rw_wlock(&pvh_global_lock);
2696178172Simp	PMAP_LOCK(pmap);
2697239236Salc	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
2698239236Salc		allfree = 1;
2699239236Salc		for (field = 0; field < _NPCM; field++) {
2700239236Salc			inuse = ~pc->pc_map[field] & pc_freemask[field];
2701239236Salc			while (inuse != 0) {
2702239236Salc				bit = ffsl(inuse) - 1;
2703239236Salc				bitmask = 1UL << bit;
2704239236Salc				idx = field * sizeof(inuse) * NBBY + bit;
2705239236Salc				pv = &pc->pc_pventry[idx];
2706239236Salc				inuse &= ~bitmask;
2707178172Simp
2708239236Salc				pde = pmap_pde(pmap, pv->pv_va);
2709239236Salc				KASSERT(pde != NULL && *pde != 0,
2710239236Salc				    ("pmap_remove_pages: pde"));
2711239236Salc				pte = pmap_pde_to_pte(pde, pv->pv_va);
2712239236Salc				if (!pte_test(pte, PTE_V))
2713239236Salc					panic("pmap_remove_pages: bad pte");
2714239236Salc				tpte = *pte;
2715178172Simp
2716178172Simp/*
2717178172Simp * We cannot remove wired pages from a process' mapping at this time
2718178172Simp */
2719239236Salc				if (pte_test(&tpte, PTE_W)) {
2720239236Salc					allfree = 0;
2721239236Salc					continue;
2722239236Salc				}
2723239236Salc				*pte = is_kernel_pmap(pmap) ? PTE_G : 0;
2724178172Simp
2725239236Salc				m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(tpte));
2726239236Salc				KASSERT(m != NULL,
2727239236Salc				    ("pmap_remove_pages: bad tpte %#jx",
2728239236Salc				    (uintmax_t)tpte));
2729178172Simp
2730239236Salc				/*
2731239236Salc				 * Update the vm_page_t clean and reference bits.
2732239236Salc				 */
2733239236Salc				if (pte_test(&tpte, PTE_D))
2734239236Salc					vm_page_dirty(m);
2735178172Simp
2736239236Salc				/* Mark free */
2737239236Salc				PV_STAT(pv_entry_frees++);
2738239236Salc				PV_STAT(pv_entry_spare++);
2739239236Salc				pv_entry_count--;
2740239236Salc				pc->pc_map[field] |= bitmask;
2741239236Salc				pmap->pm_stats.resident_count--;
2742239236Salc				TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2743239236Salc				if (TAILQ_EMPTY(&m->md.pv_list))
2744239236Salc					vm_page_aflag_clear(m, PGA_WRITEABLE);
2745239236Salc				pmap_unuse_pt(pmap, pv->pv_va, *pde);
2746239236Salc			}
2747178172Simp		}
2748239236Salc		if (allfree) {
2749239236Salc			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2750239236Salc			free_pv_chunk(pc);
2751178172Simp		}
2752178172Simp	}
2753178172Simp	pmap_invalidate_all(pmap);
2754178172Simp	PMAP_UNLOCK(pmap);
2755239317Salc	rw_wunlock(&pvh_global_lock);
2756178172Simp}
2757178172Simp
2758178172Simp/*
2759178172Simp * pmap_testbit tests bits in PTEs.
2760178172Simp */
2761178172Simpstatic boolean_t
2762178172Simppmap_testbit(vm_page_t m, int bit)
2763178172Simp{
2764178172Simp	pv_entry_t pv;
2765239236Salc	pmap_t pmap;
2766178172Simp	pt_entry_t *pte;
2767178172Simp	boolean_t rv = FALSE;
2768178172Simp
2769224746Skib	if (m->oflags & VPO_UNMANAGED)
2770211445Sjchandra		return (rv);
2771178172Simp
2772239317Salc	rw_assert(&pvh_global_lock, RA_WLOCKED);
2773178172Simp	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2774239236Salc		pmap = PV_PMAP(pv);
2775239236Salc		PMAP_LOCK(pmap);
2776239236Salc		pte = pmap_pte(pmap, pv->pv_va);
2777209482Sjchandra		rv = pte_test(pte, bit);
2778239236Salc		PMAP_UNLOCK(pmap);
2779178172Simp		if (rv)
2780178172Simp			break;
2781178172Simp	}
2782178172Simp	return (rv);
2783178172Simp}
2784178172Simp
2785178172Simp/*
2786178172Simp *	pmap_page_wired_mappings:
2787178172Simp *
2788178172Simp *	Return the number of managed mappings to the given physical page
2789178172Simp *	that are wired.
2790178172Simp */
2791178172Simpint
2792178172Simppmap_page_wired_mappings(vm_page_t m)
2793178172Simp{
2794178172Simp	pv_entry_t pv;
2795210914Sjchandra	pmap_t pmap;
2796210914Sjchandra	pt_entry_t *pte;
2797178172Simp	int count;
2798178172Simp
2799178172Simp	count = 0;
2800224746Skib	if ((m->oflags & VPO_UNMANAGED) != 0)
2801178172Simp		return (count);
2802239317Salc	rw_wlock(&pvh_global_lock);
2803210914Sjchandra	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2804239236Salc		pmap = PV_PMAP(pv);
2805210914Sjchandra		PMAP_LOCK(pmap);
2806210914Sjchandra		pte = pmap_pte(pmap, pv->pv_va);
2807210914Sjchandra		if (pte_test(pte, PTE_W))
2808210914Sjchandra			count++;
2809210914Sjchandra		PMAP_UNLOCK(pmap);
2810210914Sjchandra	}
2811239317Salc	rw_wunlock(&pvh_global_lock);
2812178172Simp	return (count);
2813178172Simp}
2814178172Simp
2815178172Simp/*
2816178172Simp * Clear the write and modified bits in each of the given page's mappings.
2817178172Simp */
2818178172Simpvoid
2819178172Simppmap_remove_write(vm_page_t m)
2820178172Simp{
2821239236Salc	pmap_t pmap;
2822239236Salc	pt_entry_t pbits, *pte;
2823239236Salc	pv_entry_t pv;
2824178172Simp
2825224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2826208175Salc	    ("pmap_remove_write: page %p is not managed", m));
2827208175Salc
2828208175Salc	/*
2829254138Sattilio	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
2830254138Sattilio	 * set by another thread while the object is locked.  Thus,
2831254138Sattilio	 * if PGA_WRITEABLE is clear, no page table entries need updating.
2832208175Salc	 */
2833248084Sattilio	VM_OBJECT_ASSERT_WLOCKED(m->object);
2834254138Sattilio	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
2835178172Simp		return;
2836239317Salc	rw_wlock(&pvh_global_lock);
2837239236Salc	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2838239236Salc		pmap = PV_PMAP(pv);
2839239236Salc		PMAP_LOCK(pmap);
2840239236Salc		pte = pmap_pte(pmap, pv->pv_va);
2841239236Salc		KASSERT(pte != NULL && pte_test(pte, PTE_V),
2842239236Salc		    ("page on pv_list has no pte"));
2843239236Salc		pbits = *pte;
2844239236Salc		if (pte_test(&pbits, PTE_D)) {
2845239236Salc			pte_clear(&pbits, PTE_D);
2846239236Salc			vm_page_dirty(m);
2847239236Salc		}
2848239236Salc		pte_set(&pbits, PTE_RO);
2849239236Salc		if (pbits != *pte) {
2850239236Salc			*pte = pbits;
2851239236Salc			pmap_update_page(pmap, pv->pv_va, pbits);
2852239236Salc		}
2853239236Salc		PMAP_UNLOCK(pmap);
2854178172Simp	}
2855225418Skib	vm_page_aflag_clear(m, PGA_WRITEABLE);
2856239317Salc	rw_wunlock(&pvh_global_lock);
2857178172Simp}

/*
 *	pmap_ts_referenced:
 *
 *	Return the count of reference bits for a page, clearing all of them.
 */
int
pmap_ts_referenced(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_ts_referenced: page %p is not managed", m));
	if (m->md.pv_flags & PV_TABLE_REF) {
		rw_wlock(&pvh_global_lock);
		m->md.pv_flags &= ~PV_TABLE_REF;
		rw_wunlock(&pvh_global_lock);
		return (1);
	}
	return (0);
}

/*
 *	pmap_is_modified:
 *
 *	Return whether or not the specified physical page was modified
 *	in any physical maps.
 */
boolean_t
pmap_is_modified(vm_page_t m)
{
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_is_modified: page %p is not managed", m));

	/*
	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
	 * is clear, no PTEs can have PTE_D set.
	 */
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
		return (FALSE);
	rw_wlock(&pvh_global_lock);
	rv = pmap_testbit(m, PTE_D);
	rw_wunlock(&pvh_global_lock);
	return (rv);
}

/* N/C */

/*
 *	pmap_is_prefaultable:
 *
 *	Return whether or not the specified virtual address is eligible
 *	for prefault.
 */
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{
	pd_entry_t *pde;
	pt_entry_t *pte;
	boolean_t rv;

	rv = FALSE;
	PMAP_LOCK(pmap);
	pde = pmap_pde(pmap, addr);
	if (pde != NULL && *pde != 0) {
		pte = pmap_pde_to_pte(pde, addr);
		rv = (*pte == 0);
	}
	PMAP_UNLOCK(pmap);
	return (rv);
}

/*
 *	Apply the given advice to the specified range of addresses within the
 *	given pmap.  Depending on the advice, clear the referenced and/or
 *	modified flags in each mapping and set the mapped page's dirty field.
 */
void
pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
{
	pd_entry_t *pde, *pdpe;
	pt_entry_t *pte;
	vm_offset_t va, va_next;
	vm_paddr_t pa;
	vm_page_t m;

	if (advice != MADV_DONTNEED && advice != MADV_FREE)
		return;
	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
	for (; sva < eva; sva = va_next) {
		pdpe = pmap_segmap(pmap, sva);
#ifdef __mips_n64
		if (*pdpe == 0) {
			va_next = (sva + NBSEG) & ~SEGMASK;
			if (va_next < sva)
				va_next = eva;
			continue;
		}
#endif
		va_next = (sva + NBPDR) & ~PDRMASK;
		if (va_next < sva)
			va_next = eva;

		pde = pmap_pdpe_to_pde(pdpe, sva);
		if (*pde == 0)
			continue;

		/*
		 * Limit our scan to either the end of the va represented
		 * by the current page table page, or to the end of the
		 * range being advised.
		 */
		if (va_next > eva)
			va_next = eva;

		va = va_next;
		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
		    sva += PAGE_SIZE) {
			if (!pte_test(pte, PTE_MANAGED | PTE_V)) {
				if (va != va_next) {
					pmap_invalidate_range(pmap, va, sva);
					va = va_next;
				}
				continue;
			}
			pa = TLBLO_PTE_TO_PA(*pte);
			m = PHYS_TO_VM_PAGE(pa);
			m->md.pv_flags &= ~PV_TABLE_REF;
			if (pte_test(pte, PTE_D)) {
				if (advice == MADV_DONTNEED) {
					/*
					 * Future calls to pmap_is_modified()
					 * can be avoided by making the page
					 * dirty now.
					 */
					vm_page_dirty(m);
				} else {
					pte_clear(pte, PTE_D);
					if (va == va_next)
						va = sva;
				}
			} else {
				/*
				 * Unless PTE_D is set, any TLB entries
				 * mapping "sva" don't allow write access, so
				 * they needn't be invalidated.
				 */
				if (va != va_next) {
					pmap_invalidate_range(pmap, va, sva);
					va = va_next;
				}
			}
		}
		if (va != va_next)
			pmap_invalidate_range(pmap, va, sva);
	}
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
}
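
/*
 * The loop above batches TLB invalidations: "va" marks the start of a
 * pending run of modified mappings, "sva" its current end, and
 * pmap_invalidate_range() runs only when the run is broken or the scan
 * ends.  A minimal stand-alone sketch of the same idiom follows (kept
 * under "#if 0"; flush_range() is hypothetical and stands in for
 * pmap_invalidate_range()):
 */
#if 0
static void
batch_sketch(const int *dirty, int n)
{
	int i, run = -1;		/* -1 plays the role of va == va_next */

	for (i = 0; i < n; i++) {
		if (!dirty[i]) {	/* like an invalid/unmanaged PTE */
			if (run != -1) {
				flush_range(run, i);	/* close the run */
				run = -1;
			}
			continue;
		}
		if (run == -1)		/* like "if (va == va_next) va = sva" */
			run = i;
	}
	if (run != -1)			/* like the trailing invalidate */
		flush_range(run, n);
}
#endif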

/*
 *	Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(vm_page_t m)
{
	pmap_t pmap;
	pt_entry_t *pte;
	pv_entry_t pv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_clear_modify: page %p is not managed", m));
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	KASSERT(!vm_page_xbusied(m),
	    ("pmap_clear_modify: page %p is exclusive busied", m));

	/*
	 * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_D set.
	 * If the object containing the page is locked and the page is not
	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
	 */
	if ((m->aflags & PGA_WRITEABLE) == 0)
		return;
	rw_wlock(&pvh_global_lock);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		pmap = PV_PMAP(pv);
		PMAP_LOCK(pmap);
		pte = pmap_pte(pmap, pv->pv_va);
		if (pte_test(pte, PTE_D)) {
			pte_clear(pte, PTE_D);
			pmap_update_page(pmap, pv->pv_va, *pte);
		}
		PMAP_UNLOCK(pmap);
	}
	rw_wunlock(&pvh_global_lock);
}

/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page was referenced
 *	in any physical maps.
 */
boolean_t
pmap_is_referenced(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_is_referenced: page %p is not managed", m));
	return ((m->md.pv_flags & PV_TABLE_REF) != 0);
}

/*
 * Miscellaneous support routines follow
 */

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space. Return a pointer to where it is mapped. This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 *
 * Use XKPHYS uncached for 64 bit, and KSEG1 where possible for 32 bit.
 */
void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t va, tmpva, offset;

	/*
	 * KSEG1 maps only the first 512M of the physical address space, so
	 * for pa > 0x20000000 we must build a proper mapping using
	 * pmap_kenter_attr().
	 */
	if (MIPS_DIRECT_MAPPABLE(pa + size - 1))
		return ((void *)MIPS_PHYS_TO_DIRECT_UNCACHED(pa));
	else {
		offset = pa & PAGE_MASK;
		size = roundup(size + offset, PAGE_SIZE);

		va = kva_alloc(size);
		if (!va)
			panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
		pa = trunc_page(pa);
		for (tmpva = va; size > 0;) {
			pmap_kenter_attr(tmpva, pa, PTE_C_UNCACHED);
			size -= PAGE_SIZE;
			tmpva += PAGE_SIZE;
			pa += PAGE_SIZE;
		}
	}

	return ((void *)(va + offset));
}
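
/*
 * A minimal usage sketch for the two paths above (the physical addresses
 * are hypothetical and only chosen to land on either side of the 512M
 * KSEG1 window on a 32-bit configuration):
 */
#if 0
	void *low_regs, *high_regs;

	low_regs = pmap_mapdev(0x1f000000, PAGE_SIZE);	/* direct-mapped */
	high_regs = pmap_mapdev(0x40000000, PAGE_SIZE);	/* via kva_alloc() */
	/* ... access the device registers ... */
	pmap_unmapdev((vm_offset_t)high_regs, PAGE_SIZE);
	pmap_unmapdev((vm_offset_t)low_regs, PAGE_SIZE);	/* no-op in KSEG1 */
#endif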

void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
#ifndef __mips_n64
	vm_offset_t base, offset;

	/* If the address is within KSEG1 then there is nothing to do */
	if (va >= MIPS_KSEG1_START && va <= MIPS_KSEG1_END)
		return;

	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = roundup(size + offset, PAGE_SIZE);
	kva_free(base, size);
#endif
}

/*
 * Perform the pmap work for mincore().
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
	pt_entry_t *ptep, pte;
	vm_paddr_t pa;
	vm_page_t m;
	int val;

	PMAP_LOCK(pmap);
retry:
	ptep = pmap_pte(pmap, addr);
	pte = (ptep != NULL) ? *ptep : 0;
	if (!pte_test(&pte, PTE_V)) {
		val = 0;
		goto out;
	}
	val = MINCORE_INCORE;
	if (pte_test(&pte, PTE_D))
		val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
	pa = TLBLO_PTE_TO_PA(pte);
	if (pte_test(&pte, PTE_MANAGED)) {
		/*
		 * This may falsely report the given address as
		 * MINCORE_REFERENCED.  Unfortunately, due to the lack of
		 * per-PTE reference information, it is impossible to
		 * determine if the address is MINCORE_REFERENCED.
		 */
		m = PHYS_TO_VM_PAGE(pa);
		if ((m->aflags & PGA_REFERENCED) != 0)
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
	}
	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
	    pte_test(&pte, PTE_MANAGED)) {
		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
			goto retry;
	} else
out:
		PA_UNLOCK_COND(*locked_pa);
	PMAP_UNLOCK(pmap);
	return (val);
}
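
/*
 * For example (a reading of the flag composition above): a valid, dirty,
 * managed page whose vm_page has PGA_REFERENCED set yields
 * MINCORE_INCORE | MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER |
 * MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER, while a missing or
 * invalid PTE yields 0.  The referenced bits come from the vm_page, not
 * the PTE, since MIPS keeps no per-PTE reference information (see the
 * comment above).
 */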

void
pmap_activate(struct thread *td)
{
	pmap_t pmap, oldpmap;
	struct proc *p = td->td_proc;
	u_int cpuid;

	critical_enter();

	pmap = vmspace_pmap(p->p_vmspace);
	oldpmap = PCPU_GET(curpmap);
	cpuid = PCPU_GET(cpuid);

	if (oldpmap)
		CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
	pmap_asid_alloc(pmap);
	if (td == curthread) {
		PCPU_SET(segbase, pmap->pm_segtab);
		mips_wr_entryhi(pmap->pm_asid[cpuid].asid);
	}

	PCPU_SET(curpmap, pmap);
	critical_exit();
}

void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
}

/*
 *	Increase the starting virtual address of the given mapping if a
 *	different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{
	vm_offset_t superpage_offset;

	if (size < NBSEG)
		return;
	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
		offset += ptoa(object->pg_color);
	superpage_offset = offset & SEGMASK;
	if (size - ((NBSEG - superpage_offset) & SEGMASK) < NBSEG ||
	    (*addr & SEGMASK) == superpage_offset)
		return;
	if ((*addr & SEGMASK) < superpage_offset)
		*addr = (*addr & ~SEGMASK) + superpage_offset;
	else
		*addr = ((*addr + SEGMASK) & ~SEGMASK) + superpage_offset;
}
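
/*
 * A worked example of the adjustment above, assuming 4 MB segments
 * (NBSEG == 0x400000, SEGMASK == 0x3fffff; the numbers are illustrative):
 * for offset 0x00503000 and a candidate *addr of 0x20881000,
 * superpage_offset = 0x103000 and (*addr & SEGMASK) = 0x081000, which is
 * below the target, so *addr becomes 0x20800000 + 0x103000 = 0x20903000.
 * After the shift, *addr and offset agree modulo NBSEG, so for a
 * sufficiently large mapping whole segments line up with segment
 * boundaries in the object.
 */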

#ifdef DDB
DB_SHOW_COMMAND(ptable, ddb_pid_dump)
{
	pmap_t pmap;
	struct thread *td = NULL;
	struct proc *p;
	int i, j, k;
	vm_paddr_t pa;
	vm_offset_t va;

	if (have_addr) {
		td = db_lookup_thread(addr, TRUE);
		if (td == NULL) {
			db_printf("Invalid pid or tid");
			return;
		}
		p = td->td_proc;
		if (p->p_vmspace == NULL) {
			db_printf("No vmspace for process");
			return;
		}
		pmap = vmspace_pmap(p->p_vmspace);
	} else
		pmap = kernel_pmap;

	db_printf("pmap:%p segtab:%p asid:%x generation:%x\n",
	    pmap, pmap->pm_segtab, pmap->pm_asid[0].asid,
	    pmap->pm_asid[0].gen);
	for (i = 0; i < NPDEPG; i++) {
		pd_entry_t *pdpe;
		pt_entry_t *pde;
		pt_entry_t pte;

		pdpe = (pd_entry_t *)pmap->pm_segtab[i];
		if (pdpe == NULL)
			continue;
		db_printf("[%4d] %p\n", i, pdpe);
#ifdef __mips_n64
		for (j = 0; j < NPDEPG; j++) {
			pde = (pt_entry_t *)pdpe[j];
			if (pde == NULL)
				continue;
			db_printf("\t[%4d] %p\n", j, pde);
#else
		{
			j = 0;
			pde = (pt_entry_t *)pdpe;
#endif
			for (k = 0; k < NPTEPG; k++) {
				pte = pde[k];
				if (pte == 0 || !pte_test(&pte, PTE_V))
					continue;
				pa = TLBLO_PTE_TO_PA(pte);
				va = ((u_long)i << SEGSHIFT) | (j << PDRSHIFT) | (k << PAGE_SHIFT);
				db_printf("\t\t[%04d] va: %p pte: %8jx pa:%jx\n",
				    k, (void *)va, (uintmax_t)pte, (uintmax_t)pa);
			}
		}
	}
}
#endif

#if defined(DEBUG)

static void pads(pmap_t pm);
void pmap_pvdump(vm_offset_t pa);

/* print address space of pmap */
static void
pads(pmap_t pm)
{
	unsigned va, i, j;
	pt_entry_t *ptep;

	if (pm == kernel_pmap)
		return;
	for (i = 0; i < NPTEPG; i++)
		if (pm->pm_segtab[i])
			for (j = 0; j < NPTEPG; j++) {
				va = (i << SEGSHIFT) + (j << PAGE_SHIFT);
				if (pm == kernel_pmap && va < KERNBASE)
					continue;
				if (pm != kernel_pmap &&
				    va >= VM_MAXUSER_ADDRESS)
					continue;
				ptep = pmap_pte(pm, va);
				if (pte_test(ptep, PTE_V))
					printf("%x:%x ", va, *(int *)ptep);
			}

}
void
pmap_pvdump(vm_offset_t pa)
{
	pv_entry_t pv;
	vm_page_t m;

	printf("pa %jx", (uintmax_t)pa);
	m = PHYS_TO_VM_PAGE(pa);
	for (pv = TAILQ_FIRST(&m->md.pv_list); pv;
	    pv = TAILQ_NEXT(pv, pv_list)) {
		printf(" -> pmap %p, va %jx", (void *)PV_PMAP(pv),
		    (uintmax_t)pv->pv_va);
		pads(PV_PMAP(pv));
	}
	printf(" ");
}

/* N/C */
#endif

/*
 * Allocate TLB address space tag (called ASID or TLBPID) and return it.
 * It takes almost as much or more time to search the TLB for a
 * specific ASID and flush those entries as it does to flush the entire TLB.
 * Therefore, when we allocate a new ASID, we just take the next number. When
 * we run out of numbers, we flush the TLB, increment the generation count
 * and start over. ASID zero is reserved for kernel use.
 */
static void
pmap_asid_alloc(pmap_t pmap)
{

	if (pmap->pm_asid[PCPU_GET(cpuid)].asid == PMAP_ASID_RESERVED ||
	    pmap->pm_asid[PCPU_GET(cpuid)].gen != PCPU_GET(asid_generation)) {
		if (PCPU_GET(next_asid) == pmap_max_asid) {
			tlb_invalidate_all_user(NULL);
			PCPU_SET(asid_generation,
			    (PCPU_GET(asid_generation) + 1) & ASIDGEN_MASK);
			if (PCPU_GET(asid_generation) == 0) {
				PCPU_SET(asid_generation, 1);
			}
			PCPU_SET(next_asid, 1);	/* 0 means invalid */
		}
		pmap->pm_asid[PCPU_GET(cpuid)].asid = PCPU_GET(next_asid);
		pmap->pm_asid[PCPU_GET(cpuid)].gen = PCPU_GET(asid_generation);
		PCPU_SET(next_asid, PCPU_GET(next_asid) + 1);
	}
}
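
/*
 * A compact stand-alone model of the allocator above (a sketch under
 * "#if 0"; MAX_ASID, next_asid and asid_generation stand in for
 * pmap_max_asid and the per-CPU state, and the TLB flush is elided):
 * with MAX_ASID == 256, pmaps receive ASIDs 1..255, and the 256th
 * allocation rolls the generation so that every cached (asid, gen)
 * pair goes stale.
 */
#if 0
static unsigned next_asid = 1, asid_generation = 1;

static unsigned
asid_alloc_sketch(unsigned *gen)
{
	if (next_asid == MAX_ASID) {	/* out of numbers: flush and roll */
		asid_generation++;	/* invalidates all cached ASIDs */
		next_asid = 1;		/* 0 stays reserved for the kernel */
	}
	*gen = asid_generation;
	return (next_asid++);
}
#endif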

static pt_entry_t
init_pte_prot(vm_page_t m, vm_prot_t access, vm_prot_t prot)
{
	pt_entry_t rw;

	if (!(prot & VM_PROT_WRITE))
		rw = PTE_V | PTE_RO;
	else if ((m->oflags & VPO_UNMANAGED) == 0) {
		if ((access & VM_PROT_WRITE) != 0)
			rw = PTE_V | PTE_D;
		else
			rw = PTE_V;
	} else
		/* Needn't emulate a modified bit for unmanaged pages. */
		rw = PTE_V | PTE_D;
	return (rw);
}
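
/*
 * The cases above, tabulated:
 *
 *	prot		page		access		resulting PTE bits
 *	read-only	any		any		PTE_V | PTE_RO
 *	writable	managed		write		PTE_V | PTE_D
 *	writable	managed		read		PTE_V (PTE_D emulated
 *							on first write)
 *	writable	unmanaged	any		PTE_V | PTE_D
 */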

/*
 * pmap_emulate_modified: do dirty bit emulation
 *
 * On SMP, update just the local TLB; other CPUs will update their
 * TLBs from the PTE lazily, if they take the exception.
 * Returns 0 in case of success, 1 if the page is read-only and we
 * need to fault.
 */
int
pmap_emulate_modified(pmap_t pmap, vm_offset_t va)
{
	pt_entry_t *pte;

	PMAP_LOCK(pmap);
	pte = pmap_pte(pmap, va);
	if (pte == NULL)
		panic("pmap_emulate_modified: can't find PTE");
#ifdef SMP
	/* It is possible that some other CPU changed m-bit */
	if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D)) {
		tlb_update(pmap, va, *pte);
		PMAP_UNLOCK(pmap);
		return (0);
	}
#else
	if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D))
		panic("pmap_emulate_modified: invalid pte");
#endif
	if (pte_test(pte, PTE_RO)) {
		PMAP_UNLOCK(pmap);
		return (1);
	}
	pte_set(pte, PTE_D);
	tlb_update(pmap, va, *pte);
	if (!pte_test(pte, PTE_MANAGED))
		panic("pmap_emulate_modified: unmanaged page");
	PMAP_UNLOCK(pmap);
	return (0);
}
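
/*
 * For example: the first store through a clean, writable, managed mapping
 * (PTE_V set, PTE_D clear, PTE_RO clear) takes a TLB modified exception;
 * the trap handler calls pmap_emulate_modified(), which sets PTE_D,
 * refreshes the local TLB entry via tlb_update() and returns 0, so the
 * faulting store is retried and succeeds.  A store through a PTE_RO
 * mapping returns 1 instead, and the caller must raise a protection
 * fault.
 */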

/*
 *	Routine:	pmap_kextract
 *	Function:
 *		Extract the physical page address associated with the
 *		given virtual address.
 */
vm_paddr_t
pmap_kextract(vm_offset_t va)
{
	int mapped;

	/*
	 * First, the direct-mapped regions.
	 */
#if defined(__mips_n64)
	if (va >= MIPS_XKPHYS_START && va < MIPS_XKPHYS_END)
		return (MIPS_XKPHYS_TO_PHYS(va));
#endif
	if (va >= MIPS_KSEG0_START && va < MIPS_KSEG0_END)
		return (MIPS_KSEG0_TO_PHYS(va));

	if (va >= MIPS_KSEG1_START && va < MIPS_KSEG1_END)
		return (MIPS_KSEG1_TO_PHYS(va));

	/*
	 * User virtual addresses.
	 */
	if (va < VM_MAXUSER_ADDRESS) {
		pt_entry_t *ptep;

		if (curproc && curproc->p_vmspace) {
			ptep = pmap_pte(&curproc->p_vmspace->vm_pmap, va);
			if (ptep) {
				return (TLBLO_PTE_TO_PA(*ptep) |
				    (va & PAGE_MASK));
			}
			return (0);
		}
	}

	/*
	 * Should be kernel virtual here, otherwise fail
	 */
	mapped = (va >= MIPS_KSEG2_START && va < MIPS_KSEG2_END);
#if defined(__mips_n64)
	mapped = mapped || (va >= MIPS_XKSEG_START && va < MIPS_XKSEG_END);
#endif
	/*
	 * Kernel virtual.
	 */

	if (mapped) {
		pt_entry_t *ptep;

		/* Is the kernel pmap initialized? */
		if (!CPU_EMPTY(&kernel_pmap->pm_active)) {
			/* It's inside the virtual address range */
			ptep = pmap_pte(kernel_pmap, va);
			if (ptep) {
				return (TLBLO_PTE_TO_PA(*ptep) |
				    (va & PAGE_MASK));
			}
		}
		return (0);
	}

	panic("%s for unknown address space %p.", __func__, (void *)va);
}
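
/*
 * Worked examples of the decoding above (32-bit addresses, chosen only for
 * illustration): va 0x80001000 lies in KSEG0 and maps directly to pa
 * 0x1000, and va 0xa0002000 lies in KSEG1 and maps to pa 0x2000; a KSEG2
 * (or, on n64, XKSEG) address is resolved through the kernel page tables
 * and yields 0 when no valid PTE exists.
 */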

void
pmap_flush_pvcache(vm_page_t m)
{
	pv_entry_t pv;

	if (m != NULL) {
		for (pv = TAILQ_FIRST(&m->md.pv_list); pv;
		    pv = TAILQ_NEXT(pv, pv_list)) {
			mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
		}
	}
}