/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2020 Justin Hibbits
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 */

/*
 * VM layout notes:
 *
 * Kernel and user threads run within one common virtual address space
 * defined by AS=0.
 *
 * 64-bit pmap:
 * Virtual address space layout:
 * -----------------------------
 * 0x0000_0000_0000_0000 - 0x3fff_ffff_ffff_ffff      : user process
 * 0x4000_0000_0000_0000 - 0x7fff_ffff_ffff_ffff      : unused
 * 0x8000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff      : mmio region
 * 0xc000_0000_0000_0000 - 0xdfff_ffff_ffff_ffff      : direct map
 * 0xe000_0000_0000_0000 - 0xffff_ffff_ffff_ffff      : KVA
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/uma.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/platform.h>

#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#include <ddb/ddb.h>

#ifdef  DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

#define	PRI0ptrX	"016lx"

/**************************************************************************/
/* PMAP */
/**************************************************************************/

unsigned int kernel_pdirs;
static uma_zone_t ptbl_root_zone;
static pte_t ****kernel_ptbl_root;

/*
 * Base of the pmap_mapdev() region.  On 32-bit it immediately follows the
 * userspace address range.  On 64-bit it sits far above, at (1 << 63), and
 * ranges up to the DMAP, allowing 62 bits of PA.  This is far larger than
 * the widest Book-E address bus; the e6500 has a 40-bit PA space.  This lets
 * us map devices much like the DMAP, with virtual addresses equal to the PA
 * plus the base.
 */
#define	VM_MAPDEV_BASE		0x8000000000000000
#define	VM_MAPDEV_PA_MAX	0x4000000000000000 /* Don't encroach on DMAP */

static void tid_flush(tlbtid_t tid);

/**************************************************************************/
/* Page table management */
/**************************************************************************/

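/*
 * A pmap's translations live in a four-level tree rooted at pm_root:
 * pm_root[PG_ROOT_IDX(va)] points to an L1 directory, whose PDIR_L1_IDX(va)
 * slot points to a page directory, whose PDIR_IDX(va) slot points to a page
 * table (ptbl), whose PTBL_IDX(va) slot is the pte itself (see pte_find()).
 * Each level below the root is a single wired, zeroed page.
 */
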
#define PMAP_ROOT_SIZE	(sizeof(pte_t****) * PG_ROOT_NENTRIES)
static pte_t *ptbl_alloc(pmap_t pmap, vm_offset_t va,
    bool nosleep, bool *is_new);
static void ptbl_hold(pmap_t, pte_t *);
static int ptbl_unhold(pmap_t, vm_offset_t);

static vm_paddr_t pte_vatopa(pmap_t, vm_offset_t);
static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, bool);
static int pte_remove(pmap_t, vm_offset_t, uint8_t);
static pte_t *pte_find(pmap_t, vm_offset_t);
static pte_t *pte_find_next(pmap_t, vm_offset_t *);
static void kernel_pte_alloc(vm_offset_t, vm_offset_t);

/**************************************************************************/
/* Page table related */
/**************************************************************************/

/* Allocate a page, to be used in a page table. */
static vm_offset_t
mmu_booke_alloc_page(pmap_t pmap, unsigned int idx, bool nosleep)
{
	vm_page_t	m;
	int		req;

	req = VM_ALLOC_WIRED | VM_ALLOC_ZERO;
	while ((m = vm_page_alloc_noobj(req)) == NULL) {
		if (nosleep)
			return (0);

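		/*
		 * No page is available; drop the locks so memory can be
		 * reclaimed, sleep in vm_wait(), then retry with the locks
		 * reheld.
		 */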
		PMAP_UNLOCK(pmap);
		rw_wunlock(&pvh_global_lock);
		vm_wait(NULL);
		rw_wlock(&pvh_global_lock);
		PMAP_LOCK(pmap);
	}
	m->pindex = idx;

	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}

/* Initialize pool of kva ptbl buffers. */
static void
ptbl_init(void)
{
}

/* Get a pointer to a PTE in a page table. */
static __inline pte_t *
pte_find(pmap_t pmap, vm_offset_t va)
{
	pte_t        ***pdir_l1;
	pte_t         **pdir;
	pte_t          *ptbl;

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	pdir_l1 = pmap->pm_root[PG_ROOT_IDX(va)];
	if (pdir_l1 == NULL)
		return (NULL);
	pdir = pdir_l1[PDIR_L1_IDX(va)];
	if (pdir == NULL)
		return (NULL);
	ptbl = pdir[PDIR_IDX(va)];

	return ((ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL);
}

/* Get a pointer to a PTE in a page table, or the next closest (greater) one. */
static __inline pte_t *
pte_find_next(pmap_t pmap, vm_offset_t *pva)
{
	vm_offset_t	va;
	pte_t	    ****pm_root;
	pte_t	       *pte;
	unsigned long	i, j, k, l;

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	va = *pva;
	i = PG_ROOT_IDX(va);
	j = PDIR_L1_IDX(va);
	k = PDIR_IDX(va);
	l = PTBL_IDX(va);
	pm_root = pmap->pm_root;

	/* truncate the VA for later. */
	va &= ~((1UL << (PG_ROOT_H + 1)) - 1);
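	/*
	 * Resume the walk at the saved indices; whenever an outer index
	 * advances, the inner indices restart at zero.
	 */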
	for (; i < PG_ROOT_NENTRIES; i++, j = 0, k = 0, l = 0) {
		if (pm_root[i] == 0)
			continue;
		for (; j < PDIR_L1_NENTRIES; j++, k = 0, l = 0) {
			if (pm_root[i][j] == 0)
				continue;
			for (; k < PDIR_NENTRIES; k++, l = 0) {
				if (pm_root[i][j][k] == NULL)
					continue;
				for (; l < PTBL_NENTRIES; l++) {
					pte = &pm_root[i][j][k][l];
					if (!PTE_ISVALID(pte))
						continue;
					*pva = va + PG_ROOT_SIZE * i +
					    PDIR_L1_SIZE * j +
					    PDIR_SIZE * k +
					    PAGE_SIZE * l;
					return (pte);
				}
			}
		}
	}
	return (NULL);
}

static bool
unhold_free_page(pmap_t pmap, vm_page_t m)
{

	if (vm_page_unwire_noq(m)) {
		vm_page_free_zero(m);
		return (true);
	}

	return (false);
}

static vm_offset_t
get_pgtbl_page(pmap_t pmap, vm_offset_t *ptr_tbl, uint32_t index,
    bool nosleep, bool hold_parent, bool *isnew)
{
	vm_offset_t	page;
	vm_page_t	m;

	page = ptr_tbl[index];
	KASSERT(page != 0 || pmap != kernel_pmap,
	    ("NULL page table page found in kernel pmap!"));
	if (page == 0) {
		page = mmu_booke_alloc_page(pmap, index, nosleep);
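		/*
		 * Re-check the slot: mmu_booke_alloc_page() may have dropped
		 * the locks to sleep, letting another thread install a page
		 * table page here in the meantime.
		 */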
		if (ptr_tbl[index] == 0) {
			*isnew = true;
			ptr_tbl[index] = page;
			if (hold_parent) {
				m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)ptr_tbl));
				m->ref_count++;
			}
			return (page);
		}
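		/* Lost the race; release our page and use the winner's. */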
		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(page));
		page = ptr_tbl[index];
		vm_page_unwire_noq(m);
		vm_page_free_zero(m);
	}

	*isnew = false;

	return (page);
}

/* Allocate page table. */
static pte_t*
ptbl_alloc(pmap_t pmap, vm_offset_t va, bool nosleep, bool *is_new)
{
	unsigned int	pg_root_idx = PG_ROOT_IDX(va);
	unsigned int	pdir_l1_idx = PDIR_L1_IDX(va);
	unsigned int	pdir_idx = PDIR_IDX(va);
	vm_offset_t	pdir_l1, pdir, ptbl;

	/* The root table is not reference counted, so never hold it as a parent. */
	pdir_l1 = get_pgtbl_page(pmap, (vm_offset_t *)pmap->pm_root,
	    pg_root_idx, nosleep, false, is_new);
	if (pdir_l1 == 0)
		return (NULL);
	pdir = get_pgtbl_page(pmap, (vm_offset_t *)pdir_l1, pdir_l1_idx,
	    nosleep, !*is_new, is_new);
	if (pdir == 0)
		return (NULL);
	ptbl = get_pgtbl_page(pmap, (vm_offset_t *)pdir, pdir_idx,
	    nosleep, !*is_new, is_new);

	return ((pte_t *)ptbl);
}

/*
 * Decrement ptbl pages hold count and attempt to free ptbl pages. Called
 * when removing pte entry from ptbl.
 *
 * Return 1 if ptbl pages were freed.
 */
static int
ptbl_unhold(pmap_t pmap, vm_offset_t va)
{
	pte_t          *ptbl;
	vm_page_t	m;
	u_int		pg_root_idx;
	pte_t        ***pdir_l1;
	u_int		pdir_l1_idx;
	pte_t         **pdir;
	u_int		pdir_idx;

	pg_root_idx = PG_ROOT_IDX(va);
	pdir_l1_idx = PDIR_L1_IDX(va);
	pdir_idx = PDIR_IDX(va);

	KASSERT((pmap != kernel_pmap),
		("ptbl_unhold: unholding kernel ptbl!"));

	pdir_l1 = pmap->pm_root[pg_root_idx];
	pdir = pdir_l1[pdir_l1_idx];
	ptbl = pdir[pdir_idx];

	/* decrement hold count */
	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl));

	if (!unhold_free_page(pmap, m))
		return (0);

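	/*
	 * The ptbl page was freed; clear its pdir slot and walk up the tree,
	 * releasing each directory page whose last child just went away.
	 */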
	pdir[pdir_idx] = NULL;
	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) pdir));

	if (!unhold_free_page(pmap, m))
		return (1);

	pdir_l1[pdir_l1_idx] = NULL;
	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) pdir_l1));

	if (!unhold_free_page(pmap, m))
		return (1);
	pmap->pm_root[pg_root_idx] = NULL;

	return (1);
}

/*
 * Increment hold count for ptbl pages. This routine is used when new pte
 * entry is being inserted into ptbl.
 */
static void
ptbl_hold(pmap_t pmap, pte_t *ptbl)
{
	vm_page_t	m;

	KASSERT((pmap != kernel_pmap),
		("ptbl_hold: holding kernel ptbl!"));

	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl));
	m->ref_count++;
}

/*
 * Clean pte entry, try to free page table page if requested.
 *
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(pmap_t pmap, vm_offset_t va, u_int8_t flags)
{
	vm_page_t	m;
	pte_t          *pte;

	pte = pte_find(pmap, va);
	KASSERT(pte != NULL, ("%s: NULL pte for va %#jx, pmap %p",
	    __func__, (uintmax_t)va, pmap));

	if (!PTE_ISVALID(pte))
		return (0);

	/* Get vm_page_t for mapped pte. */
	m = PHYS_TO_VM_PAGE(PTE_PA(pte));

	if (PTE_ISWIRED(pte))
		pmap->pm_stats.wired_count--;

	/* Handle managed entry. */
	if (PTE_ISMANAGED(pte)) {
		/* Handle modified pages. */
		if (PTE_ISMODIFIED(pte))
			vm_page_dirty(m);

		/* Referenced pages. */
		if (PTE_ISREFERENCED(pte))
			vm_page_aflag_set(m, PGA_REFERENCED);

		/* Remove pv_entry from pv_list. */
		pv_remove(pmap, va, m);
	} else if (pmap == kernel_pmap && m && m->md.pv_tracked) {
		pv_remove(pmap, va, m);
		if (TAILQ_EMPTY(&m->md.pv_list))
			m->md.pv_tracked = false;
	}
	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	*pte = 0;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	pmap->pm_stats.resident_count--;

	if (flags & PTBL_UNHOLD) {
		return (ptbl_unhold(pmap, va));
	}
	return (0);
}

/*
 * Insert PTE for a given page and virtual address.
 */
static int
pte_enter(pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
    bool nosleep)
{
	unsigned int	ptbl_idx = PTBL_IDX(va);
	pte_t          *ptbl, *pte, pte_tmp;
	bool		is_new;

	/* Get the page table pointer, allocating it if necessary. */
	ptbl = ptbl_alloc(pmap, va, nosleep, &is_new);
	if (ptbl == NULL) {
		KASSERT(nosleep, ("nosleep and NULL ptbl"));
		return (ENOMEM);
	}
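	/*
	 * A freshly allocated ptbl cannot contain a valid mapping for va, so
	 * skip the existing-mapping check; its initial wiring from the
	 * allocator stands in for this first pte's hold.
	 */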
	if (is_new) {
		pte = &ptbl[ptbl_idx];
	} else {
		/*
		 * Check if there is valid mapping for requested va, if there
		 * is, remove it.
		 */
		pte = &ptbl[ptbl_idx];
		if (PTE_ISVALID(pte)) {
			pte_remove(pmap, va, PTBL_HOLD);
		} else {
			/*
			 * pte is not used, increment hold count for ptbl
			 * pages.
			 */
			if (pmap != kernel_pmap)
				ptbl_hold(pmap, ptbl);
		}
	}

	/*
	 * Insert pv_entry into pv_list for mapped page if part of managed
	 * memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		flags |= PTE_MANAGED;

		/* Create and insert pv entry. */
		pv_insert(pmap, va, m);
	}

	pmap->pm_stats.resident_count++;

	pte_tmp = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
	pte_tmp |= (PTE_VALID | flags);

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	*pte = pte_tmp;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	return (0);
}

/* Return the pa for the given pmap/va. */
static	vm_paddr_t
pte_vatopa(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t	pa = 0;
	pte_t          *pte;

	pte = pte_find(pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte))
		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
	return (pa);
}

/*
 * Set up the kernel page table hierarchy for the whole KVA range and fill in
 * PTEs covering the kernel image, from 'addr' up to 'data_end'.
 */
static void
kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr)
{
	pte_t		*pte;
	vm_size_t	kva_size;
	int		kernel_pdirs, kernel_pgtbls, pdir_l1s;
	vm_offset_t	va, l1_va, pdir_va, ptbl_va;
	int		i, j, k;

	kva_size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
	kernel_pmap->pm_root = kernel_ptbl_root;
	pdir_l1s = howmany(kva_size, PG_ROOT_SIZE);
	kernel_pdirs = howmany(kva_size, PDIR_L1_SIZE);
	kernel_pgtbls = howmany(kva_size, PDIR_SIZE);

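	/*
	 * mmu_booke_alloc_kernel_pgtables() reserved these pages contiguously
	 * after kernel_ptbl_root: first the L1 directory pages, then the page
	 * directory pages, then the page table pages.
	 */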
	/* Initialize kernel pdir */
	l1_va = (vm_offset_t)kernel_ptbl_root +
	    round_page(PG_ROOT_NENTRIES * sizeof(pte_t ***));
	pdir_va = l1_va + pdir_l1s * PAGE_SIZE;
	ptbl_va = pdir_va + kernel_pdirs * PAGE_SIZE;
	if (bootverbose) {
		printf("ptbl_root_va: %#lx\n", (vm_offset_t)kernel_ptbl_root);
		printf("l1_va: %#lx (%d entries)\n", l1_va, pdir_l1s);
		printf("pdir_va: %#lx(%d entries)\n", pdir_va, kernel_pdirs);
		printf("ptbl_va: %#lx(%d entries)\n", ptbl_va, kernel_pgtbls);
	}

	va = VM_MIN_KERNEL_ADDRESS;
	for (i = PG_ROOT_IDX(va); i < PG_ROOT_IDX(va) + pdir_l1s;
	    i++, l1_va += PAGE_SIZE) {
		kernel_pmap->pm_root[i] = (pte_t ***)l1_va;
		for (j = 0;
		    j < PDIR_L1_NENTRIES && va < VM_MAX_KERNEL_ADDRESS;
		    j++, pdir_va += PAGE_SIZE) {
			kernel_pmap->pm_root[i][j] = (pte_t **)pdir_va;
			for (k = 0;
			    k < PDIR_NENTRIES && va < VM_MAX_KERNEL_ADDRESS;
			    k++, va += PDIR_SIZE, ptbl_va += PAGE_SIZE)
				kernel_pmap->pm_root[i][j][k] = (pte_t *)ptbl_va;
		}
	}
	/*
	 * Fill in PTEs covering kernel code and data. They are not required
	 * for address translation, as this area is covered by static TLB1
	 * entries, but are needed for pte_vatopa() to work correctly with
	 * kernel area addresses.
	 */
	for (va = addr; va < data_end; va += PAGE_SIZE) {
		pte = &(kernel_pmap->pm_root[PG_ROOT_IDX(va)][PDIR_L1_IDX(va)][PDIR_IDX(va)][PTBL_IDX(va)]);
		*pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
		*pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
		    PTE_VALID | PTE_PS_4KB;
	}
}

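/*
 * Reserve boot-time memory, starting at data_end, for the kernel page table
 * pages: the root table plus enough L1, pdir, and ptbl pages to cover the
 * whole KVA range.  Returns the new end of reserved memory.
 */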
static vm_offset_t
mmu_booke_alloc_kernel_pgtables(vm_offset_t data_end)
{
	vm_size_t kva_size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
	kernel_ptbl_root = (pte_t ****)data_end;

	data_end += round_page(PG_ROOT_NENTRIES * sizeof(pte_t ***));
	data_end += howmany(kva_size, PG_ROOT_SIZE) * PAGE_SIZE;
	data_end += howmany(kva_size, PDIR_L1_SIZE) * PAGE_SIZE;
	data_end += howmany(kva_size, PDIR_SIZE) * PAGE_SIZE;

	return (data_end);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
static int
mmu_booke_pinit(pmap_t pmap)
{
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);

	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));

	for (i = 0; i < MAXCPU; i++)
		pmap->pm_tid[i] = TID_NONE;
	CPU_ZERO(&pmap->pm_active);
	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
	pmap->pm_root = uma_zalloc(ptbl_root_zone, M_WAITOK);
	bzero(pmap->pm_root, sizeof(pte_t **) * PG_ROOT_NENTRIES);

	return (1);
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by mmu_booke_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
static void
mmu_booke_release(pmap_t pmap)
{

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));
#ifdef INVARIANTS
	/*
	 * Verify that all page directories are gone.
	 * Protects against reference count leakage.
	 */
	for (int i = 0; i < PG_ROOT_NENTRIES; i++)
		KASSERT(pmap->pm_root[i] == 0,
		    ("Index %d on root page %p is non-zero!\n", i, pmap->pm_root));
#endif
	uma_zfree(ptbl_root_zone, pmap->pm_root);
}

static void
mmu_booke_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	pte_t *pte;
	vm_paddr_t pa = 0;
	int sync_sz, valid;

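	/*
	 * Work one page at a time: look up the PA under the pmap lock, then
	 * sync the instruction cache through the direct map.
	 */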
	while (sz > 0) {
		PMAP_LOCK(pm);
		pte = pte_find(pm, va);
		valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
		if (valid)
			pa = PTE_PA(pte);
		PMAP_UNLOCK(pm);
		sync_sz = PAGE_SIZE - (va & PAGE_MASK);
		sync_sz = min(sync_sz, sz);
		if (valid) {
			pa += (va & PAGE_MASK);
			__syncicache((void *)PHYS_TO_DMAP(pa), sync_sz);
		}
		va += sync_sz;
		sz -= sync_sz;
	}
}

/*
 * mmu_booke_zero_page_area zeros the specified hardware page by
 * mapping it into virtual memory and using bzero to clear
 * its contents.
 *
 * off and size must reside within a single page.
 */
static void
mmu_booke_zero_page_area(vm_page_t m, int off, int size)
{
	vm_offset_t va;

	/* XXX KASSERT off and size are within a single page? */

	va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	bzero((caddr_t)va + off, size);
}

/*
 * mmu_booke_zero_page zeros the specified hardware page.
 */
static void
mmu_booke_zero_page(vm_page_t m)
{
	vm_offset_t off, va;

	va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));

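	/* dcbz zeroes a full cache line at a time through the direct map. */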
	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
		__asm __volatile("dcbz 0,%0" :: "r"(va + off));
}

/*
 * mmu_booke_copy_page copies the specified (machine independent) page by
 * mapping the page through the direct map and using memcpy to copy the page,
 * one machine dependent page at a time.
 */
static void
mmu_booke_copy_page(vm_page_t sm, vm_page_t dm)
{
	vm_offset_t sva, dva;

	sva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(sm));
	dva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dm));
	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
}

static inline void
mmu_booke_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	vm_page_t pa, pb;

	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		pa = ma[a_offset >> PAGE_SHIFT];
		b_pg_offset = b_offset & PAGE_MASK;
		pb = mb[b_offset >> PAGE_SHIFT];
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		a_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pa)) +
		    a_pg_offset);
		b_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pb)) +
		    b_pg_offset);
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
}

static vm_offset_t
mmu_booke_quick_enter_page(vm_page_t m)
{
	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}

static void
mmu_booke_quick_remove_page(vm_offset_t addr)
{
}

/**************************************************************************/
/* TID handling */
/**************************************************************************/

/*
 * Invalidate all TLB0 entries which match the given TID. Note this is
 * dedicated for cases when invalidations should NOT be propagated to other
 * CPUs.
 */
static void
tid_flush(tlbtid_t tid)
{
	register_t msr;

	/* Don't evict kernel translations */
	if (tid == TID_KERNEL)
		return;

	msr = mfmsr();
	__asm __volatile("wrteei 0");

	/*
	 * Newer cores (e500mc and later) have tlbilx, which doesn't broadcast,
	 * so use it for PID invalidation.
	 */
	mtspr(SPR_MAS6, tid << MAS6_SPID0_SHIFT);
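	/*
	 * 0x7c200024 is tlbilx with T=1 (invalidate by PID), emitted as a raw
	 * opcode, presumably so older assemblers that lack the mnemonic can
	 * still build this file.  The PID to match was loaded into MAS6 above.
	 */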
	__asm __volatile("isync; .long 0x7c200024; isync; msync");

	__asm __volatile("wrtee %0" :: "r"(msr));
}