/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2020 Justin Hibbits
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 */

 /*
  * VM layout notes:
  *
  * Kernel and user threads run within one common virtual address space
  * defined by AS=0.
  *
  * 64-bit pmap:
  * Virtual address space layout:
  * -----------------------------
  * 0x0000_0000_0000_0000 - 0x3fff_ffff_ffff_ffff      : user process
  * 0x4000_0000_0000_0000 - 0x7fff_ffff_ffff_ffff      : unused
  * 0x8000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff      : mmio region
  * 0xc000_0000_0000_0000 - 0xdfff_ffff_ffff_ffff      : direct map
  * 0xe000_0000_0000_0000 - 0xffff_ffff_ffff_ffff      : KVA
  */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/uma.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/platform.h>

#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#include <ddb/ddb.h>

#ifdef  DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

#define	PRI0ptrX	"016lx"

/**************************************************************************/
/* PMAP */
/**************************************************************************/

unsigned int kernel_pdirs;
static uma_zone_t ptbl_root_zone;
static pte_t ****kernel_ptbl_root;

/*
 * Base of the pmap_mapdev() region.  On 32-bit it immediately follows the
 * userspace address range.  On 64-bit it is far above, at (1 << 63), and
 * ranges up to the DMAP, giving 62 bits of PA.  This is far larger than
 * the widest Book-E address bus; the e6500 has a 40-bit PA space.  This allows
 * us to map akin to the DMAP, with addresses identical to the PA, offset by the
 * base.
 */
#define	VM_MAPDEV_BASE		0x8000000000000000
#define	VM_MAPDEV_PA_MAX	0x4000000000000000 /* Don't encroach on DMAP */

static void tid_flush(tlbtid_t tid);
static unsigned long ilog2(unsigned long);

/**************************************************************************/
/* Page table management */
/**************************************************************************/

#define PMAP_ROOT_SIZE	(sizeof(pte_t****) * PG_ROOT_NENTRIES)
static pte_t *ptbl_alloc(pmap_t pmap, vm_offset_t va,
    bool nosleep, bool *is_new);
static void ptbl_hold(pmap_t, pte_t *);
static int ptbl_unhold(pmap_t, vm_offset_t);

static vm_paddr_t pte_vatopa(pmap_t, vm_offset_t);
static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
static int pte_remove(pmap_t, vm_offset_t, uint8_t);
static pte_t *pte_find(pmap_t, vm_offset_t);
static pte_t *pte_find_next(pmap_t, vm_offset_t *);
static void kernel_pte_alloc(vm_offset_t, vm_offset_t);

/**************************************************************************/
/* Page table related */
/**************************************************************************/

/* Allocate a page, to be used in a page table. */
static vm_offset_t
mmu_booke_alloc_page(pmap_t pmap, unsigned int idx, bool nosleep)
{
	vm_page_t	m;
	int		req;

	req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO;
	while ((m = vm_page_alloc(NULL, idx, req)) == NULL) {
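		/*
		 * If the caller cannot sleep, fail the allocation; otherwise
		 * drop the locks, wait for free pages and retry.
		 */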
		if (nosleep)
			return (0);

		PMAP_UNLOCK(pmap);
		rw_wunlock(&pvh_global_lock);
		vm_wait(NULL);
		rw_wlock(&pvh_global_lock);
		PMAP_LOCK(pmap);
	}

	if (!(m->flags & PG_ZERO))
		/* Zero whole ptbl. */
		mmu_booke_zero_page(m);

	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}

/* Initialize pool of kva ptbl buffers. */
static void
ptbl_init(void)
{
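	/* Nothing to do; page table pages are accessed through the direct map. */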
}

/* Get a pointer to a PTE in a page table. */
static __inline pte_t *
pte_find(pmap_t pmap, vm_offset_t va)
{
	pte_t        ***pdir_l1;
	pte_t         **pdir;
	pte_t          *ptbl;

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	pdir_l1 = pmap->pm_root[PG_ROOT_IDX(va)];
	if (pdir_l1 == NULL)
		return (NULL);
	pdir = pdir_l1[PDIR_L1_IDX(va)];
	if (pdir == NULL)
		return (NULL);
	ptbl = pdir[PDIR_IDX(va)];

	return ((ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL);
}

/* Get a pointer to a PTE in a page table, or the next closest (greater) one. */
static __inline pte_t *
pte_find_next(pmap_t pmap, vm_offset_t *pva)
{
	vm_offset_t	va;
	pte_t	    ****pm_root;
	pte_t	       *pte;
	unsigned long	i, j, k, l;

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	va = *pva;
	i = PG_ROOT_IDX(va);
	j = PDIR_L1_IDX(va);
	k = PDIR_IDX(va);
	l = PTBL_IDX(va);
	pm_root = pmap->pm_root;

	/* Truncate the VA; the low bits are rebuilt from the indices below. */
	va &= ~((1UL << (PG_ROOT_H + 1)) - 1);
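	/*
	 * Walk the page table tree starting at the indices derived from
	 * *pva; once an outer level advances, the inner indices restart
	 * at zero.
	 */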
	for (; i < PG_ROOT_NENTRIES; i++, j = 0, k = 0, l = 0) {
		if (pm_root[i] == 0)
			continue;
		for (; j < PDIR_L1_NENTRIES; j++, k = 0, l = 0) {
			if (pm_root[i][j] == 0)
				continue;
			for (; k < PDIR_NENTRIES; k++, l = 0) {
				if (pm_root[i][j][k] == NULL)
					continue;
				for (; l < PTBL_NENTRIES; l++) {
					pte = &pm_root[i][j][k][l];
					if (!PTE_ISVALID(pte))
						continue;
					*pva = va + PG_ROOT_SIZE * i +
					    PDIR_L1_SIZE * j +
					    PDIR_SIZE * k +
					    PAGE_SIZE * l;
					return (pte);
				}
			}
		}
	}
	return (NULL);
}

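/*
 * Drop one wiring reference on a page table page and free the page if that
 * was the last reference.  Returns true if the page was freed.
 */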
static bool
unhold_free_page(pmap_t pmap, vm_page_t m)
{

	if (vm_page_unwire_noq(m)) {
		vm_page_free_zero(m);
		return (true);
	}

	return (false);
}

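/*
 * Return the page table page at ptr_tbl[index], allocating and installing a
 * zeroed page if none is present.  *isnew reports whether a new page was
 * installed; when hold_parent is set, installing a new page also takes a
 * reference on the parent table page.
 */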
static vm_offset_t
get_pgtbl_page(pmap_t pmap, vm_offset_t *ptr_tbl, uint32_t index,
    bool nosleep, bool hold_parent, bool *isnew)
{
	vm_offset_t	page;
	vm_page_t	m;

	page = ptr_tbl[index];
	KASSERT(page != 0 || pmap != kernel_pmap,
	    ("NULL page table page found in kernel pmap!"));
	if (page == 0) {
		page = mmu_booke_alloc_page(pmap, index, nosleep);
		if (ptr_tbl[index] == 0) {
			*isnew = true;
			ptr_tbl[index] = page;
			if (hold_parent) {
				m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)ptr_tbl));
				m->ref_count++;
			}
			return (page);
		}
		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(page));
		page = ptr_tbl[index];
		vm_page_unwire_noq(m);
		vm_page_free_zero(m);
	}

	*isnew = false;

	return (page);
}

/* Allocate the page table for va, creating any missing intermediate levels. */
static pte_t*
ptbl_alloc(pmap_t pmap, vm_offset_t va, bool nosleep, bool *is_new)
{
	unsigned int	pg_root_idx = PG_ROOT_IDX(va);
	unsigned int	pdir_l1_idx = PDIR_L1_IDX(va);
	unsigned int	pdir_idx = PDIR_IDX(va);
	vm_offset_t	pdir_l1, pdir, ptbl;

	/* When holding a parent, no need to hold the root index pages. */
	pdir_l1 = get_pgtbl_page(pmap, (vm_offset_t *)pmap->pm_root,
	    pg_root_idx, nosleep, false, is_new);
	if (pdir_l1 == 0)
		return (NULL);
	pdir = get_pgtbl_page(pmap, (vm_offset_t *)pdir_l1, pdir_l1_idx,
	    nosleep, !*is_new, is_new);
	if (pdir == 0)
		return (NULL);
	ptbl = get_pgtbl_page(pmap, (vm_offset_t *)pdir, pdir_idx,
	    nosleep, !*is_new, is_new);

	return ((pte_t *)ptbl);
}

/*
 * Decrement ptbl pages hold count and attempt to free ptbl pages. Called
 * when removing pte entry from ptbl.
 *
 * Return 1 if ptbl pages were freed.
 */
static int
ptbl_unhold(pmap_t pmap, vm_offset_t va)
{
	pte_t          *ptbl;
	vm_page_t	m;
	u_int		pg_root_idx;
	pte_t        ***pdir_l1;
	u_int		pdir_l1_idx;
	pte_t         **pdir;
	u_int		pdir_idx;

	pg_root_idx = PG_ROOT_IDX(va);
	pdir_l1_idx = PDIR_L1_IDX(va);
	pdir_idx = PDIR_IDX(va);

	KASSERT((pmap != kernel_pmap),
		("ptbl_unhold: unholding kernel ptbl!"));

	pdir_l1 = pmap->pm_root[pg_root_idx];
	pdir = pdir_l1[pdir_l1_idx];
	ptbl = pdir[pdir_idx];

	/* decrement hold count */
	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl));

	if (!unhold_free_page(pmap, m))
		return (0);

	pdir[pdir_idx] = NULL;
	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) pdir));

	if (!unhold_free_page(pmap, m))
		return (1);

	pdir_l1[pdir_l1_idx] = NULL;
	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) pdir_l1));

	if (!unhold_free_page(pmap, m))
		return (1);
	pmap->pm_root[pg_root_idx] = NULL;

	return (1);
}

/*
 * Increment hold count for ptbl pages. This routine is used when new pte
 * entry is being inserted into ptbl.
 */
static void
ptbl_hold(pmap_t pmap, pte_t *ptbl)
{
	vm_page_t	m;

	KASSERT((pmap != kernel_pmap),
		("ptbl_hold: holding kernel ptbl!"));

	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl));
	m->ref_count++;
}

/*
 * Clean pte entry, try to free page table page if requested.
 *
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(pmap_t pmap, vm_offset_t va, u_int8_t flags)
{
	vm_page_t	m;
	pte_t          *pte;

	pte = pte_find(pmap, va);
	KASSERT(pte != NULL, ("%s: NULL pte for va %#jx, pmap %p",
	    __func__, (uintmax_t)va, pmap));

	if (!PTE_ISVALID(pte))
		return (0);

	/* Get vm_page_t for mapped pte. */
	m = PHYS_TO_VM_PAGE(PTE_PA(pte));

	if (PTE_ISWIRED(pte))
		pmap->pm_stats.wired_count--;

	/* Handle managed entry. */
	if (PTE_ISMANAGED(pte)) {
		/* Handle modified pages. */
		if (PTE_ISMODIFIED(pte))
			vm_page_dirty(m);

		/* Referenced pages. */
		if (PTE_ISREFERENCED(pte))
			vm_page_aflag_set(m, PGA_REFERENCED);

		/* Remove pv_entry from pv_list. */
		pv_remove(pmap, va, m);
	} else if (pmap == kernel_pmap && m && m->md.pv_tracked) {
		pv_remove(pmap, va, m);
		if (TAILQ_EMPTY(&m->md.pv_list))
			m->md.pv_tracked = false;
	}
	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	*pte = 0;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	pmap->pm_stats.resident_count--;

	if (flags & PTBL_UNHOLD) {
		return (ptbl_unhold(pmap, va));
	}
	return (0);
}

/*
 * Insert PTE for a given page and virtual address.
 */
static int
pte_enter(pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
    boolean_t nosleep)
{
	unsigned int	ptbl_idx = PTBL_IDX(va);
	pte_t          *ptbl, *pte, pte_tmp;
	bool		is_new;

	/* Get the page directory pointer. */
	ptbl = ptbl_alloc(pmap, va, nosleep, &is_new);
	if (ptbl == NULL) {
		KASSERT(nosleep, ("nosleep and NULL ptbl"));
		return (ENOMEM);
	}
	if (is_new) {
		pte = &ptbl[ptbl_idx];
	} else {
		/*
		 * Check if there is valid mapping for requested va, if there
		 * is, remove it.
		 */
		pte = &ptbl[ptbl_idx];
		if (PTE_ISVALID(pte)) {
			pte_remove(pmap, va, PTBL_HOLD);
		} else {
			/*
			 * pte is not used, increment hold count for ptbl
			 * pages.
			 */
			if (pmap != kernel_pmap)
				ptbl_hold(pmap, ptbl);
		}
	}

	/*
	 * Insert pv_entry into pv_list for mapped page if part of managed
	 * memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		flags |= PTE_MANAGED;

		/* Create and insert pv entry. */
		pv_insert(pmap, va, m);
	}

	pmap->pm_stats.resident_count++;

	pte_tmp = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
	pte_tmp |= (PTE_VALID | flags);

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	*pte = pte_tmp;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	return (0);
}

/* Return the pa for the given pmap/va. */
static	vm_paddr_t
pte_vatopa(pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t	pa = 0;
	pte_t          *pte;

	pte = pte_find(pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte))
		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
	return (pa);
}

/*
 * Set up the kernel page table tree for the whole KVA range, and pre-fill
 * PTEs covering the kernel image from addr to data_end.
 */
static void
kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr)
{
	pte_t		*pte;
	vm_size_t	kva_size;
	int		kernel_pdirs, kernel_pgtbls, pdir_l1s;
	vm_offset_t	va, l1_va, pdir_va, ptbl_va;
	int		i, j, k;

	kva_size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
	kernel_pmap->pm_root = kernel_ptbl_root;
	pdir_l1s = howmany(kva_size, PG_ROOT_SIZE);
	kernel_pdirs = howmany(kva_size, PDIR_L1_SIZE);
	kernel_pgtbls = howmany(kva_size, PDIR_SIZE);

	/* Initialize kernel pdir */
	l1_va = (vm_offset_t)kernel_ptbl_root +
	    round_page(PG_ROOT_NENTRIES * sizeof(pte_t ***));
	pdir_va = l1_va + pdir_l1s * PAGE_SIZE;
	ptbl_va = pdir_va + kernel_pdirs * PAGE_SIZE;
	if (bootverbose) {
		printf("ptbl_root_va: %#lx\n", (vm_offset_t)kernel_ptbl_root);
		printf("l1_va: %#lx (%d entries)\n", l1_va, pdir_l1s);
		printf("pdir_va: %#lx (%d entries)\n", pdir_va, kernel_pdirs);
		printf("ptbl_va: %#lx (%d entries)\n", ptbl_va, kernel_pgtbls);
	}

	va = VM_MIN_KERNEL_ADDRESS;
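	/*
	 * Link the preallocated page table pages into the root, L1 and page
	 * directory levels, covering the entire kernel VA range.
	 */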
	for (i = PG_ROOT_IDX(va); i < PG_ROOT_IDX(va) + pdir_l1s;
	    i++, l1_va += PAGE_SIZE) {
		kernel_pmap->pm_root[i] = (pte_t ***)l1_va;
		for (j = 0;
		    j < PDIR_L1_NENTRIES && va < VM_MAX_KERNEL_ADDRESS;
		    j++, pdir_va += PAGE_SIZE) {
			kernel_pmap->pm_root[i][j] = (pte_t **)pdir_va;
			for (k = 0;
			    k < PDIR_NENTRIES && va < VM_MAX_KERNEL_ADDRESS;
			    k++, va += PDIR_SIZE, ptbl_va += PAGE_SIZE)
				kernel_pmap->pm_root[i][j][k] = (pte_t *)ptbl_va;
		}
	}
	/*
	 * Fill in PTEs covering kernel code and data. They are not required
	 * for address translation, as this area is covered by static TLB1
	 * entries, but for pte_vatopa() to work correctly with kernel area
	 * addresses.
	 */
	for (va = addr; va < data_end; va += PAGE_SIZE) {
		pte = &(kernel_pmap->pm_root[PG_ROOT_IDX(va)][PDIR_L1_IDX(va)][PDIR_IDX(va)][PTBL_IDX(va)]);
		*pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
		*pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
		    PTE_VALID | PTE_PS_4KB;
	}
}

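/*
 * Reserve space immediately after the kernel image for the kernel page table
 * tree: the root array plus one page for each L1 directory, page directory
 * and page table needed to cover the kernel VA range.  Returns the updated
 * data_end.
 */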
static vm_offset_t
mmu_booke_alloc_kernel_pgtables(vm_offset_t data_end)
{
	vm_size_t kva_size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
	kernel_ptbl_root = (pte_t ****)data_end;

	data_end += round_page(PG_ROOT_NENTRIES * sizeof(pte_t ***));
	data_end += howmany(kva_size, PG_ROOT_SIZE) * PAGE_SIZE;
	data_end += howmany(kva_size, PDIR_L1_SIZE) * PAGE_SIZE;
	data_end += howmany(kva_size, PDIR_SIZE) * PAGE_SIZE;

	return (data_end);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
static int
mmu_booke_pinit(pmap_t pmap)
{
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);

	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));

	for (i = 0; i < MAXCPU; i++)
		pmap->pm_tid[i] = TID_NONE;
	CPU_ZERO(&pmap->pm_active);
	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
	pmap->pm_root = uma_zalloc(ptbl_root_zone, M_WAITOK);
	bzero(pmap->pm_root, sizeof(pte_t **) * PG_ROOT_NENTRIES);

	return (1);
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by mmu_booke_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
static void
mmu_booke_release(pmap_t pmap)
{

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));
#ifdef INVARIANTS
	/*
	 * Verify that all page directories are gone.
	 * Protects against reference count leakage.
	 */
	for (int i = 0; i < PG_ROOT_NENTRIES; i++)
		KASSERT(pmap->pm_root[i] == 0,
		    ("Index %d on root page %p is non-zero!\n", i, pmap->pm_root));
#endif
	uma_zfree(ptbl_root_zone, pmap->pm_root);
}

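/*
 * Synchronize the instruction cache for the given address range, one page at
 * a time, through the direct map of each valid backing page.
 */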
static void
mmu_booke_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	pte_t *pte;
	vm_paddr_t pa = 0;
	int sync_sz, valid;

	while (sz > 0) {
		PMAP_LOCK(pm);
		pte = pte_find(pm, va);
		valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
		if (valid)
			pa = PTE_PA(pte);
		PMAP_UNLOCK(pm);
		sync_sz = PAGE_SIZE - (va & PAGE_MASK);
		sync_sz = min(sync_sz, sz);
		if (valid) {
			pa += (va & PAGE_MASK);
			__syncicache((void *)PHYS_TO_DMAP(pa), sync_sz);
		}
		va += sync_sz;
		sz -= sync_sz;
	}
}

/*
 * mmu_booke_zero_page_area zeros the specified hardware page by
 * mapping it into virtual memory and using bzero to clear
 * its contents.
 *
 * off and size must reside within a single page.
 */
static void
mmu_booke_zero_page_area(vm_page_t m, int off, int size)
{
	vm_offset_t va;

	/* XXX KASSERT off and size are within a single page? */

	va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	bzero((caddr_t)va + off, size);
}

/*
 * mmu_booke_zero_page zeros the specified hardware page.
 */
static void
mmu_booke_zero_page(vm_page_t m)
{
	vm_offset_t off, va;

	va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));

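	/* Zero one cache line at a time with dcbz through the direct map. */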
	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
		__asm __volatile("dcbz 0,%0" :: "r"(va + off));
}

/*
 * mmu_booke_copy_page copies the specified (machine independent) page by
 * mapping the page into virtual memory and using memcopy to copy the page,
 * one machine dependent page at a time.
 */
static void
mmu_booke_copy_page(vm_page_t sm, vm_page_t dm)
{
	vm_offset_t sva, dva;

	sva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(sm));
	dva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dm));
	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
}

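/*
 * Copy xfersize bytes between two runs of pages at arbitrary offsets,
 * splitting the transfer at page boundaries and copying through the
 * direct map.
 */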
static inline void
mmu_booke_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	vm_page_t pa, pb;

	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		pa = ma[a_offset >> PAGE_SHIFT];
		b_pg_offset = b_offset & PAGE_MASK;
		pb = mb[b_offset >> PAGE_SHIFT];
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		a_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pa)) +
		    a_pg_offset);
		b_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pb)) +
		    b_pg_offset);
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
}

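/*
 * Quick temporary mappings need no setup here: every physical page is
 * already reachable through the direct map, so there is nothing to map
 * or tear down.
 */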
static vm_offset_t
mmu_booke_quick_enter_page(vm_page_t m)
{
	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
}

static void
mmu_booke_quick_remove_page(vm_offset_t addr)
{
}

/**************************************************************************/
/* TID handling */
/**************************************************************************/

/*
 * Return the largest unsigned integer log such that 2^log <= num.
 */
static unsigned long
ilog2(unsigned long num)
{
	long lz;

	__asm ("cntlzd %0, %1" : "=r" (lz) : "r" (num));
	return (63 - lz);
}

/*
 * Invalidate all TLB0 entries which match the given TID. Note this is
 * dedicated for cases when invalidations should NOT be propagated to other
 * CPUs.
 */
static void
tid_flush(tlbtid_t tid)
{
	register_t msr;

	/* Don't evict kernel translations */
	if (tid == TID_KERNEL)
		return;

	msr = mfmsr();
	__asm __volatile("wrteei 0");

	/*
	 * Newer cores (e500mc and later) have tlbilx, which doesn't broadcast,
	 * so use it for PID invalidation.
	 */
	mtspr(SPR_MAS6, tid << MAS6_SPID0_SHIFT);
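	/* Hand-encoded tlbilx 0, 0: invalidate entries matching MAS6[SPID]. */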
	__asm __volatile("isync; .long 0x7c200024; isync; msync");

	__asm __volatile("wrtee %0" :: "r"(msr));
}