#-
# Copyright (c) 2005 Peter Grehan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD: head/sys/powerpc/powerpc/mmu_if.m 285148 2015-07-04 19:00:38Z jhibbits $
#

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/mmuvar.h>

/**
 * @defgroup MMU mmu - KObj methods for PowerPC MMU implementations
 * @brief A set of methods required by all MMU implementations. These
 * are basically direct call-throughs from the pmap machine-dependent
 * code.
 * Thanks to Bruce M Simpson's pmap man pages for routine descriptions.
 *@{
 */

INTERFACE mmu;

#
# Default implementations of some methods
#
CODE {
	static void mmu_null_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
	    vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
	{
		return;
	}

	static void mmu_null_growkernel(mmu_t mmu, vm_offset_t addr)
	{
		return;
	}

	static void mmu_null_init(mmu_t mmu)
	{
		return;
	}

	static boolean_t mmu_null_is_prefaultable(mmu_t mmu, pmap_t pmap,
	    vm_offset_t va)
	{
		return (FALSE);
	}

	static void mmu_null_object_init_pt(mmu_t mmu, pmap_t pmap,
	    vm_offset_t addr, vm_object_t object, vm_pindex_t index,
	    vm_size_t size)
	{
		return;
	}

	static void mmu_null_page_init(mmu_t mmu, vm_page_t m)
	{
		return;
	}

	static void mmu_null_remove_pages(mmu_t mmu, pmap_t pmap)
	{
		return;
	}

	static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
	    vm_paddr_t *locked_pa)
	{
		return (0);
	}

	static void mmu_null_deactivate(struct thread *td)
	{
		return;
	}

	static void mmu_null_align_superpage(mmu_t mmu, vm_object_t object,
	    vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size)
	{
		return;
	}

	static void *mmu_null_mapdev_attr(mmu_t mmu, vm_paddr_t pa,
	    vm_size_t size, vm_memattr_t ma)
	{
		return MMU_MAPDEV(mmu, pa, size);
	}

	static void mmu_null_kenter_attr(mmu_t mmu, vm_offset_t va,
	    vm_paddr_t pa, vm_memattr_t ma)
	{
		MMU_KENTER(mmu, va, pa);
	}

	static void mmu_null_page_set_memattr(mmu_t mmu, vm_page_t m,
	    vm_memattr_t ma)
	{
		return;
	}
};
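
#
# Each METHOD declaration below is translated by the kobj interface
# compiler (makeobjops) into a dispatch function named after the method,
# e.g. MMU_MAPDEV() and MMU_KENTER() as used by the defaults above.  A
# caller sketch (mmu_obj is a placeholder for the installed mmu_t):
#
#	void *cookie = MMU_MAPDEV(mmu_obj, pa, size);
#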


/**
 * @brief Apply the given advice to the specified range of addresses within
 * the given pmap.  Depending on the advice, clear the referenced and/or
 * modified flags in each mapping and set the mapped page's dirty field.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _advice	advice to apply
 */
METHOD void advise {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	int		_advice;
};
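
/*
 * Usage sketch (hypothetical caller; MMU_ADVISE() is the generated
 * wrapper, by the same convention as MMU_MAPDEV() above, and
 * MADV_DONTNEED is one of the madvise(2) advice values):
 *
 *	MMU_ADVISE(mmu_obj, pmap, sva, eva, MADV_DONTNEED);
 */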


/**
 * @brief Clear the 'modified' bit on the given physical page
 *
 * @param _pg		physical page
 */
METHOD void clear_modify {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Clear the write and modified bits in each of the given
 * physical page's mappings
 *
 * @param _pg		physical page
 */
METHOD void remove_write {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Copy the address range given by the source physical map, virtual
 * address and length to the destination physical map and virtual address.
 * This routine is optional; a default null implementation is provided.
 *
 * @param _dst_pmap	destination physical map
 * @param _src_pmap	source physical map
 * @param _dst_addr	destination virtual address
 * @param _len		size of range
 * @param _src_addr	source virtual address
 */
METHOD void copy {
	mmu_t		_mmu;
	pmap_t		_dst_pmap;
	pmap_t		_src_pmap;
	vm_offset_t	_dst_addr;
	vm_size_t	_len;
	vm_offset_t	_src_addr;
} DEFAULT mmu_null_copy;


/**
 * @brief Copy the source physical page to the destination physical page
 *
 * @param _src		source physical page
 * @param _dst		destination physical page
 */
METHOD void copy_page {
	mmu_t		_mmu;
	vm_page_t	_src;
	vm_page_t	_dst;
};

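/**
 * @brief Copy _xfersize bytes from the source page array, starting at
 * _a_offset, to the destination page array at _b_offset. The offsets
 * and transfer size may span page boundaries within each array.
 *
 * @param *_ma		source array of physical page pointers
 * @param _a_offset	byte offset into the source array
 * @param *_mb		destination array of physical page pointers
 * @param _b_offset	byte offset into the destination array
 * @param _xfersize	number of bytes to copy
 */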
METHOD void copy_pages {
	mmu_t		_mmu;
	vm_page_t	*_ma;
	vm_offset_t	_a_offset;
	vm_page_t	*_mb;
	vm_offset_t	_b_offset;
	int		_xfersize;
};

/**
 * @brief Create a mapping between a virtual/physical address pair in the
 * passed physical map with the specified protection and wiring
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 * @param _p		mapping physical page
 * @param _prot		mapping page protection
 * @param _flags	pmap_enter flags
 * @param _psind	superpage size index
 */
METHOD int enter {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_page_t	_p;
	vm_prot_t	_prot;
	u_int		_flags;
	int8_t		_psind;
};
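
/*
 * Usage sketch (hypothetical caller; MMU_ENTER() is the generated
 * wrapper, and PMAP_ENTER_WIRED is one of the pmap_enter(9) flags):
 *
 *	int error;
 *
 *	error = MMU_ENTER(mmu_obj, pmap, va, m,
 *	    VM_PROT_READ | VM_PROT_WRITE, PMAP_ENTER_WIRED, 0);
 */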


/**
 * @brief Maps a sequence of resident pages belonging to the same object.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _m_start	physical page mapped at start
 * @param _prot		mapping page protection
 */
METHOD void enter_object {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	vm_page_t	_m_start;
	vm_prot_t	_prot;
};


/**
 * @brief A faster entry point for page mapping where it is possible
 * to short-circuit some of the tests in pmap_enter.
 *
 * @param _pmap		physical map (and also currently active pmap)
 * @param _va		mapping virtual address
 * @param _pg		mapping physical page
 * @param _prot		new page protection - used to see if page is exec.
 */
METHOD void enter_quick {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_page_t	_pg;
	vm_prot_t	_prot;
};


/**
 * @brief Reverse map the given virtual address, returning the physical
 * address associated with the address if a mapping exists.
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 *
 * @retval 0		No mapping found
 * @retval addr		The mapping physical address
 */
METHOD vm_paddr_t extract {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
};
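
/*
 * Usage sketch (hypothetical caller of the generated MMU_EXTRACT()
 * wrapper; a return value of 0 means no mapping was found):
 *
 *	pa = MMU_EXTRACT(mmu_obj, pmap, va);
 *	if (pa == 0)
 *		...handle the missing mapping...
 */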


/**
 * @brief Reverse map the given virtual address, returning the
 * physical page if found. The page must be held (by calling
 * vm_page_hold) if the page protection matches the given protection
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 * @param _prot		protection used to determine if physical page
 *			should be locked
 *
 * @retval NULL		No mapping found
 * @retval page		Pointer to physical page. Held if protections match
 */
METHOD vm_page_t extract_and_hold {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_prot_t	_prot;
};


/**
 * @brief Increase kernel virtual address space to the given virtual address.
 * Not really required for PowerPC, so optional unless the MMU implementation
 * can use it.
 *
 * @param _va		new upper limit for kernel virtual address space
 */
METHOD void growkernel {
	mmu_t		_mmu;
	vm_offset_t	_va;
} DEFAULT mmu_null_growkernel;


/**
 * @brief Called from vm_mem_init. Zone allocation is available at
 * this stage, so it is a convenient time to create zones. This routine is
 * for MMU-implementation convenience and is optional.
 */
METHOD void init {
	mmu_t		_mmu;
} DEFAULT mmu_null_init;


/**
 * @brief Return whether the page has been marked by MMU hardware to have
 * been modified
 *
 * @param _pg		physical page to test
 *
 * @retval boolean	TRUE if page has been modified
 */
METHOD boolean_t is_modified {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Return whether the specified virtual address is a candidate to be
 * prefaulted in. This routine is optional.
 *
 * @param _pmap		physical map
 * @param _va		virtual address to test
 *
 * @retval boolean	TRUE if the address is a candidate.
 */
METHOD boolean_t is_prefaultable {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
} DEFAULT mmu_null_is_prefaultable;


/**
 * @brief Return whether or not the specified physical page was referenced
 * in any physical maps.
 *
 * @param _pg		physical page
 *
 * @retval boolean	TRUE if page has been referenced
 */
METHOD boolean_t is_referenced {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Return a count of referenced bits for a page, clearing those bits.
 * Not all referenced bits need to be cleared, but it is necessary that 0
 * only be returned when there are none set.
 *
 * @param _pg		physical page
 *
 * @retval int		count of referenced bits
 */
METHOD int ts_referenced {
	mmu_t		_mmu;
	vm_page_t	_pg;
};
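
/*
 * Usage sketch (hypothetical caller of the generated MMU_TS_REFERENCED()
 * wrapper; a result of 0 guarantees no referenced bits were set):
 *
 *	if (MMU_TS_REFERENCED(mmu_obj, m) == 0)
 *		...page was not referenced since the last call...
 */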


/**
 * @brief Map the requested physical address range into kernel virtual
 * address space. The value in _virt is taken as a hint. The virtual
 * address of the range is returned, or NULL if the mapping could not
 * be created. The range can be direct-mapped if that is supported.
 *
 * @param *_virt	Hint for start virtual address, and also return
 *			value
 * @param _start	physical address range start
 * @param _end		physical address range end
 * @param _prot		protection of range (currently ignored)
 *
 * @retval NULL		could not map the area
 * @retval addr, *_virt	mapping start virtual address
 */
METHOD vm_offset_t map {
	mmu_t		_mmu;
	vm_offset_t	*_virt;
	vm_paddr_t	_start;
	vm_paddr_t	_end;
	int		_prot;
};
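
/*
 * Usage sketch (hypothetical caller of the generated MMU_MAP() wrapper;
 * virt is both a placement hint and an output parameter):
 *
 *	vm_offset_t virt = hint_va;
 *	vm_offset_t va = MMU_MAP(mmu_obj, &virt, pa_start, pa_end,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 */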


/**
 * @brief Used to create a contiguous set of read-only mappings for a
 * given object to try to eliminate a cascade of on-demand faults as
 * the object is accessed sequentially. This routine is optional.
 *
 * @param _pmap		physical map
 * @param _addr		mapping start virtual address
 * @param _object	device-backed V.M. object to be mapped
 * @param _pindex	page-index within object of mapping start
 * @param _size		size in bytes of mapping
 */
METHOD void object_init_pt {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_addr;
	vm_object_t	_object;
	vm_pindex_t	_pindex;
	vm_size_t	_size;
} DEFAULT mmu_null_object_init_pt;


/**
 * @brief Used to determine if the specified page has a mapping for the
 * given physical map, by scanning the list of reverse-mappings from the
 * page. The list is scanned to a maximum of 16 entries.
 *
 * @param _pmap		physical map
 * @param _pg		physical page
 *
 * @retval bool		TRUE if the physical map was found in the first 16
 *			reverse-map list entries off the physical page.
 */
METHOD boolean_t page_exists_quick {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_page_t	_pg;
};


/**
 * @brief Initialise the machine-dependent section of the physical page
 * data structure. This routine is optional.
 *
 * @param _pg		physical page
 */
METHOD void page_init {
	mmu_t		_mmu;
	vm_page_t	_pg;
} DEFAULT mmu_null_page_init;


/**
 * @brief Count the number of managed mappings to the given physical
 * page that are wired.
 *
 * @param _pg		physical page
 *
 * @retval int		the number of wired, managed mappings to the
 *			given physical page
 */
METHOD int page_wired_mappings {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Initialise a physical map data structure
 *
 * @param _pmap		physical map
 */
METHOD void pinit {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Initialise the physical map for process 0, the initial process
 * in the system.
 * XXX default to pinit ?
 *
 * @param _pmap		physical map
 */
METHOD void pinit0 {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Set the protection for physical pages in the given virtual address
 * range to the given value.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _prot		new page protection
 */
METHOD void protect {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	vm_prot_t	_prot;
};


/**
 * @brief Create a mapping in kernel virtual address space for the given array
 * of wired physical pages.
 *
 * @param _start	mapping virtual address start
 * @param *_pg		array of physical page pointers
 * @param _count	number of pages in the array
 */
METHOD void qenter {
	mmu_t		_mmu;
	vm_offset_t	_start;
	vm_page_t	*_pg;
	int		_count;
};


/**
 * @brief Remove the temporary mappings created by qenter.
 *
 * @param _start	mapping virtual address start
 * @param _count	number of pages in mapping
 */
METHOD void qremove {
	mmu_t		_mmu;
	vm_offset_t	_start;
	int		_count;
};
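
/*
 * Usage sketch (hypothetical caller; MMU_QENTER() and MMU_QREMOVE()
 * are the generated wrappers, and are expected to be used as a pair):
 *
 *	MMU_QENTER(mmu_obj, sva, pages, npages);
 *	...access the temporary mapping...
 *	MMU_QREMOVE(mmu_obj, sva, npages);
 */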


/**
 * @brief Release per-pmap resources, e.g. mutexes, allocated memory etc.
 * There should be no existing mappings for the physical map at this point.
 *
 * @param _pmap		physical map
 */
METHOD void release {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Remove all mappings in the given physical map for the start/end
 * virtual address range. The range will be page-aligned.
 *
 * @param _pmap		physical map
 * @param _start	mapping virtual address start
 * @param _end		mapping virtual address end
 */
METHOD void remove {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
};


/**
 * @brief Traverse the reverse-map list off the given physical page and
 * remove all mappings. Clear the PGA_WRITEABLE attribute from the page.
 *
 * @param _pg		physical page
 */
METHOD void remove_all {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Remove all user mappings from the given physical map. Similar to
 * the remove method, but it is used when tearing down all mappings in an
 * address space. This method is optional, since pmap_remove will be called
 * for each valid vm_map in the address space later.
 *
 * @param _pmap		physical map
 */
METHOD void remove_pages {
	mmu_t		_mmu;
	pmap_t		_pmap;
} DEFAULT mmu_null_remove_pages;


/**
 * @brief Clear the wired attribute from the mappings for the specified range
 * of addresses in the given pmap.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 */
METHOD void unwire {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
};


/**
 * @brief Zero a physical page. It is not assumed that the page is mapped,
 * so a temporary (or direct) mapping may need to be used.
 *
 * @param _pg		physical page
 */
METHOD void zero_page {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Zero a portion of a physical page, starting at a given offset and
 * for a given size (multiples of 512 bytes for 4k pages).
 *
 * @param _pg		physical page
 * @param _off		byte offset from start of page
 * @param _size		size of area to zero
 */
METHOD void zero_page_area {
	mmu_t		_mmu;
	vm_page_t	_pg;
	int		_off;
	int		_size;
};
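
/*
 * Usage sketch (hypothetical caller of the generated
 * MMU_ZERO_PAGE_AREA() wrapper, zeroing the second half of a 4K page):
 *
 *	MMU_ZERO_PAGE_AREA(mmu_obj, m, PAGE_SIZE / 2, PAGE_SIZE / 2);
 */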


/**
 * @brief Called from the idle loop to zero pages. XXX I think locking
 * constraints might be different here compared to zero_page.
 *
 * @param _pg		physical page
 */
METHOD void zero_page_idle {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Extract mincore(2) information from a mapping.
 *
 * @param _pmap		physical map
 * @param _addr		page virtual address
 * @param _locked_pa	page physical address
 *
 * @retval 0		no result
 * @retval non-zero	mincore(2) flag values
 */
METHOD int mincore {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_addr;
	vm_paddr_t	*_locked_pa;
} DEFAULT mmu_null_mincore;


/**
 * @brief Perform any operations required to allow a physical map to be used
 * before its address space is accessed.
 *
 * @param _td		thread associated with physical map
 */
METHOD void activate {
	mmu_t		_mmu;
	struct thread	*_td;
};

/**
 * @brief Perform any operations required to deactivate a physical map,
 * for instance as it is context-switched out.
 *
 * @param _td		thread associated with physical map
 */
METHOD void deactivate {
	mmu_t		_mmu;
	struct thread	*_td;
} DEFAULT mmu_null_deactivate;

/**
 * @brief Return a hint for the best virtual address to map a tentative
 * virtual address range in a given VM object. The default is to just
 * return the given tentative start address.
 *
 * @param _obj		VM backing object
 * @param _offset	starting offset within the VM object
 * @param _addr		initial guess at virtual address
 * @param _size		size of virtual address range
 */
METHOD void align_superpage {
	mmu_t		_mmu;
	vm_object_t	_obj;
	vm_ooffset_t	_offset;
	vm_offset_t	*_addr;
	vm_size_t	_size;
} DEFAULT mmu_null_align_superpage;




/**
 * INTERNAL INTERFACES
 */

/**
 * @brief Bootstrap the VM system. At the completion of this routine, the
 * kernel will be running in its own address space with full control over
 * paging.
 *
 * @param _start	start of reserved memory (obsolete ???)
 * @param _end		end of reserved memory (obsolete ???)
 *			XXX I think the intent of these was to allow
 *			the memory used by kernel text+data+bss and
 *			loader variables/load-time kld's to be carved out
 *			of available physical mem.
 *
 */
METHOD void bootstrap {
	mmu_t		_mmu;
	vm_offset_t	_start;
	vm_offset_t	_end;
};

/**
 * @brief Set up the MMU on the current CPU. Only called by the PMAP layer
 * for alternate CPUs on SMP systems.
 *
 * @param _ap		Set to 1 if the CPU being set up is an AP
 *
 */
METHOD void cpu_bootstrap {
	mmu_t		_mmu;
	int		_ap;
};


/**
 * @brief Create a kernel mapping for a given physical address range.
 * Called by bus code on behalf of device drivers. The mapping does not
 * have to be a virtual address: it can be a direct-mapped physical address
 * if that is supported by the MMU.
 *
 * @param _pa		start physical address
 * @param _size		size in bytes of mapping
 *
 * @retval addr		address of mapping.
 */
METHOD void * mapdev {
	mmu_t		_mmu;
	vm_paddr_t	_pa;
	vm_size_t	_size;
};

/**
 * @brief Create a kernel mapping for a given physical address range.
 * Called by bus code on behalf of device drivers. The mapping does not
 * have to be a virtual address: it can be a direct-mapped physical address
 * if that is supported by the MMU.
 *
 * @param _pa		start physical address
 * @param _size		size in bytes of mapping
 * @param _attr		cache attributes
 *
 * @retval addr		address of mapping.
 */
METHOD void * mapdev_attr {
	mmu_t		_mmu;
	vm_paddr_t	_pa;
	vm_size_t	_size;
	vm_memattr_t	_attr;
} DEFAULT mmu_null_mapdev_attr;

/**
 * @brief Change cache control attributes for a page. Should modify all
 * mappings for that page.
 *
 * @param _pg		page to modify
 * @param _ma		new cache control attributes
 */
METHOD void page_set_memattr {
	mmu_t		_mmu;
	vm_page_t	_pg;
	vm_memattr_t	_ma;
} DEFAULT mmu_null_page_set_memattr;

/**
 * @brief Remove the mapping created by mapdev. Called when a driver
 * is unloaded.
 *
 * @param _va		Mapping address returned from mapdev
 * @param _size		size in bytes of mapping
 */
METHOD void unmapdev {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_size_t	_size;
};
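
/*
 * Usage sketch (hypothetical driver attach/detach pair using the
 * generated MMU_MAPDEV() and MMU_UNMAPDEV() wrappers):
 *
 *	void *regs = MMU_MAPDEV(mmu_obj, pa, size);
 *	...access the device registers...
 *	MMU_UNMAPDEV(mmu_obj, (vm_offset_t)regs, size);
 */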


/**
 * @brief Reverse-map a kernel virtual address
 *
 * @param _va		kernel virtual address to reverse-map
 *
 * @retval pa		physical address corresponding to mapping
 */
METHOD vm_paddr_t kextract {
	mmu_t		_mmu;
	vm_offset_t	_va;
};


/**
 * @brief Map a wired page into kernel virtual address space
 *
 * @param _va		mapping virtual address
 * @param _pa		mapping physical address
 */
METHOD void kenter {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_paddr_t	_pa;
};
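
/*
 * Usage sketch (hypothetical round trip; after MMU_KENTER() installs
 * the mapping, MMU_KEXTRACT() recovers the physical address):
 *
 *	MMU_KENTER(mmu_obj, va, pa);
 *	KASSERT(MMU_KEXTRACT(mmu_obj, va) == pa, ("bad kernel mapping"));
 */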

/**
 * @brief Map a wired page into kernel virtual address space
 *
 * @param _va		mapping virtual address
 * @param _pa		mapping physical address
 * @param _ma		mapping cache control attributes
 */
METHOD void kenter_attr {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_paddr_t	_pa;
	vm_memattr_t	_ma;
} DEFAULT mmu_null_kenter_attr;

/**
 * @brief Determine if the given physical address range has been direct-mapped.
 *
 * @param _pa		physical address start
 * @param _size		physical address range size
 *
 * @retval bool		TRUE if the range is direct-mapped.
 */
METHOD boolean_t dev_direct_mapped {
	mmu_t		_mmu;
	vm_paddr_t	_pa;
	vm_size_t	_size;
};


/**
 * @brief Enforce instruction cache coherency. Typically called after a
 * region of memory has been modified and before execution of or within
 * that region is attempted. Setting breakpoints in a process through
 * ptrace(2) is one example of when the instruction cache needs to be
 * made coherent.
 *
 * @param _pm		the physical map of the virtual address
 * @param _va		the virtual address of the modified region
 * @param _sz		the size of the modified region
 */
METHOD void sync_icache {
	mmu_t		_mmu;
	pmap_t		_pm;
	vm_offset_t	_va;
	vm_size_t	_sz;
};
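
/*
 * Usage sketch (hypothetical caller; e.g. after a debugger writes a
 * breakpoint instruction into a traced process):
 *
 *	...write the breakpoint opcode at va...
 *	MMU_SYNC_ICACHE(mmu_obj, pm, va, sizeof(uint32_t));
 */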


/**
 * @brief Create temporary memory mapping for use by dumpsys().
 *
 * @param _pa		The physical page to map.
 * @param _sz		The requested size of the mapping.
 * @param _va		The virtual address of the mapping.
 */
METHOD void dumpsys_map {
	mmu_t		_mmu;
	vm_paddr_t	_pa;
	size_t		_sz;
	void		**_va;
};


/**
 * @brief Remove temporary dumpsys() mapping.
 *
 * @param _pa		The physical page that was mapped.
 * @param _sz		The requested size of the mapping.
 * @param _va		The virtual address of the mapping.
 */
METHOD void dumpsys_unmap {
	mmu_t		_mmu;
	vm_paddr_t	_pa;
	size_t		_sz;
	void		*_va;
};
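
/*
 * Usage sketch (hypothetical dump path pairing the two methods; _va is
 * an output parameter of MMU_DUMPSYS_MAP()):
 *
 *	void *va;
 *
 *	MMU_DUMPSYS_MAP(mmu_obj, pa, sz, &va);
 *	...copy the page contents out through va...
 *	MMU_DUMPSYS_UNMAP(mmu_obj, pa, sz, va);
 */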


/**
 * @brief Initialize memory chunks for dumpsys.
 */
METHOD void scan_init {
	mmu_t		_mmu;
};
