#-
# Copyright (c) 2005 Peter Grehan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD: head/sys/powerpc/powerpc/mmu_if.m 269485 2014-08-03 20:40:51Z alc $
#

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/mmuvar.h>

/**
 * @defgroup MMU mmu - KObj methods for PowerPC MMU implementations
 * @brief A set of methods required by all MMU implementations. These
 * are basically direct call-thru's from the pmap machine-dependent
 * code.
 * Thanks to Bruce M Simpson's pmap man pages for routine descriptions.
 *@{
 */

INTERFACE mmu;

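/**
 * Each METHOD below is turned into an MMU_*() dispatch wrapper by
 * makeobjops(8).  The machine-dependent pmap code forwards to the
 * active MMU object through these wrappers; a minimal sketch of one
 * such call-thru, assuming the dispatch layer keeps the MMU kernel
 * object in a variable named mmu_obj:
 *
 * @code
 *	void
 *	pmap_clear_modify(vm_page_t m)
 *	{
 *
 *		MMU_CLEAR_MODIFY(mmu_obj, m);
 *	}
 * @endcode
 */
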
#
# Default implementations of some methods
#
CODE {
	static void mmu_null_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
	    vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
	{
		return;
	}

	static void mmu_null_growkernel(mmu_t mmu, vm_offset_t addr)
	{
		return;
	}

	static void mmu_null_init(mmu_t mmu)
	{
		return;
	}

	static boolean_t mmu_null_is_prefaultable(mmu_t mmu, pmap_t pmap,
	    vm_offset_t va)
	{
		return (FALSE);
	}

	static void mmu_null_object_init_pt(mmu_t mmu, pmap_t pmap,
	    vm_offset_t addr, vm_object_t object, vm_pindex_t index,
	    vm_size_t size)
	{
		return;
	}

	static void mmu_null_page_init(mmu_t mmu, vm_page_t m)
	{
		return;
	}

	static void mmu_null_remove_pages(mmu_t mmu, pmap_t pmap)
	{
		return;
	}

	static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
	    vm_paddr_t *locked_pa)
	{
		return (0);
	}

	static void mmu_null_deactivate(mmu_t mmu, struct thread *td)
	{
		return;
	}

	static void mmu_null_align_superpage(mmu_t mmu, vm_object_t object,
	    vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size)
	{
		return;
	}

	static struct pmap_md *mmu_null_scan_md(mmu_t mmu, struct pmap_md *p)
	{
		return (NULL);
	}

	static void *mmu_null_mapdev_attr(mmu_t mmu, vm_paddr_t pa,
	    vm_size_t size, vm_memattr_t ma)
	{
		return (MMU_MAPDEV(mmu, pa, size));
	}

	static void mmu_null_kenter_attr(mmu_t mmu, vm_offset_t va,
	    vm_paddr_t pa, vm_memattr_t ma)
	{
		MMU_KENTER(mmu, va, pa);
	}

	static void mmu_null_page_set_memattr(mmu_t mmu, vm_page_t m,
	    vm_memattr_t ma)
	{
		return;
	}
};

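/**
 * An MMU implementation supplies a method table and registers it with
 * the MMU_DEF() macro from <machine/mmuvar.h>.  A hedged sketch of the
 * registration boilerplate; the mymmu_* names and MMU_TYPE_MYMMU ident
 * are placeholders, not a real implementation:
 *
 * @code
 *	static mmu_method_t mymmu_methods[] = {
 *		MMUMETHOD(mmu_bootstrap,	mymmu_bootstrap),
 *		MMUMETHOD(mmu_enter,		mymmu_enter),
 *		MMUMETHOD(mmu_extract,		mymmu_extract),
 *		{ 0, 0 }
 *	};
 *
 *	MMU_DEF(mymmu, MMU_TYPE_MYMMU, mymmu_methods, 0);
 * @endcode
 *
 * Methods omitted from the table fall back to the DEFAULT functions
 * declared above, where one exists; everything else must be provided.
 */
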

/**
 * @brief Apply the given advice to the specified range of addresses within
 * the given pmap.  Depending on the advice, clear the referenced and/or
 * modified flags in each mapping and set the mapped page's dirty field.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _advice	advice to apply
 */
METHOD void advise {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	int		_advice;
};


/**
 * @brief Clear the 'modified' bit on the given physical page
 *
 * @param _pg		physical page
 */
METHOD void clear_modify {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Clear the write and modified bits in each of the given
 * physical page's mappings
 *
 * @param _pg		physical page
 */
METHOD void remove_write {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Copy the address range given by the source physical map, virtual
 * address and length to the destination physical map and virtual address.
 * This routine is optional; the default implementation is a no-op.
 *
 * @param _dst_pmap	destination physical map
 * @param _src_pmap	source physical map
 * @param _dst_addr	destination virtual address
 * @param _len		size of range
 * @param _src_addr	source virtual address
 */
METHOD void copy {
	mmu_t		_mmu;
	pmap_t		_dst_pmap;
	pmap_t		_src_pmap;
	vm_offset_t	_dst_addr;
	vm_size_t	_len;
	vm_offset_t	_src_addr;
} DEFAULT mmu_null_copy;


/**
 * @brief Copy the source physical page to the destination physical page
 *
 * @param _src		source physical page
 * @param _dst		destination physical page
 */
METHOD void copy_page {
	mmu_t		_mmu;
	vm_page_t	_src;
	vm_page_t	_dst;
};

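/**
 * @brief Copy _xfersize bytes from byte offset _a_offset in the run of
 * pages _ma to byte offset _b_offset in the run of pages _mb. The copied
 * region may cross page boundaries within either run. (This description
 * follows the machine-independent pmap_copy_pages() contract.)
 *
 * @param *_ma		source page array
 * @param _a_offset	byte offset into the source pages
 * @param *_mb		destination page array
 * @param _b_offset	byte offset into the destination pages
 * @param _xfersize	number of bytes to copy
 */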
METHOD void copy_pages {
	mmu_t		_mmu;
	vm_page_t	*_ma;
	vm_offset_t	_a_offset;
	vm_page_t	*_mb;
	vm_offset_t	_b_offset;
	int		_xfersize;
};

/**
 * @brief Create a mapping between a virtual/physical address pair in the
 * passed physical map with the specified protection and wiring
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 * @param _p		mapping physical page
 * @param _prot		mapping page protection
 * @param _wired	TRUE if page will be wired
 */
METHOD void enter {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_page_t	_p;
	vm_prot_t	_prot;
	boolean_t	_wired;
};


/**
 * @brief Maps a sequence of resident pages belonging to the same object.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _m_start	physical page mapped at start
 * @param _prot		mapping page protection
 */
METHOD void enter_object {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	vm_page_t	_m_start;
	vm_prot_t	_prot;
};


/**
 * @brief A faster entry point for page mapping where it is possible
 * to short-circuit some of the tests in pmap_enter.
 *
 * @param _pmap		physical map (and also currently active pmap)
 * @param _va		mapping virtual address
 * @param _pg		mapping physical page
 * @param _prot		new page protection - used to determine if the
 *			page is executable
 */
METHOD void enter_quick {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_page_t	_pg;
	vm_prot_t	_prot;
};


/**
 * @brief Reverse map the given virtual address, returning the physical
 * page associated with the address if a mapping exists.
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 *
 * @retval 0		No mapping found
 * @retval addr		The mapping physical address
 */
METHOD vm_paddr_t extract {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
};


/**
 * @brief Reverse map the given virtual address, returning the
 * physical page if found. The page must be held (by calling
 * vm_page_hold) if the page protection matches the given protection.
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 * @param _prot		protection used to determine if physical page
 *			should be locked
 *
 * @retval NULL		No mapping found
 * @retval page		Pointer to physical page. Held if protections match
 */
METHOD vm_page_t extract_and_hold {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_prot_t	_prot;
};
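
/**
 * A sketch of the expected use, assuming the standard
 * pmap_extract_and_hold() wrapper; the caller must drop the hold when
 * done with the page (the locking shown matches this era's
 * vm_page_unhold() protocol):
 *
 * @code
 *	vm_page_t m;
 *
 *	m = pmap_extract_and_hold(pmap, va, VM_PROT_READ);
 *	if (m != NULL) {
 *		... access the page contents ...
 *		vm_page_lock(m);
 *		vm_page_unhold(m);
 *		vm_page_unlock(m);
 *	}
 * @endcode
 */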


/**
 * @brief Increase kernel virtual address space to the given virtual address.
 * Not strictly required on PowerPC, so this routine is optional unless the
 * MMU implementation has a use for it.
 *
 * @param _va		new upper limit for kernel virtual address space
 */
METHOD void growkernel {
	mmu_t		_mmu;
	vm_offset_t	_va;
} DEFAULT mmu_null_growkernel;


/**
 * @brief Called from vm_mem_init. Zone allocation is available at this
 * stage, making it a convenient time to create zones. This routine is for
 * MMU-implementation convenience and is optional.
 */
METHOD void init {
	mmu_t		_mmu;
} DEFAULT mmu_null_init;


/**
 * @brief Return whether the page has been marked as modified by the MMU
 * hardware
 *
 * @param _pg		physical page to test
 *
 * @retval boolean	TRUE if page has been modified
 */
METHOD boolean_t is_modified {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Return whether the specified virtual address is a candidate to be
 * prefaulted in. This routine is optional.
 *
 * @param _pmap		physical map
 * @param _va		virtual address to test
 *
 * @retval boolean	TRUE if the address is a candidate.
 */
METHOD boolean_t is_prefaultable {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
} DEFAULT mmu_null_is_prefaultable;


/**
 * @brief Return whether or not the specified physical page was referenced
 * in any physical maps.
 *
 * @param _pg		physical page
 *
 * @retval boolean	TRUE if page has been referenced
 */
METHOD boolean_t is_referenced {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Return a count of referenced bits for a page, clearing those bits.
 * Not all referenced bits need to be cleared, but it is necessary that 0
 * only be returned when there are none set.
 *
 * @param _pg		physical page
 *
 * @retval int		count of referenced bits
 */
METHOD int ts_referenced {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Map the requested physical address range into kernel virtual
 * address space. The value in _virt is taken as a hint. The virtual
 * address of the range is returned, or NULL if the mapping could not
 * be created. The range can be direct-mapped if that is supported.
 *
 * @param *_virt	Hint for start virtual address, and also return
 *			value
 * @param _start	physical address range start
 * @param _end		physical address range end
 * @param _prot		protection of range (currently ignored)
 *
 * @retval NULL		could not map the area
 * @retval addr, *_virt	mapping start virtual address
 */
METHOD vm_offset_t map {
	mmu_t		_mmu;
	vm_offset_t	*_virt;
	vm_paddr_t	_start;
	vm_paddr_t	_end;
	int		_prot;
};


/**
 * @brief Used to create a contiguous set of read-only mappings for a
 * given object to try to eliminate a cascade of on-demand faults as
 * the object is accessed sequentially. This routine is optional.
 *
 * @param _pmap		physical map
 * @param _addr		mapping start virtual address
 * @param _object	device-backed VM object to be mapped
 * @param _pindex	page-index within object of mapping start
 * @param _size		size in bytes of mapping
 */
METHOD void object_init_pt {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_addr;
	vm_object_t	_object;
	vm_pindex_t	_pindex;
	vm_size_t	_size;
} DEFAULT mmu_null_object_init_pt;


/**
 * @brief Used to determine if the specified page has a mapping for the
 * given physical map, by scanning the list of reverse-mappings from the
 * page. The list is scanned to a maximum of 16 entries.
 *
 * @param _pmap		physical map
 * @param _pg		physical page
 *
 * @retval bool		TRUE if the physical map was found in the first 16
 *			reverse-map list entries off the physical page.
 */
METHOD boolean_t page_exists_quick {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_page_t	_pg;
};


/**
 * @brief Initialise the machine-dependent section of the physical page
 * data structure. This routine is optional.
 *
 * @param _pg		physical page
 */
METHOD void page_init {
	mmu_t		_mmu;
	vm_page_t	_pg;
} DEFAULT mmu_null_page_init;


/**
 * @brief Count the number of managed mappings to the given physical
 * page that are wired.
 *
 * @param _pg		physical page
 *
 * @retval int		the number of wired, managed mappings to the
 *			given physical page
 */
METHOD int page_wired_mappings {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Initialise a physical map data structure
 *
 * @param _pmap		physical map
 */
METHOD void pinit {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Initialise the physical map for process 0, the initial process
 * in the system.
 * XXX default to pinit ?
 *
 * @param _pmap		physical map
 */
METHOD void pinit0 {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Set the protection for physical pages in the given virtual address
 * range to the given value.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _prot		new page protection
 */
METHOD void protect {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	vm_prot_t	_prot;
};


/**
 * @brief Create a mapping in kernel virtual address space for the given array
 * of wired physical pages.
 *
 * @param _start	mapping virtual address start
 * @param *_pg		array of physical page pointers
 * @param _count	array elements
 */
METHOD void qenter {
	mmu_t		_mmu;
	vm_offset_t	_start;
	vm_page_t	*_pg;
	int		_count;
};


/**
 * @brief Remove the temporary mappings created by qenter.
 *
 * @param _start	mapping virtual address start
 * @param _count	number of pages in mapping
 */
METHOD void qremove {
	mmu_t		_mmu;
	vm_offset_t	_start;
	int		_count;
};
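
/**
 * qenter and qremove are used as a pair to window an array of wired
 * pages through a previously reserved kernel virtual address range.
 * A sketch through the pmap wrappers; kva is assumed to have been
 * allocated earlier, e.g. from a kernel map:
 *
 * @code
 *	pmap_qenter(kva, ma, npages);
 *	... access the pages through (void *)kva ...
 *	pmap_qremove(kva, npages);
 * @endcode
 */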


/**
 * @brief Release per-pmap resources, e.g. mutexes and allocated memory.
 * There should be no existing mappings for the physical map at this point.
 *
 * @param _pmap		physical map
 */
METHOD void release {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Remove all mappings in the given physical map for the start/end
 * virtual address range. The range will be page-aligned.
 *
 * @param _pmap		physical map
 * @param _start	mapping virtual address start
 * @param _end		mapping virtual address end
 */
METHOD void remove {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
};


/**
 * @brief Traverse the reverse-map list off the given physical page and
 * remove all mappings. Clear the PGA_WRITEABLE attribute from the page.
 *
 * @param _pg		physical page
 */
METHOD void remove_all {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Remove all mappings in the given physical map. Similar to the
 * remove method, but it is used when tearing down an entire address
 * space. This method is optional, since pmap_remove will be called for
 * each valid vm_map in the address space later.
 *
 * @param _pmap		physical map
 */
METHOD void remove_pages {
	mmu_t		_mmu;
	pmap_t		_pmap;
} DEFAULT mmu_null_remove_pages;


/**
 * @brief Clear the wired attribute from the mappings for the specified range
 * of addresses in the given pmap.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 */
METHOD void unwire {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
};


/**
 * @brief Zero a physical page. It is not assumed that the page is mapped,
 * so a temporary (or direct) mapping may need to be used.
 *
 * @param _pg		physical page
 */
METHOD void zero_page {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Zero a portion of a physical page, starting at a given offset and
 * for a given size (multiples of 512 bytes for 4k pages).
 *
 * @param _pg		physical page
 * @param _off		byte offset from start of page
 * @param _size		size of area to zero
 */
METHOD void zero_page_area {
	mmu_t		_mmu;
	vm_page_t	_pg;
	int		_off;
	int		_size;
};


/**
 * @brief Called from the idle loop to zero pages. XXX I think locking
 * constraints might be different here compared to zero_page.
 *
 * @param _pg		physical page
 */
METHOD void zero_page_idle {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Extract mincore(2) information from a mapping.
 *
 * @param _pmap		physical map
 * @param _addr		page virtual address
 * @param _locked_pa	page physical address
 *
 * @retval 0		no result
 * @retval non-zero	mincore(2) flag values
 */
METHOD int mincore {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_addr;
	vm_paddr_t	*_locked_pa;
} DEFAULT mmu_null_mincore;


/**
 * @brief Perform any operations required to allow a physical map to be used
 * before its address space is accessed.
 *
 * @param _td		thread associated with physical map
 */
METHOD void activate {
	mmu_t		_mmu;
	struct thread	*_td;
};

/**
 * @brief Perform any operations required to deactivate a physical map,
 * for instance as it is context-switched out.
 *
 * @param _td		thread associated with physical map
 */
METHOD void deactivate {
	mmu_t		_mmu;
	struct thread	*_td;
} DEFAULT mmu_null_deactivate;

/**
 * @brief Return a hint for the best virtual address to map a tentative
 * virtual address range in a given VM object. The default is to just
 * return the given tentative start address.
 *
 * @param _obj		VM backing object
 * @param _offset	starting offset within the VM object
 * @param _addr		initial guess at virtual address
 * @param _size		size of virtual address range
 */
METHOD void align_superpage {
	mmu_t		_mmu;
	vm_object_t	_obj;
	vm_ooffset_t	_offset;
	vm_offset_t	*_addr;
	vm_size_t	_size;
} DEFAULT mmu_null_align_superpage;
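
/**
 * A non-null implementation typically bumps *_addr so that the virtual
 * address and the object offset become congruent modulo the superpage
 * size, allowing later promotion.  A rough sketch, assuming a single
 * superpage size with placeholder constants SP_SIZE and SP_MASK
 * (SP_MASK == SP_SIZE - 1):
 *
 * @code
 *	vm_offset_t sp_offset;
 *
 *	if (size < SP_SIZE)
 *		return;
 *	sp_offset = offset & SP_MASK;
 *	if ((*addr & SP_MASK) < sp_offset)
 *		*addr = (*addr & ~SP_MASK) + sp_offset;
 *	else
 *		*addr = ((*addr + SP_MASK) & ~SP_MASK) + sp_offset;
 * @endcode
 */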




/**
 * INTERNAL INTERFACES
 */

/**
 * @brief Bootstrap the VM system. At the completion of this routine, the
 * kernel will be running in its own address space with full control over
 * paging.
 *
 * @param _start	start of reserved memory (obsolete ???)
 * @param _end		end of reserved memory (obsolete ???)
 *			XXX I think the intent of these was to allow
 *			the memory used by kernel text+data+bss and
 *			loader variables/load-time kld's to be carved out
 *			of available physical mem.
 *
 */
METHOD void bootstrap {
	mmu_t		_mmu;
	vm_offset_t	_start;
	vm_offset_t	_end;
};

/**
 * @brief Set up the MMU on the current CPU. Only called by the PMAP layer
 * for alternate CPUs on SMP systems.
 *
 * @param _ap		Set to 1 if the CPU being set up is an AP
 *
 */
METHOD void cpu_bootstrap {
	mmu_t		_mmu;
	int		_ap;
};


/**
 * @brief Create a kernel mapping for a given physical address range.
 * Called by bus code on behalf of device drivers. The mapping does not
 * have to be a virtual address: it can be a direct-mapped physical address
 * if that is supported by the MMU.
 *
 * @param _pa		start physical address
 * @param _size		size in bytes of mapping
 *
 * @retval addr		address of mapping.
 */
METHOD void * mapdev {
	mmu_t		_mmu;
	vm_paddr_t	_pa;
	vm_size_t	_size;
};

/**
 * @brief Create a kernel mapping for a given physical address range.
 * Called by bus code on behalf of device drivers. The mapping does not
 * have to be a virtual address: it can be a direct-mapped physical address
 * if that is supported by the MMU.
 *
 * @param _pa		start physical address
 * @param _size		size in bytes of mapping
 * @param _attr		cache attributes
 *
 * @retval addr		address of mapping.
 */
METHOD void * mapdev_attr {
	mmu_t		_mmu;
	vm_paddr_t	_pa;
	vm_size_t	_size;
	vm_memattr_t	_attr;
} DEFAULT mmu_null_mapdev_attr;

/**
 * @brief Change cache control attributes for a page. Should modify all
 * mappings for that page.
 *
 * @param _pg		page to modify
 * @param _ma		new cache control attributes
 */
METHOD void page_set_memattr {
	mmu_t		_mmu;
	vm_page_t	_pg;
	vm_memattr_t	_ma;
} DEFAULT mmu_null_page_set_memattr;

/**
 * @brief Remove the mapping created by mapdev. Called when a driver
 * is unloaded.
 *
 * @param _va		Mapping address returned from mapdev
 * @param _size		size in bytes of mapping
 */
METHOD void unmapdev {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_size_t	_size;
};
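
/**
 * mapdev and unmapdev bracket the lifetime of a driver mapping.  A
 * sketch of the usual pairing through the pmap wrappers:
 *
 * @code
 *	void *va;
 *
 *	va = pmap_mapdev(pa, size);
 *	... device register accesses through va ...
 *	pmap_unmapdev((vm_offset_t)va, size);
 * @endcode
 */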


/**
 * @brief Reverse-map a kernel virtual address
 *
 * @param _va		kernel virtual address to reverse-map
 *
 * @retval pa		physical address corresponding to mapping
 */
METHOD vm_paddr_t kextract {
	mmu_t		_mmu;
	vm_offset_t	_va;
};


/**
 * @brief Map a wired page into kernel virtual address space
 *
 * @param _va		mapping virtual address
 * @param _pa		mapping physical address
 */
METHOD void kenter {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_paddr_t	_pa;
};

/**
 * @brief Map a wired page into kernel virtual address space
 *
 * @param _va		mapping virtual address
 * @param _pa		mapping physical address
 * @param _ma		mapping cache control attributes
 */
METHOD void kenter_attr {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_paddr_t	_pa;
	vm_memattr_t	_ma;
} DEFAULT mmu_null_kenter_attr;

/**
 * @brief Determine if the given physical address range has been direct-mapped.
 *
 * @param _pa		physical address start
 * @param _size		physical address range size
 *
 * @retval bool		TRUE if the range is direct-mapped.
 */
METHOD boolean_t dev_direct_mapped {
	mmu_t		_mmu;
	vm_paddr_t	_pa;
	vm_size_t	_size;
};


/**
 * @brief Enforce instruction cache coherency. Typically called after a
 * region of memory has been modified and before execution of or within
 * that region is attempted. Setting breakpoints in a process through
 * ptrace(2) is one example of when the instruction cache needs to be
 * made coherent.
 *
 * @param _pm		the physical map of the virtual address
 * @param _va		the virtual address of the modified region
 * @param _sz		the size of the modified region
 */
METHOD void sync_icache {
	mmu_t		_mmu;
	pmap_t		_pm;
	vm_offset_t	_va;
	vm_size_t	_sz;
};


/**
 * @brief Create a temporary memory mapping for use by dumpsys().
 *
 * @param _md		The memory chunk in which the mapping lies.
 * @param _ofs		The offset within the chunk of the mapping.
 * @param _sz		The requested size of the mapping.
 *
 * @retval vm_offset_t	The virtual address of the mapping.
 *
 * The sz argument is modified to reflect the actual size of the
 * mapping.
 */
METHOD vm_offset_t dumpsys_map {
	mmu_t		_mmu;
	struct pmap_md	*_md;
	vm_size_t	_ofs;
	vm_size_t	*_sz;
};


/**
 * @brief Remove a temporary dumpsys() mapping.
 *
 * @param _md		The memory chunk in which the mapping lies.
 * @param _ofs		The offset within the chunk of the mapping.
 * @param _va		The virtual address of the mapping.
 */
METHOD void dumpsys_unmap {
	mmu_t		_mmu;
	struct pmap_md	*_md;
	vm_size_t	_ofs;
	vm_offset_t	_va;
};


/**
 * @brief Scan/iterate memory chunks.
 *
 * @param _prev		The previously returned chunk or NULL.
 *
 * @retval		The next (or first when _prev is NULL) chunk.
 */
METHOD struct pmap_md * scan_md {
	mmu_t		_mmu;
	struct pmap_md	*_prev;
} DEFAULT mmu_null_scan_md;

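/**
 * The scan_md iteration protocol: pass NULL to obtain the first chunk,
 * then feed each returned chunk back in until NULL is returned.  A
 * sketch, assuming the pmap-layer wrapper is named pmap_scan_md():
 *
 * @code
 *	struct pmap_md *md;
 *
 *	for (md = pmap_scan_md(NULL); md != NULL; md = pmap_scan_md(md)) {
 *		... inspect the chunk ...
 *	}
 * @endcode
 */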