#-
# Copyright (c) 2005 Peter Grehan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD$
#

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/mmuvar.h>

/**
 * @defgroup MMU mmu - KObj methods for PowerPC MMU implementations
 * @brief A set of methods required by all MMU implementations. These
 * are essentially direct call-throughs from the machine-dependent
 * pmap code.
 * Thanks to Bruce M Simpson's pmap man pages for the routine descriptions.
 *@{
 */

INTERFACE mmu;
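
#
# How an implementation hooks in: an MMU provider builds a KObj method
# table and registers it through the glue in <machine/mmuvar.h>. A
# minimal sketch follows; the "hypo_*" names are hypothetical, and the
# exact MMU_DEF() arguments should be checked against mmuvar.h:
#
#	static mmu_method_t hypo_methods[] = {
#		MMUMETHOD(mmu_enter,	hypo_enter),
#		MMUMETHOD(mmu_extract,	hypo_extract),
#		MMUMETHOD(mmu_remove,	hypo_remove),
#		/* ... one MMUMETHOD() entry per method implemented ... */
#		{ 0, 0 }
#	};
#
#	MMU_DEF(hypo_mmu, "mmu_hypo", hypo_methods, 0);
#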

#
# Default implementations of some methods
#
CODE {
	static void mmu_null_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
	    vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
	{
		return;
	}

	static void mmu_null_growkernel(mmu_t mmu, vm_offset_t addr)
	{
		return;
	}

	static void mmu_null_init(mmu_t mmu)
	{
		return;
	}

	static boolean_t mmu_null_is_prefaultable(mmu_t mmu, pmap_t pmap,
	    vm_offset_t va)
	{
		return (FALSE);
	}

	static void mmu_null_object_init_pt(mmu_t mmu, pmap_t pmap,
	    vm_offset_t addr, vm_object_t object, vm_pindex_t index,
	    vm_size_t size)
	{
		return;
	}

	static void mmu_null_page_init(mmu_t mmu, vm_page_t m)
	{
		return;
	}

	static void mmu_null_remove_pages(mmu_t mmu, pmap_t pmap)
	{
		return;
	}

	static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
	    vm_paddr_t *locked_pa)
	{
		return (0);
	}

	static void mmu_null_deactivate(struct thread *td)
	{
		return;
	}

	static void mmu_null_align_superpage(mmu_t mmu, vm_object_t object,
	    vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size)
	{
		return;
	}

	static struct pmap_md *mmu_null_scan_md(mmu_t mmu, struct pmap_md *p)
	{
		return (NULL);
	}

	static void *mmu_null_mapdev_attr(mmu_t mmu, vm_offset_t pa,
	    vm_size_t size, vm_memattr_t ma)
	{
		return (MMU_MAPDEV(mmu, pa, size));
	}

	static void mmu_null_kenter_attr(mmu_t mmu, vm_offset_t va,
	    vm_offset_t pa, vm_memattr_t ma)
	{
		MMU_KENTER(mmu, va, pa);
	}

	static void mmu_null_page_set_memattr(mmu_t mmu, vm_page_t m,
	    vm_memattr_t ma)
	{
		return;
	}
};
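
#
# The machine-dependent pmap entry points are thin call-throughs into
# these methods via the KObj stubs this file generates (MMU_ENTER(),
# MMU_EXTRACT() and so on). A sketch of the dispatch side, mirroring the
# method signatures below; "mmu_obj" stands for the MMU instance
# selected at boot, and the exact pmap_* prototypes may differ by branch:
#
#	void
#	pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
#	    boolean_t wired)
#	{
#		MMU_ENTER(mmu_obj, pmap, va, m, prot, wired);
#	}
#
#	vm_paddr_t
#	pmap_extract(pmap_t pmap, vm_offset_t va)
#	{
#		return (MMU_EXTRACT(mmu_obj, pmap, va));
#	}
#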


/**
 * @brief Change the wiring attribute for the page in the given physical
 * map and virtual address.
 *
 * @param _pmap		physical map of page
 * @param _va		page virtual address
 * @param _wired	TRUE to increment wired count, FALSE to decrement
 */
METHOD void change_wiring {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	boolean_t	_wired;
};


/**
 * @brief Clear the 'modified' bit on the given physical page
 *
 * @param _pg		physical page
 */
METHOD void clear_modify {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Clear the 'referenced' bit on the given physical page
 *
 * @param _pg		physical page
 */
METHOD void clear_reference {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Clear the write and modified bits in each of the given
 * physical page's mappings
 *
 * @param _pg		physical page
 */
METHOD void remove_write {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Copy the address range given by the source physical map, virtual
 * address and length to the destination physical map and virtual address.
 * This routine is optional; a null implementation is provided as the
 * default.
 *
 * @param _dst_pmap	destination physical map
 * @param _src_pmap	source physical map
 * @param _dst_addr	destination virtual address
 * @param _len		size of range
 * @param _src_addr	source virtual address
 */
METHOD void copy {
	mmu_t		_mmu;
	pmap_t		_dst_pmap;
	pmap_t		_src_pmap;
	vm_offset_t	_dst_addr;
	vm_size_t	_len;
	vm_offset_t	_src_addr;
} DEFAULT mmu_null_copy;


/**
 * @brief Copy the source physical page to the destination physical page
 *
 * @param _src		source physical page
 * @param _dst		destination physical page
 */
METHOD void copy_page {
	mmu_t		_mmu;
	vm_page_t	_src;
	vm_page_t	_dst;
};

/**
 * @brief Copy _xfersize bytes from offset _a_offset within the pages _ma
 * to offset _b_offset within the pages _mb.
 *
 * @param *_ma		source page array
 * @param _a_offset	byte offset into the source array
 * @param *_mb		destination page array
 * @param _b_offset	byte offset into the destination array
 * @param _xfersize	number of bytes to copy
 */
METHOD void copy_pages {
	mmu_t		_mmu;
	vm_page_t	*_ma;
	vm_offset_t	_a_offset;
	vm_page_t	*_mb;
	vm_offset_t	_b_offset;
	int		_xfersize;
};

/**
 * @brief Create a mapping between a virtual/physical address pair in the
 * passed physical map with the specified protection and wiring
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 * @param _p		mapping physical page
 * @param _prot		mapping page protection
 * @param _wired	TRUE if page will be wired
 */
METHOD void enter {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_page_t	_p;
	vm_prot_t	_prot;
	boolean_t	_wired;
};


/**
 * @brief Maps a sequence of resident pages belonging to the same object.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _m_start	physical page mapped at start
 * @param _prot		mapping page protection
 */
METHOD void enter_object {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	vm_page_t	_m_start;
	vm_prot_t	_prot;
};


/**
 * @brief A faster entry point for page mapping where it is possible
 * to short-circuit some of the tests in pmap_enter.
 *
 * @param _pmap		physical map (and also currently active pmap)
 * @param _va		mapping virtual address
 * @param _pg		mapping physical page
 * @param _prot		new page protection - used to see if page is executable
 */
METHOD void enter_quick {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_page_t	_pg;
	vm_prot_t	_prot;
};


/**
 * @brief Reverse map the given virtual address, returning the physical
 * page associated with the address if a mapping exists.
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 *
 * @retval 0		No mapping found
 * @retval addr		The mapping physical address
 */
METHOD vm_paddr_t extract {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
};
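
#
# Caller-side sketch: the dispatch layer exposes this method as
# pmap_extract(), so a VA-to-PA lookup with a "no mapping" test is:
#
#	vm_paddr_t pa;
#
#	pa = pmap_extract(pmap, va);
#	if (pa == 0) {
#		/* no valid mapping at va */
#	}
#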


/**
 * @brief Reverse map the given virtual address, returning the
 * physical page if found. The page must be held (by calling
 * vm_page_hold) if the page protection matches the given protection
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 * @param _prot		protection used to determine if physical page
 *			should be locked
 *
 * @retval NULL		No mapping found
 * @retval page		Pointer to physical page. Held if protections match
 */
METHOD vm_page_t extract_and_hold {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_prot_t	_prot;
};
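
#
# Usage sketch (locking around the hold/unhold protocol elided): the
# returned page stays held so it cannot be freed while the caller uses
# it, and the caller drops the hold when done:
#
#	vm_page_t m;
#
#	m = pmap_extract_and_hold(pmap, va, VM_PROT_READ);
#	if (m != NULL) {
#		/* ... access the page safely ... */
#		vm_page_unhold(m);
#	}
#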


/**
 * @brief Increase kernel virtual address space to the given virtual address.
 * Not really required for PowerPC, so optional unless the MMU implementation
 * can use it.
 *
 * @param _va		new upper limit for kernel virtual address space
 */
METHOD void growkernel {
	mmu_t		_mmu;
	vm_offset_t	_va;
} DEFAULT mmu_null_growkernel;


/**
 * @brief Called from vm_mem_init. Zone allocation is available at
 * this stage, so it is a convenient time to create zones. This routine is
 * for MMU-implementation convenience and is optional.
 */
METHOD void init {
	mmu_t		_mmu;
} DEFAULT mmu_null_init;


/**
 * @brief Return whether the page has been marked as modified by the MMU
 * hardware
 *
 * @param _pg		physical page to test
 *
 * @retval boolean	TRUE if page has been modified
 */
METHOD boolean_t is_modified {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Return whether the specified virtual address is a candidate to be
 * prefaulted in. This routine is optional.
 *
 * @param _pmap		physical map
 * @param _va		virtual address to test
 *
 * @retval boolean	TRUE if the address is a candidate.
 */
METHOD boolean_t is_prefaultable {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
} DEFAULT mmu_null_is_prefaultable;


/**
 * @brief Return whether or not the specified physical page was referenced
 * in any physical maps.
 *
 * @param _pg		physical page
 *
 * @retval boolean	TRUE if page has been referenced
 */
METHOD boolean_t is_referenced {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Return a count of referenced bits for a page, clearing those bits.
 * Not all referenced bits need to be cleared, but it is necessary that 0
 * only be returned when there are none set.
 *
 * @param _pg		physical page
 *
 * @retval int		count of referenced bits
 */
METHOD int ts_referenced {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Map the requested physical address range into kernel virtual
 * address space. The value in _virt is taken as a hint. The virtual
 * address of the range is returned, or NULL if the mapping could not
 * be created. The range can be direct-mapped if that is supported.
 *
 * @param *_virt	Hint for start virtual address, and also return
 *			value
 * @param _start	physical address range start
 * @param _end		physical address range end
 * @param _prot		protection of range (currently ignored)
 *
 * @retval NULL		could not map the area
 * @retval addr, *_virt	mapping start virtual address
 */
METHOD vm_offset_t map {
	mmu_t		_mmu;
	vm_offset_t	*_virt;
	vm_paddr_t	_start;
	vm_paddr_t	_end;
	int		_prot;
};


/**
 * @brief Used to create a contiguous set of read-only mappings for a
 * given object to try and eliminate a cascade of on-demand faults as
 * the object is accessed sequentially. This routine is optional.
 *
 * @param _pmap		physical map
 * @param _addr		mapping start virtual address
 * @param _object	device-backed V.M. object to be mapped
 * @param _pindex	page-index within object of mapping start
 * @param _size		size in bytes of mapping
 */
METHOD void object_init_pt {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_addr;
	vm_object_t	_object;
	vm_pindex_t	_pindex;
	vm_size_t	_size;
} DEFAULT mmu_null_object_init_pt;


/**
 * @brief Used to determine if the specified page has a mapping for the
 * given physical map, by scanning the list of reverse-mappings from the
 * page. The list is scanned to a maximum of 16 entries.
 *
 * @param _pmap		physical map
 * @param _pg		physical page
 *
 * @retval bool		TRUE if the physical map was found in the first 16
 *			reverse-map list entries off the physical page.
 */
METHOD boolean_t page_exists_quick {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_page_t	_pg;
};


/**
 * @brief Initialise the machine-dependent section of the physical page
 * data structure. This routine is optional.
 *
 * @param _pg		physical page
 */
METHOD void page_init {
	mmu_t		_mmu;
	vm_page_t	_pg;
} DEFAULT mmu_null_page_init;


/**
 * @brief Count the number of managed mappings to the given physical
 * page that are wired.
 *
 * @param _pg		physical page
 *
 * @retval int		the number of wired, managed mappings to the
 *			given physical page
 */
METHOD int page_wired_mappings {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Initialise a physical map data structure
 *
 * @param _pmap		physical map
 */
METHOD void pinit {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Initialise the physical map for process 0, the initial process
 * in the system.
 * XXX default to pinit ?
 *
 * @param _pmap		physical map
 */
METHOD void pinit0 {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Set the protection for physical pages in the given virtual address
 * range to the given value.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _prot		new page protection
 */
METHOD void protect {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	vm_prot_t	_prot;
};


/**
 * @brief Create a mapping in kernel virtual address space for the given array
 * of wired physical pages.
 *
 * @param _start	mapping virtual address start
 * @param *_pg		array of physical page pointers
 * @param _count	array elements
 */
METHOD void qenter {
	mmu_t		_mmu;
	vm_offset_t	_start;
	vm_page_t	*_pg;
	int		_count;
};


/**
 * @brief Remove the temporary mappings created by qenter.
 *
 * @param _start	mapping virtual address start
 * @param _count	number of pages in mapping
 */
METHOD void qremove {
	mmu_t		_mmu;
	vm_offset_t	_start;
	int		_count;
};
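
#
# qenter and qremove are used as a pair for short-lived kernel mappings
# of page arrays. Sketch, assuming "kva" is a previously reserved kernel
# virtual address range large enough for "count" pages:
#
#	pmap_qenter(kva, ma, count);	/* map the wired pages at kva */
#	/* ... access the pages' data through kva ... */
#	pmap_qremove(kva, count);	/* tear the mappings back down */
#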


/**
 * @brief Release per-pmap resources, e.g. mutexes, allocated memory etc. There
 * should be no existing mappings for the physical map at this point
 *
 * @param _pmap		physical map
 */
METHOD void release {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Remove all mappings in the given physical map for the start/end
 * virtual address range. The range will be page-aligned.
 *
 * @param _pmap		physical map
 * @param _start	mapping virtual address start
 * @param _end		mapping virtual address end
 */
METHOD void remove {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
};


/**
 * @brief Traverse the reverse-map list off the given physical page and
 * remove all mappings. Clear the PGA_WRITEABLE attribute from the page.
 *
 * @param _pg		physical page
 */
METHOD void remove_all {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Remove all mappings in the given physical map. Similar to the
 * remove method, but it is used when tearing down an entire address
 * space. This method is optional, since pmap_remove will be called for
 * each valid vm_map in the address space later.
 *
 * @param _pmap		physical map
 */
METHOD void remove_pages {
	mmu_t		_mmu;
	pmap_t		_pmap;
} DEFAULT mmu_null_remove_pages;


/**
 * @brief Zero a physical page. It is not assumed that the page is mapped,
 * so a temporary (or direct) mapping may need to be used.
 *
 * @param _pg		physical page
 */
METHOD void zero_page {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Zero a portion of a physical page, starting at a given offset and
 * for a given size (multiples of 512 bytes for 4k pages).
 *
 * @param _pg		physical page
 * @param _off		byte offset from start of page
 * @param _size		size of area to zero
 */
METHOD void zero_page_area {
	mmu_t		_mmu;
	vm_page_t	_pg;
	int		_off;
	int		_size;
};
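
#
# Example use: the buffer cache zeroes invalid sub-page regions of a
# buffer this way. Sketch, clearing one 512-byte block ("off" must lie
# within the page):
#
#	pmap_zero_page_area(m, off, 512);
#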


/**
 * @brief Called from the idle loop to zero pages. XXX I think locking
 * constraints might be different here compared to zero_page.
 *
 * @param _pg		physical page
 */
METHOD void zero_page_idle {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Extract mincore(2) information from a mapping.
 *
 * @param _pmap		physical map
 * @param _addr		page virtual address
 * @param _locked_pa	page physical address
 *
 * @retval 0		no result
 * @retval non-zero	mincore(2) flag values
 */
METHOD int mincore {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_addr;
	vm_paddr_t	*_locked_pa;
} DEFAULT mmu_null_mincore;


/**
 * @brief Perform any operations required to allow a physical map to be used
 * before its address space is accessed.
 *
 * @param _td		thread associated with physical map
 */
METHOD void activate {
	mmu_t		_mmu;
	struct thread	*_td;
};

/**
 * @brief Perform any operations required to deactivate a physical map,
 * for instance as it is context-switched out.
 *
 * @param _td		thread associated with physical map
 */
METHOD void deactivate {
	mmu_t		_mmu;
	struct thread	*_td;
} DEFAULT mmu_null_deactivate;

/**
 * @brief Return a hint for the best virtual address to map a tentative
 * virtual address range in a given VM object. The default is to just
 * return the given tentative start address.
 *
 * @param _obj		VM backing object
 * @param _offset	starting offset within the VM object
 * @param _addr		initial guess at virtual address
 * @param _size		size of virtual address range
 */
METHOD void align_superpage {
	mmu_t		_mmu;
	vm_object_t	_obj;
	vm_ooffset_t	_offset;
	vm_offset_t	*_addr;
	vm_size_t	_size;
} DEFAULT mmu_null_align_superpage;


/**
 * INTERNAL INTERFACES
 */

/**
 * @brief Bootstrap the VM system. At the completion of this routine, the
 * kernel will be running in its own address space with full control over
 * paging.
 *
 * @param _start	start of reserved memory (obsolete ???)
 * @param _end		end of reserved memory (obsolete ???)
 *			XXX I think the intent of these was to allow
 *			the memory used by kernel text+data+bss and
 *			loader variables/load-time kld's to be carved out
 *			of available physical mem.
 *
 */
METHOD void bootstrap {
	mmu_t		_mmu;
	vm_offset_t	_start;
	vm_offset_t	_end;
};

/**
 * @brief Set up the MMU on the current CPU. Only called by the PMAP layer
 * for alternate CPUs on SMP systems.
 *
 * @param _ap		Set to 1 if the CPU being set up is an AP
 *
 */
METHOD void cpu_bootstrap {
	mmu_t		_mmu;
	int		_ap;
};


/**
 * @brief Create a kernel mapping for a given physical address range.
 * Called by bus code on behalf of device drivers. The mapping does not
 * have to be a virtual address: it can be a direct-mapped physical address
 * if that is supported by the MMU.
 *
 * @param _pa		start physical address
 * @param _size		size in bytes of mapping
 *
 * @retval addr		address of mapping.
 */
METHOD void * mapdev {
	mmu_t		_mmu;
	vm_offset_t	_pa;
	vm_size_t	_size;
};
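
#
# Driver-side sketch (normally reached through the bus layer; the
# address below is hypothetical):
#
#	void *regs;
#
#	regs = pmap_mapdev(0x80000000, PAGE_SIZE);	/* map device registers */
#	/* ... register accesses through regs ... */
#	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);	/* on detach */
#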

/**
 * @brief Create a kernel mapping for a given physical address range.
 * Called by bus code on behalf of device drivers. The mapping does not
 * have to be a virtual address: it can be a direct-mapped physical address
 * if that is supported by the MMU.
 *
 * @param _pa		start physical address
 * @param _size		size in bytes of mapping
 * @param _attr		cache attributes
 *
 * @retval addr		address of mapping.
 */
METHOD void * mapdev_attr {
	mmu_t		_mmu;
	vm_offset_t	_pa;
	vm_size_t	_size;
	vm_memattr_t	_attr;
} DEFAULT mmu_null_mapdev_attr;

/**
 * @brief Change cache control attributes for a page. Should modify all
 * mappings for that page.
 *
 * @param _pg		page to modify
 * @param _ma		new cache control attributes
 */
METHOD void page_set_memattr {
	mmu_t		_mmu;
	vm_page_t	_pg;
	vm_memattr_t	_ma;
} DEFAULT mmu_null_page_set_memattr;

/**
 * @brief Remove the mapping created by mapdev. Called when a driver
 * is unloaded.
 *
 * @param _va		Mapping address returned from mapdev
 * @param _size		size in bytes of mapping
 */
METHOD void unmapdev {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_size_t	_size;
};


/**
 * @brief Reverse-map a kernel virtual address
 *
 * @param _va		kernel virtual address to reverse-map
 *
 * @retval pa		physical address corresponding to mapping
 */
METHOD vm_offset_t kextract {
	mmu_t		_mmu;
	vm_offset_t	_va;
};


/**
 * @brief Map a wired page into kernel virtual address space
 *
 * @param _va		mapping virtual address
 * @param _pa		mapping physical address
 */
METHOD void kenter {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_offset_t	_pa;
};
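
#
# kenter and kextract are inverses over the kernel address space.
# Sketch:
#
#	pmap_kenter(va, pa);		/* wire pa into the kernel at va */
#	KASSERT(pmap_kextract(va) == pa,
#	    ("kextract did not recover the entered pa"));
#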

/**
 * @brief Map a wired page into kernel virtual address space
 *
 * @param _va		mapping virtual address
 * @param _pa		mapping physical address
 * @param _ma		mapping cache control attributes
 */
METHOD void kenter_attr {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_offset_t	_pa;
	vm_memattr_t	_ma;
} DEFAULT mmu_null_kenter_attr;

/**
 * @brief Determine if the given physical address range has been direct-mapped.
 *
 * @param _pa		physical address start
 * @param _size		physical address range size
 *
 * @retval bool		TRUE if the range is direct-mapped.
 */
METHOD boolean_t dev_direct_mapped {
	mmu_t		_mmu;
	vm_offset_t	_pa;
	vm_size_t	_size;
};


/**
 * @brief Enforce instruction cache coherency. Typically called after a
 * region of memory has been modified and before execution of or within
 * that region is attempted. Setting breakpoints in a process through
 * ptrace(2) is one example of when the instruction cache needs to be
 * made coherent.
 *
 * @param _pm		the physical map of the virtual address
 * @param _va		the virtual address of the modified region
 * @param _sz		the size of the modified region
 */
METHOD void sync_icache {
	mmu_t		_mmu;
	pmap_t		_pm;
	vm_offset_t	_va;
	vm_size_t	_sz;
};
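
#
# Sketch of the ptrace(2) breakpoint case mentioned above: after a trap
# instruction has been written into the target's text, the modified
# range must be flushed before it is executed:
#
#	pmap_sync_icache(vmspace_pmap(td->td_proc->p_vmspace), va,
#	    sizeof(uint32_t));
#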


/**
 * @brief Create temporary memory mapping for use by dumpsys().
 *
 * @param _md		The memory chunk in which the mapping lies.
 * @param _ofs		The offset within the chunk of the mapping.
 * @param _sz		The requested size of the mapping.
 *
 * @retval vm_offset_t	The virtual address of the mapping.
 *
 * The sz argument is modified to reflect the actual size of the
 * mapping.
 */
METHOD vm_offset_t dumpsys_map {
	mmu_t		_mmu;
	struct pmap_md	*_md;
	vm_size_t	_ofs;
	vm_size_t	*_sz;
};


/**
 * @brief Remove temporary dumpsys() mapping.
 *
 * @param _md		The memory chunk in which the mapping lies.
 * @param _ofs		The offset within the chunk of the mapping.
 * @param _va		The virtual address of the mapping.
 */
METHOD void dumpsys_unmap {
	mmu_t		_mmu;
	struct pmap_md	*_md;
	vm_size_t	_ofs;
	vm_offset_t	_va;
};


/**
 * @brief Scan/iterate memory chunks.
 *
 * @param _prev		The previously returned chunk or NULL.
 *
 * @retval		The next (or first when _prev is NULL) chunk.
 */
METHOD struct pmap_md * scan_md {
	mmu_t		_mmu;
	struct pmap_md	*_prev;
} DEFAULT mmu_null_scan_md;
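
#
# scan_md, dumpsys_map and dumpsys_unmap combine into the dumpsys()
# iteration. A sketch of that loop; the pmap_* wrapper spellings and the
# md_size field name are assumed here:
#
#	struct pmap_md *md;
#	vm_offset_t va;
#	vm_size_t ofs, sz;
#
#	for (md = pmap_scan_md(NULL); md != NULL; md = pmap_scan_md(md)) {
#		for (ofs = 0; ofs < md->md_size; ofs += sz) {
#			sz = md->md_size - ofs;
#			va = pmap_dumpsys_map(md, ofs, &sz);
#			/* ... write sz bytes at va to the dump device ... */
#			pmap_dumpsys_unmap(md, ofs, va);
#		}
#	}
#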