#-
# Copyright (c) 2005 Peter Grehan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD: stable/10/sys/powerpc/powerpc/mmu_if.m 270439 2014-08-24 07:53:15Z kib $
#

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/mmuvar.h>

/**
 * @defgroup MMU mmu - KObj methods for PowerPC MMU implementations
 * @brief A set of methods required by all MMU implementations. These
 * are essentially direct call-throughs from the machine-dependent
 * pmap code.
 * Thanks to Bruce M Simpson's pmap man pages for routine descriptions.
 *@{
 */

INTERFACE mmu;

#
# Default implementations of some methods
#
CODE {
	static void mmu_null_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
	    vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
	{
		return;
	}

	static void mmu_null_growkernel(mmu_t mmu, vm_offset_t addr)
	{
		return;
	}

	static void mmu_null_init(mmu_t mmu)
	{
		return;
	}

	static boolean_t mmu_null_is_prefaultable(mmu_t mmu, pmap_t pmap,
	    vm_offset_t va)
	{
		return (FALSE);
	}

	static void mmu_null_object_init_pt(mmu_t mmu, pmap_t pmap,
	    vm_offset_t addr, vm_object_t object, vm_pindex_t index,
	    vm_size_t size)
	{
		return;
	}

	static void mmu_null_page_init(mmu_t mmu, vm_page_t m)
	{
		return;
	}

	static void mmu_null_remove_pages(mmu_t mmu, pmap_t pmap)
	{
		return;
	}

	static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
	    vm_paddr_t *locked_pa)
	{
		return (0);
	}

	static void mmu_null_deactivate(struct thread *td)
	{
		return;
	}

	static void mmu_null_align_superpage(mmu_t mmu, vm_object_t object,
	    vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size)
	{
		return;
	}

	static struct pmap_md *mmu_null_scan_md(mmu_t mmu, struct pmap_md *p)
	{
		return (NULL);
	}

	static void *mmu_null_mapdev_attr(mmu_t mmu, vm_offset_t pa,
	    vm_size_t size, vm_memattr_t ma)
	{
		return (MMU_MAPDEV(mmu, pa, size));
	}

	static void mmu_null_kenter_attr(mmu_t mmu, vm_offset_t va,
	    vm_offset_t pa, vm_memattr_t ma)
	{
		MMU_KENTER(mmu, va, pa);
	}

	static void mmu_null_page_set_memattr(mmu_t mmu, vm_page_t m,
	    vm_memattr_t ma)
	{
		return;
	}
};
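
#
# Usage sketch (illustrative, not part of this interface): an MMU
# implementation provides these methods through a kobj method table and
# registers itself with MMU_DEF from <machine/mmuvar.h>. The names
# "moea_methods", "moea_enter" and "oea_mmu" below are assumptions in
# the style of the AIM OEA pmap, shown only to illustrate the pattern:
#
#	static mmu_method_t moea_methods[] = {
#		MMUMETHOD(mmu_enter,	moea_enter),
#		/* ... one entry per METHOD implemented ... */
#		{ 0, 0 }
#	};
#
#	MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods, 0);
#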


/**
 * @brief Apply the given advice to the specified range of addresses within
 * the given pmap.  Depending on the advice, clear the referenced and/or
 * modified flags in each mapping and set the mapped page's dirty field.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _advice	advice to apply
 */
METHOD void advise {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	int		_advice;
};


/**
 * @brief Change the wiring attribute for the page in the given physical
 * map and virtual address.
 *
 * @param _pmap		physical map of page
 * @param _va		page virtual address
 * @param _wired	TRUE to increment wired count, FALSE to decrement
 */
METHOD void change_wiring {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	boolean_t	_wired;
};


/**
 * @brief Clear the 'modified' bit on the given physical page
 *
 * @param _pg		physical page
 */
METHOD void clear_modify {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Clear the write and modified bits in each of the given
 * physical page's mappings
 *
 * @param _pg		physical page
 */
METHOD void remove_write {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Copy the address range given by the source physical map, virtual
 * address and length to the destination physical map and virtual address.
 * This routine is optional; it defaults to a null implementation.
 *
 * @param _dst_pmap	destination physical map
 * @param _src_pmap	source physical map
 * @param _dst_addr	destination virtual address
 * @param _len		size of range
 * @param _src_addr	source virtual address
 */
METHOD void copy {
	mmu_t		_mmu;
	pmap_t		_dst_pmap;
	pmap_t		_src_pmap;
	vm_offset_t	_dst_addr;
	vm_size_t	_len;
	vm_offset_t	_src_addr;
} DEFAULT mmu_null_copy;


/**
 * @brief Copy the source physical page to the destination physical page
 *
 * @param _src		source physical page
 * @param _dst		destination physical page
 */
METHOD void copy_page {
	mmu_t		_mmu;
	vm_page_t	_src;
	vm_page_t	_dst;
};

/**
 * @brief Copy _xfersize bytes from offset _a_offset in the array of
 * pages _ma to offset _b_offset in the array of pages _mb; either
 * transfer may span page boundaries within its array.
 */
METHOD void copy_pages {
	mmu_t		_mmu;
	vm_page_t	*_ma;
	vm_offset_t	_a_offset;
	vm_page_t	*_mb;
	vm_offset_t	_b_offset;
	int		_xfersize;
};

/**
 * @brief Create a mapping between a virtual/physical address pair in the
 * passed physical map with the specified protection and wiring
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 * @param _p		mapping physical page
 * @param _prot		mapping page protection
 * @param _flags	pmap_enter flags
 * @param _psind	superpage size index
 */
METHOD int enter {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_page_t	_p;
	vm_prot_t	_prot;
	u_int		_flags;
	int8_t		_psind;
};
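
#
# Illustrative only: the machine-independent pmap layer reaches this
# method through the MMU_ENTER() wrapper generated from this file, and
# the int return is a KERN_* VM error code. A hedged sketch of a
# dispatch stub in the style of powerpc's pmap_dispatch.c, where
# "mmu_obj" is assumed to be the pmap layer's mmu kobj instance:
#
#	int
#	pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t p, vm_prot_t prot,
#	    u_int flags, int8_t psind)
#	{
#
#		return (MMU_ENTER(mmu_obj, pmap, va, p, prot, flags, psind));
#	}
#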


/**
 * @brief Maps a sequence of resident pages belonging to the same object.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _m_start	physical page mapped at start
 * @param _prot		mapping page protection
 */
METHOD void enter_object {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	vm_page_t	_m_start;
	vm_prot_t	_prot;
};


/**
 * @brief A faster entry point for page mapping where it is possible
 * to short-circuit some of the tests in pmap_enter.
 *
 * @param _pmap		physical map (and also currently active pmap)
 * @param _va		mapping virtual address
 * @param _pg		mapping physical page
 * @param _prot		new page protection - used to see if page is exec.
 */
METHOD void enter_quick {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_page_t	_pg;
	vm_prot_t	_prot;
};


/**
 * @brief Reverse map the given virtual address, returning the physical
 * page associated with the address if a mapping exists.
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 *
 * @retval 0		No mapping found
 * @retval addr		The mapping physical address
 */
METHOD vm_paddr_t extract {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
};


/**
 * @brief Reverse map the given virtual address, returning the
 * physical page if found. The page must be held (by calling
 * vm_page_hold) if the page protection matches the given protection
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 * @param _prot		protection used to determine if physical page
 *			should be locked
 *
 * @retval NULL		No mapping found
 * @retval page		Pointer to physical page. Held if protections match
 */
METHOD vm_page_t extract_and_hold {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_prot_t	_prot;
};


/**
 * @brief Increase kernel virtual address space to the given virtual address.
 * Not really required on PowerPC, so this routine is optional unless the
 * MMU implementation can make use of it.
 *
 * @param _va		new upper limit for kernel virtual address space
 */
METHOD void growkernel {
	mmu_t		_mmu;
	vm_offset_t	_va;
} DEFAULT mmu_null_growkernel;


/**
 * @brief Called from vm_mem_init. Zone allocation is available at
 * this stage, so it is a convenient time to create zones. This routine is
 * for MMU-implementation convenience and is optional.
 */
METHOD void init {
	mmu_t		_mmu;
} DEFAULT mmu_null_init;


/**
 * @brief Return whether the page has been marked modified by the MMU
 * hardware.
 *
 * @param _pg		physical page to test
 *
 * @retval boolean	TRUE if page has been modified
 */
METHOD boolean_t is_modified {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Return whether the specified virtual address is a candidate to be
 * prefaulted in. This routine is optional.
 *
 * @param _pmap		physical map
 * @param _va		virtual address to test
 *
 * @retval boolean	TRUE if the address is a candidate.
 */
METHOD boolean_t is_prefaultable {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
} DEFAULT mmu_null_is_prefaultable;


/**
 * @brief Return whether or not the specified physical page was referenced
 * in any physical maps.
 *
 * @param _pg		physical page
 *
 * @retval boolean	TRUE if page has been referenced
 */
METHOD boolean_t is_referenced {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Return a count of referenced bits for a page, clearing those bits.
 * Not all referenced bits need to be cleared, but it is necessary that 0
 * only be returned when there are none set.
 *
 * @param _pg		physical page
 *
 * @retval int		count of referenced bits
 */
METHOD int ts_referenced {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Map the requested physical address range into kernel virtual
 * address space. The value in _virt is taken as a hint. The virtual
 * address of the range is returned, or NULL if the mapping could not
 * be created. The range can be direct-mapped if that is supported.
 *
 * @param *_virt	Hint for start virtual address, and also return
 *			value
 * @param _start	physical address range start
 * @param _end		physical address range end
 * @param _prot		protection of range (currently ignored)
 *
 * @retval NULL		could not map the area
 * @retval addr, *_virt	mapping start virtual address
 */
METHOD vm_offset_t map {
	mmu_t		_mmu;
	vm_offset_t	*_virt;
	vm_paddr_t	_start;
	vm_paddr_t	_end;
	int		_prot;
};


/**
 * @brief Used to create a contiguous set of read-only mappings for a
 * given object to try to eliminate a cascade of on-demand faults as
 * the object is accessed sequentially. This routine is optional.
 *
 * @param _pmap		physical map
 * @param _addr		mapping start virtual address
 * @param _object	device-backed VM object to be mapped
 * @param _pindex	page-index within object of mapping start
 * @param _size		size in bytes of mapping
 */
METHOD void object_init_pt {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_addr;
	vm_object_t	_object;
	vm_pindex_t	_pindex;
	vm_size_t	_size;
} DEFAULT mmu_null_object_init_pt;


/**
 * @brief Used to determine if the specified page has a mapping for the
 * given physical map, by scanning the list of reverse-mappings from the
 * page. The list is scanned to a maximum of 16 entries.
 *
 * @param _pmap		physical map
 * @param _pg		physical page
 *
 * @retval bool		TRUE if the physical map was found in the first 16
 *			reverse-map list entries off the physical page.
 */
METHOD boolean_t page_exists_quick {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_page_t	_pg;
};


/**
 * @brief Initialise the machine-dependent section of the physical page
 * data structure. This routine is optional.
 *
 * @param _pg		physical page
 */
METHOD void page_init {
	mmu_t		_mmu;
	vm_page_t	_pg;
} DEFAULT mmu_null_page_init;


/**
 * @brief Count the number of managed mappings to the given physical
 * page that are wired.
 *
 * @param _pg		physical page
 *
 * @retval int		the number of wired, managed mappings to the
 *			given physical page
 */
METHOD int page_wired_mappings {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Initialise a physical map data structure
 *
 * @param _pmap		physical map
 */
METHOD void pinit {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Initialise the physical map for process 0, the initial process
 * in the system.
 * XXX default to pinit ?
 *
 * @param _pmap		physical map
 */
METHOD void pinit0 {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Set the protection for physical pages in the given virtual address
 * range to the given value.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _prot		new page protection
 */
METHOD void protect {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	vm_prot_t	_prot;
};


/**
 * @brief Create a mapping in kernel virtual address space for the given array
 * of wired physical pages.
 *
 * @param _start	mapping virtual address start
 * @param *_pg		array of physical page pointers
 * @param _count	array elements
 */
METHOD void qenter {
	mmu_t		_mmu;
	vm_offset_t	_start;
	vm_page_t	*_pg;
	int		_count;
};


/**
 * @brief Remove the temporary mappings created by qenter.
 *
 * @param _start	mapping virtual address start
 * @param _count	number of pages in mapping
 */
METHOD void qremove {
	mmu_t		_mmu;
	vm_offset_t	_start;
	int		_count;
};
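
#
# Illustrative pairing (not part of this interface): qenter and qremove
# bracket temporary kernel mappings of wired pages. A hedged sketch of
# a caller; "kva", "ma" and "npages" are assumed names:
#
#	pmap_qenter(kva, ma, npages);	/* map npages pages at kva */
#	/* ... access the pages through the kva window ... */
#	pmap_qremove(kva, npages);	/* tear the mappings down */
#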


/**
 * @brief Release per-pmap resources, e.g. mutexes, allocated memory, etc.
 * There should be no existing mappings for the physical map at this point.
 *
 * @param _pmap		physical map
 */
METHOD void release {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Remove all mappings in the given physical map for the start/end
 * virtual address range. The range will be page-aligned.
 *
 * @param _pmap		physical map
 * @param _start	mapping virtual address start
 * @param _end		mapping virtual address end
 */
METHOD void remove {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
};


/**
 * @brief Traverse the reverse-map list off the given physical page and
 * remove all mappings. Clear the PGA_WRITEABLE attribute from the page.
 *
 * @param _pg		physical page
 */
METHOD void remove_all {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Remove all mappings in the given physical map. Similar to the
 * remove method, but used when tearing down an entire address space.
 * This method is optional, since pmap_remove will be called for each
 * valid vm_map in the address space later.
 *
 * @param _pmap		physical map
 */
METHOD void remove_pages {
	mmu_t		_mmu;
	pmap_t		_pmap;
} DEFAULT mmu_null_remove_pages;


/**
 * @brief Zero a physical page. It is not assumed that the page is mapped,
 * so a temporary (or direct) mapping may need to be used.
 *
 * @param _pg		physical page
 */
METHOD void zero_page {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Zero a portion of a physical page, starting at a given offset and
 * for a given size (multiples of 512 bytes for 4k pages).
 *
 * @param _pg		physical page
 * @param _off		byte offset from start of page
 * @param _size		size of area to zero
 */
METHOD void zero_page_area {
	mmu_t		_mmu;
	vm_page_t	_pg;
	int		_off;
	int		_size;
};


/**
 * @brief Called from the idle loop to zero pages. XXX I think locking
 * constraints might be different here compared to zero_page.
 *
 * @param _pg		physical page
 */
METHOD void zero_page_idle {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Extract mincore(2) information from a mapping.
 *
 * @param _pmap		physical map
 * @param _addr		page virtual address
 * @param _locked_pa	page physical address
 *
 * @retval 0		no result
 * @retval non-zero	mincore(2) flag values
 */
METHOD int mincore {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_addr;
	vm_paddr_t	*_locked_pa;
} DEFAULT mmu_null_mincore;


/**
 * @brief Perform any operations required to allow a physical map to be used
 * before its address space is accessed.
 *
 * @param _td		thread associated with physical map
 */
METHOD void activate {
	mmu_t		_mmu;
	struct thread	*_td;
};

/**
 * @brief Perform any operations required to deactivate a physical map,
 * for instance as it is context-switched out.
 *
 * @param _td		thread associated with physical map
 */
METHOD void deactivate {
	mmu_t		_mmu;
	struct thread	*_td;
} DEFAULT mmu_null_deactivate;

/**
 * @brief Return a hint for the best virtual address to map a tentative
 * virtual address range in a given VM object. The default is to just
 * return the given tentative start address.
 *
 * @param _obj		VM backing object
 * @param _offset	starting offset within the VM object
 * @param _addr		initial guess at virtual address
 * @param _size		size of virtual address range
 */
METHOD void align_superpage {
	mmu_t		_mmu;
	vm_object_t	_obj;
	vm_ooffset_t	_offset;
	vm_offset_t	*_addr;
	vm_size_t	_size;
} DEFAULT mmu_null_align_superpage;




/**
 * INTERNAL INTERFACES
 */

/**
 * @brief Bootstrap the VM system. At the completion of this routine, the
 * kernel will be running in its own address space with full control over
 * paging.
 *
 * @param _start	start of reserved memory (obsolete ???)
 * @param _end		end of reserved memory (obsolete ???)
 *			XXX I think the intent of these was to allow
 *			the memory used by kernel text+data+bss and
 *			loader variables/load-time kld's to be carved out
 *			of available physical mem.
 *
 */
METHOD void bootstrap {
	mmu_t		_mmu;
	vm_offset_t	_start;
	vm_offset_t	_end;
};

/**
 * @brief Set up the MMU on the current CPU. Only called by the PMAP layer
 * for alternate CPUs on SMP systems.
 *
 * @param _ap		Set to 1 if the CPU being set up is an AP
 *
 */
METHOD void cpu_bootstrap {
	mmu_t		_mmu;
	int		_ap;
};


/**
 * @brief Create a kernel mapping for a given physical address range.
 * Called by bus code on behalf of device drivers. The mapping does not
 * have to be a virtual address: it can be a direct-mapped physical address
 * if that is supported by the MMU.
 *
 * @param _pa		start physical address
 * @param _size		size in bytes of mapping
 *
 * @retval addr		address of mapping.
 */
METHOD void * mapdev {
	mmu_t		_mmu;
	vm_paddr_t	_pa;
	vm_size_t	_size;
};

/**
 * @brief Create a kernel mapping for a given physical address range.
 * Called by bus code on behalf of device drivers. The mapping does not
 * have to be a virtual address: it can be a direct-mapped physical address
 * if that is supported by the MMU.
 *
 * @param _pa		start physical address
 * @param _size		size in bytes of mapping
 * @param _attr		cache attributes
 *
 * @retval addr		address of mapping.
 */
METHOD void * mapdev_attr {
	mmu_t		_mmu;
	vm_offset_t	_pa;
	vm_size_t	_size;
	vm_memattr_t	_attr;
} DEFAULT mmu_null_mapdev_attr;

/**
 * @brief Change cache control attributes for a page. Should modify all
 * mappings for that page.
 *
 * @param _pg		page to modify
 * @param _ma		new cache control attributes
 */
METHOD void page_set_memattr {
	mmu_t		_mmu;
	vm_page_t	_pg;
	vm_memattr_t	_ma;
} DEFAULT mmu_null_page_set_memattr;

/**
 * @brief Remove the mapping created by mapdev. Called when a driver
 * is unloaded.
 *
 * @param _va		Mapping address returned from mapdev
 * @param _size		size in bytes of mapping
 */
METHOD void unmapdev {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_size_t	_size;
};
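
#
# Illustrative pairing (not part of this interface): bus code typically
# maps a device's register window at attach time and unmaps it at
# detach. A hedged sketch; "sc", "pa" and "size" are assumed names:
#
#	sc->sc_regs = pmap_mapdev(pa, size);		/* at attach */
#	/* ... device I/O through sc->sc_regs ... */
#	pmap_unmapdev((vm_offset_t)sc->sc_regs, size);	/* at detach */
#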


/**
 * @brief Reverse-map a kernel virtual address
 *
 * @param _va		kernel virtual address to reverse-map
 *
 * @retval pa		physical address corresponding to mapping
 */
METHOD vm_paddr_t kextract {
	mmu_t		_mmu;
	vm_offset_t	_va;
};


/**
 * @brief Map a wired page into kernel virtual address space
 *
 * @param _va		mapping virtual address
 * @param _pa		mapping physical address
 */
METHOD void kenter {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_paddr_t	_pa;
};

/**
 * @brief Map a wired page into kernel virtual address space
 *
 * @param _va		mapping virtual address
 * @param _pa		mapping physical address
 * @param _ma		mapping cache control attributes
 */
METHOD void kenter_attr {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_offset_t	_pa;
	vm_memattr_t	_ma;
} DEFAULT mmu_null_kenter_attr;

/**
 * @brief Determine if the given physical address range has been direct-mapped.
 *
 * @param _pa		physical address start
 * @param _size		physical address range size
 *
 * @retval bool		TRUE if the range is direct-mapped.
 */
METHOD boolean_t dev_direct_mapped {
	mmu_t		_mmu;
	vm_paddr_t	_pa;
	vm_size_t	_size;
};


/**
 * @brief Enforce instruction cache coherency. Typically called after a
 * region of memory has been modified and before execution of or within
 * that region is attempted. Setting breakpoints in a process through
 * ptrace(2) is one example of when the instruction cache needs to be
 * made coherent.
 *
 * @param _pm		the physical map of the virtual address
 * @param _va		the virtual address of the modified region
 * @param _sz		the size of the modified region
 */
METHOD void sync_icache {
	mmu_t		_mmu;
	pmap_t		_pm;
	vm_offset_t	_va;
	vm_size_t	_sz;
};
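
#
# Illustrative only: after patching instructions in a target process
# (e.g. planting a ptrace(2) breakpoint), the caller makes the icache
# coherent for the patched range. A hedged sketch; "p" and "va" are
# assumed names:
#
#	/* after writing the breakpoint word at va: */
#	pmap_sync_icache(vmspace_pmap(p->p_vmspace), va, sizeof(uint32_t));
#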


/**
 * @brief Create temporary memory mapping for use by dumpsys().
 *
 * @param _md		The memory chunk in which the mapping lies.
 * @param _ofs		The offset within the chunk of the mapping.
 * @param _sz		The requested size of the mapping.
 *
 * @retval vm_offset_t	The virtual address of the mapping.
 *
 * The sz argument is modified to reflect the actual size of the
 * mapping.
 */
METHOD vm_offset_t dumpsys_map {
	mmu_t		_mmu;
	struct pmap_md	*_md;
	vm_size_t	_ofs;
	vm_size_t	*_sz;
};


/**
 * @brief Remove temporary dumpsys() mapping.
 *
 * @param _md		The memory chunk in which the mapping lies.
 * @param _ofs		The offset within the chunk of the mapping.
 * @param _va		The virtual address of the mapping.
 */
METHOD void dumpsys_unmap {
	mmu_t		_mmu;
	struct pmap_md	*_md;
	vm_size_t	_ofs;
	vm_offset_t	_va;
};


/**
 * @brief Scan/iterate memory chunks.
 *
 * @param _prev		The previously returned chunk or NULL.
 *
 * @retval		The next (or first when _prev is NULL) chunk.
 */
METHOD struct pmap_md * scan_md {
	mmu_t		_mmu;
	struct pmap_md	*_prev;
} DEFAULT mmu_null_scan_md;
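
#
# Illustrative only: dump code can walk physical memory chunks with
# scan_md and bracket each window with dumpsys_map/dumpsys_unmap. A
# hedged sketch of the iteration pattern; "md", "ofs", "sz" and
# "dump_window_size" are assumed names:
#
#	for (md = pmap_scan_md(NULL); md != NULL; md = pmap_scan_md(md)) {
#		sz = dump_window_size;
#		va = pmap_dumpsys_map(md, ofs, &sz);
#		/* ... write sz bytes at va to the dump device ... */
#		pmap_dumpsys_unmap(md, ofs, va);
#	}
#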