#-
# Copyright (c) 2005 Peter Grehan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD: head/sys/powerpc/powerpc/mmu_if.m 268591 2014-07-13 16:27:57Z alc $
#

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/mmuvar.h>

/**
 * @defgroup MMU mmu - KObj methods for PowerPC MMU implementations
 * @brief A set of methods required by all MMU implementations. These
 * are essentially direct call-throughs from the pmap machine-dependent
 * code.
 * Thanks to Bruce M Simpson's pmap man pages for the routine descriptions.
 * @{
 */

INTERFACE mmu;
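
#
# The pmap machine-dependent code (pmap_dispatch.c) forwards each pmap_*()
# entry point to the active MMU kobj through the MMU_*() macros that are
# generated from this file.  A minimal sketch of that glue, assuming the
# mmu_obj handle kept by pmap_dispatch.c:
#
#	void
#	pmap_clear_modify(vm_page_t m)
#	{
#		MMU_CLEAR_MODIFY(mmu_obj, m);
#	}
#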

#
# Default implementations of some methods
#
CODE {
	static void mmu_null_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
	    vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
	{
		return;
	}

	static void mmu_null_growkernel(mmu_t mmu, vm_offset_t addr)
	{
		return;
	}

	static void mmu_null_init(mmu_t mmu)
	{
		return;
	}

	static boolean_t mmu_null_is_prefaultable(mmu_t mmu, pmap_t pmap,
	    vm_offset_t va)
	{
		return (FALSE);
	}

	static void mmu_null_object_init_pt(mmu_t mmu, pmap_t pmap,
	    vm_offset_t addr, vm_object_t object, vm_pindex_t index,
	    vm_size_t size)
	{
		return;
	}

	static void mmu_null_page_init(mmu_t mmu, vm_page_t m)
	{
		return;
	}

	static void mmu_null_remove_pages(mmu_t mmu, pmap_t pmap)
	{
		return;
	}

	static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
	    vm_paddr_t *locked_pa)
	{
		return (0);
	}

	static void mmu_null_deactivate(mmu_t mmu, struct thread *td)
	{
		return;
	}

	static void mmu_null_align_superpage(mmu_t mmu, vm_object_t object,
	    vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size)
	{
		return;
	}

	static struct pmap_md *mmu_null_scan_md(mmu_t mmu, struct pmap_md *p)
	{
		return (NULL);
	}

	static void *mmu_null_mapdev_attr(mmu_t mmu, vm_offset_t pa,
	    vm_size_t size, vm_memattr_t ma)
	{
		return (MMU_MAPDEV(mmu, pa, size));
	}

	static void mmu_null_kenter_attr(mmu_t mmu, vm_offset_t va,
	    vm_offset_t pa, vm_memattr_t ma)
	{
		MMU_KENTER(mmu, va, pa);
	}

	static void mmu_null_page_set_memattr(mmu_t mmu, vm_page_t m,
	    vm_memattr_t ma)
	{
		return;
	}
};
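
#
# An implementation provides these methods through a kobj method table and
# registers itself with MMU_DEF() from <machine/mmuvar.h>.  An illustrative
# sketch (names patterned after the 32-bit "oea" implementation; the exact
# method list is the implementor's choice):
#
#	static mmu_method_t moea_methods[] = {
#		MMUMETHOD(mmu_enter,	moea_enter),
#		MMUMETHOD(mmu_remove,	moea_remove),
#		MMUMETHOD(mmu_extract,	moea_extract),
#		/* ... one MMUMETHOD() entry per implemented method ... */
#		{ 0, 0 }
#	};
#
#	MMU_DEF(oea, MMU_TYPE_OEA, moea_methods, 0);
#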


/**
 * @brief Apply the given advice to the specified range of addresses within
 * the given pmap.  Depending on the advice, clear the referenced and/or
 * modified flags in each mapping and set the mapped page's dirty field.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _advice	advice to apply
 */
METHOD void advise {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	int		_advice;
};


/**
 * @brief Change the wiring attribute for the page in the given physical
 * map and virtual address.
 *
 * @param _pmap		physical map of page
 * @param _va		page virtual address
 * @param _wired	TRUE to increment wired count, FALSE to decrement
 */
METHOD void change_wiring {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	boolean_t	_wired;
};


/**
 * @brief Clear the 'modified' bit on the given physical page
 *
 * @param _pg		physical page
 */
METHOD void clear_modify {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Clear the write and modified bits in each of the given
 * physical page's mappings
 *
 * @param _pg		physical page
 */
METHOD void remove_write {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Copy the address range given by the source physical map, virtual
 * address and length to the destination physical map and virtual address.
 * This routine is optional; the null implementation is the default.
 *
 * @param _dst_pmap	destination physical map
 * @param _src_pmap	source physical map
 * @param _dst_addr	destination virtual address
 * @param _len		size of range
 * @param _src_addr	source virtual address
 */
METHOD void copy {
	mmu_t		_mmu;
	pmap_t		_dst_pmap;
	pmap_t		_src_pmap;
	vm_offset_t	_dst_addr;
	vm_size_t	_len;
	vm_offset_t	_src_addr;
} DEFAULT mmu_null_copy;


/**
 * @brief Copy the source physical page to the destination physical page
 *
 * @param _src		source physical page
 * @param _dst		destination physical page
 */
METHOD void copy_page {
	mmu_t		_mmu;
	vm_page_t	_src;
	vm_page_t	_dst;
};

/**
 * @brief Copy _xfersize bytes from offset _a_offset in the page array _ma
 * to offset _b_offset in the page array _mb.  The offsets may cross page
 * boundaries within each array.
 *
 * @param *_ma		source page array
 * @param _a_offset	byte offset into the source array
 * @param *_mb		destination page array
 * @param _b_offset	byte offset into the destination array
 * @param _xfersize	number of bytes to copy
 */
METHOD void copy_pages {
	mmu_t		_mmu;
	vm_page_t	*_ma;
	vm_offset_t	_a_offset;
	vm_page_t	*_mb;
	vm_offset_t	_b_offset;
	int		_xfersize;
};

/**
 * @brief Create a mapping between a virtual/physical address pair in the
 * passed physical map with the specified protection and wiring
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 * @param _p		mapping physical page
 * @param _prot		mapping page protection
 * @param _wired	TRUE if page will be wired
 */
METHOD void enter {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_page_t	_p;
	vm_prot_t	_prot;
	boolean_t	_wired;
};


/**
 * @brief Maps a sequence of resident pages belonging to the same object.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _m_start	physical page mapped at start
 * @param _prot		mapping page protection
 */
METHOD void enter_object {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	vm_page_t	_m_start;
	vm_prot_t	_prot;
};


/**
 * @brief A faster entry point for page mapping where it is possible
 * to short-circuit some of the tests in pmap_enter.
 *
 * @param _pmap		physical map (and also currently active pmap)
 * @param _va		mapping virtual address
 * @param _pg		mapping physical page
 * @param _prot		new page protection - used to determine if the
 *			page is executable
 */
METHOD void enter_quick {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_page_t	_pg;
	vm_prot_t	_prot;
};


/**
 * @brief Reverse map the given virtual address, returning the physical
 * page associated with the address if a mapping exists.
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 *
 * @retval 0		No mapping found
 * @retval addr		The mapping physical address
 */
METHOD vm_paddr_t extract {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
};


/**
 * @brief Reverse map the given virtual address, returning the
 * physical page if found. The page must be held (by calling
 * vm_page_hold) if the page protection matches the given protection.
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 * @param _prot		protection used to determine if the physical page
 *			should be held
 *
 * @retval NULL		No mapping found
 * @retval page		Pointer to physical page. Held if protections match
 */
METHOD vm_page_t extract_and_hold {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_prot_t	_prot;
};
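
/*
 * Illustrative use of extract_and_hold through its pmap_*() wrapper
 * (a sketch, not taken from this file): take a hold on the page backing
 * a user address before touching it, then release the hold:
 *
 *	vm_page_t m;
 *
 *	m = pmap_extract_and_hold(pmap, va, VM_PROT_WRITE);
 *	if (m != NULL) {
 *		... operate on the held page ...
 *		vm_page_unhold(m);
 *	}
 */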


/**
 * @brief Increase kernel virtual address space to the given virtual address.
 * Not strictly required on PowerPC, so this routine is optional unless the
 * MMU implementation can make use of it.
 *
 * @param _va		new upper limit for kernel virtual address space
 */
METHOD void growkernel {
	mmu_t		_mmu;
	vm_offset_t	_va;
} DEFAULT mmu_null_growkernel;


/**
 * @brief Called from vm_mem_init. Zone allocation is available at this
 * stage, so it is a convenient time to create zones. This routine is for
 * MMU-implementation convenience and is optional.
 */
METHOD void init {
	mmu_t		_mmu;
} DEFAULT mmu_null_init;


/**
 * @brief Return whether the page has been marked as modified by the MMU
 * hardware.
 *
 * @param _pg		physical page to test
 *
 * @retval boolean	TRUE if page has been modified
 */
METHOD boolean_t is_modified {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Return whether the specified virtual address is a candidate to be
 * prefaulted in. This routine is optional.
 *
 * @param _pmap		physical map
 * @param _va		virtual address to test
 *
 * @retval boolean	TRUE if the address is a candidate.
 */
METHOD boolean_t is_prefaultable {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
} DEFAULT mmu_null_is_prefaultable;


/**
 * @brief Return whether or not the specified physical page was referenced
 * in any physical maps.
 *
 * @param _pg		physical page
 *
 * @retval boolean	TRUE if page has been referenced
 */
METHOD boolean_t is_referenced {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Return a count of referenced bits for a page, clearing those bits.
 * Not all referenced bits need to be cleared, but it is necessary that 0
 * only be returned when there are none set.
 *
 * @param _pg		physical page
 *
 * @retval int		count of referenced bits
 */
METHOD int ts_referenced {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Map the requested physical address range into kernel virtual
 * address space. The value in _virt is taken as a hint. The virtual
 * address of the range is returned, or NULL if the mapping could not
 * be created. The range can be direct-mapped if that is supported.
 *
 * @param *_virt	Hint for start virtual address, and also return
 *			value
 * @param _start	physical address range start
 * @param _end		physical address range end
 * @param _prot		protection of range (currently ignored)
 *
 * @retval NULL		could not map the area
 * @retval addr, *_virt	mapping start virtual address
 */
METHOD vm_offset_t map {
	mmu_t		_mmu;
	vm_offset_t	*_virt;
	vm_paddr_t	_start;
	vm_paddr_t	_end;
	int		_prot;
};


/**
 * @brief Used to create a contiguous set of read-only mappings for a
 * given object to try to eliminate a cascade of on-demand faults as
 * the object is accessed sequentially. This routine is optional.
 *
 * @param _pmap		physical map
 * @param _addr		mapping start virtual address
 * @param _object	device-backed VM object to be mapped
 * @param _pindex	page-index within object of mapping start
 * @param _size		size in bytes of mapping
 */
METHOD void object_init_pt {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_addr;
	vm_object_t	_object;
	vm_pindex_t	_pindex;
	vm_size_t	_size;
} DEFAULT mmu_null_object_init_pt;


/**
 * @brief Used to determine if the specified page has a mapping for the
 * given physical map, by scanning the list of reverse-mappings from the
 * page. The list is scanned to a maximum of 16 entries.
 *
 * @param _pmap		physical map
 * @param _pg		physical page
 *
 * @retval bool		TRUE if the physical map was found in the first 16
 *			reverse-map list entries off the physical page.
 */
METHOD boolean_t page_exists_quick {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_page_t	_pg;
};


/**
 * @brief Initialise the machine-dependent section of the physical page
 * data structure. This routine is optional.
 *
 * @param _pg		physical page
 */
METHOD void page_init {
	mmu_t		_mmu;
	vm_page_t	_pg;
} DEFAULT mmu_null_page_init;


/**
 * @brief Count the number of managed mappings to the given physical
 * page that are wired.
 *
 * @param _pg		physical page
 *
 * @retval int		the number of wired, managed mappings to the
 *			given physical page
 */
METHOD int page_wired_mappings {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Initialise a physical map data structure
 *
 * @param _pmap		physical map
 */
METHOD void pinit {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Initialise the physical map for process 0, the initial process
 * in the system.
 * XXX default to pinit ?
 *
 * @param _pmap		physical map
 */
METHOD void pinit0 {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Set the protection for physical pages in the given virtual address
 * range to the given value.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _prot		new page protection
 */
METHOD void protect {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	vm_prot_t	_prot;
};


/**
 * @brief Create a mapping in kernel virtual address space for the given array
 * of wired physical pages.
 *
 * @param _start	mapping virtual address start
 * @param *_pg		array of physical page pointers
 * @param _count	number of pages in the array
 */
METHOD void qenter {
	mmu_t		_mmu;
	vm_offset_t	_start;
	vm_page_t	*_pg;
	int		_count;
};


/**
 * @brief Remove the temporary mappings created by qenter.
 *
 * @param _start	mapping virtual address start
 * @param _count	number of pages in mapping
 */
METHOD void qremove {
	mmu_t		_mmu;
	vm_offset_t	_start;
	int		_count;
};
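
/*
 * Illustrative pairing of the pmap_*() wrappers for qenter/qremove
 * (a sketch, not taken from this file): map a page array into a reserved
 * KVA window, use it, then tear the temporary mappings down:
 *
 *	pmap_qenter(kva, pages, npages);
 *	... access the pages through kva ...
 *	pmap_qremove(kva, npages);
 */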


/**
 * @brief Release per-pmap resources, e.g. mutexes, allocated memory, etc.
 * There should be no existing mappings for the physical map at this point.
 *
 * @param _pmap		physical map
 */
METHOD void release {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Remove all mappings in the given physical map for the start/end
 * virtual address range. The range will be page-aligned.
 *
 * @param _pmap		physical map
 * @param _start	mapping virtual address start
 * @param _end		mapping virtual address end
 */
METHOD void remove {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
};


/**
 * @brief Traverse the reverse-map list off the given physical page and
 * remove all mappings. Clear the PGA_WRITEABLE attribute from the page.
 *
 * @param _pg		physical page
 */
METHOD void remove_all {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Remove all mappings in the given physical map. Similar to the
 * remove method, but used when tearing down an entire address space.
 * This method is optional, since pmap_remove will be called for each
 * valid vm_map in the address space later.
 *
 * @param _pmap		physical map
 */
METHOD void remove_pages {
	mmu_t		_mmu;
	pmap_t		_pmap;
} DEFAULT mmu_null_remove_pages;


/**
 * @brief Clear the wired attribute from the mappings for the specified range
 * of addresses in the given pmap.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 */
METHOD void unwire {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
};


/**
 * @brief Zero a physical page. It is not assumed that the page is mapped,
 * so a temporary (or direct) mapping may need to be used.
 *
 * @param _pg		physical page
 */
METHOD void zero_page {
	mmu_t		_mmu;
	vm_page_t	_pg;
};
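
/*
 * Illustrative implementation strategy (a sketch with a hypothetical
 * scratch address; not the code of any particular implementation): an
 * MMU without a direct map can zero through a temporary kernel mapping:
 *
 *	va = <reserved scratch kernel VA>;
 *	pmap_kenter(va, VM_PAGE_TO_PHYS(m));
 *	bzero((void *)va, PAGE_SIZE);
 *	<unmap the scratch VA again>;
 */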


/**
 * @brief Zero a portion of a physical page, starting at a given offset and
 * for a given size (multiples of 512 bytes for 4k pages).
 *
 * @param _pg		physical page
 * @param _off		byte offset from start of page
 * @param _size		size of area to zero
 */
METHOD void zero_page_area {
	mmu_t		_mmu;
	vm_page_t	_pg;
	int		_off;
	int		_size;
};


/**
 * @brief Called from the idle loop to zero pages. XXX I think locking
 * constraints might be different here compared to zero_page.
 *
 * @param _pg		physical page
 */
METHOD void zero_page_idle {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Extract mincore(2) information from a mapping.
 *
 * @param _pmap		physical map
 * @param _addr		page virtual address
 * @param _locked_pa	page physical address
 *
 * @retval 0		no result
 * @retval non-zero	mincore(2) flag values
 */
METHOD int mincore {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_addr;
	vm_paddr_t	*_locked_pa;
} DEFAULT mmu_null_mincore;


/**
 * @brief Perform any operations required to allow a physical map to be used
 * before its address space is accessed.
 *
 * @param _td		thread associated with physical map
 */
METHOD void activate {
	mmu_t		_mmu;
	struct thread	*_td;
};

/**
 * @brief Perform any operations required to deactivate a physical map,
 * for instance as it is context-switched out.
 *
 * @param _td		thread associated with physical map
 */
METHOD void deactivate {
	mmu_t		_mmu;
	struct thread	*_td;
} DEFAULT mmu_null_deactivate;

/**
 * @brief Return a hint for the best virtual address to map a tentative
 * virtual address range in a given VM object. The default is to just
 * return the given tentative start address.
 *
 * @param _obj		VM backing object
 * @param _offset	starting offset within the VM object
 * @param _addr		initial guess at virtual address
 * @param _size		size of virtual address range
 */
METHOD void align_superpage {
	mmu_t		_mmu;
	vm_object_t	_obj;
	vm_ooffset_t	_offset;
	vm_offset_t	*_addr;
	vm_size_t	_size;
} DEFAULT mmu_null_align_superpage;


/**
 * INTERNAL INTERFACES
 */

/**
 * @brief Bootstrap the VM system. At the completion of this routine, the
 * kernel will be running in its own address space with full control over
 * paging.
 *
 * @param _start	start of reserved memory (obsolete ???)
 * @param _end		end of reserved memory (obsolete ???)
 *			XXX I think the intent of these was to allow
 *			the memory used by kernel text+data+bss and
 *			loader variables/load-time kld's to be carved out
 *			of available physical mem.
 *
 */
METHOD void bootstrap {
	mmu_t		_mmu;
	vm_offset_t	_start;
	vm_offset_t	_end;
};

/**
 * @brief Set up the MMU on the current CPU. Only called by the PMAP layer
 * for alternate CPUs on SMP systems.
 *
 * @param _ap		Set to 1 if the CPU being set up is an AP
 *
 */
METHOD void cpu_bootstrap {
	mmu_t		_mmu;
	int		_ap;
};


/**
 * @brief Create a kernel mapping for a given physical address range.
 * Called by bus code on behalf of device drivers. The mapping does not
 * have to be a virtual address: it can be a direct-mapped physical address
 * if that is supported by the MMU.
 *
 * @param _pa		start physical address
 * @param _size		size in bytes of mapping
 *
 * @retval addr		address of mapping.
 */
METHOD void * mapdev {
	mmu_t		_mmu;
	vm_paddr_t	_pa;
	vm_size_t	_size;
};

/**
 * @brief Create a kernel mapping for a given physical address range.
 * Called by bus code on behalf of device drivers. The mapping does not
 * have to be a virtual address: it can be a direct-mapped physical address
 * if that is supported by the MMU.
 *
 * @param _pa		start physical address
 * @param _size		size in bytes of mapping
 * @param _attr		cache attributes
 *
 * @retval addr		address of mapping.
 */
METHOD void * mapdev_attr {
	mmu_t		_mmu;
	vm_offset_t	_pa;
	vm_size_t	_size;
	vm_memattr_t	_attr;
} DEFAULT mmu_null_mapdev_attr;

/**
 * @brief Change cache control attributes for a page. Should modify all
 * mappings for that page.
 *
 * @param _pg		page to modify
 * @param _ma		new cache control attributes
 */
METHOD void page_set_memattr {
	mmu_t		_mmu;
	vm_page_t	_pg;
	vm_memattr_t	_ma;
} DEFAULT mmu_null_page_set_memattr;

/**
 * @brief Remove the mapping created by mapdev. Called when a driver
 * is unloaded.
 *
 * @param _va		Mapping address returned from mapdev
 * @param _size		size in bytes of mapping
 */
METHOD void unmapdev {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_size_t	_size;
};
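
/*
 * Illustrative driver-side pairing through the pmap_*() wrappers (a
 * sketch with hypothetical pa/size values): map a device register
 * window, use it, and unmap it again at driver unload:
 *
 *	void *va;
 *
 *	va = pmap_mapdev(pa, size);
 *	... device register accesses through va ...
 *	pmap_unmapdev((vm_offset_t)va, size);
 */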


/**
 * @brief Reverse-map a kernel virtual address
 *
 * @param _va		kernel virtual address to reverse-map
 *
 * @retval pa		physical address corresponding to mapping
 */
METHOD vm_paddr_t kextract {
	mmu_t		_mmu;
	vm_offset_t	_va;
};


/**
 * @brief Map a wired page into kernel virtual address space
 *
 * @param _va		mapping virtual address
 * @param _pa		mapping physical address
 */
METHOD void kenter {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_paddr_t	_pa;
};

/**
 * @brief Map a wired page into kernel virtual address space
 *
 * @param _va		mapping virtual address
 * @param _pa		mapping physical address
 * @param _ma		mapping cache control attributes
 */
METHOD void kenter_attr {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_offset_t	_pa;
	vm_memattr_t	_ma;
} DEFAULT mmu_null_kenter_attr;
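
/*
 * Illustrative round-trip through the pmap_*() wrappers (a sketch with
 * hypothetical va/pa values): a page wired in with kenter can be
 * reverse-mapped with kextract:
 *
 *	pmap_kenter(va, pa);
 *	KASSERT(pmap_kextract(va) == pa, ("kextract: lost mapping"));
 */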

/**
 * @brief Determine if the given physical address range has been direct-mapped.
 *
 * @param _pa		physical address start
 * @param _size		physical address range size
 *
 * @retval bool		TRUE if the range is direct-mapped.
 */
METHOD boolean_t dev_direct_mapped {
	mmu_t		_mmu;
	vm_paddr_t	_pa;
	vm_size_t	_size;
};


/**
 * @brief Enforce instruction cache coherency. Typically called after a
 * region of memory has been modified and before execution of or within
 * that region is attempted. Setting breakpoints in a process through
 * ptrace(2) is one example of when the instruction cache needs to be
 * made coherent.
 *
 * @param _pm		the physical map of the virtual address
 * @param _va		the virtual address of the modified region
 * @param _sz		the size of the modified region
 */
METHOD void sync_icache {
	mmu_t		_mmu;
	pmap_t		_pm;
	vm_offset_t	_va;
	vm_size_t	_sz;
};
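
/*
 * Illustrative caller (a sketch, not taken from this file): after a
 * debugger writes a breakpoint instruction into another process, the
 * ptrace(2) path makes the instruction cache coherent:
 *
 *	pmap_sync_icache(vmspace_pmap(p->p_vmspace), va, sizeof(instr));
 */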


/**
 * @brief Create temporary memory mapping for use by dumpsys().
 *
 * @param _md		The memory chunk in which the mapping lies.
 * @param _ofs		The offset within the chunk of the mapping.
 * @param _sz		The requested size of the mapping.
 *
 * @retval vm_offset_t	The virtual address of the mapping.
 *
 * The sz argument is modified to reflect the actual size of the
 * mapping.
 */
METHOD vm_offset_t dumpsys_map {
	mmu_t		_mmu;
	struct pmap_md	*_md;
	vm_size_t	_ofs;
	vm_size_t	*_sz;
};


/**
 * @brief Remove temporary dumpsys() mapping.
 *
 * @param _md		The memory chunk in which the mapping lies.
 * @param _ofs		The offset within the chunk of the mapping.
 * @param _va		The virtual address of the mapping.
 */
METHOD void dumpsys_unmap {
	mmu_t		_mmu;
	struct pmap_md	*_md;
	vm_size_t	_ofs;
	vm_offset_t	_va;
};


/**
 * @brief Scan/iterate memory chunks.
 *
 * @param _prev		The previously returned chunk or NULL.
 *
 * @retval		The next (or first when _prev is NULL) chunk.
 */
METHOD struct pmap_md * scan_md {
	mmu_t		_mmu;
	struct pmap_md	*_prev;
} DEFAULT mmu_null_scan_md;
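
/*
 * Illustrative iteration (a sketch; it assumes struct pmap_md exposes its
 * chunk size as md_size, which is an assumption here): a consumer such as
 * dumpsys() can walk all chunks and map each piece in turn:
 *
 *	struct pmap_md *md;
 *	vm_offset_t va;
 *	vm_size_t ofs, sz;
 *
 *	for (md = pmap_scan_md(NULL); md != NULL; md = pmap_scan_md(md)) {
 *		for (ofs = 0; ofs < md->md_size; ofs += sz) {
 *			sz = md->md_size - ofs;
 *			va = pmap_dumpsys_map(md, ofs, &sz);
 *			... write sz bytes starting at va ...
 *			pmap_dumpsys_unmap(md, ofs, va);
 *		}
 *	}
 */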
965