#-
# Copyright (c) 2005 Peter Grehan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD: head/sys/powerpc/powerpc/mmu_if.m 296142 2016-02-27 20:39:36Z jhibbits $
#

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/mmuvar.h>

/**
 * @defgroup MMU mmu - KObj methods for PowerPC MMU implementations
 * @brief A set of methods required by all MMU implementations. These
 * are essentially direct call-throughs from the machine-dependent
 * pmap code.
 * Thanks to Bruce M Simpson's pmap man pages for routine descriptions.
 * @{
 */

INTERFACE mmu;
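
#
# Illustrative sketch only: the PowerPC pmap dispatch code forwards each
# pmap(9) call to these methods through macros that KObj generates from this
# file (mmu_if.h). The dispatch object name "mmu_obj" below is an assumption
# about the companion dispatch code; a typical call-through looks like:
#
#	vm_paddr_t
#	pmap_extract(pmap_t pmap, vm_offset_t va)
#	{
#		return (MMU_EXTRACT(mmu_obj, pmap, va));
#	}
#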

#
# Default implementations of some methods
#
CODE {
	static void mmu_null_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
	    vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
	{
		return;
	}

	static void mmu_null_growkernel(mmu_t mmu, vm_offset_t addr)
	{
		return;
	}

	static void mmu_null_init(mmu_t mmu)
	{
		return;
	}

	static boolean_t mmu_null_is_prefaultable(mmu_t mmu, pmap_t pmap,
	    vm_offset_t va)
	{
		return (FALSE);
	}

	static void mmu_null_object_init_pt(mmu_t mmu, pmap_t pmap,
	    vm_offset_t addr, vm_object_t object, vm_pindex_t index,
	    vm_size_t size)
	{
		return;
	}

	static void mmu_null_page_init(mmu_t mmu, vm_page_t m)
	{
		return;
	}

	static void mmu_null_remove_pages(mmu_t mmu, pmap_t pmap)
	{
		return;
	}

	static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
	    vm_paddr_t *locked_pa)
	{
		return (0);
	}

	static void mmu_null_deactivate(struct thread *td)
	{
		return;
	}

	static void mmu_null_align_superpage(mmu_t mmu, vm_object_t object,
	    vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size)
	{
		return;
	}

	static void *mmu_null_mapdev_attr(mmu_t mmu, vm_paddr_t pa,
	    vm_size_t size, vm_memattr_t ma)
	{
		return (MMU_MAPDEV(mmu, pa, size));
	}

	static void mmu_null_kenter_attr(mmu_t mmu, vm_offset_t va,
	    vm_paddr_t pa, vm_memattr_t ma)
	{
		MMU_KENTER(mmu, va, pa);
	}

	static void mmu_null_page_set_memattr(mmu_t mmu, vm_page_t m,
	    vm_memattr_t ma)
	{
		return;
	}

	static int mmu_null_change_attr(mmu_t mmu, vm_offset_t va,
	    vm_size_t sz, vm_memattr_t mode)
	{
		return (0);
	}
};
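
#
# Illustrative sketch only: an MMU implementation supplies these methods in a
# KObj method table and registers itself with MMU_DEF() from
# <machine/mmuvar.h>. The implementation and identifier names below are
# hypothetical, not part of this interface:
#
#	static mmu_method_t xmmu_methods[] = {
#		MMUMETHOD(mmu_enter,	xmmu_enter),
#		MMUMETHOD(mmu_extract,	xmmu_extract),
#		/* ... remaining methods ... */
#		{ 0, 0 }
#	};
#
#	MMU_DEF(xmmu, "xmmu", xmmu_methods, 0);
#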


/**
 * @brief Apply the given advice to the specified range of addresses within
 * the given pmap.  Depending on the advice, clear the referenced and/or
 * modified flags in each mapping and set the mapped page's dirty field.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _advice	advice to apply
 */
METHOD void advise {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	int		_advice;
};


/**
 * @brief Clear the 'modified' bit on the given physical page
 *
 * @param _pg		physical page
 */
METHOD void clear_modify {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Clear the write and modified bits in each of the given
 * physical page's mappings
 *
 * @param _pg		physical page
 */
METHOD void remove_write {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Copy the address range given by the source physical map, virtual
 * address and length to the destination physical map and virtual address.
 * This routine is optional; the default implementation is a no-op.
 *
 * @param _dst_pmap	destination physical map
 * @param _src_pmap	source physical map
 * @param _dst_addr	destination virtual address
 * @param _len		size of range
 * @param _src_addr	source virtual address
 */
METHOD void copy {
	mmu_t		_mmu;
	pmap_t		_dst_pmap;
	pmap_t		_src_pmap;
	vm_offset_t	_dst_addr;
	vm_size_t	_len;
	vm_offset_t	_src_addr;
} DEFAULT mmu_null_copy;


/**
 * @brief Copy the source physical page to the destination physical page
 *
 * @param _src		source physical page
 * @param _dst		destination physical page
 */
METHOD void copy_page {
	mmu_t		_mmu;
	vm_page_t	_src;
	vm_page_t	_dst;
};

/**
 * @brief Copy _xfersize bytes from the pages in the _ma array, starting
 * at byte offset _a_offset, to the pages in the _mb array at byte offset
 * _b_offset. The offsets may span page boundaries within the arrays.
 *
 * @param *_ma		source page array
 * @param _a_offset	byte offset into the source pages
 * @param *_mb		destination page array
 * @param _b_offset	byte offset into the destination pages
 * @param _xfersize	number of bytes to copy
 */
METHOD void copy_pages {
	mmu_t		_mmu;
	vm_page_t	*_ma;
	vm_offset_t	_a_offset;
	vm_page_t	*_mb;
	vm_offset_t	_b_offset;
	int		_xfersize;
};

/**
 * @brief Create a mapping between a virtual/physical address pair in the
 * passed physical map with the specified protection and wiring
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 * @param _p		mapping physical page
 * @param _prot		mapping page protection
 * @param _flags	pmap_enter flags
 * @param _psind	superpage size index
 *
 * @retval KERN_SUCCESS	the mapping was created
 * @retval KERN_RESOURCE_SHORTAGE	resources were unavailable and
 *			PMAP_ENTER_NOSLEEP was specified
 */
METHOD int enter {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_page_t	_p;
	vm_prot_t	_prot;
	u_int		_flags;
	int8_t		_psind;
};
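
#
# Illustrative sketch only: a caller-side view of this method using the
# macro generated in mmu_if.h; "mmu_obj" is an assumption about the
# dispatch code:
#
#	int error;
#
#	error = MMU_ENTER(mmu_obj, pmap, va, m,
#	    VM_PROT_READ | VM_PROT_WRITE, PMAP_ENTER_NOSLEEP, 0);
#	if (error == KERN_RESOURCE_SHORTAGE) {
#		/* No PTE slot or PV entry was available; retry or fail. */
#	}
#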


/**
 * @brief Maps a sequence of resident pages belonging to the same object.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _m_start	physical page mapped at start
 * @param _prot		mapping page protection
 */
METHOD void enter_object {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	vm_page_t	_m_start;
	vm_prot_t	_prot;
};


/**
 * @brief A faster entry point for page mapping where it is possible
 * to short-circuit some of the tests in pmap_enter.
 *
 * @param _pmap		physical map (and also currently active pmap)
 * @param _va		mapping virtual address
 * @param _pg		mapping physical page
 * @param _prot		new page protection - used to determine whether
 *			the page is executable
 */
METHOD void enter_quick {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_page_t	_pg;
	vm_prot_t	_prot;
};


/**
 * @brief Reverse map the given virtual address, returning the physical
 * page associated with the address if a mapping exists.
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 *
 * @retval 0		No mapping found
 * @retval addr		The mapping physical address
 */
METHOD vm_paddr_t extract {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
};


/**
 * @brief Reverse map the given virtual address, returning the
 * physical page if found. The page must be held (by calling
 * vm_page_hold) if the page protection matches the given protection
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 * @param _prot		protection used to determine if physical page
 *			should be locked
 *
 * @retval NULL		No mapping found
 * @retval page		Pointer to physical page. Held if protections match
 */
METHOD vm_page_t extract_and_hold {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_prot_t	_prot;
};
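
#
# Illustrative sketch only: a typical consumer holds the page across a short
# window of use and then drops the hold; releasing via vm_page_unhold_pages()
# reflects the API of this era and is an assumption here:
#
#	vm_page_t m;
#
#	m = pmap_extract_and_hold(pmap, va, VM_PROT_READ);
#	if (m != NULL) {
#		/* ... access the page contents ... */
#		vm_page_unhold_pages(&m, 1);
#	}
#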


/**
 * @brief Increase kernel virtual address space to the given virtual address.
 * Not strictly required on PowerPC, so this is optional unless the MMU
 * implementation can make use of it.
 *
 * @param _va		new upper limit for kernel virtual address space
 */
METHOD void growkernel {
	mmu_t		_mmu;
	vm_offset_t	_va;
} DEFAULT mmu_null_growkernel;


/**
 * @brief Called from vm_mem_init. Zone allocation is available at this
 * stage, so it is a convenient time to create zones. This routine is for
 * MMU-implementation convenience and is optional.
 */
METHOD void init {
	mmu_t		_mmu;
} DEFAULT mmu_null_init;


/**
 * @brief Return whether the page has been marked by the MMU hardware
 * as modified
 *
 * @param _pg		physical page to test
 *
 * @retval boolean	TRUE if page has been modified
 */
METHOD boolean_t is_modified {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Return whether the specified virtual address is a candidate to be
 * prefaulted in. This routine is optional.
 *
 * @param _pmap		physical map
 * @param _va		virtual address to test
 *
 * @retval boolean	TRUE if the address is a candidate.
 */
METHOD boolean_t is_prefaultable {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
} DEFAULT mmu_null_is_prefaultable;


/**
 * @brief Return whether or not the specified physical page was referenced
 * in any physical maps.
 *
 * @param _pg		physical page
 *
 * @retval boolean	TRUE if page has been referenced
 */
METHOD boolean_t is_referenced {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Return a count of referenced bits for a page, clearing those bits.
 * Not all referenced bits need to be cleared, but it is necessary that 0
 * only be returned when there are none set.
 *
 * @param _pg		physical page
 *
 * @retval int		count of referenced bits
 */
METHOD int ts_referenced {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Map the requested physical address range into kernel virtual
 * address space. The value in _virt is taken as a hint. The virtual
 * address of the range is returned, or NULL if the mapping could not
 * be created. The range can be direct-mapped if that is supported.
 *
 * @param *_virt	Hint for start virtual address, and also return
 *			value
 * @param _start	physical address range start
 * @param _end		physical address range end
 * @param _prot		protection of range (currently ignored)
 *
 * @retval NULL		could not map the area
 * @retval addr, *_virt	mapping start virtual address
 */
METHOD vm_offset_t map {
	mmu_t		_mmu;
	vm_offset_t	*_virt;
	vm_paddr_t	_start;
	vm_paddr_t	_end;
	int		_prot;
};


/**
 * @brief Used to create a contiguous set of read-only mappings for a
 * given object, to try to eliminate a cascade of on-demand faults as
 * the object is accessed sequentially. This routine is optional.
 *
 * @param _pmap		physical map
 * @param _addr		mapping start virtual address
 * @param _object	device-backed V.M. object to be mapped
 * @param _pindex	page-index within object of mapping start
 * @param _size		size in bytes of mapping
 */
METHOD void object_init_pt {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_addr;
	vm_object_t	_object;
	vm_pindex_t	_pindex;
	vm_size_t	_size;
} DEFAULT mmu_null_object_init_pt;


/**
 * @brief Used to determine if the specified page has a mapping for the
 * given physical map, by scanning the list of reverse-mappings from the
 * page. The list is scanned to a maximum of 16 entries.
 *
 * @param _pmap		physical map
 * @param _pg		physical page
 *
 * @retval bool		TRUE if the physical map was found in the first 16
 *			reverse-map list entries off the physical page.
 */
METHOD boolean_t page_exists_quick {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_page_t	_pg;
};


/**
 * @brief Initialise the machine-dependent section of the physical page
 * data structure. This routine is optional.
 *
 * @param _pg		physical page
 */
METHOD void page_init {
	mmu_t		_mmu;
	vm_page_t	_pg;
} DEFAULT mmu_null_page_init;


/**
 * @brief Count the number of managed mappings to the given physical
 * page that are wired.
 *
 * @param _pg		physical page
 *
 * @retval int		the number of wired, managed mappings to the
 *			given physical page
 */
METHOD int page_wired_mappings {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Initialise a physical map data structure
 *
 * @param _pmap		physical map
 */
METHOD void pinit {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Initialise the physical map for process 0, the initial process
 * in the system.
 * XXX default to pinit ?
 *
 * @param _pmap		physical map
 */
METHOD void pinit0 {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Set the protection for physical pages in the given virtual address
 * range to the given value.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _prot		new page protection
 */
METHOD void protect {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	vm_prot_t	_prot;
};


/**
 * @brief Create a mapping in kernel virtual address space for the given array
 * of wired physical pages.
 *
 * @param _start	mapping virtual address start
 * @param *_pg		array of physical page pointers
 * @param _count	number of pages in the array
 */
METHOD void qenter {
	mmu_t		_mmu;
	vm_offset_t	_start;
	vm_page_t	*_pg;
	int		_count;
};


/**
 * @brief Remove the temporary mappings created by qenter.
 *
 * @param _start	mapping virtual address start
 * @param _count	number of pages in mapping
 */
METHOD void qremove {
	mmu_t		_mmu;
	vm_offset_t	_start;
	int		_count;
};
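
#
# Illustrative sketch only: qenter/qremove back temporary batch mappings such
# as the buffer cache's. A hypothetical consumer, via the MI pmap wrappers:
#
#	/* Map 'count' wired pages contiguously starting at 'kva'. */
#	pmap_qenter(kva, pages, count);
#	/* ... perform I/O or copy through the mapping ... */
#	pmap_qremove(kva, count);
#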


/**
 * @brief Release per-pmap resources, e.g. mutexes, allocated memory etc.
 * There should be no existing mappings for the physical map at this point.
 *
 * @param _pmap		physical map
 */
METHOD void release {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Remove all mappings in the given physical map for the start/end
 * virtual address range. The range will be page-aligned.
 *
 * @param _pmap		physical map
 * @param _start	mapping virtual address start
 * @param _end		mapping virtual address end
 */
METHOD void remove {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
};


/**
 * @brief Traverse the reverse-map list off the given physical page and
 * remove all mappings. Clear the PGA_WRITEABLE attribute from the page.
 *
 * @param _pg		physical page
 */
METHOD void remove_all {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Remove all mappings in the given physical map. Similar to the
 * remove method, but it is used when tearing down all mappings in an
 * address space. This method is optional, since pmap_remove will be
 * called for each valid vm_map in the address space later.
 *
 * @param _pmap		physical map
 */
METHOD void remove_pages {
	mmu_t		_mmu;
	pmap_t		_pmap;
} DEFAULT mmu_null_remove_pages;


/**
 * @brief Clear the wired attribute from the mappings for the specified range
 * of addresses in the given pmap.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 */
METHOD void unwire {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
};


/**
 * @brief Zero a physical page. It is not assumed that the page is mapped,
 * so a temporary (or direct) mapping may need to be used.
 *
 * @param _pg		physical page
 */
METHOD void zero_page {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Zero a portion of a physical page, starting at a given offset and
 * for a given size (multiples of 512 bytes for 4k pages).
 *
 * @param _pg		physical page
 * @param _off		byte offset from start of page
 * @param _size		size of area to zero
 */
METHOD void zero_page_area {
	mmu_t		_mmu;
	vm_page_t	_pg;
	int		_off;
	int		_size;
};


/**
 * @brief Called from the idle loop to zero pages. XXX I think locking
 * constraints might be different here compared to zero_page.
 *
 * @param _pg		physical page
 */
METHOD void zero_page_idle {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Extract mincore(2) information from a mapping.
 *
 * @param _pmap		physical map
 * @param _addr		page virtual address
 * @param _locked_pa	page physical address
 *
 * @retval 0		no result
 * @retval non-zero	mincore(2) flag values
 */
METHOD int mincore {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_addr;
	vm_paddr_t	*_locked_pa;
} DEFAULT mmu_null_mincore;


/**
 * @brief Perform any operations required to allow a physical map to be used
 * before its address space is accessed.
 *
 * @param _td		thread associated with physical map
 */
METHOD void activate {
	mmu_t		_mmu;
	struct thread	*_td;
};

/**
 * @brief Perform any operations required to deactivate a physical map,
 * for instance as it is context-switched out.
 *
 * @param _td		thread associated with physical map
 */
METHOD void deactivate {
	mmu_t		_mmu;
	struct thread	*_td;
} DEFAULT mmu_null_deactivate;

/**
 * @brief Return a hint for the best virtual address to map a tentative
 * virtual address range in a given VM object. The default is to just
 * return the given tentative start address.
 *
 * @param _obj		VM backing object
 * @param _offset	starting offset within the VM object
 * @param _addr		initial guess at virtual address
 * @param _size		size of virtual address range
 */
METHOD void align_superpage {
	mmu_t		_mmu;
	vm_object_t	_obj;
	vm_ooffset_t	_offset;
	vm_offset_t	*_addr;
	vm_size_t	_size;
} DEFAULT mmu_null_align_superpage;




/**
 * INTERNAL INTERFACES
 */

/**
 * @brief Bootstrap the VM system. At the completion of this routine, the
 * kernel will be running in its own address space with full control over
 * paging.
 *
 * @param _start	start of reserved memory (obsolete ???)
 * @param _end		end of reserved memory (obsolete ???)
 *			XXX I think the intent of these was to allow
 *			the memory used by kernel text+data+bss and
 *			loader variables/load-time kld's to be carved out
 *			of available physical mem.
 *
 */
METHOD void bootstrap {
	mmu_t		_mmu;
	vm_offset_t	_start;
	vm_offset_t	_end;
};

/**
 * @brief Set up the MMU on the current CPU. Only called by the PMAP layer
 * for alternate CPUs on SMP systems.
 *
 * @param _ap		Set to 1 if the CPU being set up is an AP
 *
 */
METHOD void cpu_bootstrap {
	mmu_t		_mmu;
	int		_ap;
};


/**
 * @brief Create a kernel mapping for a given physical address range.
 * Called by bus code on behalf of device drivers. The mapping does not
 * have to be a virtual address: it can be a direct-mapped physical address
 * if that is supported by the MMU.
 *
 * @param _pa		start physical address
 * @param _size		size in bytes of mapping
 *
 * @retval addr		address of mapping.
 */
METHOD void * mapdev {
	mmu_t		_mmu;
	vm_paddr_t	_pa;
	vm_size_t	_size;
};

/**
 * @brief Create a kernel mapping for a given physical address range.
 * Called by bus code on behalf of device drivers. The mapping does not
 * have to be a virtual address: it can be a direct-mapped physical address
 * if that is supported by the MMU.
 *
 * @param _pa		start physical address
 * @param _size		size in bytes of mapping
 * @param _attr		cache attributes
 *
 * @retval addr		address of mapping.
 */
METHOD void * mapdev_attr {
	mmu_t		_mmu;
	vm_paddr_t	_pa;
	vm_size_t	_size;
	vm_memattr_t	_attr;
} DEFAULT mmu_null_mapdev_attr;

/**
 * @brief Change cache control attributes for a page. Should modify all
 * mappings for that page.
 *
 * @param _pg		page to modify
 * @param _ma		new cache control attributes
 */
METHOD void page_set_memattr {
	mmu_t		_mmu;
	vm_page_t	_pg;
	vm_memattr_t	_ma;
} DEFAULT mmu_null_page_set_memattr;

/**
 * @brief Remove the mapping created by mapdev. Called when a driver
 * is unloaded.
 *
 * @param _va		Mapping address returned from mapdev
 * @param _size		size in bytes of mapping
 */
METHOD void unmapdev {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_size_t	_size;
};
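
#
# Illustrative sketch only: a device driver reaches these methods through the
# MI pmap_mapdev()/pmap_unmapdev() wrappers; "regs" and the physical address
# are hypothetical:
#
#	volatile uint32_t *regs;
#
#	regs = pmap_mapdev(0x80000000ul, PAGE_SIZE);
#	regs[0] = 1;			/* program the device */
#	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
#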


/**
 * @brief Reverse-map a kernel virtual address
 *
 * @param _va		kernel virtual address to reverse-map
 *
 * @retval pa		physical address corresponding to mapping
 */
METHOD vm_paddr_t kextract {
	mmu_t		_mmu;
	vm_offset_t	_va;
};


/**
 * @brief Map a wired page into kernel virtual address space
 *
 * @param _va		mapping virtual address
 * @param _pa		mapping physical address
 */
METHOD void kenter {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_paddr_t	_pa;
};
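
#
# Illustrative sketch only: kenter and kextract are inverses for kernel
# mappings, as in this hypothetical round-trip through the MI wrappers:
#
#	pmap_kenter(va, pa);
#	KASSERT(pmap_kextract(va) == pa, ("kextract mismatch"));
#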

/**
 * @brief Map a wired page into kernel virtual address space
 *
 * @param _va		mapping virtual address
 * @param _pa		mapping physical address
 * @param _ma		mapping cache control attributes
 */
METHOD void kenter_attr {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_paddr_t	_pa;
	vm_memattr_t	_ma;
} DEFAULT mmu_null_kenter_attr;

/**
 * @brief Determine if the given physical address range has been direct-mapped.
 *
 * @param _pa		physical address start
 * @param _size		physical address range size
 *
 * @retval bool		TRUE if the range is direct-mapped.
 */
METHOD boolean_t dev_direct_mapped {
	mmu_t		_mmu;
	vm_paddr_t	_pa;
	vm_size_t	_size;
};


/**
 * @brief Enforce instruction cache coherency. Typically called after a
 * region of memory has been modified and before execution of or within
 * that region is attempted. Setting breakpoints in a process through
 * ptrace(2) is one example of when the instruction cache needs to be
 * made coherent.
 *
 * @param _pm		the physical map of the virtual address
 * @param _va		the virtual address of the modified region
 * @param _sz		the size of the modified region
 */
METHOD void sync_icache {
	mmu_t		_mmu;
	pmap_t		_pm;
	vm_offset_t	_va;
	vm_size_t	_sz;
};
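
#
# Illustrative sketch only: after patching instructions (e.g. writing a
# breakpoint on behalf of ptrace(2)), a caller flushes the affected range;
# "td" and the patch length are hypothetical:
#
#	/* A breakpoint was just written at 'va' in the target's space. */
#	pmap_sync_icache(vmspace_pmap(td->td_proc->p_vmspace), va,
#	    sizeof(uint32_t));
#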


/**
 * @brief Create a temporary memory mapping for use by dumpsys().
 *
 * @param _pa		The physical page to map.
 * @param _sz		The requested size of the mapping.
 * @param _va		Receives the virtual address of the mapping.
 */
METHOD void dumpsys_map {
	mmu_t		_mmu;
	vm_paddr_t	_pa;
	size_t		_sz;
	void		**_va;
};


/**
 * @brief Remove a temporary dumpsys() mapping.
 *
 * @param _pa		The physical page that was mapped.
 * @param _sz		The requested size of the mapping.
 * @param _va		The virtual address of the mapping.
 */
METHOD void dumpsys_unmap {
	mmu_t		_mmu;
	vm_paddr_t	_pa;
	size_t		_sz;
	void		*_va;
};


/**
 * @brief Initialize memory chunks for dumpsys.
 */
METHOD void scan_init {
	mmu_t		_mmu;
};
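
#
# Illustrative sketch only: the kernel dump path maps each chunk, writes it
# out, and unmaps it again. The dispatch macros come from the generated
# mmu_if.h; "mmu_obj" and the write step are assumptions:
#
#	void *va;
#
#	MMU_DUMPSYS_MAP(mmu_obj, pa, sz, &va);
#	/* ... write 'sz' bytes at 'va' to the dump device ... */
#	MMU_DUMPSYS_UNMAP(mmu_obj, pa, sz, va);
#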

/**
 * @brief Create a temporary thread-local KVA mapping of a single page.
 *
 * @param _pg		The physical page to map
 *
 * @retval addr		The temporary KVA
 */
METHOD vm_offset_t quick_enter_page {
	mmu_t		_mmu;
	vm_page_t	_pg;
};

/**
 * @brief Undo a mapping created by quick_enter_page
 *
 * @param _va		The mapped KVA
 */
METHOD void quick_remove_page {
	mmu_t		_mmu;
	vm_offset_t	_va;
};
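
#
# Illustrative sketch only: quick mappings suit short, pinned accesses to a
# single page, e.g. this hypothetical word-sized store into page 'm':
#
#	vm_offset_t qva;
#
#	qva = pmap_quick_enter_page(m);
#	*(uint32_t *)(qva + off) = val;
#	pmap_quick_remove_page(qva);
#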

/**
 * @brief Change the specified virtual address range's memory type.
 *
 * @param _va		The virtual base address to change
 * @param _sz		Size of the region to change
 * @param _mode		New mode to set on the VA range
 *
 * @retval error	0 on success, EINVAL or ENOMEM on error.
 */
METHOD int change_attr {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_size_t	_sz;
	vm_memattr_t	_mode;
} DEFAULT mmu_null_change_attr;
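
#
# Illustrative sketch only: a hypothetical caller marking a buffer
# uncacheable through the MI pmap_change_attr() shim (an assumption here)
# and checking the error return:
#
#	int error;
#
#	error = pmap_change_attr(va, sz, VM_MEMATTR_UNCACHEABLE);
#	if (error != 0)
#		printf("pmap_change_attr failed: %d\n", error);
#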
982