#-
# Copyright (c) 2005 Peter Grehan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD: head/sys/powerpc/powerpc/mmu_if.m 255887 2013-09-26 15:36:20Z alc $
#

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/mmuvar.h>
/**
 * @defgroup MMU mmu - KObj methods for PowerPC MMU implementations
 * @brief A set of methods required by all MMU implementations. These
 * are basically direct call-throughs from the pmap machine-dependent
 * code.
 * Thanks to Bruce M Simpson's pmap man pages for the routine descriptions.
 *@{
 */

INTERFACE mmu;
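
/**
 * A note on mechanics: each METHOD below is turned into a KObj dispatch
 * wrapper in the generated mmu_if.h. The sketch below shows roughly what
 * the wrapper for the "enter" method looks like; the exact output comes
 * from the interface compiler (sys/tools/makeobjops.awk) and may differ
 * in detail.
 *
 * @code
 *	static __inline void MMU_ENTER(mmu_t _mmu, pmap_t _pmap,
 *	    vm_offset_t _va, vm_page_t _p, vm_prot_t _prot, boolean_t _wired)
 *	{
 *		kobjop_t _m;
 *
 *		// Resolve the implementation's method from the kobj ops table
 *		KOBJOPLOOKUP(((kobj_t)_mmu)->ops, mmu_enter);
 *		((mmu_enter_t *)_m)(_mmu, _pmap, _va, _p, _prot, _wired);
 *	}
 * @endcode
 */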

#
# Default implementations of some methods
#
CODE {
	static void mmu_null_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
	    vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
	{
		return;
	}

	static void mmu_null_growkernel(mmu_t mmu, vm_offset_t addr)
	{
		return;
	}

	static void mmu_null_init(mmu_t mmu)
	{
		return;
	}

	static boolean_t mmu_null_is_prefaultable(mmu_t mmu, pmap_t pmap,
	    vm_offset_t va)
	{
		return (FALSE);
	}

	static void mmu_null_object_init_pt(mmu_t mmu, pmap_t pmap,
	    vm_offset_t addr, vm_object_t object, vm_pindex_t index,
	    vm_size_t size)
	{
		return;
	}

	static void mmu_null_page_init(mmu_t mmu, vm_page_t m)
	{
		return;
	}

	static void mmu_null_remove_pages(mmu_t mmu, pmap_t pmap)
	{
		return;
	}

	static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
	    vm_paddr_t *locked_pa)
	{
		return (0);
	}

	static void mmu_null_deactivate(struct thread *td)
	{
		return;
	}

	static void mmu_null_align_superpage(mmu_t mmu, vm_object_t object,
	    vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size)
	{
		return;
	}

	static struct pmap_md *mmu_null_scan_md(mmu_t mmu, struct pmap_md *p)
	{
		return (NULL);
	}

	static void *mmu_null_mapdev_attr(mmu_t mmu, vm_offset_t pa,
	    vm_size_t size, vm_memattr_t ma)
	{
		return (MMU_MAPDEV(mmu, pa, size));
	}

	static void mmu_null_kenter_attr(mmu_t mmu, vm_offset_t va,
	    vm_offset_t pa, vm_memattr_t ma)
	{
		MMU_KENTER(mmu, va, pa);
	}

	static void mmu_null_page_set_memattr(mmu_t mmu, vm_page_t m,
	    vm_memattr_t ma)
	{
		return;
	}
};


/**
 * @brief Apply the given advice to the specified range of addresses within
 * the given pmap.  Depending on the advice, clear the referenced and/or
 * modified flags in each mapping and set the mapped page's dirty field.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _advice	advice to apply
 */
METHOD void advise {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	int		_advice;
};


/**
 * @brief Change the wiring attribute for the page in the given physical
 * map and virtual address.
 *
 * @param _pmap		physical map of page
 * @param _va		page virtual address
 * @param _wired	TRUE to increment wired count, FALSE to decrement
 */
METHOD void change_wiring {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	boolean_t	_wired;
};


/**
 * @brief Clear the 'modified' bit on the given physical page
 *
 * @param _pg		physical page
 */
METHOD void clear_modify {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Clear the write and modified bits in each of the given
 * physical page's mappings
 *
 * @param _pg		physical page
 */
METHOD void remove_write {
	mmu_t		_mmu;
	vm_page_t	_pg;
};

/**
 * @brief Copy the address range given by the source physical map, virtual
 * address and length to the destination physical map and virtual address.
 * This routine is optional; it defaults to a null implementation.
 *
 * @param _dst_pmap	destination physical map
 * @param _src_pmap	source physical map
 * @param _dst_addr	destination virtual address
 * @param _len		size of range
 * @param _src_addr	source virtual address
 */
METHOD void copy {
	mmu_t		_mmu;
	pmap_t		_dst_pmap;
	pmap_t		_src_pmap;
	vm_offset_t	_dst_addr;
	vm_size_t	_len;
	vm_offset_t	_src_addr;
} DEFAULT mmu_null_copy;


/**
 * @brief Copy the source physical page to the destination physical page
 *
 * @param _src		source physical page
 * @param _dst		destination physical page
 */
METHOD void copy_page {
	mmu_t		_mmu;
	vm_page_t	_src;
	vm_page_t	_dst;
};

/**
 * @brief Copy _xfersize bytes starting at offset _a_offset in the page
 * array _ma to offset _b_offset in the page array _mb. The transfer may
 * span page boundaries within each array.
 *
 * @param *_ma		source page array
 * @param _a_offset	byte offset into the source array
 * @param *_mb		destination page array
 * @param _b_offset	byte offset into the destination array
 * @param _xfersize	number of bytes to copy
 */
METHOD void copy_pages {
	mmu_t		_mmu;
	vm_page_t	*_ma;
	vm_offset_t	_a_offset;
	vm_page_t	*_mb;
	vm_offset_t	_b_offset;
	int		_xfersize;
};

/**
 * @brief Create a mapping between a virtual/physical address pair in the
 * passed physical map with the specified protection and wiring
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 * @param _p		mapping physical page
 * @param _prot		mapping page protection
 * @param _wired	TRUE if page will be wired
 */
METHOD void enter {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_page_t	_p;
	vm_prot_t	_prot;
	boolean_t	_wired;
};
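
/**
 * The machine-independent pmap entry points are thin call-throughs onto
 * these methods. A sketch of the dispatch for this method, where mmu_obj
 * stands for the kobj handle of the active MMU implementation:
 *
 * @code
 *	void
 *	pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t p, vm_prot_t prot,
 *	    boolean_t wired)
 *	{
 *		MMU_ENTER(mmu_obj, pmap, va, p, prot, wired);
 *	}
 * @endcode
 */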


/**
 * @brief Maps a sequence of resident pages belonging to the same object.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _m_start	physical page mapped at start
 * @param _prot		mapping page protection
 */
METHOD void enter_object {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	vm_page_t	_m_start;
	vm_prot_t	_prot;
};

/**
 * @brief A faster entry point for page mapping where it is possible
 * to short-circuit some of the tests in pmap_enter.
 *
 * @param _pmap		physical map (and also currently active pmap)
 * @param _va		mapping virtual address
 * @param _pg		mapping physical page
 * @param _prot		new page protection - used to determine whether
 *			the page is executable
 */
METHOD void enter_quick {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_page_t	_pg;
	vm_prot_t	_prot;
};

/**
 * @brief Reverse map the given virtual address, returning the physical
 * page associated with the address if a mapping exists.
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 *
 * @retval 0		No mapping found
 * @retval addr		The mapping physical address
 */
METHOD vm_paddr_t extract {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
};

/**
 * @brief Reverse map the given virtual address, returning the
 * physical page if found. The page must be held (by calling
 * vm_page_hold) if the page protection matches the given protection.
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 * @param _prot		protection used to determine if physical page
 *			should be locked
 *
 * @retval NULL		No mapping found
 * @retval page		Pointer to physical page. Held if protections match
 */
METHOD vm_page_t extract_and_hold {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_prot_t	_prot;
};
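
/**
 * A sketch of typical caller usage, assuming the standard vm_page hold
 * protocol: the held page must later be released with vm_page_unhold()
 * under the page lock.
 *
 * @code
 *	vm_page_t m;
 *
 *	m = pmap_extract_and_hold(pmap, va, VM_PROT_READ);
 *	if (m != NULL) {
 *		// ... access the page contents safely ...
 *		vm_page_lock(m);
 *		vm_page_unhold(m);
 *		vm_page_unlock(m);
 *	}
 * @endcode
 */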


/**
 * @brief Increase kernel virtual address space to the given virtual address.
 * Not strictly required on PowerPC, so this routine is optional unless the
 * MMU implementation can make use of it.
 *
 * @param _va		new upper limit for kernel virtual address space
 */
METHOD void growkernel {
	mmu_t		_mmu;
	vm_offset_t	_va;
} DEFAULT mmu_null_growkernel;


/**
 * @brief Called from vm_mem_init. Zone allocation is available at this
 * stage, so it is a convenient time to create zones. This routine is for
 * MMU-implementation convenience and is optional.
 */
METHOD void init {
	mmu_t		_mmu;
} DEFAULT mmu_null_init;


/**
 * @brief Return whether the page has been marked as modified by the MMU
 * hardware.
 *
 * @param _pg		physical page to test
 *
 * @retval boolean	TRUE if page has been modified
 */
METHOD boolean_t is_modified {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Return whether the specified virtual address is a candidate to be
 * prefaulted in. This routine is optional.
 *
 * @param _pmap		physical map
 * @param _va		virtual address to test
 *
 * @retval boolean	TRUE if the address is a candidate.
 */
METHOD boolean_t is_prefaultable {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
} DEFAULT mmu_null_is_prefaultable;


/**
 * @brief Return whether or not the specified physical page was referenced
 * in any physical maps.
 *
 * @param _pg		physical page
 *
 * @retval boolean	TRUE if page has been referenced
 */
METHOD boolean_t is_referenced {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Return a count of referenced bits for a page, clearing those bits.
 * Not all referenced bits need to be cleared, but it is necessary that 0
 * only be returned when there are none set.
 *
 * @param _pg		physical page
 *
 * @retval int		count of referenced bits
 */
METHOD int ts_referenced {
	mmu_t		_mmu;
	vm_page_t	_pg;
};
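
/**
 * For context, a sketch of how the page daemon consumes this count when
 * scoring page activity (illustrative only; the real policy lives in
 * vm_pageout.c):
 *
 * @code
 *	int act_delta;
 *
 *	act_delta = pmap_ts_referenced(m);
 *	if (act_delta != 0) {
 *		m->act_count += ACT_ADVANCE + act_delta;
 *		if (m->act_count > ACT_MAX)
 *			m->act_count = ACT_MAX;
 *	}
 * @endcode
 */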


/**
 * @brief Map the requested physical address range into kernel virtual
 * address space. The value in _virt is taken as a hint. The virtual
 * address of the range is returned, or NULL if the mapping could not
 * be created. The range can be direct-mapped if that is supported.
 *
 * @param *_virt	Hint for start virtual address, and also return
 *			value
 * @param _start	physical address range start
 * @param _end		physical address range end
 * @param _prot		protection of range (currently ignored)
 *
 * @retval NULL		could not map the area
 * @retval addr, *_virt	mapping start virtual address
 */
METHOD vm_offset_t map {
	mmu_t		_mmu;
	vm_offset_t	*_virt;
	vm_paddr_t	_start;
	vm_paddr_t	_end;
	int		_prot;
};


/**
 * @brief Used to create a contiguous set of read-only mappings for a
 * given object to try to eliminate a cascade of on-demand faults as
 * the object is accessed sequentially. This routine is optional.
 *
 * @param _pmap		physical map
 * @param _addr		mapping start virtual address
 * @param _object	device-backed V.M. object to be mapped
 * @param _pindex	page-index within object of mapping start
 * @param _size		size in bytes of mapping
 */
METHOD void object_init_pt {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_addr;
	vm_object_t	_object;
	vm_pindex_t	_pindex;
	vm_size_t	_size;
} DEFAULT mmu_null_object_init_pt;


/**
 * @brief Used to determine if the specified page has a mapping for the
 * given physical map, by scanning the list of reverse-mappings from the
 * page. The list is scanned to a maximum of 16 entries.
 *
 * @param _pmap		physical map
 * @param _pg		physical page
 *
 * @retval bool		TRUE if the physical map was found in the first 16
 *			reverse-map list entries off the physical page.
 */
METHOD boolean_t page_exists_quick {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_page_t	_pg;
};


/**
 * @brief Initialise the machine-dependent section of the physical page
 * data structure. This routine is optional.
 *
 * @param _pg		physical page
 */
METHOD void page_init {
	mmu_t		_mmu;
	vm_page_t	_pg;
} DEFAULT mmu_null_page_init;


/**
 * @brief Count the number of managed mappings to the given physical
 * page that are wired.
 *
 * @param _pg		physical page
 *
 * @retval int		the number of wired, managed mappings to the
 *			given physical page
 */
METHOD int page_wired_mappings {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Initialise a physical map data structure
 *
 * @param _pmap		physical map
 */
METHOD void pinit {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Initialise the physical map for process 0, the initial process
 * in the system.
 * XXX default to pinit ?
 *
 * @param _pmap		physical map
 */
METHOD void pinit0 {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Set the protection for physical pages in the given virtual address
 * range to the given value.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _prot		new page protection
 */
METHOD void protect {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	vm_prot_t	_prot;
};


/**
 * @brief Create a mapping in kernel virtual address space for the given array
 * of wired physical pages.
 *
 * @param _start	mapping virtual address start
 * @param *_pg		array of physical page pointers
 * @param _count	array elements
 */
METHOD void qenter {
	mmu_t		_mmu;
	vm_offset_t	_start;
	vm_page_t	*_pg;
	int		_count;
};


/**
 * @brief Remove the temporary mappings created by qenter.
 *
 * @param _start	mapping virtual address start
 * @param _count	number of pages in mapping
 */
METHOD void qremove {
	mmu_t		_mmu;
	vm_offset_t	_start;
	int		_count;
};
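
/**
 * qenter and qremove bracket temporary kernel access to a run of pages.
 * A sketch, assuming the kva range (va) was reserved elsewhere, e.g. at
 * subsystem initialisation:
 *
 * @code
 *	pmap_qenter(va, ma, npages);	// wire the page array into the kva
 *	// ... access the pages through va ...
 *	pmap_qremove(va, npages);	// and tear the mappings down again
 * @endcode
 */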


/**
 * @brief Release per-pmap resources, e.g. mutexes and allocated memory.
 * There should be no existing mappings for the physical map at this point.
 *
 * @param _pmap		physical map
 */
METHOD void release {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Remove all mappings in the given physical map for the start/end
 * virtual address range. The range will be page-aligned.
 *
 * @param _pmap		physical map
 * @param _start	mapping virtual address start
 * @param _end		mapping virtual address end
 */
METHOD void remove {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
};


/**
 * @brief Traverse the reverse-map list off the given physical page and
 * remove all mappings. Clear the PGA_WRITEABLE attribute from the page.
 *
 * @param _pg		physical page
 */
METHOD void remove_all {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Remove all user mappings in the given physical map. Similar to
 * the remove method, but is used when tearing down all mappings in an
 * address space. This method is optional, since pmap_remove will be
 * called for each valid vm_map in the address space later.
 *
 * @param _pmap		physical map
 */
METHOD void remove_pages {
	mmu_t		_mmu;
	pmap_t		_pmap;
} DEFAULT mmu_null_remove_pages;


/**
 * @brief Zero a physical page. It is not assumed that the page is mapped,
 * so a temporary (or direct) mapping may need to be used.
 *
 * @param _pg		physical page
 */
METHOD void zero_page {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Zero a portion of a physical page, starting at a given offset and
 * for a given size (multiples of 512 bytes for 4k pages).
 *
 * @param _pg		physical page
 * @param _off		byte offset from start of page
 * @param _size		size of area to zero
 */
METHOD void zero_page_area {
	mmu_t		_mmu;
	vm_page_t	_pg;
	int		_off;
	int		_size;
};


/**
 * @brief Called from the idle loop to zero pages. XXX I think locking
 * constraints might be different here compared to zero_page.
 *
 * @param _pg		physical page
 */
METHOD void zero_page_idle {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Extract mincore(2) information from a mapping.
 *
 * @param _pmap		physical map
 * @param _addr		page virtual address
 * @param _locked_pa	page physical address
 *
 * @retval 0		no result
 * @retval non-zero	mincore(2) flag values
 */
METHOD int mincore {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_addr;
	vm_paddr_t	*_locked_pa;
} DEFAULT mmu_null_mincore;
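
/**
 * A sketch of how an implementation composes the return value from the
 * MINCORE_* flags in <sys/mman.h> (the predicates are placeholders for
 * the implementation's own PTE tests):
 *
 * @code
 *	int val;
 *
 *	val = MINCORE_INCORE;
 *	if (pte_is_modified)
 *		val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
 *	if (pte_is_referenced)
 *		val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
 *	return (val);
 * @endcode
 */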


/**
 * @brief Perform any operations required to allow a physical map to be used
 * before its address space is accessed.
 *
 * @param _td		thread associated with physical map
 */
METHOD void activate {
	mmu_t		_mmu;
	struct thread	*_td;
};

/**
 * @brief Perform any operations required to deactivate a physical map,
 * for instance as it is context-switched out.
 *
 * @param _td		thread associated with physical map
 */
METHOD void deactivate {
	mmu_t		_mmu;
	struct thread	*_td;
} DEFAULT mmu_null_deactivate;

/**
 * @brief Return a hint for the best virtual address to map a tentative
 * virtual address range in a given VM object. The default is to just
 * return the given tentative start address.
 *
 * @param _obj		VM backing object
 * @param _offset	starting offset within the VM object
 * @param _addr		initial guess at virtual address
 * @param _size		size of virtual address range
 */
METHOD void align_superpage {
	mmu_t		_mmu;
	vm_object_t	_obj;
	vm_ooffset_t	_offset;
	vm_offset_t	*_addr;
	vm_size_t	_size;
} DEFAULT mmu_null_align_superpage;
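
/**
 * A sketch of a non-null implementation, modelled on what other ports do
 * (SUPERPAGE_SIZE is a placeholder for the implementation's large-page
 * size): nudge *addr so that it has the same offset within a superpage
 * as the object offset, making later superpage promotion possible.
 *
 * @code
 *	static void
 *	mmu_foo_align_superpage(mmu_t mmu, vm_object_t object,
 *	    vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size)
 *	{
 *		vm_offset_t superpage_offset;
 *
 *		if (size < SUPERPAGE_SIZE)
 *			return;
 *		superpage_offset = offset & (SUPERPAGE_SIZE - 1);
 *		if ((*addr & (SUPERPAGE_SIZE - 1)) == superpage_offset)
 *			return;		// already aligned as desired
 *		if ((*addr & (SUPERPAGE_SIZE - 1)) < superpage_offset)
 *			*addr = (*addr & ~(SUPERPAGE_SIZE - 1)) +
 *			    superpage_offset;
 *		else
 *			*addr = ((*addr + (SUPERPAGE_SIZE - 1)) &
 *			    ~(SUPERPAGE_SIZE - 1)) + superpage_offset;
 *	}
 * @endcode
 */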


/**
 * INTERNAL INTERFACES
 */

/**
 * @brief Bootstrap the VM system. At the completion of this routine, the
 * kernel will be running in its own address space with full control over
 * paging.
 *
 * @param _start	start of reserved memory (obsolete ???)
 * @param _end		end of reserved memory (obsolete ???)
 *			XXX I think the intent of these was to allow
 *			the memory used by kernel text+data+bss and
 *			loader variables/load-time kld's to be carved out
 *			of available physical mem.
 *
 */
METHOD void bootstrap {
	mmu_t		_mmu;
	vm_offset_t	_start;
	vm_offset_t	_end;
};

/**
 * @brief Set up the MMU on the current CPU. Only called by the PMAP layer
 * for alternate CPUs on SMP systems.
 *
 * @param _ap		Set to 1 if the CPU being set up is an AP
 *
 */
METHOD void cpu_bootstrap {
	mmu_t		_mmu;
	int		_ap;
};


/**
 * @brief Create a kernel mapping for a given physical address range.
 * Called by bus code on behalf of device drivers. The mapping does not
 * have to be a virtual address: it can be a direct-mapped physical address
 * if that is supported by the MMU.
 *
 * @param _pa		start physical address
 * @param _size		size in bytes of mapping
 *
 * @retval addr		address of mapping.
 */
METHOD void * mapdev {
	mmu_t		_mmu;
	vm_paddr_t	_pa;
	vm_size_t	_size;
};

/**
 * @brief Create a kernel mapping for a given physical address range.
 * Called by bus code on behalf of device drivers. The mapping does not
 * have to be a virtual address: it can be a direct-mapped physical address
 * if that is supported by the MMU.
 *
 * @param _pa		start physical address
 * @param _size		size in bytes of mapping
 * @param _attr		cache attributes
 *
 * @retval addr		address of mapping.
 */
METHOD void * mapdev_attr {
	mmu_t		_mmu;
	vm_offset_t	_pa;
	vm_size_t	_size;
	vm_memattr_t	_attr;
} DEFAULT mmu_null_mapdev_attr;

/**
 * @brief Change cache control attributes for a page. Should modify all
 * mappings for that page.
 *
 * @param _pg		page to modify
 * @param _ma		new cache control attributes
 */
METHOD void page_set_memattr {
	mmu_t		_mmu;
	vm_page_t	_pg;
	vm_memattr_t	_ma;
} DEFAULT mmu_null_page_set_memattr;

/**
 * @brief Remove the mapping created by mapdev. Called when a driver
 * is unloaded.
 *
 * @param _va		Mapping address returned from mapdev
 * @param _size		size in bytes of mapping
 */
METHOD void unmapdev {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_size_t	_size;
};
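
/**
 * mapdev and unmapdev bracket the lifetime of a device mapping. A sketch
 * of driver-side usage, where pa and size are assumed to describe the
 * device's register window:
 *
 * @code
 *	regs = pmap_mapdev(pa, size);		// at attach
 *	// ... program the device through regs ...
 *	pmap_unmapdev((vm_offset_t)regs, size);	// at detach
 * @endcode
 */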


/**
 * @brief Reverse-map a kernel virtual address
 *
 * @param _va		kernel virtual address to reverse-map
 *
 * @retval pa		physical address corresponding to mapping
 */
METHOD vm_paddr_t kextract {
	mmu_t		_mmu;
	vm_offset_t	_va;
};


/**
 * @brief Map a wired page into kernel virtual address space
 *
 * @param _va		mapping virtual address
 * @param _pa		mapping physical address
 */
METHOD void kenter {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_paddr_t	_pa;
};

/**
 * @brief Map a wired page into kernel virtual address space
 *
 * @param _va		mapping virtual address
 * @param _pa		mapping physical address
 * @param _ma		mapping cache control attributes
 */
METHOD void kenter_attr {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_offset_t	_pa;
	vm_memattr_t	_ma;
} DEFAULT mmu_null_kenter_attr;

/**
 * @brief Determine if the given physical address range has been direct-mapped.
 *
 * @param _pa		physical address start
 * @param _size		physical address range size
 *
 * @retval bool		TRUE if the range is direct-mapped.
 */
METHOD boolean_t dev_direct_mapped {
	mmu_t		_mmu;
	vm_paddr_t	_pa;
	vm_size_t	_size;
};


/**
 * @brief Enforce instruction cache coherency. Typically called after a
 * region of memory has been modified and before execution of or within
 * that region is attempted. Setting breakpoints in a process through
 * ptrace(2) is one example of when the instruction cache needs to be
 * made coherent.
 *
 * @param _pm		the physical map of the virtual address
 * @param _va		the virtual address of the modified region
 * @param _sz		the size of the modified region
 */
METHOD void sync_icache {
	mmu_t		_mmu;
	pmap_t		_pm;
	vm_offset_t	_va;
	vm_size_t	_sz;
};
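
/**
 * A sketch of the ptrace(2) breakpoint case mentioned above: once the
 * debugger has written a trap instruction into the traced process, the
 * instruction cache must be brought in line with the data cache before
 * the target runs. Here write_breakpoint() is a stand-in for the
 * debugger's memory write and bpinstr for the trap opcode:
 *
 * @code
 *	write_breakpoint(p, va, bpinstr);
 *	pmap_sync_icache(vmspace_pmap(p->p_vmspace), va, sizeof(bpinstr));
 * @endcode
 */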


/**
 * @brief Create a temporary memory mapping for use by dumpsys().
 *
 * @param _md		The memory chunk in which the mapping lies.
 * @param _ofs		The offset within the chunk of the mapping.
 * @param _sz		The requested size of the mapping.
 *
 * @retval vm_offset_t	The virtual address of the mapping.
 *
 * The _sz argument is modified to reflect the actual size of the
 * mapping.
 */
METHOD vm_offset_t dumpsys_map {
	mmu_t		_mmu;
	struct pmap_md	*_md;
	vm_size_t	_ofs;
	vm_size_t	*_sz;
};


/**
 * @brief Remove a temporary dumpsys() mapping.
 *
 * @param _md		The memory chunk in which the mapping lies.
 * @param _ofs		The offset within the chunk of the mapping.
 * @param _va		The virtual address of the mapping.
 */
METHOD void dumpsys_unmap {
	mmu_t		_mmu;
	struct pmap_md	*_md;
	vm_size_t	_ofs;
	vm_offset_t	_va;
};


/**
 * @brief Scan/iterate memory chunks.
 *
 * @param _prev		The previously returned chunk or NULL.
 *
 * @retval		The next (or first when _prev is NULL) chunk.
 */
METHOD struct pmap_md * scan_md {
	mmu_t		_mmu;
	struct pmap_md	*_prev;
} DEFAULT mmu_null_scan_md;
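
/**
 * scan_md supports the iteration pattern sketched below, which is how a
 * dump routine would walk every memory chunk (illustrative loop only):
 *
 * @code
 *	struct pmap_md *md;
 *
 *	for (md = pmap_scan_md(NULL); md != NULL; md = pmap_scan_md(md)) {
 *		// ... dump the chunk described by md ...
 *	}
 * @endcode
 */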
949