#-
# Copyright (c) 2005 Peter Grehan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD: head/sys/powerpc/powerpc/mmu_if.m 190681 2009-04-04 00:22:44Z nwhitehorn $
#

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/mmuvar.h>

/**
 * @defgroup MMU mmu - KObj methods for PowerPC MMU implementations
 * @brief A set of methods required by all MMU implementations. These
 * are basically direct call-throughs from the pmap machine-dependent
 * code.
 * Thanks to Bruce M Simpson's pmap man pages for routine descriptions.
 *@{
 */

INTERFACE mmu;

#
# Default implementations of some methods
#
CODE {
	static void mmu_null_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
	    vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
	{
		return;
	}

	static void mmu_null_growkernel(mmu_t mmu, vm_offset_t addr)
	{
		return;
	}

	static void mmu_null_init(mmu_t mmu)
	{
		return;
	}

	static boolean_t mmu_null_is_prefaultable(mmu_t mmu, pmap_t pmap,
	    vm_offset_t va)
	{
		return (FALSE);
	}

	static void mmu_null_object_init_pt(mmu_t mmu, pmap_t pmap,
	    vm_offset_t addr, vm_object_t object, vm_pindex_t index,
	    vm_size_t size)
	{
		return;
	}

	static void mmu_null_page_init(mmu_t mmu, vm_page_t m)
	{
		return;
	}

	static void mmu_null_remove_pages(mmu_t mmu, pmap_t pmap)
	{
		return;
	}

	static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
	{
		return (0);
	}

	static void mmu_null_deactivate(mmu_t mmu, struct thread *td)
	{
		return;
	}

	static void mmu_null_align_superpage(mmu_t mmu, vm_object_t object,
	    vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size)
	{
		return;
	}
};

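/*
 * Illustrative sketch, not part of the interface definition: an MMU
 * implementation supplies a method table and registers it with MMU_DEF()
 * (see mmuvar.h for the exact macro signature).  The mmu_foo_* and
 * MMU_TYPE_FOO names below are hypothetical; mmu_oea.c is a real example
 * of this pattern:
 *
 *	static mmu_method_t mmu_foo_methods[] = {
 *		MMUMETHOD(mmu_enter,	mmu_foo_enter),
 *		MMUMETHOD(mmu_extract,	mmu_foo_extract),
 *		MMUMETHOD(mmu_remove,	mmu_foo_remove),
 *		{ 0, 0 }
 *	};
 *
 *	MMU_DEF(mmu_foo, MMU_TYPE_FOO, mmu_foo_methods, 0);
 */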

/**
 * @brief Change the wiring attribute for the page in the given physical
 * map and virtual address.
 *
 * @param _pmap		physical map of page
 * @param _va		page virtual address
 * @param _wired	TRUE to increment wired count, FALSE to decrement
 */
METHOD void change_wiring {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	boolean_t	_wired;
};


/**
 * @brief Clear the 'modified' bit on the given physical page
 *
 * @param _pg		physical page
 */
METHOD void clear_modify {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Clear the 'referenced' bit on the given physical page
 *
 * @param _pg		physical page
 */
METHOD void clear_reference {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Clear the write and modified bits in each of the given
 * physical page's mappings
 *
 * @param _pg		physical page
 */
METHOD void remove_write {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Copy the address range given by the source physical map, virtual
 * address and length to the destination physical map and virtual address.
 * This routine is optional; it defaults to a null implementation.
 *
 * @param _dst_pmap	destination physical map
 * @param _src_pmap	source physical map
 * @param _dst_addr	destination virtual address
 * @param _len		size of range
 * @param _src_addr	source virtual address
 */
METHOD void copy {
	mmu_t		_mmu;
	pmap_t		_dst_pmap;
	pmap_t		_src_pmap;
	vm_offset_t	_dst_addr;
	vm_size_t	_len;
	vm_offset_t	_src_addr;
} DEFAULT mmu_null_copy;


/**
 * @brief Copy the source physical page to the destination physical page
 *
 * @param _src		source physical page
 * @param _dst		destination physical page
 */
METHOD void copy_page {
	mmu_t		_mmu;
	vm_page_t	_src;
	vm_page_t	_dst;
};


/**
 * @brief Create a mapping between a virtual/physical address pair in the
 * passed physical map with the specified protection and wiring
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 * @param _p		mapping physical page
 * @param _prot		mapping page protection
 * @param _wired	TRUE if page will be wired
 */
METHOD void enter {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_page_t	_p;
	vm_prot_t	_prot;
	boolean_t	_wired;
};

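/*
 * Sketch of the calling side: the machine-independent pmap API reaches
 * these methods through the KObj-generated MMU_*() wrappers declared in
 * the generated mmu_if.h (see pmap_dispatch.c).  For example, pmap_enter()
 * reduces to a single call through the installed interface object (the
 * variable name mmu_obj is illustrative):
 *
 *	void
 *	pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t p, vm_prot_t prot,
 *	    boolean_t wired)
 *	{
 *		MMU_ENTER(mmu_obj, pmap, va, p, prot, wired);
 *	}
 */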

/**
 * @brief Maps a sequence of resident pages belonging to the same object.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _m_start	physical page mapped at start
 * @param _prot		mapping page protection
 */
METHOD void enter_object {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	vm_page_t	_m_start;
	vm_prot_t	_prot;
};


/**
 * @brief A faster entry point for page mapping where it is possible
 * to short-circuit some of the tests in pmap_enter.
 *
 * @param _pmap		physical map (and also currently active pmap)
 * @param _va		mapping virtual address
 * @param _pg		mapping physical page
 * @param _prot		new page protection - used to see if page is executable
 */
METHOD void enter_quick {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_page_t	_pg;
	vm_prot_t	_prot;
};


/**
 * @brief Reverse map the given virtual address, returning the physical
 * page associated with the address if a mapping exists.
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 *
 * @retval 0		No mapping found
 * @retval addr		The mapping physical address
 */
METHOD vm_paddr_t extract {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
};


/**
 * @brief Reverse map the given virtual address, returning the
 * physical page if found. The page must be held (by calling
 * vm_page_hold) if the page protection matches the given protection.
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 * @param _prot		protection used to determine if physical page
 *			should be locked
 *
 * @retval NULL		No mapping found
 * @retval page		Pointer to physical page. Held if protections match
 */
METHOD vm_page_t extract_and_hold {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_prot_t	_prot;
};

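/*
 * Hedged usage sketch: a caller that obtains a held page through this
 * method must drop the hold with vm_page_unhold() when done, e.g.
 *
 *	vm_page_t m;
 *
 *	m = pmap_extract_and_hold(pmap, va, VM_PROT_READ);
 *	if (m != NULL) {
 *		error = uiomove_fromphys(&m, page_offset, len, uio);
 *		vm_page_lock_queues();
 *		vm_page_unhold(m);
 *		vm_page_unlock_queues();
 *	}
 */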

/**
 * @brief Increase kernel virtual address space to the given virtual address.
 * Not really required for PowerPC, so optional unless the MMU implementation
 * can use it.
 *
 * @param _va		new upper limit for kernel virtual address space
 */
METHOD void growkernel {
	mmu_t		_mmu;
	vm_offset_t	_va;
} DEFAULT mmu_null_growkernel;


/**
 * @brief Called from vm_mem_init. Zone allocation is available at this
 * stage so it is a convenient time to create zones. This routine is for
 * MMU-implementation convenience and is optional.
 */
METHOD void init {
	mmu_t		_mmu;
} DEFAULT mmu_null_init;


/**
 * @brief Return whether the page has been marked by MMU hardware as
 * modified
 *
 * @param _pg		physical page to test
 *
 * @retval boolean	TRUE if page has been modified
 */
METHOD boolean_t is_modified {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Return whether the specified virtual address is a candidate to be
 * prefaulted in. This routine is optional.
 *
 * @param _pmap		physical map
 * @param _va		virtual address to test
 *
 * @retval boolean	TRUE if the address is a candidate.
 */
METHOD boolean_t is_prefaultable {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
} DEFAULT mmu_null_is_prefaultable;


/**
 * @brief Return a count of referenced bits for a page, clearing those bits.
 * Not all referenced bits need to be cleared, but it is necessary that 0
 * only be returned when there are none set.
 *
 * @param _pg		physical page
 *
 * @retval int		count of referenced bits
 */
METHOD int ts_referenced {
	mmu_t		_mmu;
	vm_page_t	_pg;
};

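/*
 * Illustrative caller, sketched after the VM pageout scan: the returned
 * count only weights page activity, which is why returning a partial
 * count affects accuracy, not correctness:
 *
 *	actcount = pmap_ts_referenced(m);
 *	if (actcount != 0)
 *		m->act_count += ACT_ADVANCE + actcount;
 */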

/**
 * @brief Map the requested physical address range into kernel virtual
 * address space. The value in _virt is taken as a hint. The virtual
 * address of the range is returned, or NULL if the mapping could not
 * be created. The range can be direct-mapped if that is supported.
 *
 * @param *_virt	Hint for start virtual address, and also return
 *			value
 * @param _start	physical address range start
 * @param _end		physical address range end
 * @param _prot		protection of range (currently ignored)
 *
 * @retval NULL		could not map the area
 * @retval addr, *_virt	mapping start virtual address
 */
METHOD vm_offset_t map {
	mmu_t		_mmu;
	vm_offset_t	*_virt;
	vm_paddr_t	_start;
	vm_paddr_t	_end;
	int		_prot;
};

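/*
 * Typical early-boot usage (hedged sketch; msgbuf_pa and the use of the
 * message buffer are hypothetical here): the caller passes the current
 * KVA hint by reference and stores the advanced hint back:
 *
 *	vm_offset_t va;
 *
 *	va = virtual_avail;
 *	msgbufp = (struct msgbuf *)pmap_map(&va, msgbuf_pa,
 *	    msgbuf_pa + MSGBUF_SIZE, VM_PROT_READ | VM_PROT_WRITE);
 *	virtual_avail = va;
 */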

/**
 * @brief Used to create a contiguous set of read-only mappings for a
 * given object to try and eliminate a cascade of on-demand faults as
 * the object is accessed sequentially. This routine is optional.
 *
 * @param _pmap		physical map
 * @param _addr		mapping start virtual address
 * @param _object	device-backed V.M. object to be mapped
 * @param _pindex	page-index within object of mapping start
 * @param _size		size in bytes of mapping
 */
METHOD void object_init_pt {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_addr;
	vm_object_t	_object;
	vm_pindex_t	_pindex;
	vm_size_t	_size;
} DEFAULT mmu_null_object_init_pt;


/**
 * @brief Used to determine if the specified page has a mapping for the
 * given physical map, by scanning the list of reverse-mappings from the
 * page. The list is scanned to a maximum of 16 entries.
 *
 * @param _pmap		physical map
 * @param _pg		physical page
 *
 * @retval bool		TRUE if the physical map was found in the first 16
 *			reverse-map list entries off the physical page.
 */
METHOD boolean_t page_exists_quick {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_page_t	_pg;
};


/**
 * @brief Initialise the machine-dependent section of the physical page
 * data structure. This routine is optional.
 *
 * @param _pg		physical page
 */
METHOD void page_init {
	mmu_t		_mmu;
	vm_page_t	_pg;
} DEFAULT mmu_null_page_init;


/**
 * @brief Count the number of managed mappings to the given physical
 * page that are wired.
 *
 * @param _pg		physical page
 *
 * @retval int		the number of wired, managed mappings to the
 *			given physical page
 */
METHOD int page_wired_mappings {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Initialise a physical map data structure
 *
 * @param _pmap		physical map
 */
METHOD void pinit {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Initialise the physical map for process 0, the initial process
 * in the system.
 * XXX default to pinit ?
 *
 * @param _pmap		physical map
 */
METHOD void pinit0 {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Set the protection for physical pages in the given virtual address
 * range to the given value.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _prot		new page protection
 */
METHOD void protect {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	vm_prot_t	_prot;
};


/**
 * @brief Create a mapping in kernel virtual address space for the given array
 * of wired physical pages.
 *
 * @param _start	mapping virtual address start
 * @param *_pg		array of physical page pointers
 * @param _count	array elements
 */
METHOD void qenter {
	mmu_t		_mmu;
	vm_offset_t	_start;
	vm_page_t	*_pg;
	int		_count;
};


/**
 * @brief Remove the temporary mappings created by qenter.
 *
 * @param _start	mapping virtual address start
 * @param _count	number of pages in mapping
 */
METHOD void qremove {
	mmu_t		_mmu;
	vm_offset_t	_start;
	int		_count;
};

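/*
 * Sketch of the usual pairing (illustrative; kva names a pre-allocated
 * kernel virtual range): a run of physical pages is temporarily mapped,
 * used, and unmapped:
 *
 *	pmap_qenter(kva, pages, npages);
 *	bcopy((void *)kva, buffer, npages * PAGE_SIZE);
 *	pmap_qremove(kva, npages);
 */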

/**
 * @brief Release per-pmap resources, e.g. mutexes, allocated memory, etc.
 * There should be no existing mappings for the physical map at this point.
 *
 * @param _pmap		physical map
 */
METHOD void release {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Remove all mappings in the given physical map for the start/end
 * virtual address range. The range will be page-aligned.
 *
 * @param _pmap		physical map
 * @param _start	mapping virtual address start
 * @param _end		mapping virtual address end
 */
METHOD void remove {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
};


/**
 * @brief Traverse the reverse-map list off the given physical page and
 * remove all mappings. Clear the PG_WRITEABLE attribute from the page.
 *
 * @param _pg		physical page
 */
METHOD void remove_all {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Remove all mappings in the given physical map. Similar to the
 * remove method, but used when tearing down an entire address space.
 * This method is optional, since pmap_remove will be called for each
 * valid vm_map in the address space later.
 *
 * @param _pmap		physical map
 */
METHOD void remove_pages {
	mmu_t		_mmu;
	pmap_t		_pmap;
} DEFAULT mmu_null_remove_pages;


/**
 * @brief Zero a physical page. It is not assumed that the page is mapped,
 * so a temporary (or direct) mapping may need to be used.
 *
 * @param _pg		physical page
 */
METHOD void zero_page {
	mmu_t		_mmu;
	vm_page_t	_pg;
};

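/*
 * Implementation sketch (all mmu_foo_* names are hypothetical): without
 * a direct map, an implementation can zero through a reserved scratch
 * kernel mapping; a real implementation must also serialize use of the
 * scratch VA:
 *
 *	static void
 *	mmu_foo_zero_page(mmu_t mmu, vm_page_t m)
 *	{
 *		mmu_foo_kenter(mmu, mmu_foo_scratch_va, VM_PAGE_TO_PHYS(m));
 *		bzero((void *)mmu_foo_scratch_va, PAGE_SIZE);
 *	}
 */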

/**
 * @brief Zero a portion of a physical page, starting at a given offset and
 * for a given size (multiples of 512 bytes for 4k pages).
 *
 * @param _pg		physical page
 * @param _off		byte offset from start of page
 * @param _size		size of area to zero
 */
METHOD void zero_page_area {
	mmu_t		_mmu;
	vm_page_t	_pg;
	int		_off;
	int		_size;
};


/**
 * @brief Called from the idle loop to zero pages. XXX I think locking
 * constraints might be different here compared to zero_page.
 *
 * @param _pg		physical page
 */
METHOD void zero_page_idle {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Extract mincore(2) information from a mapping. This routine is
 * optional and is an optimisation: the mincore code will call is_modified
 * and ts_referenced if no result is returned.
 *
 * @param _pmap		physical map
 * @param _addr		page virtual address
 *
 * @retval 0		no result
 * @retval non-zero	mincore(2) flag values
 */
METHOD int mincore {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_addr;
} DEFAULT mmu_null_mincore;


/**
 * @brief Perform any operations required to allow a physical map to be used
 * before its address space is accessed.
 *
 * @param _td		thread associated with physical map
 */
METHOD void activate {
	mmu_t		_mmu;
	struct thread	*_td;
};

/**
 * @brief Perform any operations required to deactivate a physical map,
 * for instance as it is context-switched out.
 *
 * @param _td		thread associated with physical map
 */
METHOD void deactivate {
	mmu_t		_mmu;
	struct thread	*_td;
} DEFAULT mmu_null_deactivate;

/**
 * @brief Return a hint for the best virtual address to map a tentative
 * virtual address range in a given VM object. The default is to just
 * return the given tentative start address.
 *
 * @param _obj		VM backing object
 * @param _offset	starting offset within the VM object
 * @param _addr		initial guess at virtual address
 * @param _size		size of virtual address range
 */
METHOD void align_superpage {
	mmu_t		_mmu;
	vm_object_t	_obj;
	vm_ooffset_t	_offset;
	vm_offset_t	*_addr;
	vm_size_t	_size;
} DEFAULT mmu_null_align_superpage;

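/*
 * A non-null implementation typically nudges *addr forward so that the
 * virtual address and the object offset are congruent modulo the
 * superpage size.  Sketch (SP_MASK is a hypothetical superpage-offset
 * mask):
 *
 *	vm_offset_t soff = offset & SP_MASK;
 *
 *	if ((*addr & SP_MASK) != soff) {
 *		if ((*addr & SP_MASK) < soff)
 *			*addr = (*addr & ~SP_MASK) + soff;
 *		else
 *			*addr = ((*addr + SP_MASK) & ~SP_MASK) + soff;
 *	}
 */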



/**
 * INTERNAL INTERFACES
 */

/**
 * @brief Bootstrap the VM system. At the completion of this routine, the
 * kernel will be running in its own address space with full control over
 * paging.
 *
 * @param _start	start of reserved memory (obsolete ???)
 * @param _end		end of reserved memory (obsolete ???)
 *			XXX I think the intent of these was to allow
 *			the memory used by kernel text+data+bss and
 *			loader variables/load-time kld's to be carved out
 *			of available physical mem.
 *
 */
METHOD void bootstrap {
	mmu_t		_mmu;
	vm_offset_t	_start;
	vm_offset_t	_end;
};

/**
 * @brief Set up the MMU on the current CPU. Only called by the PMAP layer
 * for alternate CPUs on SMP systems.
 *
 * @param _ap		Set to 1 if the CPU being set up is an AP
 *
 */
METHOD void cpu_bootstrap {
	mmu_t		_mmu;
	int		_ap;
};


/**
 * @brief Create a kernel mapping for a given physical address range.
 * Called by bus code on behalf of device drivers. The mapping does not
 * have to be a virtual address: it can be a direct-mapped physical address
 * if that is supported by the MMU.
 *
 * @param _pa		start physical address
 * @param _size		size in bytes of mapping
 *
 * @retval addr		address of mapping.
 */
METHOD void * mapdev {
	mmu_t		_mmu;
	vm_offset_t	_pa;
	vm_size_t	_size;
};


/**
 * @brief Remove the mapping created by mapdev. Called when a driver
 * is unloaded.
 *
 * @param _va		Mapping address returned from mapdev
 * @param _size		size in bytes of mapping
 */
METHOD void unmapdev {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_size_t	_size;
};

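/*
 * Typical driver-side pattern (sketch; the register-window physical
 * address and the csr read are hypothetical):
 *
 *	void *regs;
 *	uint32_t csr;
 *
 *	regs = pmap_mapdev(0xf8000000, PAGE_SIZE);
 *	csr = *(volatile uint32_t *)regs;
 *	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
 */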

/**
 * @brief Reverse-map a kernel virtual address
 *
 * @param _va		kernel virtual address to reverse-map
 *
 * @retval pa		physical address corresponding to mapping
 */
METHOD vm_offset_t kextract {
	mmu_t		_mmu;
	vm_offset_t	_va;
};


/**
 * @brief Map a wired page into kernel virtual address space
 *
 * @param _va		mapping virtual address
 * @param _pa		mapping physical address
 */
METHOD void kenter {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_offset_t	_pa;
};

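/*
 * Sketch: early-boot and machine-dependent code use kenter/kextract to
 * wire a page into the kernel address space and to reverse-map it, e.g.
 *
 *	pmap_kenter(va, pa);
 *	KASSERT(pmap_kextract(va) == pa, ("mmu: kextract mismatch"));
 */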

/**
 * @brief Determine if the given physical address range has been direct-mapped.
 *
 * @param _pa		physical address start
 * @param _size		physical address range size
 *
 * @retval bool		TRUE if the range is direct-mapped.
 */
METHOD boolean_t dev_direct_mapped {
	mmu_t		_mmu;
	vm_offset_t	_pa;
	vm_size_t	_size;
};


/**
 * @brief Evaluate if a physical page has an executable mapping
 *
 * @param _pg		physical page
 *
 * @retval bool		TRUE if the page has an executable mapping.
 */
METHOD boolean_t page_executable {
	mmu_t		_mmu;
	vm_page_t	_pg;
};
