#-
# Copyright (c) 2005 Peter Grehan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD: head/sys/powerpc/powerpc/mmu_if.m 160889 2006-08-01 19:06:06Z alc $
#

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/mmuvar.h>

/**
 * @defgroup MMU mmu - KObj methods for PowerPC MMU implementations
 * @brief A set of methods required by all MMU implementations. These
 * are essentially direct call-throughs from the machine-dependent
 * pmap code.
 * Thanks to Bruce M Simpson's pmap man pages for routine descriptions.
 *@{
 */

INTERFACE mmu;
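
#
# The machine-independent pmap entry points dispatch through this
# interface.  The sketch below is illustrative (modelled on the wrappers
# in pmap_dispatch.c), not part of this file: `mmu_obj' stands for the
# installed MMU kobj instance, and MMU_EXTRACT() is the dispatch stub
# that kobj generates from the `extract' method declared further down.
#
#	vm_paddr_t
#	pmap_extract(pmap_t pmap, vm_offset_t va)
#	{
#		return (MMU_EXTRACT(mmu_obj, pmap, va));
#	}
#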

#
# Default implementations of some methods
#
CODE {
	static void mmu_null_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
	    vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
	{
		return;
	}

	static void mmu_null_growkernel(mmu_t mmu, vm_offset_t addr)
	{
		return;
	}

	static void mmu_null_init(mmu_t mmu)
	{
		return;
	}

	static boolean_t mmu_null_is_prefaultable(mmu_t mmu, pmap_t pmap,
	    vm_offset_t va)
	{
		return (FALSE);
	}

	static void mmu_null_object_init_pt(mmu_t mmu, pmap_t pmap,
	    vm_offset_t addr, vm_object_t object, vm_pindex_t index,
	    vm_size_t size)
	{
		return;
	}

	static void mmu_null_page_init(mmu_t mmu, vm_page_t m)
	{
		return;
	}

	static void mmu_null_remove_pages(mmu_t mmu, pmap_t pmap)
	{
		return;
	}

	static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
	{
		return (0);
	}

	static void mmu_null_deactivate(mmu_t mmu, struct thread *td)
	{
		return;
	}

	static vm_offset_t mmu_null_addr_hint(mmu_t mmu, vm_object_t object,
	    vm_offset_t va, vm_size_t size)
	{
		return (va);
	}
};
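
#
# An MMU implementation supplies these methods in a kobj method table
# and registers it with MMU_DEF() from <machine/mmuvar.h>.  A hedged
# sketch (names here are illustrative; see mmu_oea.c for the real OEA
# version):
#
#	static mmu_method_t moea_methods[] = {
#		MMUMETHOD(mmu_enter,	moea_enter),
#		MMUMETHOD(mmu_extract,	moea_extract),
#		/* ... one entry per implemented method ... */
#		{ 0, 0 }
#	};
#
#	MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods, 0);
#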


/**
 * @brief Change the wiring attribute for the page in the given physical
 * map and virtual address.
 *
 * @param _pmap		physical map of page
 * @param _va		page virtual address
 * @param _wired	TRUE to increment wired count, FALSE to decrement
 */
METHOD void change_wiring {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	boolean_t	_wired;
};


/**
 * @brief Clear the 'modified' bit on the given physical page
 *
 * @param _pg		physical page
 */
METHOD void clear_modify {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Clear the 'referenced' bit on the given physical page
 *
 * @param _pg		physical page
 */
METHOD void clear_reference {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Clear the write and modified bits in each of the given
 * physical page's mappings
 *
 * @param _pg		physical page
 */
METHOD void remove_write {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Copy the address range given by the source physical map, virtual
 * address and length to the destination physical map and virtual address.
 * This routine is optional; the default is a null implementation.
 *
 * @param _dst_pmap	destination physical map
 * @param _src_pmap	source physical map
 * @param _dst_addr	destination virtual address
 * @param _len		size of range
 * @param _src_addr	source virtual address
 */
METHOD void copy {
	mmu_t		_mmu;
	pmap_t		_dst_pmap;
	pmap_t		_src_pmap;
	vm_offset_t	_dst_addr;
	vm_size_t	_len;
	vm_offset_t	_src_addr;
} DEFAULT mmu_null_copy;


/**
 * @brief Copy the source physical page to the destination physical page
 *
 * @param _src		source physical page
 * @param _dst		destination physical page
 */
METHOD void copy_page {
	mmu_t		_mmu;
	vm_page_t	_src;
	vm_page_t	_dst;
};


/**
 * @brief Create a mapping between a virtual/physical address pair in the
 * passed physical map with the specified protection and wiring
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 * @param _p		mapping physical page
 * @param _prot		mapping page protection
 * @param _wired	TRUE if page will be wired
 */
METHOD void enter {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_page_t	_p;
	vm_prot_t	_prot;
	boolean_t	_wired;
};


/**
 * @brief Maps a sequence of resident pages belonging to the same object.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _m_start	physical page mapped at start
 * @param _prot		mapping page protection
 */
METHOD void enter_object {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	vm_page_t	_m_start;
	vm_prot_t	_prot;
};
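
#
# A typical enter_object implementation walks the object's resident page
# list starting at _m_start, entering each page at the matching offset
# within [_start, _end).  A hedged sketch (the helper name is
# illustrative):
#
#	psize = atop(end - start);
#	for (m = m_start; m != NULL &&
#	    (diff = m->pindex - m_start->pindex) < psize;
#	    m = TAILQ_NEXT(m, listq))
#		xxx_enter_locked(pmap, start + ptoa(diff), m, prot);
#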


/**
 * @brief A faster entry point for page mapping where it is possible
 * to short-circuit some of the tests in pmap_enter.
 *
 * @param _pmap		physical map (and also currently active pmap)
 * @param _va		mapping virtual address
 * @param _pg		mapping physical page
 * @param _prot		new page protection - used to determine if the
 *			page is executable
 */
METHOD void enter_quick {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_page_t	_pg;
	vm_prot_t	_prot;
};


/**
 * @brief Reverse map the given virtual address, returning the physical
 * page associated with the address if a mapping exists.
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 *
 * @retval 0		No mapping found
 * @retval addr		The mapping physical address
 */
METHOD vm_paddr_t extract {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
};


/**
 * @brief Reverse map the given virtual address, returning the
 * physical page if found. The page must be held (by calling
 * vm_page_hold) if the page protection matches the given protection
 *
 * @param _pmap		physical map
 * @param _va		mapping virtual address
 * @param _prot		protection used to determine if physical page
 *			should be locked
 *
 * @retval NULL		No mapping found
 * @retval page		Pointer to physical page. Held if protections match
 */
METHOD vm_page_t extract_and_hold {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
	vm_prot_t	_prot;
};
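
#
# Illustrative use of extract_and_hold via its pmap_extract_and_hold()
# wrapper (a sketch, not code from this file): the returned page stays
# held across the access and must be released with vm_page_unhold().
#
#	m = pmap_extract_and_hold(pmap, va, VM_PROT_READ);
#	if (m != NULL) {
#		/* ... safely access the page ... */
#		vm_page_lock_queues();
#		vm_page_unhold(m);
#		vm_page_unlock_queues();
#	}
#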


/**
 * @brief Increase kernel virtual address space to the given virtual address.
 * Not strictly required on PowerPC, so this routine is optional unless the
 * MMU implementation can make use of it.
 *
 * @param _va		new upper limit for kernel virtual address space
 */
METHOD void growkernel {
	mmu_t		_mmu;
	vm_offset_t	_va;
} DEFAULT mmu_null_growkernel;


/**
 * @brief Called from vm_mem_init. Zone allocation is available at
 * this stage, so it is a convenient time to create zones. This routine
 * is for MMU-implementation convenience and is optional.
 */
METHOD void init {
	mmu_t		_mmu;
} DEFAULT mmu_null_init;


/**
 * @brief Return whether the page has been marked as modified by the MMU
 * hardware
 *
 * @param _pg		physical page to test
 *
 * @retval boolean	TRUE if page has been modified
 */
METHOD boolean_t is_modified {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Return whether the specified virtual address is a candidate to be
 * prefaulted in. This routine is optional.
 *
 * @param _pmap		physical map
 * @param _va		virtual address to test
 *
 * @retval boolean	TRUE if the address is a candidate.
 */
METHOD boolean_t is_prefaultable {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_va;
} DEFAULT mmu_null_is_prefaultable;


/**
 * @brief Return a count of referenced bits for a page, clearing those bits.
 * Not all referenced bits need to be cleared, but it is necessary that 0
 * only be returned when there are none set.
 *
 * @param _pg		physical page
 *
 * @retval int		count of referenced bits
 */
METHOD int ts_referenced {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Map the requested physical address range into kernel virtual
 * address space. The value in _virt is taken as a hint. The virtual
 * address of the range is returned, or NULL if the mapping could not
 * be created. The range can be direct-mapped if that is supported.
 *
 * @param *_virt	Hint for start virtual address, and also return
 *			value
 * @param _start	physical address range start
 * @param _end		physical address range end
 * @param _prot		protection of range (currently ignored)
 *
 * @retval NULL		could not map the area
 * @retval addr, *_virt	mapping start virtual address
 */
METHOD vm_offset_t map {
	mmu_t		_mmu;
	vm_offset_t	*_virt;
	vm_paddr_t	_start;
	vm_paddr_t	_end;
	int		_prot;
};


/**
 * @brief Used to create a contiguous set of read-only mappings for a
 * given object to try to eliminate a cascade of on-demand faults as
 * the object is accessed sequentially. This routine is optional.
 *
 * @param _pmap		physical map
 * @param _addr		mapping start virtual address
 * @param _object	device-backed V.M. object to be mapped
 * @param _pindex	page-index within object of mapping start
 * @param _size		size in bytes of mapping
 */
METHOD void object_init_pt {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_addr;
	vm_object_t	_object;
	vm_pindex_t	_pindex;
	vm_size_t	_size;
} DEFAULT mmu_null_object_init_pt;


/**
 * @brief Used to determine if the specified page has a mapping for the
 * given physical map, by scanning the list of reverse-mappings from the
 * page. The list is scanned to a maximum of 16 entries.
 *
 * @param _pmap		physical map
 * @param _pg		physical page
 *
 * @retval bool		TRUE if the physical map was found in the first 16
 *			reverse-map list entries off the physical page.
 */
METHOD boolean_t page_exists_quick {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_page_t	_pg;
};


/**
 * @brief Initialise the machine-dependent section of the physical page
 * data structure. This routine is optional.
 *
 * @param _pg		physical page
 */
METHOD void page_init {
	mmu_t		_mmu;
	vm_page_t	_pg;
} DEFAULT mmu_null_page_init;


/**
 * @brief Initialise a physical map data structure
 *
 * @param _pmap		physical map
 */
METHOD void pinit {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Initialise the physical map for process 0, the initial process
 * in the system.
 * XXX default to pinit ?
 *
 * @param _pmap		physical map
 */
METHOD void pinit0 {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Set the protection for physical pages in the given virtual address
 * range to the given value.
 *
 * @param _pmap		physical map
 * @param _start	virtual range start
 * @param _end		virtual range end
 * @param _prot		new page protection
 */
METHOD void protect {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
	vm_prot_t	_prot;
};


/**
 * @brief Create a mapping in kernel virtual address space for the given array
 * of wired physical pages.
 *
 * @param _start	mapping virtual address start
 * @param *_pg		array of physical page pointers
 * @param _count	array elements
 */
METHOD void qenter {
	mmu_t		_mmu;
	vm_offset_t	_start;
	vm_page_t	*_pg;
	int		_count;
};


/**
 * @brief Remove the temporary mappings created by qenter.
 *
 * @param _start	mapping virtual address start
 * @param _count	number of pages in mapping
 */
METHOD void qremove {
	mmu_t		_mmu;
	vm_offset_t	_start;
	int		_count;
};
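
#
# Illustrative use of these two methods through the pmap_qenter() and
# pmap_qremove() wrappers (a sketch; `kva' is assumed to be a previously
# reserved KVA range large enough for `npages' pages):
#
#	pmap_qenter(kva, mpages, npages);	/* wire pages into KVA */
#	/* ... access the buffer at kva ... */
#	pmap_qremove(kva, npages);		/* tear the mappings down */
#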


/**
 * @brief Release per-pmap resources, e.g. mutexes, allocated memory etc.
 * There should be no existing mappings for the physical map at this point.
 *
 * @param _pmap		physical map
 */
METHOD void release {
	mmu_t		_mmu;
	pmap_t		_pmap;
};


/**
 * @brief Remove all mappings in the given physical map for the start/end
 * virtual address range. The range will be page-aligned.
 *
 * @param _pmap		physical map
 * @param _start	mapping virtual address start
 * @param _end		mapping virtual address end
 */
METHOD void remove {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_start;
	vm_offset_t	_end;
};


/**
 * @brief Traverse the reverse-map list off the given physical page and
 * remove all mappings. Clear the PG_WRITEABLE attribute from the page.
 *
 * @param _pg		physical page
 */
METHOD void remove_all {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Remove all mappings in the given physical map. Similar to the
 * remove method, but it is used when tearing down all mappings in an
 * address space. This method is optional, since pmap_remove will be
 * called for each valid vm_map in the address space later.
 *
 * @param _pmap		physical map
 */
METHOD void remove_pages {
	mmu_t		_mmu;
	pmap_t		_pmap;
} DEFAULT mmu_null_remove_pages;


/**
 * @brief Zero a physical page. It is not assumed that the page is mapped,
 * so a temporary (or direct) mapping may need to be used.
 *
 * @param _pg		physical page
 */
METHOD void zero_page {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Zero a portion of a physical page, starting at a given offset and
 * for a given size (multiples of 512 bytes for 4k pages).
 *
 * @param _pg		physical page
 * @param _off		byte offset from start of page
 * @param _size		size of area to zero
 */
METHOD void zero_page_area {
	mmu_t		_mmu;
	vm_page_t	_pg;
	int		_off;
	int		_size;
};
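
#
# A common caller pattern (illustrative): zero the unused tail of a
# partially valid page, as the vnode pager does after a short read.
# `valid' is assumed to be the number of valid bytes in page `m'.
#
#	if (valid < PAGE_SIZE)
#		pmap_zero_page_area(m, valid, PAGE_SIZE - valid);
#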


/**
 * @brief Called from the idle loop to zero pages. XXX I think locking
 * constraints might be different here compared to zero_page.
 *
 * @param _pg		physical page
 */
METHOD void zero_page_idle {
	mmu_t		_mmu;
	vm_page_t	_pg;
};


/**
 * @brief Extract mincore(2) information from a mapping. This routine is
 * optional and is an optimisation: the mincore code will call is_modified
 * and ts_referenced if no result is returned.
 *
 * @param _pmap		physical map
 * @param _addr		page virtual address
 *
 * @retval 0		no result
 * @retval non-zero	mincore(2) flag values
 */
METHOD int mincore {
	mmu_t		_mmu;
	pmap_t		_pmap;
	vm_offset_t	_addr;
} DEFAULT mmu_null_mincore;


/**
 * @brief Perform any operations required to allow a physical map to be used
 * before its address space is accessed.
 *
 * @param _td		thread associated with physical map
 */
METHOD void activate {
	mmu_t		_mmu;
	struct thread	*_td;
};

/**
 * @brief Perform any operations required to deactivate a physical map,
 * for instance as it is context-switched out.
 *
 * @param _td		thread associated with physical map
 */
METHOD void deactivate {
	mmu_t		_mmu;
	struct thread	*_td;
} DEFAULT mmu_null_deactivate;

/**
 * @brief Return a hint for the best virtual address to map a tentative
 * virtual address range in a given VM object. The default is to just
 * return the given tentative start address.
 *
 * @param _obj		VM backing object
 * @param _addr		initial guess at virtual address
 * @param _size		size of virtual address range
 */
METHOD vm_offset_t addr_hint {
	mmu_t		_mmu;
	vm_object_t	_obj;
	vm_offset_t	_addr;
	vm_size_t	_size;
} DEFAULT mmu_null_addr_hint;




/**
 * INTERNAL INTERFACES
 */

/**
 * @brief Bootstrap the VM system. At the completion of this routine, the
 * kernel will be running in its own address space with full control over
 * paging.
 *
 * @param _start	start of reserved memory (obsolete ???)
 * @param _end		end of reserved memory (obsolete ???)
 *			XXX I think the intent of these was to allow
 *			the memory used by kernel text+data+bss and
 *			loader variables/load-time kld's to be carved out
 *			of available physical mem.
 *
 */
METHOD void bootstrap {
	mmu_t		_mmu;
	vm_offset_t	_start;
	vm_offset_t	_end;
};


/**
 * @brief Create a kernel mapping for a given physical address range.
 * Called by bus code on behalf of device drivers. The mapping does not
 * have to be a virtual address: it can be a direct-mapped physical address
 * if that is supported by the MMU.
 *
 * @param _pa		start physical address
 * @param _size		size in bytes of mapping
 *
 * @retval addr		address of mapping.
 */
METHOD void * mapdev {
	mmu_t		_mmu;
	vm_offset_t	_pa;
	vm_size_t	_size;
};


/**
 * @brief Remove the mapping created by mapdev. Called when a driver
 * is unloaded.
 *
 * @param _va		Mapping address returned from mapdev
 * @param _size		size in bytes of mapping
 */
METHOD void unmapdev {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_size_t	_size;
};
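
#
# Typical driver-side use through the pmap_mapdev()/pmap_unmapdev()
# wrappers (an illustrative sketch; `pa' and `size' describe a device
# register window):
#
#	va = pmap_mapdev(pa, size);
#	/* ... access device registers through va ... */
#	pmap_unmapdev((vm_offset_t)va, size);
#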


/**
 * @brief Reverse-map a kernel virtual address
 *
 * @param _va		kernel virtual address to reverse-map
 *
 * @retval pa		physical address corresponding to mapping
 */
METHOD vm_offset_t kextract {
	mmu_t		_mmu;
	vm_offset_t	_va;
};


/**
 * @brief Map a wired page into kernel virtual address space
 *
 * @param _va		mapping virtual address
 * @param _pa		mapping physical address
 */
METHOD void kenter {
	mmu_t		_mmu;
	vm_offset_t	_va;
	vm_offset_t	_pa;
};
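
#
# kenter and kextract are inverses for wired kernel mappings.  An
# illustrative sketch using the pmap_kenter()/pmap_kextract() wrappers:
#
#	pmap_kenter(va, pa);		/* establish the mapping */
#	KASSERT(pmap_kextract(va) == pa,
#	    ("kextract should recover the physical address"));
#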


/**
 * @brief Determine if the given physical address range has been direct-mapped.
 *
 * @param _pa		physical address start
 * @param _size		physical address range size
 *
 * @retval bool		TRUE if the range is direct-mapped.
 */
METHOD boolean_t dev_direct_mapped {
	mmu_t		_mmu;
	vm_offset_t	_pa;
	vm_size_t	_size;
};
