1/*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
49 *  School of Computer Science
50 *  Carnegie Mellon University
51 *  Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58/*
59 *	File:	vm/pmap.h
60 *	Author:	Avadis Tevanian, Jr.
61 *	Date:	1985
62 *
63 *	Machine address mapping definitions -- machine-independent
64 *	section.  [For machine-dependent section, see "machine/pmap.h".]
65 */
66
67#ifndef	_VM_PMAP_H_
68#define _VM_PMAP_H_
69
70#include <mach/kern_return.h>
71#include <mach/vm_param.h>
72#include <mach/vm_types.h>
73#include <mach/vm_attributes.h>
74#include <mach/boolean.h>
75#include <mach/vm_prot.h>
76
77#ifdef	KERNEL_PRIVATE
78
79/*
80 *	The following is a description of the interface to the
81 *	machine-dependent "physical map" data structure.  The module
82 *	must provide a "pmap_t" data type that represents the
83 *	set of valid virtual-to-physical addresses for one user
84 *	address space.  [The kernel address space is represented
85 *	by a distinguished "pmap_t".]  The routines described manage
86 *	this type, install and update virtual-to-physical mappings,
87 *	and perform operations on physical addresses common to
88 *	many address spaces.
89 */
90
/* Copy between a physical page and a virtual address */
/* LP64todo - switch to vm_map_offset_t when it grows */
extern kern_return_t 	copypv(
				addr64_t source,
				addr64_t sink,
				unsigned int size,
				int which);
/*
 * Flag bits for copypv()'s "which" argument.  Each mask has a companion
 * "...b" constant that names the same flag as a big-endian bit number
 * (mask == 1 << (31 - bit)).
 * NOTE(review): going by the names, P* marks an address as physical,
 * F* requests a cache flush, and NoModSnk/NoRefSrc suppress the modify/
 * reference bit updates -- confirm against the copypv() implementation.
 */
#define cppvPsnk        1	/* sink ("snk") address is physical */
#define cppvPsnkb      31
#define cppvPsrc        2	/* source address is physical */
#define cppvPsrcb      30
#define cppvFsnk        4	/* flush sink */
#define cppvFsnkb      29
#define cppvFsrc        8	/* flush source */
#define cppvFsrcb      28
#define cppvNoModSnk   16	/* don't set sink's modify bit */
#define cppvNoModSnkb  27
#define cppvNoRefSrc   32	/* don't set source's reference bit */
#define cppvNoRefSrcb  26
#define cppvKmap       64	/* Use the kernel's vm_map */
#define cppvKmapb      25
112
113#ifdef	MACH_KERNEL_PRIVATE
114
115#include <machine/pmap.h>
116
117/*
118 *	Routines used for initialization.
119 *	There is traditionally also a pmap_bootstrap,
120 *	used very early by machine-dependent code,
121 *	but it is not part of the interface.
122 *
123 *	LP64todo -
124 *	These interfaces are tied to the size of the
125 *	kernel pmap - and therefore use the "local"
126 *	vm_offset_t, etc... types.
127 */
128
129extern void 		*pmap_steal_memory(vm_size_t size);
130						/* During VM initialization,
131						 * steal a chunk of memory.
132						 */
133extern unsigned int	pmap_free_pages(void);	/* During VM initialization,
134						 * report remaining unused
135						 * physical pages.
136						 */
137extern void		pmap_startup(
138				vm_offset_t *startp,
139				vm_offset_t *endp);
140						/* During VM initialization,
141						 * use remaining physical pages
142						 * to allocate page frames.
143						 */
144extern void		pmap_init(void) __attribute__((section("__TEXT, initcode")));
145						/* Initialization,
146						 * after kernel runs
147						 * in virtual memory.
148						 */
149
150extern void 		mapping_adjust(void);	/* Adjust free mapping count */
151
152extern void 		mapping_free_prime(void); /* Primes the mapping block release list */
153
#ifndef	MACHINE_PAGES
/*
 *	If machine/pmap.h defines MACHINE_PAGES, it must implement
 *	the above functions.  The pmap module has complete control.
 *	Otherwise, it must implement
 *		pmap_free_pages
 *		pmap_virtual_space
 *		pmap_next_page
 *		pmap_init
 *	and vm/vm_resident.c implements pmap_steal_memory and pmap_startup
 *	using pmap_free_pages, pmap_next_page, pmap_virtual_space,
 *	and pmap_enter.  pmap_free_pages may over-estimate the number
 *	of unused physical pages, and pmap_next_page may return FALSE
 *	to indicate that there are no more unused pages to return.
 *	However, for best performance pmap_free_pages should be accurate.
 */

extern boolean_t	pmap_next_page(ppnum_t *pnum);
						/* During VM initialization,
						 * return the next unused
						 * physical page in *pnum;
						 * FALSE once no unused
						 * pages remain (see note
						 * above).
						 */
extern void		pmap_virtual_space(
					vm_offset_t	*virtual_start,
					vm_offset_t	*virtual_end);
						/* During VM initialization,
						 * report virtual space
						 * available for the kernel.
						 */
#endif	/* MACHINE_PAGES */
184
185/*
186 *	Routines to manage the physical map data structure.
187 */
188extern pmap_t		pmap_create(	/* Create a pmap_t. */
189				vm_map_size_t	size,
190#ifdef __i386__
191				boolean_t	is_64bit);
192#else
193				__unused boolean_t	is_64bit);
194#endif
195extern pmap_t		(pmap_kernel)(void);	/* Return the kernel's pmap */
196extern void		pmap_reference(pmap_t pmap);	/* Gain a reference. */
197extern void		pmap_destroy(pmap_t pmap); /* Release a reference. */
198extern void		pmap_switch(pmap_t);
199
200
201extern void		pmap_enter(	/* Enter a mapping */
202				pmap_t		pmap,
203				vm_map_offset_t	v,
204				ppnum_t		pn,
205				vm_prot_t	prot,
206				unsigned int	flags,
207				boolean_t	wired);
208
209extern void		pmap_remove_some_phys(
210				pmap_t		pmap,
211				ppnum_t		pn);
212
213
214/*
215 *	Routines that operate on physical addresses.
216 */
217
218extern void		pmap_page_protect(	/* Restrict access to page. */
219				ppnum_t	phys,
220				vm_prot_t	prot);
221
222extern void		(pmap_zero_page)(
223				ppnum_t		pn);
224
225extern void		(pmap_zero_part_page)(
226				ppnum_t		pn,
227				vm_offset_t     offset,
228				vm_size_t       len);
229
230extern void		(pmap_copy_page)(
231				ppnum_t		src,
232				ppnum_t		dest);
233
234extern void		(pmap_copy_part_page)(
235				ppnum_t		src,
236				vm_offset_t	src_offset,
237				ppnum_t		dst,
238				vm_offset_t	dst_offset,
239				vm_size_t	len);
240
241extern void		(pmap_copy_part_lpage)(
242				vm_offset_t	src,
243				ppnum_t		dst,
244				vm_offset_t	dst_offset,
245				vm_size_t	len);
246
247extern void		(pmap_copy_part_rpage)(
248				ppnum_t		src,
249				vm_offset_t	src_offset,
250				vm_offset_t	dst,
251				vm_size_t	len);
252
253extern unsigned int (pmap_disconnect)(	/* disconnect mappings and return reference and change */
254				ppnum_t		phys);
255
256extern kern_return_t	(pmap_attribute_cache_sync)(  /* Flush appropriate
257						       * cache based on
258						       * page number sent */
259				ppnum_t		pn,
260				vm_size_t	size,
261				vm_machine_attribute_t attribute,
262				vm_machine_attribute_val_t* value);
263
264extern unsigned int	(pmap_cache_attributes)(
265				ppnum_t		pn);
266
267extern void pmap_sync_page_data_phys(ppnum_t pa);
268extern void pmap_sync_page_attributes_phys(ppnum_t pa);
269
270/*
271 * debug/assertions. pmap_verify_free returns true iff
272 * the given physical page is mapped into no pmap.
273 */
274extern boolean_t	pmap_verify_free(ppnum_t pn);
275
276/*
277 *	Statistics routines
278 */
279extern int		(pmap_resident_count)(pmap_t pmap);
280extern int		(pmap_resident_max)(pmap_t pmap);
281
282/*
283 *	Sundry required (internal) routines
284 */
285#ifdef CURRENTLY_UNUSED_AND_UNTESTED
286extern void		pmap_collect(pmap_t pmap);/* Perform garbage
287						 * collection, if any */
288#endif
289/*
290 *	Optional routines
291 */
292extern void		(pmap_copy)(		/* Copy range of mappings,
293						 * if desired. */
294				pmap_t		dest,
295				pmap_t		source,
296				vm_map_offset_t	dest_va,
297				vm_map_size_t	size,
298				vm_map_offset_t	source_va);
299
300extern kern_return_t	(pmap_attribute)(	/* Get/Set special memory
301						 * attributes */
302				pmap_t		pmap,
303				vm_map_offset_t	va,
304				vm_map_size_t	size,
305				vm_machine_attribute_t  attribute,
306				vm_machine_attribute_val_t* value);
307
308/*
309 * Routines defined as macros.
310 */
311#ifndef PMAP_ACTIVATE_USER
312#ifndef	PMAP_ACTIVATE
313#define PMAP_ACTIVATE_USER(thr, cpu)
314#else	/* PMAP_ACTIVATE */
315#define PMAP_ACTIVATE_USER(thr, cpu) {			\
316	pmap_t  pmap;						\
317								\
318	pmap = (thr)->map->pmap;				\
319	if (pmap != pmap_kernel())				\
320		PMAP_ACTIVATE(pmap, (thr), (cpu));		\
321}
322#endif  /* PMAP_ACTIVATE */
323#endif  /* PMAP_ACTIVATE_USER */
324
#ifndef PMAP_DEACTIVATE_USER
#ifndef PMAP_DEACTIVATE
/* No machine-dependent PMAP_DEACTIVATE hook: deactivation is a no-op. */
#define PMAP_DEACTIVATE_USER(thr, cpu)
#else	/* PMAP_DEACTIVATE */
/*
 * Deactivate the pmap of user thread "thr" on processor "cpu".
 * Mirrors PMAP_ACTIVATE_USER above: threads whose map uses the kernel
 * pmap are skipped.  (Comparison and continuation alignment normalized
 * to match the ACTIVATE counterpart.)
 */
#define PMAP_DEACTIVATE_USER(thr, cpu) {			\
	pmap_t  pmap;						\
								\
	pmap = (thr)->map->pmap;				\
	if (pmap != pmap_kernel())				\
		PMAP_DEACTIVATE(pmap, (thr), (cpu));		\
}
#endif	/* PMAP_DEACTIVATE */
#endif  /* PMAP_DEACTIVATE_USER */
338
/*
 * Kernel-pmap activation hooks.  These are no-ops unless machine/pmap.h
 * supplies PMAP_ACTIVATE/PMAP_DEACTIVATE, in which case the hook is
 * applied to the kernel pmap with no associated thread (THREAD_NULL).
 */
#ifndef	PMAP_ACTIVATE_KERNEL
#ifndef PMAP_ACTIVATE
#define	PMAP_ACTIVATE_KERNEL(cpu)
#else	/* PMAP_ACTIVATE */
#define	PMAP_ACTIVATE_KERNEL(cpu)			\
		PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif	/* PMAP_ACTIVATE */
#endif	/* PMAP_ACTIVATE_KERNEL */

#ifndef	PMAP_DEACTIVATE_KERNEL
#ifndef PMAP_DEACTIVATE
#define	PMAP_DEACTIVATE_KERNEL(cpu)
#else	/* PMAP_DEACTIVATE */
#define	PMAP_DEACTIVATE_KERNEL(cpu)			\
		PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif	/* PMAP_DEACTIVATE */
#endif	/* PMAP_DEACTIVATE_KERNEL */
356
#ifndef	PMAP_ENTER
/*
 *	Macro to be used in place of pmap_enter()
 *
 *	Sanity-checks the vm_page_t before mapping: a page entered into a
 *	non-kernel pmap must pass ASSERT_PAGE_DECRYPTED, and a page with
 *	its error flag set is never mapped (panics instead).  Then calls
 *	the real pmap_enter() with the page's physical page number.
 */
#define PMAP_ENTER(pmap, virtual_address, page, protection, flags, wired) \
	MACRO_BEGIN							\
	pmap_t		__pmap = (pmap);				\
	vm_page_t	__page = (page);				\
									\
	if (__pmap != kernel_pmap) {					\
		ASSERT_PAGE_DECRYPTED(__page);				\
	}								\
	if (__page->error) {						\
		panic("VM page %p should not have an error\n",		\
			__page);					\
	}								\
	pmap_enter(__pmap,						\
		   (virtual_address),					\
		   __page->phys_page,					\
		   (protection),					\
		   (flags),						\
		   (wired));						\
	MACRO_END
#endif	/* !PMAP_ENTER */
381
382/*
383 *	Routines to manage reference/modify bits based on
384 *	physical addresses, simulating them if not provided
385 *	by the hardware.
386 */
387				/* Clear reference bit */
388extern void		pmap_clear_reference(ppnum_t	 pn);
389				/* Return reference bit */
390extern boolean_t	(pmap_is_referenced)(ppnum_t	 pn);
391				/* Set modify bit */
392extern void             pmap_set_modify(ppnum_t	 pn);
393				/* Clear modify bit */
394extern void		pmap_clear_modify(ppnum_t pn);
395				/* Return modify bit */
396extern boolean_t	pmap_is_modified(ppnum_t pn);
397				/* Return modified and referenced bits */
398extern unsigned int pmap_get_refmod(ppnum_t pn);
399				/* Clear modified and referenced bits */
400extern void			pmap_clear_refmod(ppnum_t pn, unsigned int mask);
401#define VM_MEM_MODIFIED		0x01	/* Modified bit */
402#define VM_MEM_REFERENCED	0x02	/* Referenced bit */
403
404/*
405 *	Routines that operate on ranges of virtual addresses.
406 */
407extern void		pmap_protect(	/* Change protections. */
408				pmap_t		map,
409				vm_map_offset_t	s,
410				vm_map_offset_t	e,
411				vm_prot_t	prot);
412
413extern void		(pmap_pageable)(
414				pmap_t		pmap,
415				vm_map_offset_t	start,
416				vm_map_offset_t	end,
417				boolean_t	pageable);
418
419#ifndef NO_NESTED_PMAP
420extern uint64_t pmap_nesting_size_min;
421extern uint64_t pmap_nesting_size_max;
422extern kern_return_t pmap_nest(pmap_t grand,
423			       pmap_t subord,
424			       addr64_t vstart,
425			       addr64_t nstart,
426			       uint64_t size);
427extern kern_return_t pmap_unnest(pmap_t grand,
428				 addr64_t vaddr,
429				 uint64_t size);
430#endif /* NO_NESTED_PMAP */
431
432#endif	/* MACH_KERNEL_PRIVATE */
433
434/*
435 * JMM - This portion is exported to other kernel components right now,
436 * but will be pulled back in the future when the needed functionality
437 * is provided in a cleaner manner.
438 */
439
440extern pmap_t	kernel_pmap;			/* The kernel's map */
441#define		pmap_kernel()	(kernel_pmap)
442
443/* machine independent WIMG bits */
444
445#define VM_MEM_GUARDED 		0x1		/* (G) Guarded Storage */
446#define VM_MEM_COHERENT		0x2		/* (M) Memory Coherency */
447#define VM_MEM_NOT_CACHEABLE	0x4		/* (I) Cache Inhibit */
448#define VM_MEM_WRITE_THROUGH	0x8		/* (W) Write-Through */
449
450#define VM_WIMG_MASK		0xFF
451#define VM_WIMG_USE_DEFAULT	0x80000000
452
453extern vm_offset_t	pmap_extract(pmap_t pmap,
454				vm_map_offset_t va);
455
456extern void		pmap_change_wiring(	/* Specify pageability */
457				pmap_t		pmap,
458				vm_map_offset_t	va,
459				boolean_t	wired);
460
461/* LP64todo - switch to vm_map_offset_t when it grows */
462extern void		pmap_remove(	/* Remove mappings. */
463				pmap_t		map,
464				vm_map_offset_t	s,
465				vm_map_offset_t	e);
466
467extern void		fillPage(ppnum_t pa, unsigned int fill);
468
469extern void pmap_map_sharedpage(task_t task, pmap_t pmap);
470extern void pmap_unmap_sharedpage(pmap_t pmap);
471
472#endif  /* KERNEL_PRIVATE */
473
474#endif	/* _VM_PMAP_H_ */
475