/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/pmap.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	Machine address mapping definitions -- machine-independent
 *	section.  [For machine-dependent section, see "machine/pmap.h".]
 */

#ifndef	_VM_PMAP_H_
#define _VM_PMAP_H_

#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/vm_types.h>
#include <mach/vm_attributes.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>

#ifdef	KERNEL_PRIVATE

/*
 *	The following is a description of the interface to the
 *	machine-dependent "physical map" data structure.  The module
 *	must provide a "pmap_t" data type that represents the
 *	set of valid virtual-to-physical addresses for one user
 *	address space.  [The kernel address space is represented
 *	by a distinguished "pmap_t".]  The routines described manage
 *	this type, install and update virtual-to-physical mappings,
 *	and perform operations on physical addresses common to
 *	many address spaces.
 */
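
/*
 *	Illustrative sketch (not part of the interface proper): one
 *	plausible lifecycle, as the machine-independent VM layer might
 *	drive it.  "ledger", "map_size", "vaddr" and "pn" are hypothetical
 *	names used only for this example.
 *
 *		pmap_t pmap = pmap_create(ledger, map_size, FALSE);
 *		pmap_enter(pmap, vaddr, pn,
 *			   VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
 *			   VM_WIMG_USE_DEFAULT, FALSE);
 *		...
 *		pmap_remove(pmap, vaddr, vaddr + PAGE_SIZE);
 *		pmap_destroy(pmap);
 */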

/* Copy between a physical page and a virtual address */
/* LP64todo - switch to vm_map_offset_t when it grows */
extern kern_return_t 	copypv(
				addr64_t source,
				addr64_t sink,
				unsigned int size,
				int which);
#define cppvPsnk        1
#define cppvPsnkb      31
#define cppvPsrc        2
#define cppvPsrcb      30
#define cppvFsnk        4
#define cppvFsnkb      29
#define cppvFsrc        8
#define cppvFsrcb      28
#define cppvNoModSnk   16
#define cppvNoModSnkb  27
#define cppvNoRefSrc   32
#define cppvNoRefSrcb  26
#define cppvKmap       64	/* Use the kernel's vm_map */
#define cppvKmapb      25
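
/*
 *	Illustrative sketch: the "which" argument is a bitwise OR of the
 *	cppv* flags above.  Here, one page is copied from a physical source
 *	to a kernel-virtual sink; "src_phys" and "dst_kva" are hypothetical.
 *
 *		kern_return_t kr;
 *		kr = copypv((addr64_t)src_phys, (addr64_t)dst_kva,
 *			    PAGE_SIZE, cppvPsrc | cppvKmap);
 */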

#ifdef	MACH_KERNEL_PRIVATE

#include <machine/pmap.h>

/*
 *	Routines used for initialization.
 *	There is traditionally also a pmap_bootstrap,
 *	used very early by machine-dependent code,
 *	but it is not part of the interface.
 *
 *	LP64todo -
 *	These interfaces are tied to the size of the
 *	kernel pmap - and therefore use the "local"
 *	vm_offset_t, etc... types.
 */

extern void 		*pmap_steal_memory(vm_size_t size);
						/* During VM initialization,
						 * steal a chunk of memory.
						 */
extern unsigned int	pmap_free_pages(void);	/* During VM initialization,
						 * report remaining unused
						 * physical pages.
						 */
extern void		pmap_startup(
				vm_offset_t *startp,
				vm_offset_t *endp);
						/* During VM initialization,
						 * use remaining physical pages
						 * to allocate page frames.
						 */
extern void		pmap_init(void) __attribute__((section("__TEXT, initcode")));
						/* Initialization,
						 * after kernel runs
						 * in virtual memory.
						 */

extern void 		mapping_adjust(void);	/* Adjust free mapping count */

extern void 		mapping_free_prime(void); /* Primes the mapping block release list */

#ifndef	MACHINE_PAGES
/*
 *	If machine/pmap.h defines MACHINE_PAGES, it must implement
 *	the above functions.  The pmap module has complete control.
 *	Otherwise, it must implement
 *		pmap_free_pages
 *		pmap_virtual_space
 *		pmap_next_page
 *		pmap_init
 *	and vm/vm_resident.c implements pmap_steal_memory and pmap_startup
 *	using pmap_free_pages, pmap_next_page, pmap_virtual_space,
 *	and pmap_enter.  pmap_free_pages may over-estimate the number
 *	of unused physical pages, and pmap_next_page may return FALSE
 *	to indicate that there are no more unused pages to return.
 *	However, for best performance pmap_free_pages should be accurate.
 */

extern boolean_t	pmap_next_page(ppnum_t *pnum);
extern boolean_t	pmap_next_page_hi(ppnum_t *pnum);
						/* During VM initialization,
						 * return the next unused
						 * physical page.
						 */
extern void		pmap_virtual_space(
					vm_offset_t	*virtual_start,
					vm_offset_t	*virtual_end);
						/* During VM initialization,
						 * report virtual space
						 * available for the kernel.
						 */
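
/*
 *	Illustrative sketch (assuming this non-MACHINE_PAGES path): during
 *	bootstrap, vm/vm_resident.c can drain the machine-dependent layer
 *	one physical page at a time until no unused pages remain.
 *
 *		ppnum_t pn;
 *		while (pmap_next_page(&pn)) {
 *			... hand "pn" to the resident-page module ...
 *		}
 */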
#endif	/* MACHINE_PAGES */

/*
 *	Routines to manage the physical map data structure.
 */
extern pmap_t		pmap_create(	/* Create a pmap_t. */
				ledger_t	ledger,
				vm_map_size_t	size,
#ifdef __i386__
				boolean_t	is_64bit);
#else
				__unused boolean_t	is_64bit);
#endif
extern pmap_t		(pmap_kernel)(void);	/* Return the kernel's pmap */
extern void		pmap_reference(pmap_t pmap);	/* Gain a reference. */
extern void		pmap_destroy(pmap_t pmap); /* Release a reference. */
extern void		pmap_switch(pmap_t);


extern void		pmap_enter(	/* Enter a mapping */
				pmap_t		pmap,
				vm_map_offset_t	v,
				ppnum_t		pn,
				vm_prot_t	prot,
				vm_prot_t	fault_type,
				unsigned int	flags,
				boolean_t	wired);

extern kern_return_t	pmap_enter_options(
					   pmap_t pmap,
					   vm_map_offset_t v,
					   ppnum_t pn,
					   vm_prot_t prot,
					   vm_prot_t fault_type,
					   unsigned int flags,
					   boolean_t wired,
					   unsigned int options);
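
/*
 *	Illustrative sketch: unlike pmap_enter(), pmap_enter_options()
 *	reports failure to its caller.  "pmap", "vaddr", "pn" and "prot"
 *	are hypothetical; the option bits are the PMAP_OPTIONS_* values
 *	defined later in this header.
 *
 *		kern_return_t kr;
 *		kr = pmap_enter_options(pmap, vaddr, pn, prot, VM_PROT_NONE,
 *					VM_WIMG_USE_DEFAULT, FALSE, 0);
 *		if (kr != KERN_SUCCESS) {
 *			... the mapping was not entered ...
 *		}
 */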

extern void		pmap_remove_some_phys(
				pmap_t		pmap,
				ppnum_t		pn);


/*
 *	Routines that operate on physical addresses.
 */

extern void		pmap_page_protect(	/* Restrict access to page. */
				ppnum_t	phys,
				vm_prot_t	prot);

extern void		(pmap_zero_page)(
				ppnum_t		pn);

extern void		(pmap_zero_part_page)(
				ppnum_t		pn,
				vm_offset_t     offset,
				vm_size_t       len);

extern void		(pmap_copy_page)(
				ppnum_t		src,
				ppnum_t		dest);

extern void		(pmap_copy_part_page)(
				ppnum_t		src,
				vm_offset_t	src_offset,
				ppnum_t		dst,
				vm_offset_t	dst_offset,
				vm_size_t	len);

extern void		(pmap_copy_part_lpage)(
				vm_offset_t	src,
				ppnum_t		dst,
				vm_offset_t	dst_offset,
				vm_size_t	len);

extern void		(pmap_copy_part_rpage)(
				ppnum_t		src,
				vm_offset_t	src_offset,
				vm_offset_t	dst,
				vm_size_t	len);

extern unsigned int (pmap_disconnect)(	/* disconnect mappings and return reference and change */
				ppnum_t		phys);

extern kern_return_t	(pmap_attribute_cache_sync)(  /* Flush appropriate
						       * cache based on
						       * page number sent */
				ppnum_t		pn,
				vm_size_t	size,
				vm_machine_attribute_t attribute,
				vm_machine_attribute_val_t* value);

extern unsigned int	(pmap_cache_attributes)(
				ppnum_t		pn);

/*
 * Set (override) cache attributes for the specified physical page
 */
extern	void		pmap_set_cache_attributes(
				ppnum_t,
				unsigned int);
extern void pmap_sync_page_data_phys(ppnum_t pa);
extern void pmap_sync_page_attributes_phys(ppnum_t pa);

/*
 * debug/assertions. pmap_verify_free returns true iff
 * the given physical page is mapped into no pmap.
 */
extern boolean_t	pmap_verify_free(ppnum_t pn);

/*
 *	Statistics routines
 */
extern int		(pmap_resident_count)(pmap_t pmap);
extern int		(pmap_resident_max)(pmap_t pmap);

/*
 *	Sundry required (internal) routines
 */
#ifdef CURRENTLY_UNUSED_AND_UNTESTED
extern void		pmap_collect(pmap_t pmap);/* Perform garbage
						 * collection, if any */
#endif
/*
 *	Optional routines
 */
extern void		(pmap_copy)(		/* Copy range of mappings,
						 * if desired. */
				pmap_t		dest,
				pmap_t		source,
				vm_map_offset_t	dest_va,
				vm_map_size_t	size,
				vm_map_offset_t	source_va);

extern kern_return_t	(pmap_attribute)(	/* Get/Set special memory
						 * attributes */
				pmap_t		pmap,
				vm_map_offset_t	va,
				vm_map_size_t	size,
				vm_machine_attribute_t  attribute,
				vm_machine_attribute_val_t* value);

/*
 * Routines defined as macros.
 */
#ifndef PMAP_ACTIVATE_USER
#ifndef	PMAP_ACTIVATE
#define PMAP_ACTIVATE_USER(thr, cpu)
#else	/* PMAP_ACTIVATE */
#define PMAP_ACTIVATE_USER(thr, cpu) {			\
	pmap_t  pmap;						\
								\
	pmap = (thr)->map->pmap;				\
	if (pmap != pmap_kernel())				\
		PMAP_ACTIVATE(pmap, (thr), (cpu));		\
}
#endif  /* PMAP_ACTIVATE */
#endif  /* PMAP_ACTIVATE_USER */

#ifndef PMAP_DEACTIVATE_USER
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_USER(thr, cpu)
#else	/* PMAP_DEACTIVATE */
#define PMAP_DEACTIVATE_USER(thr, cpu) {			\
	pmap_t  pmap;						\
								\
	pmap = (thr)->map->pmap;				\
	if ((pmap) != pmap_kernel())			\
		PMAP_DEACTIVATE(pmap, (thr), (cpu));	\
}
#endif	/* PMAP_DEACTIVATE */
#endif  /* PMAP_DEACTIVATE_USER */

#ifndef	PMAP_ACTIVATE_KERNEL
#ifndef PMAP_ACTIVATE
#define	PMAP_ACTIVATE_KERNEL(cpu)
#else	/* PMAP_ACTIVATE */
#define	PMAP_ACTIVATE_KERNEL(cpu)			\
		PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif	/* PMAP_ACTIVATE */
#endif	/* PMAP_ACTIVATE_KERNEL */

#ifndef	PMAP_DEACTIVATE_KERNEL
#ifndef PMAP_DEACTIVATE
#define	PMAP_DEACTIVATE_KERNEL(cpu)
#else	/* PMAP_DEACTIVATE */
#define	PMAP_DEACTIVATE_KERNEL(cpu)			\
		PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif	/* PMAP_DEACTIVATE */
#endif	/* PMAP_DEACTIVATE_KERNEL */

#ifndef	PMAP_ENTER
/*
 *	Macro to be used in place of pmap_enter()
 */
#define PMAP_ENTER(pmap, virtual_address, page, protection, fault_type, flags, wired) \
	MACRO_BEGIN							\
	pmap_t		__pmap = (pmap);				\
	vm_page_t	__page = (page);				\
									\
	PMAP_ENTER_CHECK(__pmap, __page)				\
	pmap_enter(__pmap,						\
		(virtual_address),					\
		__page->phys_page,					\
		(protection),						\
		(fault_type),						\
		(flags),						\
		(wired));						\
	MACRO_END
#endif	/* !PMAP_ENTER */

#ifndef	PMAP_ENTER_OPTIONS
#define PMAP_ENTER_OPTIONS(pmap, virtual_address, page, protection, fault_type,	\
				flags, wired, options, result) 		\
	MACRO_BEGIN							\
	pmap_t		__pmap = (pmap);				\
	vm_page_t	__page = (page);				\
									\
	PMAP_ENTER_CHECK(__pmap, __page)				\
	result = pmap_enter_options(__pmap,				\
		(virtual_address),					\
		__page->phys_page,					\
		(protection),						\
		(fault_type),						\
		(flags),						\
		(wired),						\
		options);						\
	MACRO_END
#endif	/* !PMAP_ENTER_OPTIONS */

#ifndef PMAP_SET_CACHE_ATTR
#define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op)		\
	MACRO_BEGIN								\
		if (!batch_pmap_op) {						\
			pmap_set_cache_attributes(mem->phys_page, cache_attr);	\
			object->set_cache_attr = TRUE;				\
		}								\
	MACRO_END
#endif	/* PMAP_SET_CACHE_ATTR */

#ifndef PMAP_BATCH_SET_CACHE_ATTR
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,			\
					cache_attr, num_pages, batch_pmap_op)	\
	MACRO_BEGIN								\
		if ((batch_pmap_op)) {						\
			unsigned int __page_idx=0;				\
			while (__page_idx < (num_pages)) {			\
				pmap_set_cache_attributes(			\
					user_page_list[__page_idx].phys_addr,	\
					(cache_attr));				\
				__page_idx++;					\
			}							\
			(object)->set_cache_attr = TRUE;			\
		}								\
	MACRO_END
#endif	/* PMAP_BATCH_SET_CACHE_ATTR */

#define PMAP_ENTER_CHECK(pmap, page)					\
{									\
	if ((pmap) != kernel_pmap) {					\
		ASSERT_PAGE_DECRYPTED(page);				\
	}								\
	if ((page)->error) {						\
		panic("VM page %p should not have an error\n",		\
			(page));					\
	}								\
}

/*
 *	Routines to manage reference/modify bits based on
 *	physical addresses, simulating them if not provided
 *	by the hardware.
 */
				/* Clear reference bit */
extern void		pmap_clear_reference(ppnum_t	 pn);
				/* Return reference bit */
extern boolean_t	(pmap_is_referenced)(ppnum_t	 pn);
				/* Set modify bit */
extern void             pmap_set_modify(ppnum_t	 pn);
				/* Clear modify bit */
extern void		pmap_clear_modify(ppnum_t pn);
				/* Return modify bit */
extern boolean_t	pmap_is_modified(ppnum_t pn);
				/* Return modified and referenced bits */
extern unsigned int pmap_get_refmod(ppnum_t pn);
				/* Clear modified and referenced bits */
extern void			pmap_clear_refmod(ppnum_t pn, unsigned int mask);
#define VM_MEM_MODIFIED		0x01	/* Modified bit */
#define VM_MEM_REFERENCED	0x02	/* Referenced bit */
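
/*
 *	Illustrative sketch: sampling, then clearing, both bits for a page.
 *	"pn" is a hypothetical page number.
 *
 *		unsigned int refmod = pmap_get_refmod(pn);
 *		if (refmod & VM_MEM_MODIFIED) {
 *			... the page has been written to ...
 *		}
 *		pmap_clear_refmod(pn, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
 */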

/*
 *	Routines that operate on ranges of virtual addresses.
 */
extern void		pmap_protect(	/* Change protections. */
				pmap_t		map,
				vm_map_offset_t	s,
				vm_map_offset_t	e,
				vm_prot_t	prot);

extern void		(pmap_pageable)(
				pmap_t		pmap,
				vm_map_offset_t	start,
				vm_map_offset_t	end,
				boolean_t	pageable);


extern uint64_t pmap_nesting_size_min;
extern uint64_t pmap_nesting_size_max;

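/*
 * Nest a subordinate pmap's mappings (for example, a shared region)
 * into a grand pmap over the given address range; pmap_unnest removes
 * such a nesting.
 */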
extern kern_return_t pmap_nest(pmap_t,
			       pmap_t,
			       addr64_t,
			       addr64_t,
			       uint64_t);
extern kern_return_t pmap_unnest(pmap_t,
				 addr64_t,
				 uint64_t);
extern boolean_t pmap_adjust_unnest_parameters(pmap_t, vm_map_offset_t *, vm_map_offset_t *);
#endif	/* MACH_KERNEL_PRIVATE */

extern boolean_t	pmap_is_noencrypt(ppnum_t);
extern void		pmap_set_noencrypt(ppnum_t pn);
extern void		pmap_clear_noencrypt(ppnum_t pn);

/*
 * JMM - This portion is exported to other kernel components right now,
 * but will be pulled back in the future when the needed functionality
 * is provided in a cleaner manner.
 */

extern pmap_t	kernel_pmap;			/* The kernel's map */
#define		pmap_kernel()	(kernel_pmap)

/* machine independent WIMG bits */

#define VM_MEM_GUARDED 		0x1		/* (G) Guarded Storage */
#define VM_MEM_COHERENT		0x2		/* (M) Memory Coherency */
#define VM_MEM_NOT_CACHEABLE	0x4		/* (I) Cache Inhibit */
#define VM_MEM_WRITE_THROUGH	0x8		/* (W) Write-Through */

#define VM_WIMG_USE_DEFAULT	0x80
#define VM_WIMG_MASK		0xFF
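
/*
 *	Illustrative sketch: the WIMG bits combine into the cache attribute
 *	passed in pmap_enter()'s "flags" argument.  A device mapping, for
 *	instance, might plausibly be entered uncached and guarded;
 *	"io_vaddr" and "io_pn" are hypothetical names.
 *
 *		pmap_enter(pmap_kernel(), io_vaddr, io_pn,
 *			   VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
 *			   VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED, TRUE);
 */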

#define VM_MEM_SUPERPAGE	0x100		/* map a superpage instead of a base page */
#define VM_MEM_STACK		0x200

#define PMAP_OPTIONS_NOWAIT	0x1		/* don't block, return
						 * KERN_RESOURCE_SHORTAGE
						 * instead */
#define PMAP_OPTIONS_NOENTER	0x2		/* expand pmap if needed
						 * but don't enter mapping
						 */
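
/*
 *	Illustrative sketch: a caller that must not block can ask for
 *	PMAP_OPTIONS_NOWAIT and retry later.  The surrounding names are
 *	hypothetical.
 *
 *		kr = pmap_enter_options(pmap, vaddr, pn, prot, VM_PROT_NONE,
 *					VM_WIMG_USE_DEFAULT, FALSE,
 *					PMAP_OPTIONS_NOWAIT);
 *		if (kr == KERN_RESOURCE_SHORTAGE) {
 *			... retry once it is safe to block ...
 *		}
 */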

#if	!defined(__LP64__)
extern vm_offset_t	pmap_extract(pmap_t pmap,
				vm_map_offset_t va);
#endif
extern void		pmap_change_wiring(	/* Specify pageability */
				pmap_t		pmap,
				vm_map_offset_t	va,
				boolean_t	wired);

/* LP64todo - switch to vm_map_offset_t when it grows */
extern void		pmap_remove(	/* Remove mappings. */
				pmap_t		map,
				vm_map_offset_t	s,
				vm_map_offset_t	e);

extern void		fillPage(ppnum_t pa, unsigned int fill);

extern void pmap_map_sharedpage(task_t task, pmap_t pmap);
extern void pmap_unmap_sharedpage(pmap_t pmap);

#if defined(__LP64__)
void pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr);
#endif

#endif  /* KERNEL_PRIVATE */

#endif	/* _VM_PMAP_H_ */