/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	File:	vm/vm_map.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory map module definitions.
 *
 * Contributors:
 *	avie, dlb, mwyoung
 */

#ifndef	_VM_VM_MAP_H_
#define _VM_VM_MAP_H_

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <mach/vm_param.h>
#include <vm/pmap.h>

#ifdef	KERNEL_PRIVATE

#include <sys/cdefs.h>

__BEGIN_DECLS

extern void	vm_map_reference(vm_map_t	map);
extern vm_map_t current_map(void);

/* Setup reserved areas in a new VM map */
extern kern_return_t	vm_map_exec(
				vm_map_t		new_map,
				task_t			task,
				void			*fsroot,
				cpu_type_t		cpu);

__END_DECLS

#ifdef	MACH_KERNEL_PRIVATE

#include <task_swapper.h>
#include <mach_assert.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <kern/lock.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>

#include <kern/thread.h>

#define current_map_fast()	(current_thread()->map)
#define	current_map()		(current_map_fast())

#include <vm/vm_map_store.h>


/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
 *	vm_map_copy_t		represents memory copied from an address map,
 *				 used for inter-map copy operations
 */
typedef struct vm_map_entry	*vm_map_entry_t;
#define VM_MAP_ENTRY_NULL	((vm_map_entry_t) 0)


/*
 *	Type:		vm_map_object_t [internal use only]
 *
 *	Description:
 *		The target of an address mapping, either a virtual
 *		memory object or a sub map (of the kernel map).
 */
typedef union vm_map_object {
	vm_object_t		vm_object;	/* a virtual memory object */
	vm_map_t		sub_map;	/* belongs to another map */
} vm_map_object_t;

#define named_entry_lock_init(object)	lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define named_entry_lock_destroy(object)	lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define named_entry_lock(object)		lck_mtx_lock(&(object)->Lock)
#define named_entry_unlock(object)		lck_mtx_unlock(&(object)->Lock)

/*
 *	Type:		vm_named_entry_t [internal use only]
 *
 *	Description:
 *		Description of a mapping to a memory cache object.
 *
 *	Implementation:
 *		While the handle to this object is used as a means to map
 * 		and pass around the right to map regions backed by pagers
 *		of all sorts, the named_entry itself is only manipulated
 *		by the kernel.  Named entries hold information on the
 *		right to map a region of a cached object.  Namely,
 *		the target cache object, the beginning and ending of the
 *		region to be mapped, and the permissions, (read, write)
 *		with which it can be mapped.
 *
 */

struct vm_named_entry {
	decl_lck_mtx_data(,	Lock)		/* Synchronization */
	union {
		vm_object_t	object;		/* object I point to */
		memory_object_t	pager;		/* amo pager port */
		vm_map_t	map;		/* map backing submap */
	} backing;
	vm_object_offset_t	offset;		/* offset into object */
	vm_object_size_t	size;		/* size of region */
	vm_prot_t		protection;	/* access permissions */
	int			ref_count;	/* Number of references */
	unsigned int				/* Is backing.xxx : */
	/* boolean_t */		internal:1,	/* ... an internal object */
	/* boolean_t */		is_sub_map:1,	/* ... a submap? */
	/* boolean_t */		is_pager:1;	/* ... a pager port */
};

/*
 *	Type:		vm_map_entry_t [internal use only]
 *
 *	Description:
 *		A single mapping within an address map.
 *
 *	Implementation:
 *		Address map entries consist of start and end addresses,
 *		a VM object (or sub map) and offset into that object,
 *		and user-exported inheritance and protection information.
 *		Control information for virtual copy operations is also
 *		stored in the address map entry.
 */

struct vm_map_links {
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	vm_map_offset_t		start;		/* start address */
	vm_map_offset_t		end;		/* end address */
};

struct vm_map_entry {
	struct vm_map_links	links;		/* links to other entries */
#define vme_prev		links.prev
#define vme_next		links.next
#define vme_start		links.start
#define vme_end			links.end

	struct vm_map_store	store;
	union vm_map_object	object;		/* object I point to */
	vm_object_offset_t	offset;		/* offset into object */
	unsigned int
	/* boolean_t */		is_shared:1,	/* region is shared */
	/* boolean_t */		is_sub_map:1,	/* Is "object" a submap? */
	/* boolean_t */		in_transition:1, /* Entry being changed */
	/* boolean_t */		needs_wakeup:1,  /* Waiters on in_transition */
	/* vm_behavior_t */	behavior:2,	/* user paging behavior hint */
		/* behavior is not defined for submap type */
	/* boolean_t */		needs_copy:1,	/* object need to be copied? */
		/* Only in task maps: */
	/* vm_prot_t */		protection:3,	/* protection code */
	/* vm_prot_t */		max_protection:3,/* maximum protection */
	/* vm_inherit_t */	inheritance:2,	/* inheritance */
	/* boolean_t */		use_pmap:1,	/* nested pmaps */
	/*
	 * IMPORTANT:
	 * The "alias" field can be updated while holding the VM map lock
	 * "shared".  That is OK as long as it is the only field that can be
	 * updated without the VM map "exclusive" lock.
	 */
	/* unsigned char */	alias:8,	/* user alias */
	/* boolean_t */		no_cache:1,	/* should new pages be cached? */
	/* boolean_t */		permanent:1,	/* mapping can not be removed */
	/* boolean_t */		superpage_size:3,/* use superpages of a certain size */
	/* boolean_t */		zero_wired_pages:1, /* zero out the wired pages of this entry if it is being deleted without unwiring them */
	/* boolean_t */		used_for_jit:1,
	/* boolean_t */	from_reserved_zone:1;	/* Allocated from
						 * kernel reserved zone	 */
	unsigned short		wired_count;	/* can be paged if = 0 */
	unsigned short		user_wired_count; /* for vm_wire */
#if	DEBUG
#define	MAP_ENTRY_CREATION_DEBUG (1)
#endif
#if	MAP_ENTRY_CREATION_DEBUG
	uintptr_t		vme_bt[16];
#endif
};

/*
 * Convenience macros for dealing with superpages
 * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
 */
#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a) (a & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a) ((a + SUPERPAGE_SIZE-1) & SUPERPAGE_MASK)
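
/*
 * Worked example (illustrative values only; SUPERPAGE_NBASEPAGES is set
 * per architecture in pmap.h): with PAGE_SIZE = 4096 and
 * SUPERPAGE_NBASEPAGES = 512, SUPERPAGE_SIZE is 0x200000 (2MB), so:
 *
 *	SUPERPAGE_ROUND_DOWN(0x12345678) == 0x12200000
 *	SUPERPAGE_ROUND_UP(0x12345678)   == 0x12400000
 */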

/*
 * wired_counts are unsigned short.  This value is used to safeguard
 * against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT		65535



/*
 *	Type:		struct vm_map_header
 *
 *	Description:
 *		Header for a vm_map and a vm_map_copy.
 */


struct vm_map_header {
	struct vm_map_links	links;		/* first, last, min, max */
	int			nentries;	/* Number of entries */
	boolean_t		entries_pageable;
						/* are map entries pageable? */
	vm_map_offset_t		highest_entry_end_addr;	/* The ending address of the highest allocated vm_map_entry_t */
#ifdef VM_MAP_STORE_USE_RB
	struct rb_head	rb_head_store;
#endif
};

/*
 *	Type:		vm_map_t [exported; contents invisible]
 *
 *	Description:
 *		An address map -- a directory relating valid
 *		regions of a task's address space to the corresponding
 *		virtual memory objects.
 *
 *	Implementation:
 *		Maps are doubly-linked lists of map entries, sorted
 *		by address.  One hint is used to start
 *		searches again from the last successful search,
 *		insertion, or removal.  Another hint is used to
 *		quickly find free space.
 */
struct _vm_map {
	lock_t			lock;		/* uni- and smp-lock */
	struct vm_map_header	hdr;		/* Map entry header */
#define min_offset		hdr.links.start	/* start of range */
#define max_offset		hdr.links.end	/* end of range */
#define highest_entry_end	hdr.highest_entry_end_addr
	pmap_t			pmap;		/* Physical map */
	vm_map_size_t		size;		/* virtual size */
	vm_map_size_t		user_wire_limit;/* rlimit on user locked memory */
	vm_map_size_t		user_wire_size; /* current size of user locked memory in this map */
	int			ref_count;	/* Reference count */
#if	TASK_SWAPPER
	int			res_count;	/* Residence count (swap) */
	int			sw_state;	/* Swap state */
#endif	/* TASK_SWAPPER */
	decl_lck_mtx_data(,	s_lock)		/* Lock ref, res fields */
	lck_mtx_ext_t		s_lock_ext;
	vm_map_entry_t		hint;		/* hint for quick lookups */
	vm_map_entry_t		first_free;	/* First free space hint */
	unsigned int
	/* boolean_t */		wait_for_space:1, /* Should callers wait for space? */
	/* boolean_t */		wiring_required:1, /* All memory wired? */
	/* boolean_t */		no_zero_fill:1, /* No zero fill absent pages */
	/* boolean_t */		mapped_in_other_pmaps:1, /* has this submap been mapped in maps that use a different pmap */
	/* boolean_t */		switch_protect:1, /* Protect map from write faults while switched */
	/* boolean_t */		disable_vmentry_reuse:1, /* All vm entries should keep using newer and higher addresses in the map */
	/* boolean_t */		map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
	/* reserved */		pad:25;
	unsigned int		timestamp;	/* Version number */
	unsigned int		color_rr;	/* next color (not protected by a lock) */
#if CONFIG_FREEZE
	void			*default_freezer_handle;
#endif
	boolean_t		jit_entry_exists;
};

#define vm_map_to_entry(map)	((struct vm_map_entry *) &(map)->hdr.links)
#define vm_map_first_entry(map)	((map)->hdr.links.next)
#define vm_map_last_entry(map)	((map)->hdr.links.prev)
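
/*
 * A minimal traversal sketch (illustrative only): the entry list is
 * circular, with vm_map_to_entry(map) acting as the sentinel header,
 * and must be walked with the map locked.
 *
 *	vm_map_entry_t entry;
 *
 *	vm_map_lock_read(map);
 *	for (entry = vm_map_first_entry(map);
 *	     entry != vm_map_to_entry(map);
 *	     entry = entry->vme_next) {
 *		... inspect [entry->vme_start, entry->vme_end) ...
 *	}
 *	vm_map_unlock_read(map);
 */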

#if	TASK_SWAPPER
/*
 * VM map swap states.  There are no transition states.
 */
#define MAP_SW_IN	 1	/* map is swapped in; residence count > 0 */
#define MAP_SW_OUT	 2	/* map is out (res_count == 0) */
#endif	/* TASK_SWAPPER */

/*
 *	Type:		vm_map_version_t [exported; contents invisible]
 *
 *	Description:
 *		Map versions may be used to quickly validate a previous
 *		lookup operation.
 *
 *	Usage note:
 *		Because they are bulky objects, map versions are usually
 *		passed by reference.
 *
 *	Implementation:
 *		Just a timestamp for the main map.
 */
typedef struct vm_map_version {
	unsigned int	main_timestamp;
} vm_map_version_t;

/*
 *	Type:		vm_map_copy_t [exported; contents invisible]
 *
 *	Description:
 *		A map copy object represents a region of virtual memory
 *		that has been copied from an address map but is still
 *		in transit.
 *
 *		A map copy object may only be used by a single thread
 *		at a time.
 *
 *	Implementation:
 * 		There are three formats for map copy objects.
 *		The first is very similar to the main
 *		address map in structure, and as a result, some
 *		of the internal maintenance functions/macros can
 *		be used with either address maps or map copy objects.
 *
 *		The map copy object contains a header links
 *		entry onto which the other entries that represent
 *		the region are chained.
 *
 *		The second format is a single vm object.  This was used
 *		primarily in the pageout path - but is not currently used
 *		except for placeholder copy objects (see vm_map_copy_copy()).
 *
 *		The third format is a kernel buffer copy object - for data
 * 		small enough that physical copies were the most efficient
 *		method.
 */

struct vm_map_copy {
	int			type;
#define VM_MAP_COPY_ENTRY_LIST		1
#define VM_MAP_COPY_OBJECT		2
#define VM_MAP_COPY_KERNEL_BUFFER	3
	vm_object_offset_t	offset;
	vm_map_size_t		size;
	union {
	    struct vm_map_header	hdr;	/* ENTRY_LIST */
	    vm_object_t			object; /* OBJECT */
	    struct {
		void			*kdata;	      /* KERNEL_BUFFER */
		vm_size_t		kalloc_size;  /* size of this copy_t */
	    } c_k;
	} c_u;
};


#define cpy_hdr			c_u.hdr

#define cpy_object		c_u.object

#define cpy_kdata		c_u.c_k.kdata
#define cpy_kalloc_size		c_u.c_k.kalloc_size


/*
 *	Useful macros for entry list copy objects
 */

#define vm_map_copy_to_entry(copy)		\
		((struct vm_map_entry *) &(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy)		\
		((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy)		\
		((copy)->cpy_hdr.links.prev)
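
/*
 * For VM_MAP_COPY_ENTRY_LIST copies these mirror the map macros above,
 * so the same circular-list walk applies (a sketch, not a definitive
 * pattern):
 *
 *	vm_map_entry_t entry;
 *
 *	for (entry = vm_map_copy_first_entry(copy);
 *	     entry != vm_map_copy_to_entry(copy);
 *	     entry = entry->vme_next)
 *		... examine one copied region ...
 */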

/*
 *	Macros:		vm_map_lock, etc. [internal use only]
 *	Description:
 *		Perform locking on the data portion of a map.
 *	When multiple maps are to be locked, order by map address.
 *	(See vm_map.c::vm_remap())
 */

#define vm_map_lock_init(map)						\
	((map)->timestamp = 0 ,						\
	lock_init(&(map)->lock, TRUE, 0, 0))

#define vm_map_lock(map)		lock_write(&(map)->lock)
#define vm_map_unlock(map)						\
		((map)->timestamp++ ,	lock_write_done(&(map)->lock))
#define vm_map_lock_read(map)		lock_read(&(map)->lock)
#define vm_map_unlock_read(map)		lock_read_done(&(map)->lock)
#define vm_map_lock_write_to_read(map)					\
		((map)->timestamp++ ,	lock_write_to_read(&(map)->lock))
/* lock_read_to_write() returns FALSE on failure.  This macro evaluates
 * to zero on success and to a non-zero value on failure.
 */
#define vm_map_lock_read_to_write(map)	(lock_read_to_write(&(map)->lock) != TRUE)
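
/*
 * Sketch of the usual upgrade pattern (assumption: a failed
 * lock_read_to_write() leaves the map unlocked, so the caller must
 * re-take the lock and re-validate anything read under the read lock):
 *
 *	if (vm_map_lock_read_to_write(map)) {
 *		vm_map_lock(map);
 *		... re-validate state and retry ...
 *	}
 */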

/*
 *	Exported procedures that operate on vm_map_t.
 */

/* Initialize the module */
extern void		vm_map_init(void) __attribute__((section("__TEXT, initcode")));

extern void		vm_kernel_reserved_entry_init(void) __attribute__((section("__TEXT, initcode")));

/* Allocate a range in the specified virtual address map and
 * return the entry allocated for that range. */
extern kern_return_t vm_map_find_space(
				vm_map_t		map,
				vm_map_address_t	*address,	/* OUT */
				vm_map_size_t		size,
				vm_map_offset_t		mask,
				int			flags,
				vm_map_entry_t		*o_entry);	/* OUT */

extern void vm_map_clip_start(
	vm_map_t	map,
	vm_map_entry_t	entry,
	vm_map_offset_t	endaddr);
extern void vm_map_clip_end(
	vm_map_t	map,
	vm_map_entry_t	entry,
	vm_map_offset_t	endaddr);
#if !CONFIG_EMBEDDED
extern boolean_t vm_map_entry_should_cow_for_true_share(
	vm_map_entry_t	entry);
#endif /* !CONFIG_EMBEDDED */

/* Look up the map entry containing (or immediately preceding) the specified address in the given map */
extern boolean_t	vm_map_lookup_entry(
				vm_map_t		map,
				vm_map_address_t	address,
				vm_map_entry_t		*entry);	/* OUT */
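
/*
 * Calling convention, sketched (hypothetical caller): a TRUE return
 * means *entry contains the address; on FALSE, *entry is left at the
 * entry preceding the address (or at vm_map_to_entry(map) if there is
 * none), which is convenient for insertions.
 *
 *	vm_map_entry_t entry;
 *
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		... addr lies in [entry->vme_start, entry->vme_end) ...
 *	}
 */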

/* Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified. */
extern kern_return_t	vm_map_lookup_locked(
				vm_map_t		*var_map,	/* IN/OUT */
				vm_map_address_t	vaddr,
				vm_prot_t		fault_type,
				int			object_lock_type,
				vm_map_version_t 	*out_version,	/* OUT */
				vm_object_t		*object,	/* OUT */
				vm_object_offset_t 	*offset,	/* OUT */
				vm_prot_t		*out_prot,	/* OUT */
				boolean_t		*wired,		/* OUT */
				vm_object_fault_info_t	fault_info,	/* OUT */
				vm_map_t		*real_map);	/* OUT */

/* Verifies that the map has not changed since the given version. */
extern boolean_t	vm_map_verify(
				vm_map_t	 	map,
				vm_map_version_t 	*version);	/* REF */

extern vm_map_entry_t	vm_map_entry_insert(
				vm_map_t		map,
				vm_map_entry_t		insp_entry,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_object_t		object,
				vm_object_offset_t	offset,
				boolean_t		needs_copy,
				boolean_t		is_shared,
				boolean_t		in_transition,
				vm_prot_t		cur_protection,
				vm_prot_t		max_protection,
				vm_behavior_t		behavior,
				vm_inherit_t		inheritance,
				unsigned		wired_count,
				boolean_t		no_cache,
				boolean_t		permanent,
				unsigned int		superpage_size);


/*
 *	Functions implemented as macros
 */
#define		vm_map_min(map)	((map)->min_offset)
						/* Lowest valid address in
						 * a map */

#define		vm_map_max(map)	((map)->max_offset)
						/* Highest valid address */

#define		vm_map_pmap(map)	((map)->pmap)
						/* Physical map associated
						 * with this address map */

#define		vm_map_verify_done(map, version)    vm_map_unlock_read(map)
						/* Operation that required
						 * a verified lookup is
						 * now complete */
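
/*
 * The version/verify protocol, sketched (illustrative only): capture a
 * version while the map is locked, drop the lock to do blocking work,
 * then re-take the read lock and verify before trusting the old lookup.
 *
 *	vm_map_lock_read(map);
 *	if (!vm_map_verify(map, &version)) {
 *		vm_map_unlock_read(map);
 *		... map changed: redo the lookup ...
 *	}
 *	... safe to use the cached lookup results ...
 *	vm_map_verify_done(map, &version);
 */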

/*
 * Macros/functions for map residence counts and swapin/out of vm maps
 */
#if	TASK_SWAPPER

#if	MACH_ASSERT
/* Gain a reference to an existing map */
extern void		vm_map_reference(
				vm_map_t	map);
/* Lose a residence count */
extern void		vm_map_res_deallocate(
				vm_map_t	map);
/* Gain a residence count on a map */
extern void		vm_map_res_reference(
				vm_map_t	map);
/* Gain reference & residence counts to possibly swapped-out map */
extern void		vm_map_reference_swap(
				vm_map_t	map);

#else	/* MACH_ASSERT */

#define vm_map_reference(map)		\
MACRO_BEGIN					\
	vm_map_t Map = (map);		\
	if (Map) {				\
		lck_mtx_lock(&Map->s_lock);	\
		Map->res_count++;		\
		Map->ref_count++;		\
		lck_mtx_unlock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_res_reference(map)		\
MACRO_BEGIN					\
	vm_map_t Lmap = (map);		\
	if (Lmap->res_count == 0) {		\
		lck_mtx_unlock(&Lmap->s_lock);\
		vm_map_lock(Lmap);		\
		vm_map_swapin(Lmap);		\
		lck_mtx_lock(&Lmap->s_lock);	\
		++Lmap->res_count;		\
		vm_map_unlock(Lmap);		\
	} else					\
		++Lmap->res_count;		\
MACRO_END

#define vm_map_res_deallocate(map)		\
MACRO_BEGIN					\
	vm_map_t Map = (map);		\
	if (--Map->res_count == 0) {	\
		lck_mtx_unlock(&Map->s_lock);	\
		vm_map_lock(Map);		\
		vm_map_swapout(Map);		\
		vm_map_unlock(Map);		\
		lck_mtx_lock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_reference_swap(map)	\
MACRO_BEGIN				\
	vm_map_t Map = (map);		\
	lck_mtx_lock(&Map->s_lock);	\
	++Map->ref_count;		\
	vm_map_res_reference(Map);	\
	lck_mtx_unlock(&Map->s_lock);	\
MACRO_END
#endif 	/* MACH_ASSERT */

extern void		vm_map_swapin(
				vm_map_t	map);

extern void		vm_map_swapout(
				vm_map_t	map);

#else	/* TASK_SWAPPER */

#define vm_map_reference(map)			\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	if (Map) {				\
		lck_mtx_lock(&Map->s_lock);	\
		Map->ref_count++;		\
		lck_mtx_unlock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_reference_swap(map)	vm_map_reference(map)
#define vm_map_res_reference(map)
#define vm_map_res_deallocate(map)

#endif	/* TASK_SWAPPER */

/*
 *	Submap object.  Must be used to create memory to be put
 *	in a submap by vm_map_submap.
 */
extern vm_object_t	vm_submap_object;

/*
 *	Wait and wakeup macros for in_transition map entries.
 */
#define vm_map_entry_wait(map, interruptible)    	\
	((map)->timestamp++ ,				\
	 thread_sleep_lock_write((event_t)&(map)->hdr,  \
			 &(map)->lock, interruptible))


#define vm_map_entry_wakeup(map)        \
	thread_wakeup((event_t)(&(map)->hdr))
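
/*
 * Sketch of the in_transition handshake (assumptions: the map write
 * lock is held on entry, and vm_map_entry_wait() bumps the timestamp
 * and sleeps with the lock released, so the caller must re-lock and
 * re-lookup afterwards):
 *
 *	while (entry->in_transition) {
 *		entry->needs_wakeup = TRUE;
 *		vm_map_entry_wait(map, THREAD_UNINT);
 *		vm_map_lock(map);
 *		... re-lookup the entry; it may have been clipped ...
 *	}
 */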


#define	vm_map_ref_fast(map)			\
	MACRO_BEGIN					\
	lck_mtx_lock(&map->s_lock);			\
	map->ref_count++;				\
	vm_map_res_reference(map);			\
	lck_mtx_unlock(&map->s_lock);			\
	MACRO_END

#define	vm_map_dealloc_fast(map)		\
	MACRO_BEGIN					\
	register int c;				\
							\
	lck_mtx_lock(&map->s_lock);			\
	c = --map->ref_count;			\
	if (c > 0)					\
		vm_map_res_deallocate(map);		\
	lck_mtx_unlock(&map->s_lock);			\
	if (c == 0)					\
		vm_map_destroy(map);			\
	MACRO_END


/* simplify map entries */
extern void		vm_map_simplify_entry(
	vm_map_t	map,
	vm_map_entry_t	this_entry);
extern void		vm_map_simplify(
				vm_map_t		map,
				vm_map_offset_t		start);

/* Move the information in a map copy object to a new map copy object */
extern vm_map_copy_t	vm_map_copy_copy(
				vm_map_copy_t           copy);

/* Create a copy object from an object. */
extern kern_return_t	vm_map_copyin_object(
				vm_object_t		object,
				vm_object_offset_t	offset,
				vm_object_size_t	size,
				vm_map_copy_t		*copy_result); /* OUT */

extern kern_return_t	vm_map_random_address_for_size(
				vm_map_t	map,
				vm_map_offset_t	*address,
				vm_map_size_t	size);

/* Enter a mapping */
extern kern_return_t	vm_map_enter(
				vm_map_t		map,
				vm_map_offset_t		*address,
				vm_map_size_t		size,
				vm_map_offset_t		mask,
				int			flags,
				vm_object_t		object,
				vm_object_offset_t	offset,
				boolean_t		needs_copy,
				vm_prot_t		cur_protection,
				vm_prot_t		max_protection,
				vm_inherit_t		inheritance);
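
/*
 * A hedged usage sketch (flags and constants from mach/vm_statistics.h
 * and mach/vm_prot.h; size illustrative): enter `size` bytes of
 * zero-fill memory anywhere in the map.
 *
 *	vm_map_offset_t addr = 0;
 *	kern_return_t kr;
 *
 *	kr = vm_map_enter(map, &addr, size, (vm_map_offset_t)0,
 *			  VM_FLAGS_ANYWHERE, VM_OBJECT_NULL, 0, FALSE,
 *			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
 */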

/* XXX should go away - replaced with regular enter of contig object */
extern  kern_return_t	vm_map_enter_cpm(
				vm_map_t		map,
				vm_map_address_t	*addr,
				vm_map_size_t		size,
				int			flags);

extern kern_return_t vm_map_remap(
				vm_map_t		target_map,
				vm_map_offset_t		*address,
				vm_map_size_t		size,
				vm_map_offset_t		mask,
				int			flags,
				vm_map_t		src_map,
				vm_map_offset_t		memory_address,
				boolean_t		copy,
				vm_prot_t		*cur_protection,
				vm_prot_t		*max_protection,
				vm_inherit_t		inheritance);


/*
 * Read and write from a kernel buffer to a specified map.
 */
extern	kern_return_t	vm_map_write_user(
				vm_map_t		map,
				void			*src_p,
				vm_map_offset_t		dst_addr,
				vm_size_t		size);

extern	kern_return_t	vm_map_read_user(
				vm_map_t		map,
				vm_map_offset_t		src_addr,
				void			*dst_p,
				vm_size_t		size);
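
/*
 * Sketch (hypothetical buffer and address): both helpers bounce data
 * between a kernel buffer and a (possibly foreign) user map, returning
 * KERN_SUCCESS or a KERN_* failure on fault.
 *
 *	char buf[128];
 *
 *	if (vm_map_write_user(map, buf, user_addr, sizeof (buf)) != KERN_SUCCESS)
 *		... destination not writable ...
 *	if (vm_map_read_user(map, user_addr, buf, sizeof (buf)) != KERN_SUCCESS)
 *		... source not readable ...
 */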

/* Create a new task map using an existing task map as a template. */
extern vm_map_t		vm_map_fork(
				ledger_t		ledger,
				vm_map_t		old_map);

/* Change inheritance */
extern kern_return_t	vm_map_inherit(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_inherit_t		new_inheritance);

/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t	vm_map_machine_attribute(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_machine_attribute_t	attribute,
				vm_machine_attribute_val_t* value); /* IN/OUT */

extern kern_return_t	vm_map_msync(
				vm_map_t		map,
				vm_map_address_t	address,
				vm_map_size_t		size,
				vm_sync_t		sync_flags);

/* Set paging behavior */
extern kern_return_t	vm_map_behavior_set(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_behavior_t		new_behavior);

extern kern_return_t vm_map_purgable_control(
				vm_map_t		map,
				vm_map_offset_t		address,
				vm_purgable_t		control,
				int			*state);

extern kern_return_t vm_map_region(
				vm_map_t		 map,
				vm_map_offset_t		*address,
				vm_map_size_t		*size,
				vm_region_flavor_t	 flavor,
				vm_region_info_t	 info,
				mach_msg_type_number_t	*count,
				mach_port_t		*object_name);

extern kern_return_t vm_map_region_recurse_64(
				vm_map_t		 map,
				vm_map_offset_t		*address,
				vm_map_size_t		*size,
				natural_t	 	*nesting_depth,
				vm_region_submap_info_64_t info,
				mach_msg_type_number_t  *count);

extern kern_return_t vm_map_page_query_internal(
				vm_map_t		map,
				vm_map_offset_t		offset,
				int			*disposition,
				int			*ref_count);


extern kern_return_t	vm_map_submap(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_map_t		submap,
				vm_map_offset_t		offset,
				boolean_t		use_pmap);

extern void vm_map_submap_pmap_clean(
	vm_map_t	map,
	vm_map_offset_t	start,
	vm_map_offset_t	end,
	vm_map_t	sub_map,
	vm_map_offset_t	offset);

/* Convert from a map entry port to a map */
extern vm_map_t convert_port_entry_to_map(
	ipc_port_t	port);

/* Convert from a port to a vm_object */
extern vm_object_t convert_port_entry_to_object(
	ipc_port_t	port);


extern kern_return_t vm_map_set_cache_attr(
        vm_map_t        map,
        vm_map_offset_t va);


/* definitions related to overriding the NX behavior */

#define VM_ABI_32	0x1
#define VM_ABI_64	0x2

extern int override_nx(vm_map_t map, uint32_t user_tag);

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

/* Create an empty map */
extern vm_map_t		vm_map_create(
				pmap_t			pmap,
				vm_map_offset_t 	min_off,
				vm_map_offset_t 	max_off,
				boolean_t		pageable);

/* Get rid of a map */
extern void		vm_map_destroy(
				vm_map_t		map,
				int			flags);

/* Lose a reference */
extern void		vm_map_deallocate(
				vm_map_t		map);

extern vm_map_t		vm_map_switch(
				vm_map_t		map);

/* Change protection */
extern kern_return_t	vm_map_protect(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_prot_t		new_prot,
				boolean_t		set_max);

/* Check protection */
extern boolean_t vm_map_check_protection(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_prot_t		protection);

/* wire down a region */
extern kern_return_t	vm_map_wire(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_prot_t		access_type,
				boolean_t		user_wire);

/* unwire a region */
extern kern_return_t	vm_map_unwire(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				boolean_t		user_wire);
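
/*
 * Wiring is counted, so each successful vm_map_wire() should be paired
 * with a vm_map_unwire() over the same range (a sketch; user_wire is
 * TRUE for mlock-style requests, which are charged against the map's
 * user_wire_limit):
 *
 *	kr = vm_map_wire(map, start, end,
 *			 VM_PROT_READ | VM_PROT_WRITE, FALSE);
 *	if (kr == KERN_SUCCESS) {
 *		... operate on the wired range ...
 *		kr = vm_map_unwire(map, start, end, FALSE);
 *	}
 */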

/* Enter a mapping of a memory object */
extern kern_return_t	vm_map_enter_mem_object(
				vm_map_t		map,
				vm_map_offset_t		*address,
				vm_map_size_t		size,
				vm_map_offset_t		mask,
				int			flags,
				ipc_port_t		port,
				vm_object_offset_t	offset,
				boolean_t		needs_copy,
				vm_prot_t		cur_protection,
				vm_prot_t		max_protection,
				vm_inherit_t		inheritance);

/* Enter a mapping of a memory object */
extern kern_return_t	vm_map_enter_mem_object_control(
				vm_map_t		map,
				vm_map_offset_t		*address,
				vm_map_size_t		size,
				vm_map_offset_t		mask,
				int			flags,
				memory_object_control_t	control,
				vm_object_offset_t	offset,
				boolean_t		needs_copy,
				vm_prot_t		cur_protection,
				vm_prot_t		max_protection,
				vm_inherit_t		inheritance);

/* Deallocate a region */
extern kern_return_t	vm_map_remove(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				boolean_t		flags);

/* Discard a copy without using it */
extern void		vm_map_copy_discard(
				vm_map_copy_t		copy);

/* Overwrite existing memory with a copy */
extern kern_return_t	vm_map_copy_overwrite(
				vm_map_t                dst_map,
				vm_map_address_t        dst_addr,
				vm_map_copy_t           copy,
				boolean_t               interruptible);

/* Place a copy into a map */
extern kern_return_t	vm_map_copyout(
				vm_map_t		dst_map,
				vm_map_address_t	*dst_addr,	/* OUT */
				vm_map_copy_t		copy);

extern kern_return_t	vm_map_copyin(
				vm_map_t		src_map,
				vm_map_address_t	src_addr,
				vm_map_size_t		len,
				boolean_t		src_destroy,
				vm_map_copy_t		*copy_result);	/* OUT */
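
/*
 * Typical round trip, sketched (error handling abbreviated): copy a
 * range out of src_map, then materialize it at a fresh address in
 * dst_map, discarding the copy object if placement fails.
 *
 *	vm_map_copy_t copy;
 *	vm_map_address_t dst = 0;
 *
 *	if (vm_map_copyin(src_map, src_addr, len, FALSE, &copy) == KERN_SUCCESS) {
 *		if (vm_map_copyout(dst_map, &dst, copy) != KERN_SUCCESS)
 *			vm_map_copy_discard(copy);
 *	}
 */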

extern kern_return_t	vm_map_copyin_common(
				vm_map_t		src_map,
				vm_map_address_t	src_addr,
				vm_map_size_t		len,
				boolean_t		src_destroy,
				boolean_t		src_volatile,
				vm_map_copy_t		*copy_result,	/* OUT */
				boolean_t		use_maxprot);

extern void		vm_map_disable_NX(
			        vm_map_t		map);

extern void		vm_map_disallow_data_exec(
			        vm_map_t		map);

extern void		vm_map_set_64bit(
			        vm_map_t		map);

extern void		vm_map_set_32bit(
			        vm_map_t		map);

extern boolean_t	vm_map_has_hard_pagezero(
		       		vm_map_t		map,
				vm_map_offset_t		pagezero_size);

extern boolean_t	vm_map_is_64bit(
			        vm_map_t		map);
#define vm_map_has_4GB_pagezero(map) 	vm_map_has_hard_pagezero(map, (vm_map_offset_t)0x100000000ULL)


extern void		vm_map_set_4GB_pagezero(
			        vm_map_t		map);

extern void		vm_map_clear_4GB_pagezero(
			        vm_map_t		map);

extern kern_return_t	vm_map_raise_max_offset(
	vm_map_t	map,
	vm_map_offset_t	new_max_offset);

extern kern_return_t	vm_map_raise_min_offset(
	vm_map_t	map,
	vm_map_offset_t	new_min_offset);

extern vm_map_offset_t	vm_compute_max_offset(
				unsigned		is64);

extern void		vm_map_set_user_wire_limit(
				vm_map_t		map,
				vm_size_t		limit);

extern void vm_map_switch_protect(
				vm_map_t		map,
				boolean_t		val);

extern boolean_t first_free_is_valid(vm_map_t);

#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t vm_map_page_info(
	vm_map_t		map,
	vm_map_offset_t		offset,
	vm_page_info_flavor_t	flavor,
	vm_page_info_t		info,
	mach_msg_type_number_t	*count);
#endif /* XNU_KERNEL_PRIVATE */


#ifdef	MACH_KERNEL_PRIVATE

/*
 *	Macros to invoke vm_map_copyin_common.  vm_map_copyin is the
 *	usual form; it handles a copyin based on the current protection
 *	(a copyin of a region whose current protection is VM_PROT_NONE
 *	fails).  vm_map_copyin_maxprot handles a copyin based on maximum
 *	possible access.  The difference is that a region with no current
 *	access BUT possible maximum access is rejected by vm_map_copyin(),
 *	but accepted by vm_map_copyin_maxprot.
 */
#define	vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result) \
		vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
					FALSE, copy_result, FALSE)

#define vm_map_copyin_maxprot(src_map, \
			      src_addr, len, src_destroy, copy_result) \
		vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
					FALSE, copy_result, TRUE)

#endif /* MACH_KERNEL_PRIVATE */

/*
 * Macros for rounding and truncation of vm_map offsets and sizes
 */
#define vm_map_round_page(x) (((vm_map_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_map_trunc_page(x) ((vm_map_offset_t)(x) & ~((signed)PAGE_MASK))
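
/*
 * Worked example (assuming 4KB pages, i.e. PAGE_MASK == 0xFFF):
 *
 *	vm_map_trunc_page(0x12345) == 0x12000
 *	vm_map_round_page(0x12345) == 0x13000
 *	vm_map_round_page(0x12000) == 0x12000	(already aligned)
 */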

/*
 * Flags for vm_map_remove() and vm_map_delete()
 */
#define	VM_MAP_NO_FLAGS	  		0x0
#define	VM_MAP_REMOVE_KUNWIRE	  	0x1
#define	VM_MAP_REMOVE_INTERRUPTIBLE  	0x2
#define	VM_MAP_REMOVE_WAIT_FOR_KWIRE  	0x4
#define VM_MAP_REMOVE_SAVE_ENTRIES	0x8
#define VM_MAP_REMOVE_NO_PMAP_CLEANUP	0x10

/* Support for UPLs from vm_maps */

extern kern_return_t vm_map_get_upl(
				vm_map_t		target_map,
				vm_map_offset_t		map_offset,
				upl_size_t		*size,
				upl_t			*upl,
				upl_page_info_array_t	page_info,
				unsigned int	*page_infoCnt,
				int		*flags,
				int		force_data_sync);

#if CONFIG_DYNAMIC_CODE_SIGNING
extern kern_return_t vm_map_sign(vm_map_t map,
				 vm_map_offset_t start,
				 vm_map_offset_t end);
#endif

#if CONFIG_FREEZE
void	vm_map_freeze_thaw_init(void);
void	vm_map_freeze_thaw(void);
void	vm_map_demand_fault(void);

extern kern_return_t vm_map_freeze_walk(
              	vm_map_t map,
              	unsigned int *purgeable_count,
              	unsigned int *wired_count,
              	unsigned int *clean_count,
              	unsigned int *dirty_count,
             	unsigned int dirty_budget,
              	boolean_t *has_shared);

extern kern_return_t vm_map_freeze(
             	vm_map_t map,
             	unsigned int *purgeable_count,
             	unsigned int *wired_count,
             	unsigned int *clean_count,
             	unsigned int *dirty_count,
             	unsigned int dirty_budget,
             	boolean_t *has_shared);

extern kern_return_t vm_map_thaw(
                vm_map_t map);
#endif

__END_DECLS

#endif	/* KERNEL_PRIVATE */

#endif	/* _VM_VM_MAP_H_ */