/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	File:	vm/vm_map.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory map module definitions.
 *
 * Contributors:
 *	avie, dlb, mwyoung
 */

#ifndef	_VM_VM_MAP_H_
#define _VM_VM_MAP_H_

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <mach/vm_param.h>
#include <vm/pmap.h>

#ifdef	KERNEL_PRIVATE

#include <sys/cdefs.h>

__BEGIN_DECLS

extern void	vm_map_reference(vm_map_t	map);
extern vm_map_t current_map(void);

/* Setup reserved areas in a new VM map */
extern kern_return_t	vm_map_exec(
				vm_map_t		new_map,
				task_t			task,
				void			*fsroot,
				cpu_type_t		cpu);

__END_DECLS

#ifdef	MACH_KERNEL_PRIVATE

#include <task_swapper.h>
#include <mach_assert.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <kern/locks.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>

#include <kern/thread.h>

#define current_map_fast()	(current_thread()->map)
#define	current_map()		(current_map_fast())

#include <vm/vm_map_store.h>


/*
 *	Types defined:
 *
 *	vm_map_t		the high-level address map data structure.
 *	vm_map_entry_t		an entry in an address map.
 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
 *	vm_map_copy_t		represents memory copied from an address map,
 *				 used for inter-map copy operations
 */
typedef struct vm_map_entry	*vm_map_entry_t;
#define VM_MAP_ENTRY_NULL	((vm_map_entry_t) 0)


/*
 *	Type:		vm_map_object_t [internal use only]
 *
 *	Description:
 *		The target of an address mapping, either a virtual
 *		memory object or a sub map (of the kernel map).
 */
typedef union vm_map_object {
	vm_object_t		vm_object;	/* a VM object */
	vm_map_t		sub_map;	/* belongs to another map */
} vm_map_object_t;

#define named_entry_lock_init(object)	lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define named_entry_lock_destroy(object)	lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define named_entry_lock(object)		lck_mtx_lock(&(object)->Lock)
#define named_entry_unlock(object)		lck_mtx_unlock(&(object)->Lock)

/*
 *	Type:		vm_named_entry_t [internal use only]
 *
 *	Description:
 *		Description of a mapping to a memory cache object.
 *
 *	Implementation:
 *		While the handle to this object is used as a means to map
 *		and pass around the right to map regions backed by pagers
 *		of all sorts, the named_entry itself is only manipulated
 *		by the kernel.  Named entries hold information on the
 *		right to map a region of a cached object.  Namely,
 *		the target cache object, the beginning and ending of the
 *		region to be mapped, and the permissions (read, write)
 *		with which it can be mapped.
 *
 */

struct vm_named_entry {
	decl_lck_mtx_data(,	Lock)		/* Synchronization */
	union {
		vm_object_t	object;		/* object I point to */
		memory_object_t	pager;		/* amo pager port */
		vm_map_t	map;		/* map backing submap */
		vm_map_copy_t	copy;		/* a VM map copy */
	} backing;
	vm_object_offset_t	offset;		/* offset into object */
	vm_object_size_t	size;		/* size of region */
	vm_object_offset_t	data_offset;	/* offset to first byte of data */
	vm_prot_t		protection;	/* access permissions */
	int			ref_count;	/* Number of references */
	unsigned int				/* Is backing.xxx : */
	/* boolean_t */		internal:1,	/* ... an internal object */
	/* boolean_t */		is_sub_map:1,	/* ... a submap? */
	/* boolean_t */		is_pager:1,	/* ... a pager port */
	/* boolean_t */		is_copy:1;	/* ... a VM map copy */
};

/*
 *	Type:		vm_map_entry_t [internal use only]
 *
 *	Description:
 *		A single mapping within an address map.
 *
 *	Implementation:
 *		Address map entries consist of start and end addresses,
 *		a VM object (or sub map) and offset into that object,
 *		and user-exported inheritance and protection information.
 *		Control information for virtual copy operations is also
 *		stored in the address map entry.
 */

struct vm_map_links {
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	vm_map_offset_t		start;		/* start address */
	vm_map_offset_t		end;		/* end address */
};

struct vm_map_entry {
	struct vm_map_links	links;		/* links to other entries */
#define vme_prev		links.prev
#define vme_next		links.next
#define vme_start		links.start
#define vme_end			links.end

	struct vm_map_store	store;
	union vm_map_object	object;		/* object I point to */
	vm_object_offset_t	offset;		/* offset into object */
	unsigned int
	/* boolean_t */		is_shared:1,	/* region is shared */
	/* boolean_t */		is_sub_map:1,	/* Is "object" a submap? */
	/* boolean_t */		in_transition:1, /* Entry being changed */
	/* boolean_t */		needs_wakeup:1,  /* Waiters on in_transition */
	/* vm_behavior_t */	behavior:2,	/* user paging behavior hint */
		/* behavior is not defined for submap type */
	/* boolean_t */		needs_copy:1,	/* object need to be copied? */
		/* Only in task maps: */
	/* vm_prot_t */		protection:3,	/* protection code */
	/* vm_prot_t */		max_protection:3,/* maximum protection */
	/* vm_inherit_t */	inheritance:2,	/* inheritance */
	/* boolean_t */		use_pmap:1,	/*
						 * use_pmap is overloaded:
						 * if "is_sub_map":
						 * 	use a nested pmap?
						 * else (i.e. if object):
						 * 	use pmap accounting
						 * 	for footprint?
						 */
	/*
	 * IMPORTANT:
	 * The "alias" field can be updated while holding the VM map lock
	 * "shared".  It's OK as long as it's the only field that can be
	 * updated without the VM map "exclusive" lock.
	 */
	/* unsigned char */	alias:8,	/* user alias */
	/* boolean_t */		no_cache:1,	/* should new pages be cached? */
	/* boolean_t */		permanent:1,	/* mapping can not be removed */
	/* boolean_t */		superpage_size:1,/* use superpages of a certain size */
	/* boolean_t */		map_aligned:1,	/* align to map's page size */
	/* boolean_t */		zero_wired_pages:1, /* zero out the wired pages of this entry if it is being deleted without unwiring them */
	/* boolean_t */		used_for_jit:1,
	/* boolean_t */	from_reserved_zone:1,	/* Allocated from
						 * kernel reserved zone	 */

	/* iokit accounting: use the virtual size rather than resident size: */
	/* boolean_t */ iokit_acct:1;

	unsigned short		wired_count;	/* can be paged if = 0 */
	unsigned short		user_wired_count; /* for vm_wire */
#if	DEBUG
#define	MAP_ENTRY_CREATION_DEBUG (1)
#define MAP_ENTRY_INSERTION_DEBUG (1)
#endif
#if	MAP_ENTRY_CREATION_DEBUG
	struct vm_map_header	*vme_creation_maphdr;
	uintptr_t		vme_creation_bt[16];
#endif
#if	MAP_ENTRY_INSERTION_DEBUG
	uintptr_t		vme_insertion_bt[16];
#endif
};
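
/*
 * Usage sketch (illustrative only, not part of this interface): the "object"
 * union of an entry is interpreted according to its "is_sub_map" bit, e.g.:
 *
 *	vm_object_t	object;
 *	vm_map_t	submap;
 *
 *	if (entry->is_sub_map)
 *		submap = entry->object.sub_map;
 *	else
 *		object = entry->object.vm_object;
 */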

/*
 * Convenience macros for dealing with superpages
 * SUPERPAGE_NBASEPAGES is architecture dependent and defined in pmap.h
 */
#define SUPERPAGE_SIZE (PAGE_SIZE*SUPERPAGE_NBASEPAGES)
#define SUPERPAGE_MASK (-SUPERPAGE_SIZE)
#define SUPERPAGE_ROUND_DOWN(a) (a & SUPERPAGE_MASK)
#define SUPERPAGE_ROUND_UP(a) ((a + SUPERPAGE_SIZE-1) & SUPERPAGE_MASK)
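
/*
 * Worked example (illustrative only): assuming 4K base pages and
 * SUPERPAGE_NBASEPAGES == 512 (i.e. 2MB superpages, as on x86_64):
 *
 *	SUPERPAGE_SIZE                 == 0x200000
 *	SUPERPAGE_MASK                 == ~0x1FFFFF
 *	SUPERPAGE_ROUND_DOWN(0x345000) == 0x200000
 *	SUPERPAGE_ROUND_UP(0x345000)   == 0x400000
 */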

/*
 * wired_counts are unsigned short.  This value is used to safeguard
 * against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT		65535



/*
 *	Type:		struct vm_map_header
 *
 *	Description:
 *		Header for a vm_map and a vm_map_copy.
 */


struct vm_map_header {
	struct vm_map_links	links;		/* first, last, min, max */
	int			nentries;	/* Number of entries */
	boolean_t		entries_pageable;
						/* are map entries pageable? */
	vm_map_offset_t		highest_entry_end_addr;	/* The ending address of the highest allocated vm_map_entry_t */
#ifdef VM_MAP_STORE_USE_RB
	struct rb_head	rb_head_store;
#endif
	int			page_shift;	/* page shift */
};

#define VM_MAP_HDR_PAGE_SHIFT(hdr) ((hdr)->page_shift)
#define VM_MAP_HDR_PAGE_SIZE(hdr) (1 << VM_MAP_HDR_PAGE_SHIFT((hdr)))
#define VM_MAP_HDR_PAGE_MASK(hdr) (VM_MAP_HDR_PAGE_SIZE((hdr)) - 1)

/*
 *	Type:		vm_map_t [exported; contents invisible]
 *
 *	Description:
 *		An address map -- a directory relating valid
 *		regions of a task's address space to the corresponding
 *		virtual memory objects.
 *
 *	Implementation:
 *		Maps are doubly-linked lists of map entries, sorted
 *		by address.  One hint is used to start
 *		searches again from the last successful search,
 *		insertion, or removal.  Another hint is used to
 *		quickly find free space.
 */
struct _vm_map {
	lck_rw_t			lock;		/* map lock */
	struct vm_map_header	hdr;		/* Map entry header */
#define min_offset		hdr.links.start	/* start of range */
#define max_offset		hdr.links.end	/* end of range */
#define highest_entry_end	hdr.highest_entry_end_addr
	pmap_t			pmap;		/* Physical map */
	vm_map_size_t		size;		/* virtual size */
	vm_map_size_t		user_wire_limit;/* rlimit on user locked memory */
	vm_map_size_t		user_wire_size; /* current size of user locked memory in this map */
	int			ref_count;	/* Reference count */
#if	TASK_SWAPPER
	int			res_count;	/* Residence count (swap) */
	int			sw_state;	/* Swap state */
#endif	/* TASK_SWAPPER */
	decl_lck_mtx_data(,	s_lock)		/* Lock ref, res fields */
	lck_mtx_ext_t		s_lock_ext;
	vm_map_entry_t		hint;		/* hint for quick lookups */
	vm_map_entry_t		first_free;	/* First free space hint */
	unsigned int
	/* boolean_t */		wait_for_space:1, /* Should callers wait for space? */
	/* boolean_t */		wiring_required:1, /* All memory wired? */
	/* boolean_t */		no_zero_fill:1, /* No zero fill absent pages */
	/* boolean_t */		mapped_in_other_pmaps:1, /* has this submap been mapped in maps that use a different pmap */
	/* boolean_t */		switch_protect:1, /* Protect map from write faults while switched */
	/* boolean_t */		disable_vmentry_reuse:1, /* All vm entries should keep using newer and higher addresses in the map */
	/* boolean_t */		map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */
	/* reserved */		pad:25;
	unsigned int		timestamp;	/* Version number */
	unsigned int		color_rr;	/* next color (not protected by a lock) */
#if CONFIG_FREEZE
	void			*default_freezer_handle;
#endif
	boolean_t		jit_entry_exists;
};

#define vm_map_to_entry(map)	((struct vm_map_entry *) &(map)->hdr.links)
#define vm_map_first_entry(map)	((map)->hdr.links.next)
#define vm_map_last_entry(map)	((map)->hdr.links.prev)
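
/*
 * Usage sketch (illustrative only): walking the sorted entry list of a map
 * while holding the map lock for read:
 *
 *	vm_map_entry_t	entry;
 *
 *	vm_map_lock_read(map);
 *	for (entry = vm_map_first_entry(map);
 *	     entry != vm_map_to_entry(map);
 *	     entry = entry->vme_next) {
 *		(examine [entry->vme_start, entry->vme_end) here)
 *	}
 *	vm_map_unlock_read(map);
 */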

#if	TASK_SWAPPER
/*
 * VM map swap states.  There are no transition states.
 */
#define MAP_SW_IN	 1	/* map is swapped in; residence count > 0 */
#define MAP_SW_OUT	 2	/* map is out (res_count == 0) */
#endif	/* TASK_SWAPPER */

/*
 *	Type:		vm_map_version_t [exported; contents invisible]
 *
 *	Description:
 *		Map versions may be used to quickly validate a previous
 *		lookup operation.
 *
 *	Usage note:
 *		Because they are bulky objects, map versions are usually
 *		passed by reference.
 *
 *	Implementation:
 *		Just a timestamp for the main map.
 */
typedef struct vm_map_version {
	unsigned int	main_timestamp;
} vm_map_version_t;
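
/*
 * Usage sketch (illustrative only): a version captured by vm_map_lookup_locked()
 * can be used to re-validate that lookup after the map lock has been dropped:
 *
 *	vm_map_version_t	version;
 *
 *	(... vm_map_lookup_locked() fills in "version" ...)
 *	(... map lock dropped, possibly-blocking work done ...)
 *	if (vm_map_verify(map, &version)) {
 *		(map unchanged; it is locked for read again here)
 *		vm_map_verify_done(map, &version);
 *	} else {
 *		(map changed; the lookup must be redone)
 *	}
 */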

/*
 *	Type:		vm_map_copy_t [exported; contents invisible]
 *
 *	Description:
 *		A map copy object represents a region of virtual memory
 *		that has been copied from an address map but is still
 *		in transit.
 *
 *		A map copy object may only be used by a single thread
 *		at a time.
 *
 *	Implementation:
 * 		There are three formats for map copy objects.
 *		The first is very similar to the main
 *		address map in structure, and as a result, some
 *		of the internal maintenance functions/macros can
 *		be used with either address maps or map copy objects.
 *
 *		The map copy object contains a header links
 *		entry onto which the other entries that represent
 *		the region are chained.
 *
 *		The second format is a single vm object.  This was used
 *		primarily in the pageout path - but is not currently used
 *		except for placeholder copy objects (see vm_map_copy_copy()).
 *
 *		The third format is a kernel buffer copy object - for data
 * 		small enough that physical copies were the most efficient
 *		method.
 */

struct vm_map_copy {
	int			type;
#define VM_MAP_COPY_ENTRY_LIST		1
#define VM_MAP_COPY_OBJECT		2
#define VM_MAP_COPY_KERNEL_BUFFER	3
	vm_object_offset_t	offset;
	vm_map_size_t		size;
	union {
	    struct vm_map_header	hdr;	/* ENTRY_LIST */
	    vm_object_t			object; /* OBJECT */
	    struct {
		void			*kdata;	      /* KERNEL_BUFFER */
		vm_size_t		kalloc_size;  /* size of this copy_t */
	    } c_k;
	} c_u;
};


#define cpy_hdr			c_u.hdr

#define cpy_object		c_u.object

#define cpy_kdata		c_u.c_k.kdata
#define cpy_kalloc_size		c_u.c_k.kalloc_size

#define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift)
#define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy)))
#define VM_MAP_COPY_PAGE_MASK(copy) (VM_MAP_COPY_PAGE_SIZE((copy)) - 1)

/*
 *	Useful macros for entry list copy objects
 */

#define vm_map_copy_to_entry(copy)		\
		((struct vm_map_entry *) &(copy)->cpy_hdr.links)
#define vm_map_copy_first_entry(copy)		\
		((copy)->cpy_hdr.links.next)
#define vm_map_copy_last_entry(copy)		\
		((copy)->cpy_hdr.links.prev)
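
/*
 * Usage sketch (illustrative only): moving a region between maps with a map
 * copy object, using vm_map_copyin()/vm_map_copyout() declared below:
 *
 *	vm_map_copy_t		copy;
 *	vm_map_address_t	dst_addr;
 *	kern_return_t		kr;
 *
 *	kr = vm_map_copyin(src_map, src_addr, len, FALSE, &copy);
 *	if (kr == KERN_SUCCESS) {
 *		kr = vm_map_copyout(dst_map, &dst_addr, copy);
 *		if (kr != KERN_SUCCESS)
 *			vm_map_copy_discard(copy);	(copy not consumed on failure)
 *	}
 */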

/*
 *	Macros:		vm_map_lock, etc. [internal use only]
 *	Description:
 *		Perform locking on the data portion of a map.
 *	When multiple maps are to be locked, order by map address.
 *	(See vm_map.c::vm_remap())
 */

#define vm_map_lock_init(map)						\
	((map)->timestamp = 0 ,						\
	lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr))

#define vm_map_lock(map)		lck_rw_lock_exclusive(&(map)->lock)
#define vm_map_unlock(map)						\
		((map)->timestamp++ ,	lck_rw_done(&(map)->lock))
#define vm_map_lock_read(map)		lck_rw_lock_shared(&(map)->lock)
#define vm_map_unlock_read(map)		lck_rw_done(&(map)->lock)
#define vm_map_lock_write_to_read(map)					\
		((map)->timestamp++ ,	lck_rw_lock_exclusive_to_shared(&(map)->lock))
/* lck_rw_lock_shared_to_exclusive() returns FALSE on failure, in which case
 * the shared lock has already been dropped.  The macro therefore evaluates
 * to zero on success and to a non-zero value on failure.
 */
#define vm_map_lock_read_to_write(map)	(lck_rw_lock_shared_to_exclusive(&(map)->lock) != TRUE)
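
/*
 * Usage sketch (illustrative only): upgrading a read lock to a write lock.
 * On failure the shared hold has been dropped, so the caller re-takes the
 * lock exclusively and must re-validate anything it examined under the
 * read lock:
 *
 *	if (vm_map_lock_read_to_write(map)) {
 *		vm_map_lock(map);
 *		(re-lookup / re-validate state here)
 *	}
 *	(map is now locked exclusive)
 */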

/*
 *	Exported procedures that operate on vm_map_t.
 */

/* Initialize the module */
extern void		vm_map_init(void);

extern void		vm_kernel_reserved_entry_init(void);

/* Allocate a range in the specified virtual address map and
 * return the entry allocated for that range. */
extern kern_return_t vm_map_find_space(
				vm_map_t		map,
				vm_map_address_t	*address,	/* OUT */
				vm_map_size_t		size,
				vm_map_offset_t		mask,
				int			flags,
				vm_map_entry_t		*o_entry);	/* OUT */

extern void vm_map_clip_start(
	vm_map_t	map,
	vm_map_entry_t	entry,
	vm_map_offset_t	endaddr);
extern void vm_map_clip_end(
	vm_map_t	map,
	vm_map_entry_t	entry,
	vm_map_offset_t	endaddr);
extern boolean_t vm_map_entry_should_cow_for_true_share(
	vm_map_entry_t	entry);

/* Look up the map entry containing (or, failing that, immediately preceding) the specified address in the given map */
extern boolean_t	vm_map_lookup_entry(
				vm_map_t		map,
				vm_map_address_t	address,
				vm_map_entry_t		*entry);	/* OUT */

extern void		vm_map_copy_remap(
	vm_map_t		map,
	vm_map_entry_t		where,
	vm_map_copy_t		copy,
	vm_map_offset_t		adjustment,
	vm_prot_t		cur_prot,
	vm_prot_t		max_prot,
	vm_inherit_t		inheritance);

/* Find the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified. */
extern kern_return_t	vm_map_lookup_locked(
				vm_map_t		*var_map,	/* IN/OUT */
				vm_map_address_t	vaddr,
				vm_prot_t		fault_type,
				int			object_lock_type,
				vm_map_version_t 	*out_version,	/* OUT */
				vm_object_t		*object,	/* OUT */
				vm_object_offset_t 	*offset,	/* OUT */
				vm_prot_t		*out_prot,	/* OUT */
				boolean_t		*wired,		/* OUT */
				vm_object_fault_info_t	fault_info,	/* OUT */
				vm_map_t		*real_map);	/* OUT */

/* Verifies that the map has not changed since the given version. */
extern boolean_t	vm_map_verify(
				vm_map_t	 	map,
				vm_map_version_t 	*version);	/* REF */

extern vm_map_entry_t	vm_map_entry_insert(
				vm_map_t		map,
				vm_map_entry_t		insp_entry,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_object_t		object,
				vm_object_offset_t	offset,
				boolean_t		needs_copy,
				boolean_t		is_shared,
				boolean_t		in_transition,
				vm_prot_t		cur_protection,
				vm_prot_t		max_protection,
				vm_behavior_t		behavior,
				vm_inherit_t		inheritance,
				unsigned		wired_count,
				boolean_t		no_cache,
				boolean_t		permanent,
				unsigned int		superpage_size,
				boolean_t		clear_map_aligned,
				boolean_t		is_submap);


/*
 *	Functions implemented as macros
 */
#define		vm_map_min(map)	((map)->min_offset)
						/* Lowest valid address in
						 * a map */

#define		vm_map_max(map)	((map)->max_offset)
						/* Highest valid address */

#define		vm_map_pmap(map)	((map)->pmap)
						/* Physical map associated
						 * with this address map */

#define		vm_map_verify_done(map, version)    vm_map_unlock_read(map)
						/* Operation that required
						 * a verified lookup is
						 * now complete */

/*
 * Macros/functions for map residence counts and swapin/out of vm maps
 */
#if	TASK_SWAPPER

#if	MACH_ASSERT
/* Gain a reference to an existing map */
extern void		vm_map_reference(
				vm_map_t	map);
/* Lose a residence count */
extern void		vm_map_res_deallocate(
				vm_map_t	map);
/* Gain a residence count on a map */
extern void		vm_map_res_reference(
				vm_map_t	map);
/* Gain reference & residence counts to possibly swapped-out map */
extern void		vm_map_reference_swap(
				vm_map_t	map);

#else	/* MACH_ASSERT */

#define vm_map_reference(map)		\
MACRO_BEGIN					\
	vm_map_t Map = (map);		\
	if (Map) {				\
		lck_mtx_lock(&Map->s_lock);	\
		Map->res_count++;		\
		Map->ref_count++;		\
		lck_mtx_unlock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_res_reference(map)		\
MACRO_BEGIN					\
	vm_map_t Lmap = (map);		\
	if (Lmap->res_count == 0) {		\
		lck_mtx_unlock(&Lmap->s_lock);\
		vm_map_lock(Lmap);		\
		vm_map_swapin(Lmap);		\
		lck_mtx_lock(&Lmap->s_lock);	\
		++Lmap->res_count;		\
		vm_map_unlock(Lmap);		\
	} else					\
		++Lmap->res_count;		\
MACRO_END

#define vm_map_res_deallocate(map)		\
MACRO_BEGIN					\
	vm_map_t Map = (map);		\
	if (--Map->res_count == 0) {	\
		lck_mtx_unlock(&Map->s_lock);	\
		vm_map_lock(Map);		\
		vm_map_swapout(Map);		\
		vm_map_unlock(Map);		\
		lck_mtx_lock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_reference_swap(map)	\
MACRO_BEGIN				\
	vm_map_t Map = (map);		\
	lck_mtx_lock(&Map->s_lock);	\
	++Map->ref_count;		\
	vm_map_res_reference(Map);	\
	lck_mtx_unlock(&Map->s_lock);	\
MACRO_END
#endif 	/* MACH_ASSERT */

extern void		vm_map_swapin(
				vm_map_t	map);

extern void		vm_map_swapout(
				vm_map_t	map);

#else	/* TASK_SWAPPER */

#define vm_map_reference(map)			\
MACRO_BEGIN					\
	vm_map_t Map = (map);			\
	if (Map) {				\
		lck_mtx_lock(&Map->s_lock);	\
		Map->ref_count++;		\
		lck_mtx_unlock(&Map->s_lock);	\
	}					\
MACRO_END

#define vm_map_reference_swap(map)	vm_map_reference(map)
#define vm_map_res_reference(map)
#define vm_map_res_deallocate(map)

#endif	/* TASK_SWAPPER */

/*
 *	Submap object.  Must be used to create memory to be put
 *	in a submap by vm_map_submap.
 */
extern vm_object_t	vm_submap_object;

/*
 *	Wait and wakeup macros for in_transition map entries.
 */
#define vm_map_entry_wait(map, interruptible)    	\
	((map)->timestamp++ ,				\
	 lck_rw_sleep(&(map)->lock, LCK_SLEEP_EXCLUSIVE|LCK_SLEEP_PROMOTED_PRI, \
				  (event_t)&(map)->hdr,	interruptible))


#define vm_map_entry_wakeup(map)        \
	thread_wakeup((event_t)(&(map)->hdr))
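
/*
 * Usage sketch (illustrative only): waiting for an entry that is marked
 * "in_transition".  The map lock is dropped while sleeping and re-acquired
 * (exclusive) before vm_map_entry_wait() returns, so the entry may have been
 * clipped or freed and must be looked up again:
 *
 *	while (entry->in_transition) {
 *		entry->needs_wakeup = TRUE;
 *		vm_map_entry_wait(map, THREAD_UNINT);
 *		if (!vm_map_lookup_entry(map, start, &entry))
 *			break;		(the range is gone; handle that case)
 *	}
 */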


#define	vm_map_ref_fast(map)			\
	MACRO_BEGIN					\
	lck_mtx_lock(&map->s_lock);			\
	map->ref_count++;				\
	vm_map_res_reference(map);			\
	lck_mtx_unlock(&map->s_lock);			\
	MACRO_END

#define	vm_map_dealloc_fast(map)		\
	MACRO_BEGIN					\
	register int c;				\
							\
	lck_mtx_lock(&map->s_lock);			\
	c = --map->ref_count;			\
	if (c > 0)					\
		vm_map_res_deallocate(map);		\
	lck_mtx_unlock(&map->s_lock);			\
	if (c == 0)					\
		vm_map_destroy(map);			\
	MACRO_END


/* simplify map entries */
extern void		vm_map_simplify_entry(
	vm_map_t	map,
	vm_map_entry_t	this_entry);
extern void		vm_map_simplify(
				vm_map_t		map,
				vm_map_offset_t		start);

/* Move the information in a map copy object to a new map copy object */
extern vm_map_copy_t	vm_map_copy_copy(
				vm_map_copy_t           copy);

/* Create a copy object from an object. */
extern kern_return_t	vm_map_copyin_object(
				vm_object_t		object,
				vm_object_offset_t	offset,
				vm_object_size_t	size,
				vm_map_copy_t		*copy_result); /* OUT */

extern kern_return_t	vm_map_random_address_for_size(
				vm_map_t	map,
				vm_map_offset_t	*address,
				vm_map_size_t	size);

/* Enter a mapping */
extern kern_return_t	vm_map_enter(
				vm_map_t		map,
				vm_map_offset_t		*address,
				vm_map_size_t		size,
				vm_map_offset_t		mask,
				int			flags,
				vm_object_t		object,
				vm_object_offset_t	offset,
				boolean_t		needs_copy,
				vm_prot_t		cur_protection,
				vm_prot_t		max_protection,
				vm_inherit_t		inheritance);

/* XXX should go away - replaced with regular enter of contig object */
extern  kern_return_t	vm_map_enter_cpm(
				vm_map_t		map,
				vm_map_address_t	*addr,
				vm_map_size_t		size,
				int			flags);

extern kern_return_t vm_map_remap(
				vm_map_t		target_map,
				vm_map_offset_t		*address,
				vm_map_size_t		size,
				vm_map_offset_t		mask,
				int			flags,
				vm_map_t		src_map,
				vm_map_offset_t		memory_address,
				boolean_t		copy,
				vm_prot_t		*cur_protection,
				vm_prot_t		*max_protection,
				vm_inherit_t		inheritance);


/*
 * Read and write from a kernel buffer to a specified map.
 */
extern	kern_return_t	vm_map_write_user(
				vm_map_t		map,
				void			*src_p,
				vm_map_offset_t		dst_addr,
				vm_size_t		size);

extern	kern_return_t	vm_map_read_user(
				vm_map_t		map,
				vm_map_offset_t		src_addr,
				void			*dst_p,
				vm_size_t		size);

/* Create a new task map using an existing task map as a template. */
extern vm_map_t		vm_map_fork(
				ledger_t		ledger,
				vm_map_t		old_map);

/* Change inheritance */
extern kern_return_t	vm_map_inherit(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_inherit_t		new_inheritance);

/* Add or remove machine-dependent attributes from map regions */
extern kern_return_t	vm_map_machine_attribute(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_machine_attribute_t	attribute,
				vm_machine_attribute_val_t* value); /* IN/OUT */

extern kern_return_t	vm_map_msync(
				vm_map_t		map,
				vm_map_address_t	address,
				vm_map_size_t		size,
				vm_sync_t		sync_flags);

/* Set paging behavior */
extern kern_return_t	vm_map_behavior_set(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_behavior_t		new_behavior);

extern kern_return_t vm_map_purgable_control(
				vm_map_t		map,
				vm_map_offset_t		address,
				vm_purgable_t		control,
				int			*state);

extern kern_return_t vm_map_region(
				vm_map_t		 map,
				vm_map_offset_t		*address,
				vm_map_size_t		*size,
				vm_region_flavor_t	 flavor,
				vm_region_info_t	 info,
				mach_msg_type_number_t	*count,
				mach_port_t		*object_name);

extern kern_return_t vm_map_region_recurse_64(
				vm_map_t		 map,
				vm_map_offset_t		*address,
				vm_map_size_t		*size,
				natural_t	 	*nesting_depth,
				vm_region_submap_info_64_t info,
				mach_msg_type_number_t  *count);

extern kern_return_t vm_map_page_query_internal(
				vm_map_t		map,
				vm_map_offset_t		offset,
				int			*disposition,
				int			*ref_count);

extern kern_return_t vm_map_query_volatile(
	vm_map_t	map,
	mach_vm_size_t	*volatile_virtual_size_p,
	mach_vm_size_t	*volatile_resident_size_p,
	mach_vm_size_t	*volatile_pmap_size_p);

extern kern_return_t	vm_map_submap(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_map_t		submap,
				vm_map_offset_t		offset,
				boolean_t		use_pmap);

extern void vm_map_submap_pmap_clean(
	vm_map_t	map,
	vm_map_offset_t	start,
	vm_map_offset_t	end,
	vm_map_t	sub_map,
	vm_map_offset_t	offset);

/* Convert from a map entry port to a map */
extern vm_map_t convert_port_entry_to_map(
	ipc_port_t	port);

/* Convert from a port to a vm_object */
extern vm_object_t convert_port_entry_to_object(
	ipc_port_t	port);


extern kern_return_t vm_map_set_cache_attr(
	vm_map_t	map,
	vm_map_offset_t	va);


/* definitions related to overriding the NX behavior */

#define VM_ABI_32	0x1
#define VM_ABI_64	0x2

extern int override_nx(vm_map_t map, uint32_t user_tag);

extern int vm_map_purge(vm_map_t map);

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

/* Create an empty map */
extern vm_map_t		vm_map_create(
				pmap_t			pmap,
				vm_map_offset_t 	min_off,
				vm_map_offset_t 	max_off,
				boolean_t		pageable);

/* Get rid of a map */
extern void		vm_map_destroy(
				vm_map_t		map,
				int			flags);

/* Lose a reference */
extern void		vm_map_deallocate(
				vm_map_t		map);

extern vm_map_t		vm_map_switch(
				vm_map_t		map);

/* Change protection */
extern kern_return_t	vm_map_protect(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_prot_t		new_prot,
				boolean_t		set_max);

/* Check protection */
extern boolean_t vm_map_check_protection(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_prot_t		protection);

/* wire down a region */
extern kern_return_t	vm_map_wire(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				vm_prot_t		access_type,
				boolean_t		user_wire);

extern kern_return_t	vm_map_wire_and_extract(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_prot_t		access_type,
				boolean_t		user_wire,
				ppnum_t			*physpage_p);

/* unwire a region */
extern kern_return_t	vm_map_unwire(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				boolean_t		user_wire);
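
/*
 * Usage sketch (illustrative only): wiring a range for read/write access and
 * unwiring it again.  "user_wire" distinguishes mlock()-style user wiring,
 * tracked in user_wired_count, from kernel wiring:
 *
 *	kern_return_t	kr;
 *
 *	kr = vm_map_wire(map, start, end, VM_PROT_READ | VM_PROT_WRITE, FALSE);
 *	if (kr == KERN_SUCCESS) {
 *		(... use the wired range ...)
 *		kr = vm_map_unwire(map, start, end, FALSE);
 *	}
 */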

/* Enter a mapping of a memory object */
extern kern_return_t	vm_map_enter_mem_object(
				vm_map_t		map,
				vm_map_offset_t		*address,
				vm_map_size_t		size,
				vm_map_offset_t		mask,
				int			flags,
				ipc_port_t		port,
				vm_object_offset_t	offset,
				boolean_t		needs_copy,
				vm_prot_t		cur_protection,
				vm_prot_t		max_protection,
				vm_inherit_t		inheritance);

/* Enter a mapping of a memory object */
extern kern_return_t	vm_map_enter_mem_object_prefault(
				vm_map_t		map,
				vm_map_offset_t		*address,
				vm_map_size_t		size,
				vm_map_offset_t		mask,
				int			flags,
				ipc_port_t		port,
				vm_object_offset_t	offset,
				vm_prot_t		cur_protection,
				vm_prot_t		max_protection,
				upl_page_list_ptr_t	page_list,
				unsigned int 		page_list_count);

/* Enter a mapping of a memory object */
extern kern_return_t	vm_map_enter_mem_object_control(
				vm_map_t		map,
				vm_map_offset_t		*address,
				vm_map_size_t		size,
				vm_map_offset_t		mask,
				int			flags,
				memory_object_control_t	control,
				vm_object_offset_t	offset,
				boolean_t		needs_copy,
				vm_prot_t		cur_protection,
				vm_prot_t		max_protection,
				vm_inherit_t		inheritance);

/* Deallocate a region */
extern kern_return_t	vm_map_remove(
				vm_map_t		map,
				vm_map_offset_t		start,
				vm_map_offset_t		end,
				boolean_t		flags);

/* Discard a copy without using it */
extern void		vm_map_copy_discard(
				vm_map_copy_t		copy);

/* Overwrite existing memory with a copy */
extern kern_return_t	vm_map_copy_overwrite(
				vm_map_t                dst_map,
				vm_map_address_t        dst_addr,
				vm_map_copy_t           copy,
				boolean_t               interruptible);

/* Place a copy into a map */
extern kern_return_t	vm_map_copyout(
				vm_map_t		dst_map,
				vm_map_address_t	*dst_addr,	/* OUT */
				vm_map_copy_t		copy);

extern kern_return_t	vm_map_copyout_internal(
	vm_map_t		dst_map,
	vm_map_address_t	*dst_addr,	/* OUT */
	vm_map_copy_t		copy,
	boolean_t		consume_on_success,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance);

extern kern_return_t	vm_map_copyin(
				vm_map_t		src_map,
				vm_map_address_t	src_addr,
				vm_map_size_t		len,
				boolean_t		src_destroy,
				vm_map_copy_t		*copy_result);	/* OUT */

extern kern_return_t	vm_map_copyin_common(
				vm_map_t		src_map,
				vm_map_address_t	src_addr,
				vm_map_size_t		len,
				boolean_t		src_destroy,
				boolean_t		src_volatile,
				vm_map_copy_t		*copy_result,	/* OUT */
				boolean_t		use_maxprot);

extern kern_return_t	vm_map_copy_extract(
	vm_map_t		src_map,
	vm_map_address_t	src_addr,
	vm_map_size_t		len,
	vm_map_copy_t		*copy_result,	/* OUT */
	vm_prot_t		*cur_prot,	/* OUT */
	vm_prot_t		*max_prot);


extern void		vm_map_disable_NX(
			        vm_map_t		map);

extern void		vm_map_disallow_data_exec(
			        vm_map_t		map);

extern void		vm_map_set_64bit(
			        vm_map_t		map);

extern void		vm_map_set_32bit(
			        vm_map_t		map);

extern boolean_t	vm_map_has_hard_pagezero(
		       		vm_map_t		map,
				vm_map_offset_t		pagezero_size);

extern boolean_t	vm_map_is_64bit(
			        vm_map_t		map);


extern kern_return_t	vm_map_raise_max_offset(
	vm_map_t	map,
	vm_map_offset_t	new_max_offset);

extern kern_return_t	vm_map_raise_min_offset(
	vm_map_t	map,
	vm_map_offset_t	new_min_offset);

extern vm_map_offset_t	vm_compute_max_offset(
				unsigned		is64);

extern uint64_t 	vm_map_get_max_aslr_slide_pages(
				vm_map_t map);

extern void		vm_map_set_user_wire_limit(
				vm_map_t		map,
				vm_size_t		limit);

extern void vm_map_switch_protect(
				vm_map_t		map,
				boolean_t		val);

extern void vm_map_iokit_mapped_region(
				vm_map_t		map,
				vm_size_t		bytes);

extern void vm_map_iokit_unmapped_region(
				vm_map_t		map,
				vm_size_t		bytes);


extern boolean_t first_free_is_valid(vm_map_t);

extern int 		vm_map_page_shift(
				vm_map_t 		map);

extern int		vm_map_page_mask(
				vm_map_t 		map);

extern int		vm_map_page_size(
				vm_map_t 		map);

extern vm_map_offset_t	vm_map_round_page_mask(
				vm_map_offset_t		offset,
				vm_map_offset_t		mask);

extern vm_map_offset_t	vm_map_trunc_page_mask(
				vm_map_offset_t		offset,
				vm_map_offset_t		mask);

#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t vm_map_page_info(
	vm_map_t		map,
	vm_map_offset_t		offset,
	vm_page_info_flavor_t	flavor,
	vm_page_info_t		info,
	mach_msg_type_number_t	*count);
#endif /* XNU_KERNEL_PRIVATE */


#ifdef	MACH_KERNEL_PRIVATE

/*
 *	Macros to invoke vm_map_copyin_common.  vm_map_copyin is the
 *	usual form; it handles a copyin based on the current protection
 *	(a current protection of VM_PROT_NONE is a failure).
 *	vm_map_copyin_maxprot handles a copyin based on maximum possible
 *	access.  The difference is that a region with no current access
 *	BUT possible maximum access is rejected by vm_map_copyin(), but
 *	returned by vm_map_copyin_maxprot.
 */
#define	vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result) \
		vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
					FALSE, copy_result, FALSE)

#define vm_map_copyin_maxprot(src_map, \
			      src_addr, len, src_destroy, copy_result) \
		vm_map_copyin_common(src_map, src_addr, len, src_destroy, \
					FALSE, copy_result, TRUE)


/*
 * Internal macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_ROUND_PAGE(x,pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define VM_MAP_TRUNC_PAGE(x,pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))

/*
 * Macros for rounding and truncation of vm_map offsets and sizes
 */
#define VM_MAP_PAGE_SHIFT(map) ((map) ? (map)->hdr.page_shift : PAGE_SHIFT)
#define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map)))
#define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1)
#define VM_MAP_PAGE_ALIGNED(x,pgmask) (((x) & (pgmask)) == 0)

#endif /* MACH_KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE
extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift);
#endif /* XNU_KERNEL_PRIVATE */

#define vm_map_round_page(x,pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))
#define vm_map_trunc_page(x,pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask)))
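
/*
 * Worked example (illustrative only): with 4K pages the page mask is 0xFFF,
 * with 16K pages it is 0x3FFF:
 *
 *	vm_map_trunc_page(0x5432, 0xFFF)  == 0x5000
 *	vm_map_round_page(0x5432, 0xFFF)  == 0x6000
 *	vm_map_round_page(0x5432, 0x3FFF) == 0x8000
 */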

/*
 * Flags for vm_map_remove() and vm_map_delete()
 */
#define	VM_MAP_NO_FLAGS	  		0x0
#define	VM_MAP_REMOVE_KUNWIRE	  	0x1
#define	VM_MAP_REMOVE_INTERRUPTIBLE  	0x2
#define	VM_MAP_REMOVE_WAIT_FOR_KWIRE  	0x4
#define VM_MAP_REMOVE_SAVE_ENTRIES	0x8
#define VM_MAP_REMOVE_NO_PMAP_CLEANUP	0x10
#define VM_MAP_REMOVE_NO_MAP_ALIGN	0x20

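/*
 * Usage sketch (illustrative only): removing a page-aligned range and
 * unwiring any kernel-wired pages in it first:
 *
 *	kr = vm_map_remove(map,
 *			   vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(map)),
 *			   vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(map)),
 *			   VM_MAP_REMOVE_KUNWIRE);
 */
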
/* Support for UPLs from vm_maps */

extern kern_return_t vm_map_get_upl(
				vm_map_t		target_map,
				vm_map_offset_t		map_offset,
				upl_size_t		*size,
				upl_t			*upl,
				upl_page_info_array_t	page_info,
				unsigned int	*page_infoCnt,
				int		*flags,
				int		force_data_sync);

#if CONFIG_DYNAMIC_CODE_SIGNING
extern kern_return_t vm_map_sign(vm_map_t map,
				 vm_map_offset_t start,
				 vm_map_offset_t end);
#endif

extern kern_return_t vm_map_partial_reap(
		vm_map_t map,
		unsigned int *reclaimed_resident,
		unsigned int *reclaimed_compressed);

#if CONFIG_FREEZE
void	vm_map_freeze_thaw_init(void);
void	vm_map_freeze_thaw(void);
void	vm_map_demand_fault(void);

extern kern_return_t vm_map_freeze_walk(
		vm_map_t map,
		unsigned int *purgeable_count,
		unsigned int *wired_count,
		unsigned int *clean_count,
		unsigned int *dirty_count,
		unsigned int dirty_budget,
		boolean_t *has_shared);

extern kern_return_t vm_map_freeze(
		vm_map_t map,
		unsigned int *purgeable_count,
		unsigned int *wired_count,
		unsigned int *clean_count,
		unsigned int *dirty_count,
		unsigned int dirty_budget,
		boolean_t *has_shared);

extern kern_return_t vm_map_thaw(
		vm_map_t map);
#endif

__END_DECLS

#endif	/* KERNEL_PRIVATE */

#endif	/* _VM_VM_MAP_H_ */
