/* vm_map.h revision 44396 */
1/*
2 * Copyright (c) 1991, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *	This product includes software developed by the University of
19 *	California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 *    may be used to endorse or promote products derived from this software
22 *    without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 *	@(#)vm_map.h	8.9 (Berkeley) 5/17/95
37 *
38 *
39 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40 * All rights reserved.
41 *
42 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43 *
44 * Permission to use, copy, modify and distribute this software and
45 * its documentation is hereby granted, provided that both the copyright
46 * notice and this permission notice appear in all copies of the
47 * software, derivative works or modified versions, and any portions
48 * thereof, and that both notices appear in supporting documentation.
49 *
50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53 *
54 * Carnegie Mellon requests users of this software to return to
55 *
56 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
57 *  School of Computer Science
58 *  Carnegie Mellon University
59 *  Pittsburgh PA 15213-3890
60 *
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
63 *
64 * $Id: vm_map.h,v 1.36 1999/02/19 14:25:36 luoqi Exp $
65 */
66
67/*
68 *	Virtual memory map module definitions.
69 */
70
71#ifndef	_VM_MAP_
72#define	_VM_MAP_
73
74/*
75 *	Types defined:
76 *
77 *	vm_map_t		the high-level address map data structure.
78 *	vm_map_entry_t		an entry in an address map.
79 *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
80 */
81
82/*
83 *	Objects which live in maps may be either VM objects, or
84 *	another map (called a "sharing map") which denotes read-write
85 *	sharing with other maps.
86 */
87
88union vm_map_object {
89	struct vm_object *vm_object;	/* the VM object backing this entry */
90	struct vm_map *sub_map;		/* nested map, valid when MAP_ENTRY_IS_SUB_MAP is set */
91};
92
93/*
94 *	Address map entries consist of start and end addresses,
95 *	a VM object (or sharing map) and offset into that object,
96 *	and user-exported inheritance and protection information.
97 *	Also included is control information for virtual copy operations.
98 */
99struct vm_map_entry {
100	struct vm_map_entry *prev;	/* previous entry in the map's sorted list */
101	struct vm_map_entry *next;	/* next entry in the map's sorted list */
102	vm_offset_t start;		/* start address */
103	vm_offset_t end;		/* end address */
104	vm_offset_t avail_ssize;	/* amt can grow if this is a stack */
105	union vm_map_object object;	/* object I point to */
106	vm_ooffset_t offset;		/* offset into object */
107	u_char eflags;			/* MAP_ENTRY_* flags (see below) */
108	/* Only in task maps: */
109	vm_prot_t protection;		/* protection code */
110	vm_prot_t max_protection;	/* maximum protection */
111	vm_inherit_t inheritance;	/* inheritance */
112	int wired_count;		/* can be paged if = 0 */
113};
114
/* Bit values for vm_map_entry.eflags. */
115#define MAP_ENTRY_UNUSED_01		0x1	/* unused bit */
116#define MAP_ENTRY_IS_SUB_MAP		0x2	/* object.sub_map (not vm_object) is valid */
117#define MAP_ENTRY_COW			0x4	/* presumably copy-on-write; cf. MAP_COPY_ON_WRITE below -- verify in vm_map.c */
118#define MAP_ENTRY_NEEDS_COPY		0x8	/* presumably object must be copied before first write -- verify in vm_map.c */
119#define MAP_ENTRY_NOFAULT		0x10	/* presumably faults disallowed here; cf. MAP_NOFAULT below */
120#define MAP_ENTRY_USER_WIRED		0x20	/* wired by a user request; cf. vm_map_user_pageable() */
121
122/*
123 *	Maps are doubly-linked lists of map entries, kept sorted
124 *	by address.  A single hint is provided to start
125 *	searches again from the last successful search,
126 *	insertion, or removal.
127 */
128struct vm_map {
129	struct lock lock;		/* Lock for map data */
130	struct vm_map_entry header;	/* List of entries (circular; see min/max_offset below) */
131	int nentries;			/* Number of entries */
132	vm_size_t size;			/* virtual size */
133	unsigned char	system_map;			/* Am I a system map? */
134	vm_map_entry_t hint;		/* hint for quick lookups */
135	unsigned int timestamp;		/* Version number, bumped by vm_map_lock() */
136	vm_map_entry_t first_free;	/* First free space hint */
137	struct pmap *pmap;		/* Physical map */
/* The header entry's start/end double as the map's valid address range. */
138#define	min_offset		header.start
139#define max_offset		header.end
140};
141
142/*
143 * Shareable process virtual address space.
144 * May eventually be merged with vm_map.
145 * Several fields are temporary (text, data stuff).
146 */
147struct vmspace {
148	struct vm_map vm_map;	/* VM address map */
149	struct pmap vm_pmap;	/* private physical map */
150	int vm_refcnt;		/* number of references */
151	caddr_t vm_shm;		/* SYS5 shared memory private data XXX */
152/* we copy from vm_startcopy to the end of the structure on fork */
153#define vm_startcopy vm_rssize
154	segsz_t vm_rssize;	/* current resident set size in pages */
155	segsz_t vm_swrss;	/* resident set size before last swap */
156	segsz_t vm_tsize;	/* text size (pages) XXX */
157	segsz_t vm_dsize;	/* data size (pages) XXX */
158	segsz_t vm_ssize;	/* stack size (pages) */
159	caddr_t vm_taddr;	/* user virtual address of text XXX */
160	caddr_t vm_daddr;	/* user virtual address of data XXX */
161	caddr_t vm_maxsaddr;	/* user VA at max stack growth */
162	caddr_t vm_minsaddr;	/* user VA at top of stack; NOTE(review): original comment duplicated vm_maxsaddr's -- verify */
163};
164
165
166#if 0
/* XXX disabled (#if 0): map versions are not currently used anywhere. */
167/*
168 *	Map versions are used to validate a previous lookup attempt.
169 *
170 *	Since lookup operations may involve both a main map and
171 *	a sharing map, it is necessary to have a timestamp from each.
172 *	[If the main map timestamp has changed, the share_map and
173 *	associated timestamp are no longer valid; the map version
174 *	does not include a reference for the embedded share_map.]
175 */
176typedef struct {
177	int main_timestamp;
178	int share_timestamp;
179} vm_map_version_t;
180
181#endif
182
183/*
184 *	Macros:		vm_map_lock, etc.
185 *	Function:
186 *		Perform locking on the data portion of a map.
187 */
188
/*
 * Acquire the map lock exclusively, draining out all pending lock
 * requests first (LK_DRAIN), and bump the map's version stamp.
 * Wrapped in do { } while (0) so the multi-statement macro expands
 * as a single statement and is safe inside an unbraced if/else.
 *
 * NOTE(review): this passes &(map)->ref_lock as the interlock, but
 * struct vm_map in this file has no ref_lock member -- this macro
 * looks stale; confirm against its users before relying on it.
 */
#define	vm_map_lock_drain_interlock(map) do { \
	lockmgr(&(map)->lock, LK_DRAIN|LK_INTERLOCK, \
		&(map)->ref_lock, curproc); \
	(map)->timestamp++; \
} while (0)
194
#ifdef DIAGNOSTIC
/* #define MAP_LOCK_DIAGNOSTIC 1 */
#ifdef MAP_LOCK_DIAGNOSTIC
/*
 * Exclusively lock the map and bump its version stamp, panicking if
 * the lock cannot be obtained.  The do { } while (0) wrapper makes
 * the multi-statement macro expand as one statement, so it is safe
 * in an unbraced if/else (the bare { } form was not).
 */
#define	vm_map_lock(map) do { \
	printf ("locking map LK_EXCLUSIVE: 0x%x\n", map); \
	if (lockmgr(&(map)->lock, LK_EXCLUSIVE, (void *)0, curproc) != 0) { \
		panic("vm_map_lock: failed to get lock"); \
	} \
	(map)->timestamp++; \
} while (0)
#else
#define	vm_map_lock(map) do { \
	if (lockmgr(&(map)->lock, LK_EXCLUSIVE, (void *)0, curproc) != 0) { \
		panic("vm_map_lock: failed to get lock"); \
	} \
	(map)->timestamp++; \
} while (0)
#endif
#else
/* Non-DIAGNOSTIC build: no panic check on the lockmgr() return. */
#define	vm_map_lock(map) do { \
	lockmgr(&(map)->lock, LK_EXCLUSIVE, (void *)0, curproc); \
	(map)->timestamp++; \
} while (0)
#endif /* DIAGNOSTIC */
219
/*
 * Release / shared-acquire wrappers for the map lock.  The expansions
 * deliberately end without a semicolon (the diagnostic versions end at
 * "while (0)") so that the caller-supplied ';' completes the statement;
 * the old trailing semicolons broke "if (x) vm_map_unlock(map); else".
 */
#if defined(MAP_LOCK_DIAGNOSTIC)
#define	vm_map_unlock(map) \
	do { \
		printf ("locking map LK_RELEASE: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curproc); \
	} while (0)
#define	vm_map_lock_read(map) \
	do { \
		printf ("locking map LK_SHARED: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_SHARED, (void *)0, curproc); \
	} while (0)
#define	vm_map_unlock_read(map) \
	do { \
		printf ("locking map LK_RELEASE: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curproc); \
	} while (0)
#else
#define	vm_map_unlock(map) \
	lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curproc)
#define	vm_map_lock_read(map) \
	lockmgr(&(map)->lock, LK_SHARED, (void *)0, curproc)
#define	vm_map_unlock_read(map) \
	lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curproc)
#endif
244
245static __inline__ int
246_vm_map_lock_upgrade(vm_map_t map, struct proc *p) {
247#if defined(MAP_LOCK_DIAGNOSTIC)
248	printf("locking map LK_EXCLUPGRADE: 0x%x\n", map);
249#endif
250	return lockmgr(&(map)->lock, LK_EXCLUPGRADE, (void *)0, p);
251}
252
253#define vm_map_lock_upgrade(map) _vm_map_lock_upgrade(map, curproc)
254
/*
 * Downgrade an exclusive hold on the map lock to a shared one.  The
 * expansion ends without a semicolon so the macro behaves as a single
 * statement completed by the caller's ';' (the old trailing semicolons
 * broke use in an unbraced if/else).
 */
#if defined(MAP_LOCK_DIAGNOSTIC)
#define vm_map_lock_downgrade(map) \
	do { \
		printf ("locking map LK_DOWNGRADE: 0x%x\n", map); \
		lockmgr(&(map)->lock, LK_DOWNGRADE, (void *)0, curproc); \
	} while (0)
#else
#define vm_map_lock_downgrade(map) \
	lockmgr(&(map)->lock, LK_DOWNGRADE, (void *)0, curproc)
#endif
265
/*
 * Set/clear LK_CANRECURSE on the map lock under its interlock, making
 * the lock temporarily (non-)recursive.  Wrapped in do { } while (0)
 * so the multi-statement macros expand as single statements and are
 * safe inside an unbraced if/else.
 */
#define vm_map_set_recursive(map) do { \
	simple_lock(&(map)->lock.lk_interlock); \
	(map)->lock.lk_flags |= LK_CANRECURSE; \
	simple_unlock(&(map)->lock.lk_interlock); \
} while (0)
#define vm_map_clear_recursive(map) do { \
	simple_lock(&(map)->lock.lk_interlock); \
	(map)->lock.lk_flags &= ~LK_CANRECURSE; \
	simple_unlock(&(map)->lock.lk_interlock); \
} while (0)
276
277/*
278 *	Functions implemented as macros
279 */
280#define		vm_map_min(map)		((map)->min_offset)	/* lowest address in the map's range */
281#define		vm_map_max(map)		((map)->max_offset)	/* highest address in the map's range */
282#define		vm_map_pmap(map)	((map)->pmap)		/* the map's physical (pmap) layer */
283
284static __inline struct pmap *
285vmspace_pmap(struct vmspace *vmspace)
286{
287	return &vmspace->vm_pmap;
288}
289
/*
 * Return the resident page count of a vmspace, as reported by its
 * pmap layer via pmap_resident_count().
 */
static __inline long
vmspace_resident_count(struct vmspace *vmspace)
{
	return (pmap_resident_count(vmspace_pmap(vmspace)));
}
295
296/* XXX: number of kernel maps and entries to statically allocate */
297#define MAX_KMAP	10
298#define	MAX_KMAPENT	128
299#define	MAX_MAPENT	128
300
301/*
302 * Copy-on-write flags for vm_map operations
303 */
/* Presumably these request the matching MAP_ENTRY_* eflags -- verify in vm_map.c. */
304#define MAP_COPY_NEEDED 0x1
305#define MAP_COPY_ON_WRITE 0x2
306#define MAP_NOFAULT 0x4
307
308/*
309 * vm_fault option flags
310 */
311#define VM_FAULT_NORMAL 0		/* Nothing special */
312#define VM_FAULT_CHANGE_WIRING 1	/* Change the wiring as appropriate */
313#define VM_FAULT_USER_WIRE 2		/* Likewise, but for user purposes */
314#define VM_FAULT_WIRE_MASK (VM_FAULT_CHANGE_WIRING|VM_FAULT_USER_WIRE)
315#define	VM_FAULT_HOLD 4			/* Hold the page */
316#define VM_FAULT_DIRTY 8		/* Dirty the page */
317
318#ifdef KERNEL
/* Static kernel map-entry pool, set up by vm_map_startup() -- presumably defined in vm_map.c. */
319extern vm_offset_t kentry_data;
320extern vm_size_t kentry_data_size;
321
/* Exported vm_map interface; implementations live elsewhere (presumably vm/vm_map.c). */
322boolean_t vm_map_check_protection __P((vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t));
323int vm_map_copy __P((vm_map_t, vm_map_t, vm_offset_t, vm_size_t, vm_offset_t, boolean_t, boolean_t));
324struct pmap;
325vm_map_t vm_map_create __P((struct pmap *, vm_offset_t, vm_offset_t));
326void vm_map_deallocate __P((vm_map_t));
327int vm_map_delete __P((vm_map_t, vm_offset_t, vm_offset_t));
328int vm_map_find __P((vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t, boolean_t, vm_prot_t, vm_prot_t, int));
329int vm_map_findspace __P((vm_map_t, vm_offset_t, vm_size_t, vm_offset_t *));
330int vm_map_inherit __P((vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t));
331void vm_map_init __P((struct vm_map *, vm_offset_t, vm_offset_t));
332int vm_map_insert __P((vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t, vm_offset_t, vm_prot_t, vm_prot_t, int));
333int vm_map_lookup __P((vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *,
334    vm_pindex_t *, vm_prot_t *, boolean_t *));
335void vm_map_lookup_done __P((vm_map_t, vm_map_entry_t));
336boolean_t vm_map_lookup_entry __P((vm_map_t, vm_offset_t, vm_map_entry_t *));
337int vm_map_pageable __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t));
338int vm_map_user_pageable __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t));
339int vm_map_clean __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t));
340int vm_map_protect __P((vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
341void vm_map_reference __P((vm_map_t));
342int vm_map_remove __P((vm_map_t, vm_offset_t, vm_offset_t));
343void vm_map_simplify __P((vm_map_t, vm_offset_t));
344void vm_map_startup __P((void));
345int vm_map_submap __P((vm_map_t, vm_offset_t, vm_offset_t, vm_map_t));
346void vm_map_madvise __P((vm_map_t, pmap_t, vm_offset_t, vm_offset_t, int));
347void vm_map_simplify_entry __P((vm_map_t, vm_map_entry_t));
348void vm_init2 __P((void));
349int vm_uiomove __P((vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *));
350void vm_freeze_copyopts __P((vm_object_t, vm_pindex_t, vm_pindex_t));
351int vm_map_stack __P((vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int));
352int vm_map_growstack __P((struct proc *p, vm_offset_t addr));
353
354#endif
355#endif				/* _VM_MAP_ */
356