vm_map.c (92588) -> vm_map.c (92654)
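This change (r92654, jeff, 2002-03-19) converts vm_map.c from the old vm_zone allocator to UMA: the map, map-entry, kernel-map-entry, and vmspace zones become uma_zone_t objects, allocation goes through uma_zalloc()/uma_zfree() instead of zalloc()/zfree(), and per-item setup moves into init/fini hooks (vm_map_zinit()/vm_map_zfini(), vmspace_zinit()/vmspace_zfini()) with INVARIANTS-only destructors that assert a map is empty when it is freed. The zinit() calls that remain for the two entry zones appear to be compatibility wrappers that now return a uma_zone_t. Below is a minimal sketch of the zone-creation pattern the new code uses; it assumes the 2002-era uma_zcreate() argument order visible in this diff (name, size, ctor, dtor, init, fini, align, flags), and "example"/"examplezone" are hypothetical names, not part of the change.

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <vm/uma.h>

	/* Hypothetical item type; the real code uses struct vm_map / struct vmspace. */
	struct example {
		int	ex_count;
	};

	static uma_zone_t examplezone;

	/*
	 * Runs when an item is set up for the zone; the real vm_map_zinit()
	 * also does lockinit() on the map lock here.
	 */
	static void
	example_zinit(void *mem, int size)
	{
		struct example *ex = mem;

		ex->ex_count = 0;
	}

	/*
	 * Runs if an item is ever released from the zone; the real
	 * vm_map_zfini() does lockdestroy() here.  With UMA_ZONE_NOFREE
	 * items stay cached in the zone.
	 */
	static void
	example_zfini(void *mem, int size)
	{
	}

	void
	example_startup(void)
	{
		examplezone = uma_zcreate("EXAMPLE", sizeof(struct example),
		    NULL, NULL,			/* per-alloc ctor/dtor unused here */
		    example_zinit, example_zfini,
		    UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
		uma_prealloc(examplezone, 128);	/* reserve items up front */
		/*
		 * Later: item = uma_zalloc(examplezone, M_WAITOK);
		 *        uma_zfree(examplezone, item);
		 */
	}
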
1/*
2 * Copyright (c) 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without

--- 47 unchanged lines hidden (view full) ---

56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
57 * School of Computer Science
58 * Carnegie Mellon University
59 * Pittsburgh PA 15213-3890
60 *
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
63 *
64 * $FreeBSD: head/sys/vm/vm_map.c 92588 2002-03-18 15:08:09Z green $
64 * $FreeBSD: head/sys/vm/vm_map.c 92654 2002-03-19 09:11:49Z jeff $
65 */
66
67/*
68 * Virtual memory mapping module.
69 */
70
71#include <sys/param.h>
72#include <sys/systm.h>

--- 10 unchanged lines hidden (view full) ---

83#include <vm/vm_param.h>
84#include <vm/pmap.h>
85#include <vm/vm_map.h>
86#include <vm/vm_page.h>
87#include <vm/vm_object.h>
88#include <vm/vm_pager.h>
89#include <vm/vm_kern.h>
90#include <vm/vm_extern.h>
91#include <vm/vm_zone.h>
92#include <vm/swap_pager.h>
93
94/*
95 * Virtual memory maps provide for the mapping, protection,
96 * and sharing of virtual memory objects. In addition,
97 * this module provides for an efficient virtual copy of
98 * memory from one map to another.
99 *

--- 26 unchanged lines hidden (view full) ---

126 *
127 * - The kernel map and kmem submap are allocated statically.
128 * - Kernel map entries are allocated out of a static pool.
129 *
130 * These restrictions are necessary since malloc() uses the
131 * maps and requires map entries.
132 */
133
91#include <vm/swap_pager.h>
92
93/*
94 * Virtual memory maps provide for the mapping, protection,
95 * and sharing of virtual memory objects. In addition,
96 * this module provides for an efficient virtual copy of
97 * memory from one map to another.
98 *

--- 26 unchanged lines hidden (view full) ---

125 *
126 * - The kernel map and kmem submap are allocated statically.
127 * - Kernel map entries are allocated out of a static pool.
128 *
129 * These restrictions are necessary since malloc() uses the
130 * maps and requires map entries.
131 */
132
134static struct vm_zone kmapentzone_store, mapentzone_store, mapzone_store;
135static vm_zone_t mapentzone, kmapentzone, mapzone, vmspace_zone;
136static struct vm_object kmapentobj, mapentobj, mapobj;
133static uma_zone_t mapentzone;
134static uma_zone_t kmapentzone;
135static uma_zone_t mapzone;
136static uma_zone_t vmspace_zone;
137static struct vm_object kmapentobj;
138static void vmspace_zinit(void *mem, int size);
139static void vmspace_zfini(void *mem, int size);
140static void vm_map_zinit(void *mem, int size);
141static void vm_map_zfini(void *mem, int size);
142static void _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max);
137
143
138static struct vm_map_entry map_entry_init[MAX_MAPENT];
139static struct vm_map_entry kmap_entry_init[MAX_KMAPENT];
140static struct vm_map map_init[MAX_KMAP];
144#ifdef INVARIANTS
145static void vm_map_zdtor(void *mem, int size, void *arg);
146static void vmspace_zdtor(void *mem, int size, void *arg);
147#endif
141
142void
143vm_map_startup(void)
144{
148
149void
150vm_map_startup(void)
151{
145 mapzone = &mapzone_store;
146 zbootinit(mapzone, "MAP", sizeof (struct vm_map),
147 map_init, MAX_KMAP);
148 kmapentzone = &kmapentzone_store;
149 zbootinit(kmapentzone, "KMAP ENTRY", sizeof (struct vm_map_entry),
150 kmap_entry_init, MAX_KMAPENT);
151 mapentzone = &mapentzone_store;
152 zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
153 map_entry_init, MAX_MAPENT);
152 mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
153#ifdef INVARIANTS
154 vm_map_zdtor,
155#else
156 NULL,
157#endif
158 vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
159 uma_prealloc(mapzone, MAX_KMAP);
160 kmapentzone = zinit("KMAP ENTRY", sizeof(struct vm_map_entry), 0, 0, 0); uma_prealloc(kmapentzone, MAX_KMAPENT);
161 mapentzone = zinit("MAP ENTRY", sizeof(struct vm_map_entry), 0, 0, 0);
162 uma_prealloc(mapentzone, MAX_MAPENT);
154}
155
163}
164
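A point worth noting about the new startup code above and the hooks that follow: lockinit() moves out of the common map-initialization path (_vm_map_init()) and into vm_map_zinit(), with the matching lockdestroy() in vm_map_zfini(). Because the zones are created with UMA_ZONE_NOFREE, a map allocated from the zone appears to keep its lock initialized across uma_zalloc()/uma_zfree() cycles, while the INVARIANTS-only vm_map_zdtor()/vmspace_zdtor() hooks merely assert that a map is empty (nentries, size, and infork all zero) when it goes back to the zone. Statically embedded maps still get their lock from the public vm_map_init(), which now wraps _vm_map_init() plus lockinit().
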
165static void
166vmspace_zfini(void *mem, int size)
167{
168 struct vmspace *vm;
169
170 vm = (struct vmspace *)mem;
171
172 vm_map_zfini(&vm->vm_map, sizeof(vm->vm_map));
173}
174
175static void
176vmspace_zinit(void *mem, int size)
177{
178 struct vmspace *vm;
179
180 vm = (struct vmspace *)mem;
181
182 vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map));
183}
184
185static void
186vm_map_zfini(void *mem, int size)
187{
188 vm_map_t map;
189
190 GIANT_REQUIRED;
191 map = (vm_map_t)mem;
192
193 lockdestroy(&map->lock);
194}
195
196static void
197vm_map_zinit(void *mem, int size)
198{
199 vm_map_t map;
200
201 GIANT_REQUIRED;
202
203 map = (vm_map_t)mem;
204 map->nentries = 0;
205 map->size = 0;
206 map->infork = 0;
207 lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
208}
209
210#ifdef INVARIANTS
211static void
212vmspace_zdtor(void *mem, int size, void *arg)
213{
214 struct vmspace *vm;
215
216 vm = (struct vmspace *)mem;
217
218 vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
219}
220static void
221vm_map_zdtor(void *mem, int size, void *arg)
222{
223 vm_map_t map;
224
225 map = (vm_map_t)mem;
226 KASSERT(map->nentries == 0,
227 ("map %p nentries == %d on free.",
228 map, map->nentries));
229 KASSERT(map->size == 0,
230 ("map %p size == %lu on free.",
231 map, map->size));
232 KASSERT(map->infork == 0,
233 ("map %p infork == %d on free.",
234 map, map->infork));
235}
236#endif /* INVARIANTS */
237
156/*
157 * Allocate a vmspace structure, including a vm_map and pmap,
158 * and initialize those structures. The refcnt is set to 1.
159 * The remaining fields must be initialized by the caller.
160 */
161struct vmspace *
162vmspace_alloc(min, max)
163 vm_offset_t min, max;
164{
165 struct vmspace *vm;
166
167 GIANT_REQUIRED;
238/*
239 * Allocate a vmspace structure, including a vm_map and pmap,
240 * and initialize those structures. The refcnt is set to 1.
241 * The remaining fields must be initialized by the caller.
242 */
243struct vmspace *
244vmspace_alloc(min, max)
245 vm_offset_t min, max;
246{
247 struct vmspace *vm;
248
249 GIANT_REQUIRED;
168 vm = zalloc(vmspace_zone);
250 vm = uma_zalloc(vmspace_zone, M_WAITOK);
169 CTR1(KTR_VM, "vmspace_alloc: %p", vm);
251 CTR1(KTR_VM, "vmspace_alloc: %p", vm);
170 vm_map_init(&vm->vm_map, min, max);
252 _vm_map_init(&vm->vm_map, min, max);
171 pmap_pinit(vmspace_pmap(vm));
172 vm->vm_map.pmap = vmspace_pmap(vm); /* XXX */
173 vm->vm_refcnt = 1;
174 vm->vm_shm = NULL;
175 vm->vm_freer = NULL;
176 return (vm);
177}
178
179void
180vm_init2(void)
181{
253 pmap_pinit(vmspace_pmap(vm));
254 vm->vm_map.pmap = vmspace_pmap(vm); /* XXX */
255 vm->vm_refcnt = 1;
256 vm->vm_shm = NULL;
257 vm->vm_freer = NULL;
258 return (vm);
259}
260
261void
262vm_init2(void)
263{
182 zinitna(kmapentzone, &kmapentobj,
183 NULL, 0, cnt.v_page_count / 4, ZONE_INTERRUPT, 1);
184 zinitna(mapentzone, &mapentobj,
185 NULL, 0, 0, 0, 1);
186 zinitna(mapzone, &mapobj,
187 NULL, 0, 0, 0, 1);
188 vmspace_zone = zinit("VMSPACE", sizeof (struct vmspace), 0, 0, 3);
264 uma_zone_set_obj(kmapentzone, &kmapentobj, cnt.v_page_count / 4);
265 vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
266#ifdef INVARIANTS
267 vmspace_zdtor,
268#else
269 NULL,
270#endif
271 vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
189 pmap_init2();
190 vm_object_init2();
191}
192
193static __inline void
194vmspace_dofree(struct vmspace *vm)
195{
196 CTR1(KTR_VM, "vmspace_free: %p", vm);
197 /*
198 * Lock the map, to wait out all other references to it.
199 * Delete all of the mappings and pages they hold, then call
200 * the pmap module to reclaim anything left.
201 */
202 vm_map_lock(&vm->vm_map);
203 (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
204 vm->vm_map.max_offset);
205 vm_map_unlock(&vm->vm_map);
272 pmap_init2();
273 vm_object_init2();
274}
275
276static __inline void
277vmspace_dofree(struct vmspace *vm)
278{
279 CTR1(KTR_VM, "vmspace_free: %p", vm);
280 /*
281 * Lock the map, to wait out all other references to it.
282 * Delete all of the mappings and pages they hold, then call
283 * the pmap module to reclaim anything left.
284 */
285 vm_map_lock(&vm->vm_map);
286 (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
287 vm->vm_map.max_offset);
288 vm_map_unlock(&vm->vm_map);
289
206 pmap_release(vmspace_pmap(vm));
290 pmap_release(vmspace_pmap(vm));
207 vm_map_destroy(&vm->vm_map);
208 zfree(vmspace_zone, vm);
291 uma_zfree(vmspace_zone, vm);
209}
210
211void
212vmspace_free(struct vmspace *vm)
213{
214 GIANT_REQUIRED;
215
216 if (vm->vm_refcnt == 0)

--- 168 unchanged lines hidden (view full) ---

385 */
386vm_map_t
387vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
388{
389 vm_map_t result;
390
391 GIANT_REQUIRED;
392
292}
293
294void
295vmspace_free(struct vmspace *vm)
296{
297 GIANT_REQUIRED;
298
299 if (vm->vm_refcnt == 0)

--- 168 unchanged lines hidden (view full) ---

468 */
469vm_map_t
470vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
471{
472 vm_map_t result;
473
474 GIANT_REQUIRED;
475
393 result = zalloc(mapzone);
476 result = uma_zalloc(mapzone, M_WAITOK);
394 CTR1(KTR_VM, "vm_map_create: %p", result);
477 CTR1(KTR_VM, "vm_map_create: %p", result);
395 vm_map_init(result, min, max);
478 _vm_map_init(result, min, max);
396 result->pmap = pmap;
397 return (result);
398}
399
400/*
401 * Initialize an existing vm_map structure
402 * such as that in the vmspace structure.
403 * The pmap is set elsewhere.
404 */
479 result->pmap = pmap;
480 return (result);
481}
482
483/*
484 * Initialize an existing vm_map structure
485 * such as that in the vmspace structure.
486 * The pmap is set elsewhere.
487 */
405void
406vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
488static void
489_vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
407{
408 GIANT_REQUIRED;
409
410 map->header.next = map->header.prev = &map->header;
490{
491 GIANT_REQUIRED;
492
493 map->header.next = map->header.prev = &map->header;
411 map->nentries = 0;
412 map->size = 0;
413 map->system_map = 0;
494 map->system_map = 0;
414 map->infork = 0;
415 map->min_offset = min;
416 map->max_offset = max;
417 map->first_free = &map->header;
418 map->hint = &map->header;
419 map->timestamp = 0;
495 map->min_offset = min;
496 map->max_offset = max;
497 map->first_free = &map->header;
498 map->hint = &map->header;
499 map->timestamp = 0;
420 lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
421}
422
423void
500}
501
502void
424vm_map_destroy(map)
425 struct vm_map *map;
503vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
426{
504{
427 GIANT_REQUIRED;
428 lockdestroy(&map->lock);
505 _vm_map_init(map, min, max);
506 lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
429}
430
431/*
432 * vm_map_entry_dispose: [ internal use only ]
433 *
434 * Inverse of vm_map_entry_create.
435 */
436static void
437vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
438{
507}
508
509/*
510 * vm_map_entry_dispose: [ internal use only ]
511 *
512 * Inverse of vm_map_entry_create.
513 */
514static void
515vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
516{
439 zfree((map->system_map || !mapentzone) ? kmapentzone : mapentzone, entry);
517 uma_zfree((map->system_map || !mapentzone)
518 ? kmapentzone : mapentzone, entry);
440}
441
442/*
443 * vm_map_entry_create: [ internal use only ]
444 *
445 * Allocates a VM map entry for insertion.
446 * No entry fields are filled in.
447 */
448static vm_map_entry_t
449vm_map_entry_create(vm_map_t map)
450{
451 vm_map_entry_t new_entry;
452
519}
520
521/*
522 * vm_map_entry_create: [ internal use only ]
523 *
524 * Allocates a VM map entry for insertion.
525 * No entry fields are filled in.
526 */
527static vm_map_entry_t
528vm_map_entry_create(vm_map_t map)
529{
530 vm_map_entry_t new_entry;
531
453 new_entry = zalloc((map->system_map || !mapentzone) ?
454 kmapentzone : mapentzone);
532 new_entry = uma_zalloc((map->system_map || !mapentzone) ?
533 kmapentzone : mapentzone, M_WAITOK);
455 if (new_entry == NULL)
456 panic("vm_map_entry_create: kernel resources exhausted");
457 return (new_entry);
458}
459
460/*
461 * vm_map_entry_{un,}link:
462 *

--- 2805 unchanged lines hidden ---
534 if (new_entry == NULL)
535 panic("vm_map_entry_create: kernel resources exhausted");
536 return (new_entry);
537}
538
539/*
540 * vm_map_entry_{un,}link:
541 *

--- 2805 unchanged lines hidden ---
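
As a compact before/after view of the allocation calls, taken from the vm_map_entry_create() and vm_map_entry_dispose() hunks above (M_WAITOK is the wait flag the new code passes to uma_zalloc()):

	/* before (vm_zone) */
	new_entry = zalloc(mapentzone);
	/* ... use the entry ... */
	zfree(mapentzone, new_entry);

	/* after (UMA) */
	new_entry = uma_zalloc(mapentzone, M_WAITOK);
	/* ... use the entry ... */
	uma_zfree(mapentzone, new_entry);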