Deleted Added
sdiff udiff text old ( 92588 ) new ( 92654 )
full compact
1/*
2 * Copyright (c) 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without

--- 47 unchanged lines hidden (view full) ---

56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
57 * School of Computer Science
58 * Carnegie Mellon University
59 * Pittsburgh PA 15213-3890
60 *
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
63 *
64 * $FreeBSD: head/sys/vm/vm_map.c 92654 2002-03-19 09:11:49Z jeff $
65 */
66
67/*
68 * Virtual memory mapping module.
69 */
70
71#include <sys/param.h>
72#include <sys/systm.h>

--- 10 unchanged lines hidden (view full) ---

83#include <vm/vm_param.h>
84#include <vm/pmap.h>
85#include <vm/vm_map.h>
86#include <vm/vm_page.h>
87#include <vm/vm_object.h>
88#include <vm/vm_pager.h>
89#include <vm/vm_kern.h>
90#include <vm/vm_extern.h>
91#include <vm/swap_pager.h>
92
93/*
94 * Virtual memory maps provide for the mapping, protection,
95 * and sharing of virtual memory objects. In addition,
96 * this module provides for an efficient virtual copy of
97 * memory from one map to another.
98 *

--- 26 unchanged lines hidden (view full) ---

125 *
126 * - The kernel map and kmem submap are allocated statically.
127 * - Kernel map entries are allocated out of a static pool.
128 *
129 * These restrictions are necessary since malloc() uses the
130 * maps and requires map entries.
131 */
132
/*
 * UMA zones backing the map allocator.  kmapentzone supplies entries for
 * the kernel map itself (backed by kmapentobj so it never recurses into
 * the maps it serves); mapentzone supplies entries for all other maps.
 */
static uma_zone_t mapentzone;		/* entries for user/process maps */
static uma_zone_t kmapentzone;		/* entries for the kernel map */
static uma_zone_t mapzone;		/* struct vm_map allocations */
static uma_zone_t vmspace_zone;		/* struct vmspace allocations */
static struct vm_object kmapentobj;	/* backing store for kmapentzone */
static void vmspace_zinit(void *mem, int size);
static void vmspace_zfini(void *mem, int size);
static void vm_map_zinit(void *mem, int size);
static void vm_map_zfini(void *mem, int size);
static void _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max);

#ifdef INVARIANTS
/* Destructors that sanity-check items returned to the zones. */
static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif
148
149void
150vm_map_startup(void)
151{
152 mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
153#ifdef INVARIANTS
154 vm_map_zdtor,
155#else
156 NULL,
157#endif
158 vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
159 uma_prealloc(mapzone, MAX_KMAP);
160 kmapentzone = zinit("KMAP ENTRY", sizeof(struct vm_map_entry), 0, 0, 0); uma_prealloc(kmapentzone, MAX_KMAPENT);
161 mapentzone = zinit("MAP ENTRY", sizeof(struct vm_map_entry), 0, 0, 0);
162 uma_prealloc(mapentzone, MAX_MAPENT);
163}
164
165static void
166vmspace_zfini(void *mem, int size)
167{
168 struct vmspace *vm;
169
170 vm = (struct vmspace *)mem;
171
172 vm_map_zfini(&vm->vm_map, sizeof(vm->vm_map));
173}
174
175static void
176vmspace_zinit(void *mem, int size)
177{
178 struct vmspace *vm;
179
180 vm = (struct vmspace *)mem;
181
182 vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map));
183}
184
185static void
186vm_map_zfini(void *mem, int size)
187{
188 vm_map_t map;
189
190 GIANT_REQUIRED;
191 map = (vm_map_t)mem;
192
193 lockdestroy(&map->lock);
194}
195
196static void
197vm_map_zinit(void *mem, int size)
198{
199 vm_map_t map;
200
201 GIANT_REQUIRED;
202
203 map = (vm_map_t)mem;
204 map->nentries = 0;
205 map->size = 0;
206 map->infork = 0;
207 lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
208}
209
210#ifdef INVARIANTS
/*
 * UMA dtor (INVARIANTS only): forward to vm_map_zdtor() to verify that
 * the embedded map of a freed vmspace is empty.
 */
static void
vmspace_zdtor(void *mem, int size, void *arg)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
}
/*
 * UMA dtor (INVARIANTS only): assert that a map being returned to the
 * zone has been fully emptied — no entries, zero size, and no fork in
 * progress.  Any violation indicates a leak or teardown bug upstream.
 */
static void
vm_map_zdtor(void *mem, int size, void *arg)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	KASSERT(map->nentries == 0,
	    ("map %p nentries == %d on free.",
	    map, map->nentries));
	KASSERT(map->size == 0,
	    ("map %p size == %lu on free.",
	    map, map->size));
	KASSERT(map->infork == 0,
	    ("map %p infork == %d on free.",
	    map, map->infork));
}
236#endif /* INVARIANTS */
237
238/*
239 * Allocate a vmspace structure, including a vm_map and pmap,
240 * and initialize those structures. The refcnt is set to 1.
241 * The remaining fields must be initialized by the caller.
242 */
243struct vmspace *
244vmspace_alloc(min, max)
245 vm_offset_t min, max;
246{
247 struct vmspace *vm;
248
249 GIANT_REQUIRED;
250 vm = uma_zalloc(vmspace_zone, M_WAITOK);
251 CTR1(KTR_VM, "vmspace_alloc: %p", vm);
252 _vm_map_init(&vm->vm_map, min, max);
253 pmap_pinit(vmspace_pmap(vm));
254 vm->vm_map.pmap = vmspace_pmap(vm); /* XXX */
255 vm->vm_refcnt = 1;
256 vm->vm_shm = NULL;
257 vm->vm_freer = NULL;
258 return (vm);
259}
260
/*
 * Second-stage VM initialization: give the kernel-map-entry zone its
 * backing object (sized from the page count), create the vmspace zone,
 * and run the second-stage pmap and object initializers.
 */
void
vm_init2(void)
{
	uma_zone_set_obj(kmapentzone, &kmapentobj, cnt.v_page_count / 4);
	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
#ifdef INVARIANTS
	    vmspace_zdtor,
#else
	    NULL,
#endif
	    vmspace_zinit, vmspace_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	pmap_init2();
	vm_object_init2();
}
275
/*
 * Really free a vmspace: empty its map, release its pmap, and return
 * the structure to the zone.  Caller has determined the refcount has
 * dropped to zero.
 */
static __inline void
vmspace_dofree(struct vmspace *vm)
{
	CTR1(KTR_VM, "vmspace_free: %p", vm);
	/*
	 * Lock the map, to wait out all other references to it.
	 * Delete all of the mappings and pages they hold, then call
	 * the pmap module to reclaim anything left.
	 */
	vm_map_lock(&vm->vm_map);
	(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
	    vm->vm_map.max_offset);
	vm_map_unlock(&vm->vm_map);

	/* pmap teardown must follow the map delete so ptes are gone. */
	pmap_release(vmspace_pmap(vm));
	uma_zfree(vmspace_zone, vm);
}
293
294void
295vmspace_free(struct vmspace *vm)
296{
297 GIANT_REQUIRED;
298
299 if (vm->vm_refcnt == 0)

--- 168 unchanged lines hidden (view full) ---

468 */
469vm_map_t
470vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
471{
472 vm_map_t result;
473
474 GIANT_REQUIRED;
475
476 result = uma_zalloc(mapzone, M_WAITOK);
477 CTR1(KTR_VM, "vm_map_create: %p", result);
478 _vm_map_init(result, min, max);
479 result->pmap = pmap;
480 return (result);
481}
482
483/*
484 * Initialize an existing vm_map structure
485 * such as that in the vmspace structure.
486 * The pmap is set elsewhere.
487 */
488static void
489_vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
490{
491 GIANT_REQUIRED;
492
493 map->header.next = map->header.prev = &map->header;
494 map->system_map = 0;
495 map->min_offset = min;
496 map->max_offset = max;
497 map->first_free = &map->header;
498 map->hint = &map->header;
499 map->timestamp = 0;
500}
501
/*
 * Public map initializer for maps embedded in other structures (not
 * allocated from mapzone): performs the common field setup and then
 * the lock init that the zone's init hook would otherwise have done.
 */
void
vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max)
{
	_vm_map_init(map, min, max);
	lockinit(&map->lock, PVM, "thrd_sleep", 0, LK_NOPAUSE);
}
508
509/*
510 * vm_map_entry_dispose: [ internal use only ]
511 *
512 * Inverse of vm_map_entry_create.
513 */
514static void
515vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
516{
517 uma_zfree((map->system_map || !mapentzone)
518 ? kmapentzone : mapentzone, entry);
519}
520
521/*
522 * vm_map_entry_create: [ internal use only ]
523 *
524 * Allocates a VM map entry for insertion.
525 * No entry fields are filled in.
526 */
527static vm_map_entry_t
528vm_map_entry_create(vm_map_t map)
529{
530 vm_map_entry_t new_entry;
531
532 new_entry = uma_zalloc((map->system_map || !mapentzone) ?
533 kmapentzone : mapentzone, M_WAITOK);
534 if (new_entry == NULL)
535 panic("vm_map_entry_create: kernel resources exhausted");
536 return (new_entry);
537}
538
539/*
540 * vm_map_entry_{un,}link:
541 *

--- 2805 unchanged lines hidden ---