vm_map.c (32670) → vm_map.c (32702)
Unchanged context is shown once per hunk; at each change, the deleted lines (old revision's numbering) come first, followed by the added lines (new revision's numbering).
1/*
2 * Copyright (c) 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without

--- 47 unchanged lines hidden (view full) ---

56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
57 * School of Computer Science
58 * Carnegie Mellon University
59 * Pittsburgh PA 15213-3890
60 *
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
63 *
64 * $Id: vm_map.c,v 1.106 1998/01/17 09:16:51 dyson Exp $
64 * $Id: vm_map.c,v 1.107 1998/01/21 12:18:00 dyson Exp $
65 */
66
67/*
68 * Virtual memory mapping module.
69 */
70
71#include <sys/param.h>
72#include <sys/systm.h>

--- 80 unchanged lines hidden (view full) ---

153 * These restrictions are necessary since malloc() uses the
154 * maps and requires map entries.
155 */
156
157extern char kstack[];
158extern int inmprotect;
159
160static struct vm_zone kmapentzone_store, mapentzone_store, mapzone_store;
161static vm_zone_t mapentzone, kmapentzone, mapzone;
161static vm_zone_t mapentzone, kmapentzone, mapzone, vmspace_zone;
162static struct vm_object kmapentobj, mapentobj, mapobj;
163#define MAP_ENTRY_INIT 128
164struct vm_map_entry map_entry_init[MAX_MAPENT];
165struct vm_map_entry kmap_entry_init[MAX_KMAPENT];
166struct vm_map map_init[MAX_KMAP];
167
168static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
169static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));

--- 20 unchanged lines hidden (view full) ---

190}
191
192/*
193 * Allocate a vmspace structure, including a vm_map and pmap,
194 * and initialize those structures. The refcnt is set to 1.
195 * The remaining fields must be initialized by the caller.
196 */
197struct vmspace *
198vmspace_alloc(min, max, pageable)
198vmspace_alloc(min, max)
199 vm_offset_t min, max;
200 int pageable;
201{
202 register struct vmspace *vm;
203
204 MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
205 bzero(vm, (caddr_t) &vm->vm_startcopy - (caddr_t) vm);
206 vm_map_init(&vm->vm_map, min, max, pageable);
203 vm = zalloc(vmspace_zone);
204 bzero(&vm->vm_map, sizeof vm->vm_map);
205 vm_map_init(&vm->vm_map, min, max);
207 pmap_pinit(&vm->vm_pmap);
208 vm->vm_map.pmap = &vm->vm_pmap; /* XXX */
209 vm->vm_refcnt = 1;
209 vm->vm_shm = NULL;
210 return (vm);
211}
212
213void
214vm_init2(void) {
215 zinitna(kmapentzone, &kmapentobj,
216 NULL, 0, cnt.v_page_count / 4, ZONE_INTERRUPT, 1);
217 zinitna(mapentzone, &mapentobj,
218 NULL, 0, 0, 0, 1);
219 zinitna(mapzone, &mapobj,
220 NULL, 0, 0, 0, 1);
221 vmspace_zone = zinit("VMSPACE", sizeof (struct vmspace), 0, 0, 3);
221 pmap_init2();
222 vm_object_init2();
223}
224
225void
226vmspace_free(vm)
227 register struct vmspace *vm;
228{

--- 8 unchanged lines hidden (view full) ---

237 * Delete all of the mappings and pages they hold, then call
238 * the pmap module to reclaim anything left.
239 */
240 vm_map_lock(&vm->vm_map);
241 (void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
242 vm->vm_map.max_offset);
243 vm_map_unlock(&vm->vm_map);
244
245 while( vm->vm_map.ref_count != 1)
246 tsleep(&vm->vm_map.ref_count, PVM, "vmsfre", 0);
247 --vm->vm_map.ref_count;
248 pmap_release(&vm->vm_pmap);
246 pmap_release(&vm->vm_pmap);
249 FREE(vm, M_VMMAP);
250 } else {
251 wakeup(&vm->vm_map.ref_count);
247 zfree(vmspace_zone, vm);
252 }
253}
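
Note: vmspace structures now come from a private vm_zone rather than the general kernel malloc, and teardown no longer waits out map references. The zone pattern, assembled from calls that appear in this change (a sketch; the zinit argument meanings are inferred from the call site, not checked against vm_zone.h):

        /* once, at VM startup (vm_init2) */
        vmspace_zone = zinit("VMSPACE", sizeof (struct vmspace), 0, 0, 3);

        /* per allocation (vmspace_alloc) */
        vm = zalloc(vmspace_zone);

        /* per release (vmspace_free) */
        zfree(vmspace_zone, vm);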
254
255/*
256 * vm_map_create:
257 *
258 * Creates and returns a new empty VM map with
259 * the given physical map structure, and having
260 * the given lower and upper address bounds.
261 */
262vm_map_t
263vm_map_create(pmap, min, max, pageable)
259vm_map_create(pmap, min, max)
264 pmap_t pmap;
265 vm_offset_t min, max;
266 boolean_t pageable;
267{
268 register vm_map_t result;
269
270 result = zalloc(mapzone);
271 vm_map_init(result, min, max, pageable);
266 vm_map_init(result, min, max);
272 result->pmap = pmap;
273 return (result);
274}
275
276/*
277 * Initialize an existing vm_map structure
278 * such as that in the vmspace structure.
279 * The pmap is set elsewhere.
280 */
281void
282vm_map_init(map, min, max, pageable)
277vm_map_init(map, min, max)
283 register struct vm_map *map;
284 vm_offset_t min, max;
285 boolean_t pageable;
286{
287 map->header.next = map->header.prev = &map->header;
288 map->nentries = 0;
289 map->size = 0;
290 map->ref_count = 1;
291 map->is_main_map = TRUE;
292 map->system_map = 0;
293 map->min_offset = min;
294 map->max_offset = max;
295 map->entries_pageable = pageable;
296 map->first_free = &map->header;
297 map->hint = &map->header;
298 map->timestamp = 0;
299 lockinit(&map->lock, PVM, "thrd_sleep", 0, 0);
300 simple_lock_init(&map->ref_lock);
301}
302
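
Note: net effect of the hunks above: the pageable argument is gone, and per-map reference counting (ref_count, ref_lock) and entries_pageable are no longer initialized. The resulting interfaces, written as ANSI prototypes for clarity (the file itself keeps K&R definitions):

        struct vmspace *vmspace_alloc(vm_offset_t min, vm_offset_t max);
        vm_map_t vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max);
        void vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max);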
303/*
304 * vm_map_entry_dispose: [ internal use only ]
305 *
306 * Inverse of vm_map_entry_create.
307 */
308static void

--- 35 unchanged lines hidden (view full) ---

344 { \
345 (map)->nentries--; \
346 (map)->timestamp++; \
347 (entry)->next->prev = (entry)->prev; \
348 (entry)->prev->next = (entry)->next; \
349 }
350
351/*
352 * vm_map_reference:
353 *
354 * Creates another valid reference to the given map.
355 *
356 */
357void
358vm_map_reference(map)
359 register vm_map_t map;
360{
361 if (map == NULL)
362 return;
363
364 map->ref_count++;
365}
366
367/*
368 * vm_map_deallocate:
369 *
370 * Removes a reference from the specified map,
371 * destroying it if no references remain.
372 * The map should not be locked.
373 */
374void
375vm_map_deallocate(map)
376 register vm_map_t map;
377{
378 register int c;
379
380 if (map == NULL)
381 return;
382
383 c = map->ref_count;
384
385 if (c == 0)
386 panic("vm_map_deallocate: deallocating already freed map");
387
388 if (c != 1) {
389 --map->ref_count;
390 wakeup(&map->ref_count);
391 return;
392 }
393 /*
394 * Lock the map, to wait out all other references to it.
395 */
396
397 vm_map_lock_drain_interlock(map);
398 (void) vm_map_delete(map, map->min_offset, map->max_offset);
399 --map->ref_count;
400 if( map->ref_count != 0) {
401 vm_map_unlock(map);
402 return;
403 }
404
405 pmap_destroy(map->pmap);
406
407 vm_map_unlock(map);
408
409 zfree(mapzone, map);
410}
411
412/*
413 * SAVE_HINT:
414 *
415 * Saves the specified entry as the hint for
416 * future lookups.
417 */
418#define SAVE_HINT(map,value) \
419 (map)->hint = (value);
420

--- 444 unchanged lines hidden (view full) ---

865 *new_entry = *entry;
866
867 new_entry->end = start;
868 entry->offset += (start - entry->start);
869 entry->start = start;
870
871 vm_map_entry_link(map, entry->prev, new_entry);
872
873 if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
874 vm_map_reference(new_entry->object.share_map);
875 else
803 if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0)
876 vm_object_reference(new_entry->object.vm_object);
877}
878
879/*
880 * vm_map_clip_end: [ internal use only ]
881 *
882 * Asserts that the given entry ends at or before
883 * the specified address; if necessary,

--- 42 unchanged lines hidden (view full) ---

926 new_entry = vm_map_entry_create(map);
927 *new_entry = *entry;
928
929 new_entry->start = entry->end = end;
930 new_entry->offset += (end - entry->start);
931
932 vm_map_entry_link(map, entry, new_entry);
933
934 if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
935 vm_map_reference(new_entry->object.share_map);
936 else
862 if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0)
937 vm_object_reference(new_entry->object.vm_object);
938}
939
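
Note: both clip routines take the same simplification: a clipped entry only needs an object reference when it names a real VM object, so the share-map branch collapses to one guarded call and map-backed entries take no reference at all:

        /* old */
        if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
                vm_map_reference(new_entry->object.share_map);
        else
                vm_object_reference(new_entry->object.vm_object);

        /* new */
        if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0)
                vm_object_reference(new_entry->object.vm_object);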
940/*
941 * VM_MAP_RANGE_CHECK: [ internal use only ]
942 *
943 * Asserts that the starting and ending region
944 * addresses fall within the valid range of the map.

--- 45 unchanged lines hidden (view full) ---

990 } else
991 entry = entry->next;
992
993 vm_map_clip_end(map, entry, end);
994
995 if ((entry->start == start) && (entry->end == end) &&
996 ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_COW)) == 0) &&
997 (entry->object.vm_object == NULL)) {
924 entry->object.sub_map = submap;
998 entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
999 vm_map_reference(entry->object.sub_map = submap);
1000 result = KERN_SUCCESS;
1001 }
1002 vm_map_unlock(map);
1003
1004 return (result);
1005}
1006
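
Note: installing a submap no longer takes a map reference; the pointer is stored and the flag set, nothing more:

        entry->object.sub_map = submap; /* was: vm_map_reference(entry->object.sub_map = submap); */
        entry->eflags |= MAP_ENTRY_IS_SUB_MAP;

This matches the removal of vm_map_reference()/vm_map_deallocate() above: with map reference counting retired, submap pointers are plain pointers.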
1007/*

--- 104 unchanged lines hidden (view full) ---

1112#undef MASK
1113 }
1114
1115 vm_map_simplify_entry(map, current);
1116
1117 current = current->next;
1118 }
1119
1046 map->timestamp++;
1120 vm_map_unlock(map);
1121 return (KERN_SUCCESS);
1122}
1123
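
Note: vm_map_protect() now bumps map->timestamp before releasing the lock (vm_uiomove() below gains the same bump after retargeting an entry). The likely intent, inferred here rather than stated in the change, is that a caller holding a cached lookup can compare timestamps and notice the map changed:

        map->timestamp++;       /* mark the map modified while still write-locked */
        vm_map_unlock(map);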
1124/*
1125 * vm_map_madvise:
1126 *
1127 * This routine traverses a process's map handling the madvise

--- 659 unchanged lines hidden (view full) ---

1787static void
1788vm_map_entry_delete(map, entry)
1789 register vm_map_t map;
1790 register vm_map_entry_t entry;
1791{
1792 vm_map_entry_unlink(map, entry);
1793 map->size -= entry->end - entry->start;
1794
1795 if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
1796 vm_map_deallocate(entry->object.share_map);
1797 } else {
1722 if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
1798 vm_object_deallocate(entry->object.vm_object);
1799 }
1800
1801 vm_map_entry_dispose(map, entry);
1802}
1803
1804/*
1805 * vm_map_delete: [ internal use only ]

--- 186 unchanged lines hidden (view full) ---

1992
1993 if (src_entry->wired_count == 0) {
1994
1995 /*
1996 * If the source entry is marked needs_copy, it is already
1997 * write-protected.
1998 */
1999 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
2000
2001 boolean_t su;
2002
2003 /*
2004 * If the source entry has only one mapping, we can
2005 * just protect the virtual address range.
2006 */
2007 if (!(su = src_map->is_main_map)) {
2008 su = (src_map->ref_count == 1);
2009 }
2010 if (su) {
2011 pmap_protect(src_map->pmap,
2012 src_entry->start,
2013 src_entry->end,
2014 src_entry->protection & ~VM_PROT_WRITE);
2015 } else {
2016 vm_object_pmap_copy(src_entry->object.vm_object,
2017 OFF_TO_IDX(src_entry->offset),
2018 OFF_TO_IDX(src_entry->offset + (src_entry->end
2019 - src_entry->start)));
2020 }
1925 pmap_protect(src_map->pmap,
1926 src_entry->start,
1927 src_entry->end,
1928 src_entry->protection & ~VM_PROT_WRITE);
2021 }
2022
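
Note: the single-use probe (su) and its vm_object_pmap_copy() fallback are deleted; since the share-map case it guarded is being retired throughout this change, the source range is now write-protected unconditionally:

        pmap_protect(src_map->pmap, src_entry->start, src_entry->end,
            src_entry->protection & ~VM_PROT_WRITE);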
2023 /*
2024 * Make a copy of the object.
2025 */
2026 if (src_entry->object.vm_object) {
2027 if ((src_entry->object.vm_object->handle == NULL) &&
2028 (src_entry->object.vm_object->type == OBJT_DEFAULT ||

--- 40 unchanged lines hidden (view full) ---

2069 vm_map_t new_map;
2070 vm_map_entry_t old_entry;
2071 vm_map_entry_t new_entry;
2072 pmap_t new_pmap;
2073 vm_object_t object;
2074
2075 vm_map_lock(old_map);
2076
2077 vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset,
2078 old_map->entries_pageable);
1985 vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
2079 bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
2080 (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
2081 new_pmap = &vm2->vm_pmap; /* XXX */
2082 new_map = &vm2->vm_map; /* XXX */
2083 new_map->timestamp = 1;
2084
2085 old_entry = old_map->header.next;
2086

--- 79 unchanged lines hidden (view full) ---

2166 */
2167
2168void
2169vmspace_exec(struct proc *p) {
2170 struct vmspace *oldvmspace = p->p_vmspace;
2171 struct vmspace *newvmspace;
2172 vm_map_t map = &p->p_vmspace->vm_map;
2173
2174 newvmspace = vmspace_alloc(map->min_offset, map->max_offset,
2175 map->entries_pageable);
2081 newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
2176 bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
2177 (caddr_t) (newvmspace + 1) - (caddr_t) &newvmspace->vm_startcopy);
2178 /*
2179 * This code is written like this for prototype purposes. The
2180 * goal is to avoid running down the vmspace here, but let the
2181 * other processes that are still using the vmspace finally
2182 * run it down. Even though there is little or no chance of blocking
2183 * here, it is a good idea to keep this form for future mods.
2184 */
2185 vm_map_reference(&oldvmspace->vm_map);
2186 vmspace_free(oldvmspace);
2187 p->p_vmspace = newvmspace;
2188 if (p == curproc)
2189 pmap_activate(p);
2190 vm_map_deallocate(&oldvmspace->vm_map);
2191}
2192
2193/*
2194 * Unshare the specified VM space for forcing COW. This
2195 * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
2196 */
2197
2198void
2199vmspace_unshare(struct proc *p) {
2200 struct vmspace *oldvmspace = p->p_vmspace;
2201 struct vmspace *newvmspace;
2202
2203 if (oldvmspace->vm_refcnt == 1)
2204 return;
2205 newvmspace = vmspace_fork(oldvmspace);
2206 vm_map_reference(&oldvmspace->vm_map);
2207 vmspace_free(oldvmspace);
2208 p->p_vmspace = newvmspace;
2209 if (p == curproc)
2210 pmap_activate(p);
2211 vm_map_deallocate(&oldvmspace->vm_map);
2212}
2213
2214
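Note: vmspace_exec() and vmspace_unshare() both drop the same bracketing dance. The old sequence pinned the dying map across the switch and released it afterwards:

        vm_map_reference(&oldvmspace->vm_map);
        vmspace_free(oldvmspace);
        p->p_vmspace = newvmspace;
        if (p == curproc)
                pmap_activate(p);
        vm_map_deallocate(&oldvmspace->vm_map);

With map reference counts retired, a single vmspace_free(oldvmspace) before the switch performs the whole teardown.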
2215/*
2216 * vm_map_lookup:
2217 *
2218 * Finds the VM object, offset, and
2219 * protection for a given virtual address in the

--- 17 unchanged lines hidden (view full) ---

2237int
2238vm_map_lookup(vm_map_t *var_map, /* IN/OUT */
2239 vm_offset_t vaddr,
2240 vm_prot_t fault_typea,
2241 vm_map_entry_t *out_entry, /* OUT */
2242 vm_object_t *object, /* OUT */
2243 vm_pindex_t *pindex, /* OUT */
2244 vm_prot_t *out_prot, /* OUT */
2245 boolean_t *wired, /* OUT */
2246 boolean_t *single_use) /* OUT */
2147 boolean_t *wired) /* OUT */
2247{
2248 vm_map_t share_map;
2249 vm_offset_t share_offset;
2250 register vm_map_entry_t entry;
2251 register vm_map_t map = *var_map;
2252 register vm_prot_t prot;
2253 register boolean_t su;
2254 vm_prot_t fault_type = fault_typea;

--- 147 unchanged lines hidden (view full) ---

2402 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
2403 vm_map_lock_downgrade(share_map);
2404 } else {
2405 /*
2406 * We're attempting to read a copy-on-write page --
2407 * don't allow writes.
2408 */
2409
2410 prot &= (~VM_PROT_WRITE);
2311 prot &= ~VM_PROT_WRITE;
2411 }
2412 }
2413 /*
2414 * Create an object if necessary.
2415 */
2416 if (entry->object.vm_object == NULL) {
2417
2418 if (vm_map_lock_upgrade(share_map)) {
2419 if (share_map != map)
2420 vm_map_unlock_read(map);

--- 14 unchanged lines hidden (view full) ---

2435
2436 *pindex = OFF_TO_IDX((share_offset - entry->start) + entry->offset);
2437 *object = entry->object.vm_object;
2438
2439 /*
2440 * Return whether this is the only map sharing this data.
2441 */
2442
2443 if (!su) {
2444 su = (share_map->ref_count == 1);
2445 }
2446 *out_prot = prot;
2447 *single_use = su;
2448
2449 return (KERN_SUCCESS);
2450
2451#undef RETURN
2452}
2453
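
Note: vm_map_lookup() loses its single_use output parameter, and the ref_count probe that computed it is deleted just above. The updated call, as the reworked vm_uiomove() below issues it:

        if ((vm_map_lookup(&map, uaddr, VM_PROT_READ,
            &entry, &first_object, &first_pindex, &prot, &wired)) != KERN_SUCCESS) {
                return EFAULT;
        }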
2454/*
2455 * vm_map_lookup_done:
2456 *

--- 31 unchanged lines hidden (view full) ---

2488 vm_object_t srcobject;
2489 off_t cp;
2490 int cnta;
2491 vm_offset_t uaddra;
2492 int *npages;
2493{
2494 vm_map_t map;
2495 vm_object_t first_object, oldobject, object;
2496 vm_map_entry_t first_entry, entry;
2393 vm_map_entry_t entry;
2497 vm_prot_t prot;
2498 boolean_t wired, su;
2395 boolean_t wired;
2499 int tcnt, rv;
2500 vm_offset_t uaddr, start, end;
2397 vm_offset_t uaddr, start, end, tend;
2501 vm_pindex_t first_pindex, osize, oindex;
2502 off_t ooffset;
2503 int skipinit, allremoved;
2504 int cnt;
2505
2506 if (npages)
2507 *npages = 0;
2508
2509 allremoved = 0;
2510
2511 cnt = cnta;
2406 uaddr = uaddra;
2407
2512 while (cnt > 0) {
2513 map = mapa;
2514 uaddr = uaddra;
2515 skipinit = 0;
2516
2517 if ((vm_map_lookup(&map, uaddr,
2518 VM_PROT_READ, &first_entry, &first_object,
2519 &first_pindex, &prot, &wired, &su)) != KERN_SUCCESS) {
2412 VM_PROT_READ, &entry, &first_object,
2413 &first_pindex, &prot, &wired)) != KERN_SUCCESS) {
2520 return EFAULT;
2521 }
2522
2523 vm_map_clip_start(map, first_entry, uaddr);
2417 vm_map_clip_start(map, entry, uaddr);
2524
2525 tcnt = cnt;
2526 if ((uaddr + tcnt) > first_entry->end)
2527 tcnt = first_entry->end - uaddr;
2420 tend = uaddr + tcnt;
2421 if (tend > entry->end) {
2422 tcnt = entry->end - uaddr;
2423 tend = entry->end;
2424 }
2528
2529 vm_map_clip_end(map, first_entry, uaddr + tcnt);
2426 vm_map_clip_end(map, entry, tend);
2530
2531 start = first_entry->start;
2532 end = first_entry->end;
2428 start = entry->start;
2429 end = entry->end;
2533
2534 osize = atop(tcnt);
2535
2536 oindex = OFF_TO_IDX(cp);
2537 if (npages) {
2538 vm_pindex_t idx;
2539 for (idx = 0; idx < osize; idx++) {
2540 vm_page_t m;
2541 if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
2542 vm_map_lookup_done(map, first_entry);
2439 vm_map_lookup_done(map, entry);
2543 return 0;
2544 }
2545 if ((m->flags & PG_BUSY) ||
2546 ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
2547 vm_map_lookup_done(map, first_entry);
2444 vm_map_lookup_done(map, entry);
2548 return 0;
2549 }
2550 }
2551 }
2552
2553/*
2554 * If we are changing an existing map entry, just redirect
2555 * the object, and change mappings.
2556 */
2557 if ((first_object->ref_count == 1) &&
2454 if ((first_object->type == OBJT_VNODE) &&
2455 ((oldobject = entry->object.vm_object) == first_object)) {
2456
2457 if ((entry->offset != cp) || (oldobject != srcobject)) {
2458 /*
2459 * Remove old window into the file
2460 */
2461 pmap_remove (map->pmap, uaddr, tend);
2462
2463 /*
2464 * Force copy on write for mmaped regions
2465 */
2466 vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
2467
2468 /*
2469 * Point the object appropriately
2470 */
2471 if (oldobject != srcobject) {
2472
2473 /*
2474 * Set the object optimization hint flag
2475 */
2476 srcobject->flags |= OBJ_OPT;
2477 vm_object_reference(srcobject);
2478 entry->object.vm_object = srcobject;
2479
2480 if (oldobject) {
2481 vm_object_deallocate(oldobject);
2482 }
2483 }
2484
2485 entry->offset = cp;
2486 map->timestamp++;
2487 } else {
2488 pmap_remove (map->pmap, uaddr, tend);
2489 }
2490
2491 } else if ((first_object->ref_count == 1) &&
2558 (first_object->size == osize) &&
2559 ((first_object->type == OBJT_DEFAULT) ||
2560 (first_object->type == OBJT_SWAP)) ) {
2561
2562 oldobject = first_object->backing_object;
2563
2564 if ((first_object->backing_object_offset != cp) ||
2565 (oldobject != srcobject)) {
2566 /*
2567 * Remove old window into the file
2568 */
2569 if (!allremoved) {
2570 pmap_remove (map->pmap, uaddra, uaddra + cnt);
2571 allremoved = 1;
2572 }
2503 pmap_remove (map->pmap, uaddr, tend);
2573
2574 /*
2575 * Remove unneeded old pages
2576 */
2577 if (first_object->resident_page_count) {
2578 vm_object_page_remove (first_object, 0, 0, 0);
2579 }
2580

--- 21 unchanged lines hidden (view full) ---

2602 */
2603 srcobject->flags |= OBJ_OPT;
2604 vm_object_reference(srcobject);
2605
2606 if (oldobject) {
2607 TAILQ_REMOVE(&oldobject->shadow_head,
2608 first_object, shadow_list);
2609 oldobject->shadow_count--;
2610 if (oldobject->shadow_count == 0)
2611 oldobject->flags &= ~OBJ_OPT;
2612 vm_object_deallocate(oldobject);
2613 }
2614
2615 TAILQ_INSERT_TAIL(&srcobject->shadow_head,
2616 first_object, shadow_list);
2617 srcobject->shadow_count++;
2618 srcobject->flags |= OBJ_OPT;
2619
2620 first_object->backing_object = srcobject;
2621 }
2622
2623 first_object->backing_object_offset = cp;
2551 map->timestamp++;
2624 } else {
2625 skipinit = 1;
2553 pmap_remove (map->pmap, uaddr, tend);
2626 }
2627/*
2628 * Otherwise, we have to do a logical mmap.
2629 */
2630 } else {
2631
2632 srcobject->flags |= OBJ_OPT;
2633 vm_object_reference(srcobject);
2634
2635 object = srcobject;
2636 ooffset = cp;
2637 vm_object_shadow(&object, &ooffset, osize);
2563 pmap_remove (map->pmap, uaddr, tend);
2638
2639 if (!allremoved) {
2640 pmap_remove (map->pmap, uaddra, uaddra + cnt);
2641 allremoved = 1;
2642 }
2643
2644 vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
2645 vm_map_lock_upgrade(map);
2646
2647 if (first_entry == &map->header) {
2568 if (entry == &map->header) {
2648 map->first_free = &map->header;
2649 } else if (map->first_free->start >= start) {
2650 map->first_free = first_entry->prev;
2571 map->first_free = entry->prev;
2651 }
2652
2653 SAVE_HINT(map, first_entry->prev);
2654 vm_map_entry_delete(map, first_entry);
2574 SAVE_HINT(map, entry->prev);
2575 vm_map_entry_delete(map, entry);
2655
2656 rv = vm_map_insert(map, object, ooffset, start, end,
2657 VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE);
2577 object = srcobject;
2578 ooffset = cp;
2579#if 0
2580 vm_object_shadow(&object, &ooffset, osize);
2581#endif
2583 rv = vm_map_insert(map, object, ooffset, start, tend,
2584 VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE|MAP_COPY_NEEDED);
2585
2659 if (rv != KERN_SUCCESS)
2660 panic("vm_uiomove: could not insert new entry: %d", rv);
2661 }
2662
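Note: after this rework vm_uiomove() maps the window in three cases (a summary of the code above, not verbatim):

        /*
         * 1. The entry already maps the vnode (OBJT_VNODE): retarget
         *    entry->object.vm_object/entry->offset at srcobject/cp and
         *    flush the old translations with pmap_remove().
         * 2. A one-reference OBJT_DEFAULT/OBJT_SWAP object exactly the
         *    size of the window: splice it onto srcobject through
         *    backing_object at offset cp.
         * 3. Anything else: delete the entry and vm_map_insert() a fresh
         *    MAP_COPY_ON_WRITE|MAP_COPY_NEEDED mapping of srcobject.
         */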
2663/*
2664 * Map the window directly, if it is already in memory
2665 */
2666 if (!skipinit)
2667 pmap_object_init_pt(map->pmap, uaddra,
2668 srcobject, (vm_pindex_t) OFF_TO_IDX(cp), tcnt, 0);
2593 pmap_object_init_pt(map->pmap, uaddr,
2594 srcobject, oindex, tcnt, 0);
2669
2670 map->timestamp++;
2671 vm_map_unlock(map);
2672
2673 cnt -= tcnt;
2674 uaddra += tcnt;
2600 uaddr += tcnt;
2675 cp += tcnt;
2676 if (npages)
2677 *npages += osize;
2678 }
2679 return 0;
2680}
2681
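Note: the allremoved/skipinit bookkeeping is gone from the copy loop. Each pass now clamps its own window and cleans up exactly that range before advancing:

        tend = uaddr + tcnt;
        if (tend > entry->end) {        /* clamp to the current map entry */
                tcnt = entry->end - uaddr;
                tend = entry->end;
        }
        ...
        pmap_remove(map->pmap, uaddr, tend);    /* per-window, not whole-range */
        ...
        cnt -= tcnt; uaddr += tcnt; cp += tcnt; /* uaddr itself now advances */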
2682/*

--- 26 unchanged lines hidden (view full) ---

2709vm_freeze_copyopts(object, froma, toa)
2710 vm_object_t object;
2711 vm_pindex_t froma, toa;
2712{
2713 int s;
2714 vm_object_t robject, robjectn;
2715 vm_pindex_t idx, from, to;
2716
2717 if ((vfs_ioopt == 0) ||
2718 (object == NULL) ||
2643 if ((object == NULL) ||
2719 ((object->flags & OBJ_OPT) == 0))
2720 return;
2721
2722 if (object->shadow_count > object->ref_count)
2723 panic("vm_freeze_copyopts: sc > rc");
2724
2725 while( robject = TAILQ_FIRST(&object->shadow_head)) {
2726 vm_pindex_t bo_pindex;

--- 104 unchanged lines hidden (view full) ---

2831{
2832 static int nlines;
2833 /* XXX convert args. */
2834 register vm_map_t map = (vm_map_t)addr;
2835 boolean_t full = have_addr;
2836
2837 register vm_map_entry_t entry;
2838
2839 db_iprintf("%s map 0x%x: pmap=0x%x, ref=%d, nentries=%d, version=%d\n",
2764 db_iprintf("%s map 0x%x: pmap=0x%x, nentries=%d, version=%d\n",
2840 (map->is_main_map ? "Task" : "Share"),
2841 (int) map, (int) (map->pmap), map->ref_count, map->nentries,
2766 (int) map, (int) (map->pmap), map->nentries,
2842 map->timestamp);
2843 nlines++;
2844
2845 if (!full && db_indent)
2846 return;
2847
2848 db_indent += 2;
2849 for (entry = map->header.next; entry != &map->header;

--- 83 unchanged lines hidden ---