vm_map.c (43547) → vm_map.c (43748)
1/*
2 * Copyright (c) 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without

--- 47 unchanged lines hidden (view full) ---

56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
57 * School of Computer Science
58 * Carnegie Mellon University
59 * Pittsburgh PA 15213-3890
60 *
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
63 *
64 * $Id: vm_map.c,v 1.146 1999/02/01 08:49:30 dillon Exp $
64 * $Id: vm_map.c,v 1.147 1999/02/03 01:57:16 dillon Exp $
65 */
66
67/*
68 * Virtual memory mapping module.
69 */
70
71#include <sys/param.h>
72#include <sys/systm.h>

--- 413 unchanged lines hidden (view full) ---

486 /*
487 * See if we can avoid creating a new entry by extending one of our
488 * neighbors. Or at least extend the object.
489 */
490
491 if (
492 (object == NULL) &&
493 (prev_entry != &map->header) &&
494 ((prev_entry->eflags & (MAP_ENTRY_IS_A_MAP | MAP_ENTRY_IS_SUB_MAP)) == 0) &&
494 ((prev_entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) &&
495 ((prev_entry->object.vm_object == NULL) ||
496 (prev_entry->object.vm_object->type == OBJT_DEFAULT) ||
497 (prev_entry->object.vm_object->type == OBJT_SWAP)) &&
498 (prev_entry->end == start) &&
499 (prev_entry->wired_count == 0)
500 ) {
501 if ((protoeflags == prev_entry->eflags) &&
502 ((cow & MAP_NOFAULT) ||

--- 393 unchanged lines hidden (view full) ---

896void
897vm_map_simplify_entry(map, entry)
898 vm_map_t map;
899 vm_map_entry_t entry;
900{
901 vm_map_entry_t next, prev;
902 vm_size_t prevsize, esize;
903
904 if (entry->eflags & (MAP_ENTRY_IS_SUB_MAP|MAP_ENTRY_IS_A_MAP))
904 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
905 return;
906
907 prev = entry->prev;
908 if (prev != &map->header) {
909 prevsize = prev->end - prev->start;
910 if ( (prev->end == entry->start) &&
911 (prev->object.vm_object == entry->object.vm_object) &&
912 (!prev->object.vm_object ||

--- 95 unchanged lines hidden (view full) ---

1008 *new_entry = *entry;
1009
1010 new_entry->end = start;
1011 entry->offset += (start - entry->start);
1012 entry->start = start;
1013
1014 vm_map_entry_link(map, entry->prev, new_entry);
1015
1016 if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
1016 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1017 if (new_entry->object.vm_object->ref_count == 1)
1018 vm_object_set_flag(new_entry->object.vm_object,
1019 OBJ_ONEMAPPING);
1020 vm_object_reference(new_entry->object.vm_object);
1021 }
1022}
1023
1024/*

--- 47 unchanged lines hidden (view full) ---

1072 new_entry = vm_map_entry_create(map);
1073 *new_entry = *entry;
1074
1075 new_entry->start = entry->end = end;
1076 new_entry->offset += (end - entry->start);
1077
1078 vm_map_entry_link(map, entry, new_entry);
1079
1080 if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
1080 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1081 if (new_entry->object.vm_object->ref_count == 1)
1082 vm_object_set_flag(new_entry->object.vm_object,
1083 OBJ_ONEMAPPING);
1084 vm_object_reference(new_entry->object.vm_object);
1085 }
1086}
1087
1088/*

--- 47 unchanged lines hidden (view full) ---

1136 if (vm_map_lookup_entry(map, start, &entry)) {
1137 vm_map_clip_start(map, entry, start);
1138 } else
1139 entry = entry->next;
1140
1141 vm_map_clip_end(map, entry, end);
1142
1143 if ((entry->start == start) && (entry->end == end) &&
1144 ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_COW)) == 0) &&
1144 ((entry->eflags & MAP_ENTRY_COW) == 0) &&
1145 (entry->object.vm_object == NULL)) {
1146 entry->object.sub_map = submap;
1147 entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
1148 result = KERN_SUCCESS;
1149 }
1150 vm_map_unlock(map);
1151
1152 return (result);

--- 65 unchanged lines hidden (view full) ---

1218 * Update physical map if necessary. Worry about copy-on-write
1219 * here -- CHECK THIS XXX
1220 */
1221
1222 if (current->protection != old_prot) {
1223#define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
1224 VM_PROT_ALL)
1225
1226 if (current->eflags & MAP_ENTRY_IS_A_MAP) {
1227 vm_map_entry_t share_entry;
1228 vm_offset_t share_end;
1229
1230 vm_map_lock(current->object.share_map);
1231 (void) vm_map_lookup_entry(
1232 current->object.share_map,
1233 current->offset,
1234 &share_entry);
1235 share_end = current->offset +
1236 (current->end - current->start);
1237 while ((share_entry !=
1238 &current->object.share_map->header) &&
1239 (share_entry->start < share_end)) {
1240
1241 pmap_protect(map->pmap,
1242 (qmax(share_entry->start,
1243 current->offset) -
1244 current->offset +
1245 current->start),
1246 min(share_entry->end,
1247 share_end) -
1248 current->offset +
1249 current->start,
1250 current->protection &
1251 MASK(share_entry));
1252
1253 share_entry = share_entry->next;
1254 }
1255 vm_map_unlock(current->object.share_map);
1256 } else
1257 pmap_protect(map->pmap, current->start,
1258 current->end,
1259 current->protection & MASK(entry));
1226 pmap_protect(map->pmap, current->start,
1227 current->end,
1228 current->protection & MASK(entry));
1260#undef MASK
1261 }
1262
1263 vm_map_simplify_entry(map, current);
1264
1265 current = current->next;
1266 }
1267

--- 27 unchanged lines hidden (view full) ---

1295 } else
1296 entry = entry->next;
1297
1298 for(current = entry;
1299 (current != &map->header) && (current->start < end);
1300 current = current->next) {
1301 vm_size_t size;
1302
1303 if (current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
1272 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1304 continue;
1305 }
1306
1307 vm_map_clip_end(map, current, end);
1308 size = current->end - current->start;
1309
1310 /*
1311 * Create an object if needed

--- 174 unchanged lines hidden (view full) ---

1486 entry->wired_count++;
1487 entry->eflags |= MAP_ENTRY_USER_WIRED;
1488 entry = entry->next;
1489 continue;
1490 }
1491
1492 /* Here on entry being newly wired */
1493
1494 if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
1463 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1495 int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
1496 if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) {
1497
1498 vm_object_shadow(&entry->object.vm_object,
1499 &entry->offset,
1500 atop(entry->end - entry->start));
1501 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
1502

--- 181 unchanged lines hidden (view full) ---

1684 * the write lock on the map: create a shadow
1685 * object for a copy-on-write region, or an
1686 * object for a zero-fill region.
1687 *
1688 * We don't have to do this for entries that
1689 * point to sharing maps, because we won't
1690 * hold the lock on the sharing map.
1691 */
1692 if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
1661 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1693 int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY;
1694 if (copyflag &&
1695 ((entry->protection & VM_PROT_WRITE) != 0)) {
1696
1697 vm_object_shadow(&entry->object.vm_object,
1698 &entry->offset,
1699 atop(entry->end - entry->start));
1700 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;

--- 152 unchanged lines hidden (view full) ---

1853 pmap_remove(vm_map_pmap(map), start, end);
1854 /*
1855 * Make a second pass, cleaning/uncaching pages from the indicated
1856 * objects as we go.
1857 */
1858 for (current = entry; current->start < end; current = current->next) {
1859 offset = current->offset + (start - current->start);
1860 size = (end <= current->end ? end : current->end) - start;
1861 if (current->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
1830 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
1862 vm_map_t smap;
1863 vm_map_entry_t tentry;
1864 vm_size_t tsize;
1865
1866 smap = current->object.share_map;
1835 smap = current->object.sub_map;
1867 vm_map_lock_read(smap);
1868 (void) vm_map_lookup_entry(smap, offset, &tentry);
1869 tsize = tentry->end - offset;
1870 if (tsize < size)
1871 size = tsize;
1872 object = tentry->object.vm_object;
1873 offset = tentry->offset + (offset - tentry->start);
1874 vm_map_unlock_read(smap);

--- 75 unchanged lines hidden (view full) ---

1950static void
1951vm_map_entry_delete(map, entry)
1952 vm_map_t map;
1953 vm_map_entry_t entry;
1954{
1955 vm_map_entry_unlink(map, entry);
1956 map->size -= entry->end - entry->start;
1957
1958 if ((entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) == 0) {
1927 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
1959 vm_object_deallocate(entry->object.vm_object);
1960 }
1961
1962 vm_map_entry_dispose(map, entry);
1963}
1964
1965/*
1966 * vm_map_delete: [ internal use only ]

--- 285 unchanged lines hidden (view full) ---

2252 */
2253static void
2254vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
2255 vm_map_t src_map, dst_map;
2256 vm_map_entry_t src_entry, dst_entry;
2257{
2258 vm_object_t src_object;
2259
2260 if ((dst_entry->eflags|src_entry->eflags) &
2261 (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP))
2229 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
2262 return;
2263
2264 if (src_entry->wired_count == 0) {
2265
2266 /*
2267 * If the source entry is marked needs_copy, it is already
2268 * write-protected.
2269 */

--- 131 unchanged lines hidden (view full) ---

2401 case VM_INHERIT_COPY:
2402 /*
2403 * Clone the entry and link into the map.
2404 */
2405 new_entry = vm_map_entry_create(new_map);
2406 *new_entry = *old_entry;
2407 new_entry->wired_count = 0;
2408 new_entry->object.vm_object = NULL;
2409 new_entry->eflags &= ~MAP_ENTRY_IS_A_MAP;
2410 vm_map_entry_link(new_map, new_map->header.prev,
2411 new_entry);
2412 vm_map_copy_entry(old_map, new_map, old_entry,
2413 new_entry);
2414 break;
2415 }
2416 old_entry = old_entry->next;
2417 }

--- 84 unchanged lines hidden (view full) ---

2502 vm_prot_t *out_prot, /* OUT */
2503 boolean_t *wired) /* OUT */
2504{
2505 vm_map_t share_map;
2506 vm_offset_t share_offset;
2507 vm_map_entry_t entry;
2508 vm_map_t map = *var_map;
2509 vm_prot_t prot;
2477#if 0
2510 boolean_t su;
2478 boolean_t su;
2479#endif
2511 vm_prot_t fault_type = fault_typea;
2512
2513RetryLookup:;
2514
2515 /*
2516 * Lookup the faulting address.
2517 */
2518

--- 72 unchanged lines hidden (view full) ---

2591 *wired = (entry->wired_count != 0);
2592 if (*wired)
2593 prot = fault_type = entry->protection;
2594
2595 /*
2596 * If we don't already have a VM object, track it down.
2597 */
2598
2599 su = (entry->eflags & MAP_ENTRY_IS_A_MAP) == 0;
2600 if (su) {
2601 share_map = map;
2602 share_offset = vaddr;
2603 } else {
2604 vm_map_entry_t share_entry;
2568 share_map = map;
2569 share_offset = vaddr;
2605
2570
2606 /*
2607 * Compute the sharing map, and offset into it.
2608 */
2609
2610 share_map = entry->object.share_map;
2611 share_offset = (vaddr - entry->start) + entry->offset;
2612
2613 /*
2614 * Look for the backing store object and offset
2615 */
2616
2617 vm_map_lock_read(share_map);
2618
2619 if (!vm_map_lookup_entry(share_map, share_offset,
2620 &share_entry)) {
2621 vm_map_unlock_read(share_map);
2622 RETURN(KERN_INVALID_ADDRESS);
2623 }
2624 entry = share_entry;
2625 }
2626
2627 /*
2628 * If the entry was copy-on-write, we either ...
2629 */
2630
2631 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
2632 /*
2633 * If we want to write the page, we may as well handle that
2634 * now since we've got the sharing map locked.

--- 82 unchanged lines hidden (view full) ---

2717 */
2718
2719void
2720vm_map_lookup_done(map, entry)
2721 vm_map_t map;
2722 vm_map_entry_t entry;
2723{
2724 /*
2725 * If this entry references a map, unlock it first.
2726 */
2727
2728 if (entry->eflags & MAP_ENTRY_IS_A_MAP)
2729 vm_map_unlock_read(entry->object.share_map);
2730
2731 /*
2732 * Unlock the main-level map
2733 */
2734
2735 vm_map_unlock_read(map);
2736}
2737
2738/*
2739 * Implement uiomove with VM operations. This handles (and collateral changes)

--- 355 unchanged lines hidden (view full) ---

3095
3096 db_iprintf(" prot=%x/%x/%s",
3097 entry->protection,
3098 entry->max_protection,
3099 inheritance_name[(int)(unsigned char)entry->inheritance]);
3100 if (entry->wired_count != 0)
3101 db_printf(", wired");
3102 }
3103 if (entry->eflags & (MAP_ENTRY_IS_A_MAP|MAP_ENTRY_IS_SUB_MAP)) {
3040 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
3104 /* XXX no %qd in kernel. Truncate entry->offset. */
3105 db_printf(", share=%p, offset=0x%lx\n",
3106 (void *)entry->object.share_map,
3043 (void *)entry->object.sub_map,
3107 (long)entry->offset);
3108 nlines++;
3109 if ((entry->prev == &map->header) ||
3110 ((entry->prev->eflags & MAP_ENTRY_IS_A_MAP) == 0) ||
3111 (entry->prev->object.share_map !=
3112 entry->object.share_map)) {
3047 (entry->prev->object.sub_map !=
3048 entry->object.sub_map)) {
3113 db_indent += 2;
3114 vm_map_print((db_expr_t)(intptr_t)
3115 entry->object.share_map,
3051 entry->object.sub_map,
3116 full, 0, (char *)0);
3117 db_indent -= 2;
3118 }
3119 } else {
3120 /* XXX no %qd in kernel. Truncate entry->offset. */
3121 db_printf(", object=%p, offset=0x%lx",
3122 (void *)entry->object.vm_object,
3123 (long)entry->offset);
3124 if (entry->eflags & MAP_ENTRY_COW)
3125 db_printf(", copy (%s)",
3126 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
3127 db_printf("\n");
3128 nlines++;
3129
3130 if ((entry->prev == &map->header) ||
3131 (entry->prev->eflags & MAP_ENTRY_IS_A_MAP) ||
3132 (entry->prev->object.vm_object !=
3133 entry->object.vm_object)) {
3134 db_indent += 2;
3135 vm_object_print((db_expr_t)(intptr_t)
3136 entry->object.vm_object,
3137 full, 0, (char *)0);
3138 nlines += 4;
3139 db_indent -= 2;

--- 27 unchanged lines hidden ---