Deleted: vm_map.c (8876)
Added: vm_map.c (9507)
1/*
2 * Copyright (c) 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without

--- 47 unchanged lines hidden (view full) ---

56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
57 * School of Computer Science
58 * Carnegie Mellon University
59 * Pittsburgh PA 15213-3890
60 *
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
63 *
1/*
2 * Copyright (c) 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without

--- 47 unchanged lines hidden (view full) ---

56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
57 * School of Computer Science
58 * Carnegie Mellon University
59 * Pittsburgh PA 15213-3890
60 *
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
63 *
64 * $Id: vm_map.c,v 1.21 1995/04/16 12:56:17 davidg Exp $
64 * $Id: vm_map.c,v 1.22 1995/05/30 08:16:07 rgrimes Exp $
65 */
66
67/*
68 * Virtual memory mapping module.
69 */
70
71#include <sys/param.h>
72#include <sys/systm.h>
73#include <sys/malloc.h>
74#include <sys/proc.h>
75
76#include <vm/vm.h>
77#include <vm/vm_page.h>
78#include <vm/vm_object.h>
79#include <vm/vm_kern.h>
65 */
66
67/*
68 * Virtual memory mapping module.
69 */
70
71#include <sys/param.h>
72#include <sys/systm.h>
73#include <sys/malloc.h>
74#include <sys/proc.h>
75
76#include <vm/vm.h>
77#include <vm/vm_page.h>
78#include <vm/vm_object.h>
79#include <vm/vm_kern.h>
80#include <vm/vm_pager.h>
80
81/*
82 * Virtual memory maps provide for the mapping, protection,
83 * and sharing of virtual memory objects. In addition,
84 * this module provides for an efficient virtual copy of
85 * memory from one map to another.
86 *
87 * Synchronization is required prior to most operations.

--- 197 unchanged lines hidden (view full) ---

285 map->is_main_map = TRUE;
286 map->min_offset = min;
287 map->max_offset = max;
288 map->entries_pageable = pageable;
289 map->first_free = &map->header;
290 map->hint = &map->header;
291 map->timestamp = 0;
292 lock_init(&map->lock, TRUE);
81
82/*
83 * Virtual memory maps provide for the mapping, protection,
84 * and sharing of virtual memory objects. In addition,
85 * this module provides for an efficient virtual copy of
86 * memory from one map to another.
87 *
88 * Synchronization is required prior to most operations.

--- 197 unchanged lines hidden (view full) ---

286 map->is_main_map = TRUE;
287 map->min_offset = min;
288 map->max_offset = max;
289 map->entries_pageable = pageable;
290 map->first_free = &map->header;
291 map->hint = &map->header;
292 map->timestamp = 0;
293 lock_init(&map->lock, TRUE);
293 simple_lock_init(&map->ref_lock);
294 simple_lock_init(&map->hint_lock);
295}
296
297/*
298 * vm_map_entry_create: [ internal use only ]
299 *
300 * Allocates a VM map entry for insertion.
301 * No entry fields are filled in. This routine is
302 */

--- 128 unchanged lines hidden (view full) ---

431 */
432void
433vm_map_reference(map)
434 register vm_map_t map;
435{
436 if (map == NULL)
437 return;
438
294}
295
296/*
297 * vm_map_entry_create: [ internal use only ]
298 *
299 * Allocates a VM map entry for insertion.
300 * No entry fields are filled in. This routine is
301 */

--- 128 unchanged lines hidden (view full) ---

430 */
431void
432vm_map_reference(map)
433 register vm_map_t map;
434{
435 if (map == NULL)
436 return;
437
439 simple_lock(&map->ref_lock);
440 map->ref_count++;
438 map->ref_count++;
441 simple_unlock(&map->ref_lock);
442}
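/*
 * Illustrative sketch, not part of vm_map.c: the reference-count pattern
 * used by vm_map_reference() above and vm_map_deallocate() below, modeled
 * on a hypothetical stand-alone "refmap" type.  All names and fields here
 * are invented for this example.
 */
#include <stdio.h>
#include <stdlib.h>

struct refmap {
    int ref_count;              /* number of holders, as in vm_map->ref_count */
};

static struct refmap *
refmap_create(void)
{
    struct refmap *m = malloc(sizeof(*m));

    if (m != NULL)
        m->ref_count = 1;       /* creator holds the first reference */
    return (m);
}

static void
refmap_reference(struct refmap *m)
{
    if (m == NULL)
        return;
    m->ref_count++;             /* vm_map_reference(): just bump the count */
}

static void
refmap_deallocate(struct refmap *m)
{
    if (m == NULL)
        return;
    if (m->ref_count == 0)
        abort();                /* "deallocating already freed map" */
    if (--m->ref_count > 0)
        return;                 /* other holders remain */
    free(m);                    /* last reference: tear the map down */
}

int
main(void)
{
    struct refmap *m = refmap_create();

    refmap_reference(m);        /* second holder */
    refmap_deallocate(m);       /* drops to 1, map survives */
    refmap_deallocate(m);       /* drops to 0, map destroyed */
    printf("done\n");
    return (0);
}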
443
444/*
445 * vm_map_deallocate:
446 *
447 * Removes a reference from the specified map,
448 * destroying it if no references remain.
449 * The map should not be locked.
450 */
451void
452vm_map_deallocate(map)
453 register vm_map_t map;
454{
455 register int c;
456
457 if (map == NULL)
458 return;
459
439}
440
441/*
442 * vm_map_deallocate:
443 *
444 * Removes a reference from the specified map,
445 * destroying it if no references remain.
446 * The map should not be locked.
447 */
448void
449vm_map_deallocate(map)
450 register vm_map_t map;
451{
452 register int c;
453
454 if (map == NULL)
455 return;
456
460 simple_lock(&map->ref_lock);
461 c = map->ref_count;
457 c = map->ref_count;
462 simple_unlock(&map->ref_lock);
463
464 if (c == 0)
465 panic("vm_map_deallocate: deallocating already freed map");
466
467 if (c != 1) {
468 --map->ref_count;
458
459 if (c == 0)
460 panic("vm_map_deallocate: deallocating already freed map");
461
462 if (c != 1) {
463 --map->ref_count;
469 wakeup((caddr_t) &map->ref_count);
464 wakeup(&map->ref_count);
470 return;
471 }
472 /*
473 * Lock the map, to wait out all other references to it.
474 */
475
476 vm_map_lock(map);
477 (void) vm_map_delete(map, map->min_offset, map->max_offset);

--- 126 unchanged lines hidden (view full) ---

604
605 return (KERN_SUCCESS);
606}
607
608/*
609 * SAVE_HINT:
610 *
611 * Saves the specified entry as the hint for
465 return;
466 }
467 /*
468 * Lock the map, to wait out all other references to it.
469 */
470
471 vm_map_lock(map);
472 (void) vm_map_delete(map, map->min_offset, map->max_offset);

--- 126 unchanged lines hidden (view full) ---

599
600 return (KERN_SUCCESS);
601}
602
603/*
604 * SAVE_HINT:
605 *
606 * Saves the specified entry as the hint for
612 * future lookups. Performs necessary interlocks.
607 * future lookups.
613 */
614#define SAVE_HINT(map,value) \
608 */
609#define SAVE_HINT(map,value) \
615 simple_lock(&(map)->hint_lock); \
616 (map)->hint = (value); \
617 simple_unlock(&(map)->hint_lock);
610 (map)->hint = (value);
618
619/*
620 * vm_map_lookup_entry: [ internal use only ]
621 *
622 * Finds the map entry containing (or
623 * immediately preceding) the specified address
624 * in the given map; the entry is returned
625 * in the "entry" parameter. The boolean

--- 8 unchanged lines hidden (view full) ---

634{
635 register vm_map_entry_t cur;
636 register vm_map_entry_t last;
637
638 /*
639 * Start looking either from the head of the list, or from the hint.
640 */
641
611
612/*
613 * vm_map_lookup_entry: [ internal use only ]
614 *
615 * Finds the map entry containing (or
616 * immediately preceding) the specified address
617 * in the given map; the entry is returned
618 * in the "entry" parameter. The boolean

--- 8 unchanged lines hidden (view full) ---

627{
628 register vm_map_entry_t cur;
629 register vm_map_entry_t last;
630
631 /*
632 * Start looking either from the head of the list, or from the hint.
633 */
634
642 simple_lock(&map->hint_lock);
643 cur = map->hint;
635 cur = map->hint;
644 simple_unlock(&map->hint_lock);
645
646 if (cur == &map->header)
647 cur = cur->next;
648
649 if (address >= cur->start) {
650 /*
651 * Go from hint to end of list.
652 *

--- 170 unchanged lines hidden (view full) ---

823 if (entry->is_sub_map)
824 return;
825 if (entry->is_a_map) {
826#if 0
827 vm_map_t my_share_map;
828 int count;
829
830 my_share_map = entry->object.share_map;
636
637 if (cur == &map->header)
638 cur = cur->next;
639
640 if (address >= cur->start) {
641 /*
642 * Go from hint to end of list.
643 *

--- 170 unchanged lines hidden (view full) ---
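/*
 * Illustrative sketch, not from vm_map.c: the hint-assisted lookup that
 * vm_map_lookup_entry() performs over the sorted entry list, paired with
 * the SAVE_HINT() store shown earlier.  The structures and names below are
 * simplified stand-ins invented for this example.
 */
#include <stdio.h>

struct entry {
    unsigned long start, end;   /* [start, end) address range */
    struct entry *next;
};

struct map {
    struct entry *first;        /* entries sorted by start address */
    struct entry *hint;         /* last entry found, as saved by SAVE_HINT */
};

/*
 * Return the entry containing addr, or NULL if no entry contains it.
 * Start scanning at the hint when the hint does not lie past addr,
 * which keeps repeated nearby lookups cheap.
 */
static struct entry *
lookup(struct map *map, unsigned long addr)
{
    struct entry *cur = map->hint;

    if (cur == NULL || addr < cur->start)
        cur = map->first;       /* hint is useless: restart at the head */
    for (; cur != NULL; cur = cur->next) {
        if (addr < cur->start)
            break;              /* sorted list: no later entry can match */
        if (addr < cur->end) {
            map->hint = cur;    /* SAVE_HINT for the next lookup */
            return (cur);
        }
    }
    return (NULL);
}

int
main(void)
{
    struct entry e2 = { 0x3000, 0x4000, NULL };
    struct entry e1 = { 0x1000, 0x2000, &e2 };
    struct map m = { &e1, NULL };

    printf("%p\n", (void *)lookup(&m, 0x1800));   /* finds e1 */
    printf("%p\n", (void *)lookup(&m, 0x3800));   /* hint at e1, walks to e2 */
    printf("%p\n", (void *)lookup(&m, 0x2800));   /* gap between entries: NULL */
    return (0);
}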

814 if (entry->is_sub_map)
815 return;
816 if (entry->is_a_map) {
817#if 0
818 vm_map_t my_share_map;
819 int count;
820
821 my_share_map = entry->object.share_map;
831 simple_lock(&my_share_map->ref_lock);
832 count = my_share_map->ref_count;
822 count = my_share_map->ref_count;
833 simple_unlock(&my_share_map->ref_lock);
834
835 if (count == 1) {
836 /*
837 * Can move the region from entry->start to entry->end
838 * (+ entry->offset) in my_share_map into place of
839 * entry. Later.
840 */
841 }

--- 444 unchanged lines hidden (view full) ---

1286 * count. We create objects before clipping the map entries
1287 * to avoid object proliferation.
1288 *
1289 * 2. We downgrade to a read lock, and call vm_fault_wire to
1290 * fault in the pages for any newly wired area (wired_count is
1291 * 1).
1292 *
1293 * Downgrading to a read lock for vm_fault_wire avoids a possible
823
824 if (count == 1) {
825 /*
826 * Can move the region from entry->start to entry->end
827 * (+ entry->offset) in my_share_map into place of
828 * entry. Later.
829 */
830 }

--- 444 unchanged lines hidden (view full) ---

1275 * count. We create objects before clipping the map entries
1276 * to avoid object proliferation.
1277 *
1278 * 2. We downgrade to a read lock, and call vm_fault_wire to
1279 * fault in the pages for any newly wired area (wired_count is
1280 * 1).
1281 *
1282 * Downgrading to a read lock for vm_fault_wire avoids a possible
1294 * deadlock with another thread that may have faulted on one
1283 * deadlock with another process that may have faulted on one
1295 * of the pages to be wired (it would mark the page busy,
1296 * blocking us, then in turn block on the map lock that we
1297 * hold). Because of problems in the recursive lock package,
1298 * we cannot upgrade to a write lock in vm_map_lookup. Thus,
1299 * any actions that require the write lock must be done
1300 * beforehand. Because we keep the read lock on the map, the
1301 * copy-on-write status of the entries we modify here cannot
1302 * change.

--- 21 unchanged lines hidden (view full) ---

1324
1325 vm_object_shadow(&entry->object.vm_object,
1326 &entry->offset,
1327 (vm_size_t) (entry->end
1328 - entry->start));
1329 entry->needs_copy = FALSE;
1330 } else if (entry->object.vm_object == NULL) {
1331 entry->object.vm_object =
1284 * of the pages to be wired (it would mark the page busy,
1285 * blocking us, then in turn block on the map lock that we
1286 * hold). Because of problems in the recursive lock package,
1287 * we cannot upgrade to a write lock in vm_map_lookup. Thus,
1288 * any actions that require the write lock must be done
1289 * beforehand. Because we keep the read lock on the map, the
1290 * copy-on-write status of the entries we modify here cannot
1291 * change.

--- 21 unchanged lines hidden (view full) ---

1313
1314 vm_object_shadow(&entry->object.vm_object,
1315 &entry->offset,
1316 (vm_size_t) (entry->end
1317 - entry->start));
1318 entry->needs_copy = FALSE;
1319 } else if (entry->object.vm_object == NULL) {
1320 entry->object.vm_object =
1332 vm_object_allocate((vm_size_t) (entry->end
1321 vm_object_allocate(OBJT_DEFAULT, (vm_size_t) (entry->end
1333 - entry->start));
1334 entry->offset = (vm_offset_t) 0;
1335 }
1336 }
1337 }
1338 vm_map_clip_start(map, entry, start);
1339 vm_map_clip_end(map, entry, end);
1340 entry->wired_count++;

--- 21 unchanged lines hidden (view full) ---

1362
1363 /*
1364 * Pass 2.
1365 */
1366
1367 /*
1368 * HACK HACK HACK HACK
1369 *
1322 - entry->start));
1323 entry->offset = (vm_offset_t) 0;
1324 }
1325 }
1326 }
1327 vm_map_clip_start(map, entry, start);
1328 vm_map_clip_end(map, entry, end);
1329 entry->wired_count++;

--- 21 unchanged lines hidden (view full) ---

1351
1352 /*
1353 * Pass 2.
1354 */
1355
1356 /*
1357 * HACK HACK HACK HACK
1358 *
1370 * If we are wiring in the kernel map or a submap of it, unlock
1371 * the map to avoid deadlocks. We trust that the kernel
1372 * threads are well-behaved, and therefore will not do
1373 * anything destructive to this region of the map while we
1374 * have it unlocked. We cannot trust user threads to do the
1375 * same.
1359 * If we are wiring in the kernel map or a submap of it,
1360 * unlock the map to avoid deadlocks. We trust that the
1361 * kernel is well-behaved, and therefore will not do
1362 * anything destructive to this region of the map while
1363 * we have it unlocked. We cannot trust user processes
1364 * to do the same.
1376 *
1377 * HACK HACK HACK HACK
1378 */
1379 if (vm_map_pmap(map) == kernel_pmap) {
1380 vm_map_unlock(map); /* trust me ... */
1381 } else {
1382 lock_set_recursive(&map->lock);
1383 lock_write_to_read(&map->lock);

--- 104 unchanged lines hidden (view full) ---

1488 if (tsize < size)
1489 size = tsize;
1490 object = tentry->object.vm_object;
1491 offset = tentry->offset + (offset - tentry->start);
1492 vm_map_unlock_read(smap);
1493 } else {
1494 object = current->object.vm_object;
1495 }
1365 *
1366 * HACK HACK HACK HACK
1367 */
1368 if (vm_map_pmap(map) == kernel_pmap) {
1369 vm_map_unlock(map); /* trust me ... */
1370 } else {
1371 lock_set_recursive(&map->lock);
1372 lock_write_to_read(&map->lock);

--- 104 unchanged lines hidden (view full) ---

1477 if (tsize < size)
1478 size = tsize;
1479 object = tentry->object.vm_object;
1480 offset = tentry->offset + (offset - tentry->start);
1481 vm_map_unlock_read(smap);
1482 } else {
1483 object = current->object.vm_object;
1484 }
1496 if (object && (object->pager != NULL) &&
1497 (object->pager->pg_type == PG_VNODE)) {
1498 vm_object_lock(object);
1485 if (object && (object->type == OBJT_VNODE)) {
1499 /*
1500 * Flush pages if writing is allowed. XXX should we continue
1501 * on an error?
1502 *
1503 * XXX Doing async I/O and then removing all the pages from
1504 * the object before it completes is probably a very bad
1505 * idea.
1506 */
1507 if (current->protection & VM_PROT_WRITE)
1486 /*
1487 * Flush pages if writing is allowed. XXX should we continue
1488 * on an error?
1489 *
1490 * XXX Doing async I/O and then removing all the pages from
1491 * the object before it completes is probably a very bad
1492 * idea.
1493 */
1494 if (current->protection & VM_PROT_WRITE)
1508 vm_object_page_clean(object, offset, offset + size, syncio);
1495 vm_object_page_clean(object, offset, offset + size, syncio, TRUE);
1509 if (invalidate)
1510 vm_object_page_remove(object, offset, offset + size, FALSE);
1496 if (invalidate)
1497 vm_object_page_remove(object, offset, offset + size, FALSE);
1511 vm_object_unlock(object);
1512 }
1513 start += size;
1514 }
1515
1516 vm_map_unlock_read(map);
1517 return (KERN_SUCCESS);
1518}
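/*
 * Illustrative sketch, not from vm_map.c: the user-visible operation that
 * ends up in a cleaning loop like the one above.  msync(2) on a file-backed
 * mapping asks the kernel to write dirty pages back (and, with
 * MS_INVALIDATE, to drop cached pages).  The three-argument msync prototype
 * used here is the modern one and is assumed for this example.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
    int fd = open("data.tmp", O_RDWR | O_CREAT, 0600);
    char *p;

    if (fd == -1 || ftruncate(fd, 4096) == -1)
        return (1);
    p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (p == MAP_FAILED)
        return (1);
    memcpy(p, "hello", 5);              /* dirty one page of the mapping */
    if (msync(p, 4096, MS_SYNC) == -1)  /* flush it back synchronously */
        perror("msync");
    munmap(p, 4096);
    close(fd);
    unlink("data.tmp");
    return (0);
}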
1519

--- 221 unchanged lines hidden (view full) ---

1741 vm_map_t src_map, dst_map;
1742 register vm_map_entry_t src_entry, dst_entry;
1743{
1744 vm_object_t temp_object;
1745
1746 if (src_entry->is_sub_map || dst_entry->is_sub_map)
1747 return;
1748
1498 }
1499 start += size;
1500 }
1501
1502 vm_map_unlock_read(map);
1503 return (KERN_SUCCESS);
1504}
1505

--- 221 unchanged lines hidden (view full) ---

1727 vm_map_t src_map, dst_map;
1728 register vm_map_entry_t src_entry, dst_entry;
1729{
1730 vm_object_t temp_object;
1731
1732 if (src_entry->is_sub_map || dst_entry->is_sub_map)
1733 return;
1734
1749 if (dst_entry->object.vm_object != NULL &&
1750 (dst_entry->object.vm_object->flags & OBJ_INTERNAL) == 0)
1751 printf("vm_map_copy_entry: copying over permanent data!\n");
1735 if (dst_entry->object.vm_object != NULL)
1736 printf("vm_map_copy_entry: dst_entry object not NULL!\n");
1752
1753 /*
1754 * If our destination map was wired down, unwire it now.
1755 */
1756
1757 if (dst_entry->wired_count != 0)
1758 vm_map_entry_unwire(dst_map, dst_entry);
1759

--- 23 unchanged lines hidden (view full) ---

1783
1784 boolean_t su;
1785
1786 /*
1787 * If the source entry has only one mapping, we can
1788 * just protect the virtual address range.
1789 */
1790 if (!(su = src_map->is_main_map)) {
1737
1738 /*
1739 * If our destination map was wired down, unwire it now.
1740 */
1741
1742 if (dst_entry->wired_count != 0)
1743 vm_map_entry_unwire(dst_map, dst_entry);
1744

--- 23 unchanged lines hidden (view full) ---

1768
1769 boolean_t su;
1770
1771 /*
1772 * If the source entry has only one mapping, we can
1773 * just protect the virtual address range.
1774 */
1775 if (!(su = src_map->is_main_map)) {
1791 simple_lock(&src_map->ref_lock);
1792 su = (src_map->ref_count == 1);
1776 su = (src_map->ref_count == 1);
1793 simple_unlock(&src_map->ref_lock);
1794 }
1795 if (su) {
1796 pmap_protect(src_map->pmap,
1797 src_entry->start,
1798 src_entry->end,
1799 src_entry->protection & ~VM_PROT_WRITE);
1800 } else {
1801 vm_object_pmap_copy(src_entry->object.vm_object,
1802 src_entry->offset,
1803 src_entry->offset + (src_entry->end
1804 - src_entry->start));
1805 }
1806 }
1807 /*
1808 * Make a copy of the object.
1809 */
1777 }
1778 if (su) {
1779 pmap_protect(src_map->pmap,
1780 src_entry->start,
1781 src_entry->end,
1782 src_entry->protection & ~VM_PROT_WRITE);
1783 } else {
1784 vm_object_pmap_copy(src_entry->object.vm_object,
1785 src_entry->offset,
1786 src_entry->offset + (src_entry->end
1787 - src_entry->start));
1788 }
1789 }
1790 /*
1791 * Make a copy of the object.
1792 */
1810 temp_object = dst_entry->object.vm_object;
1811 vm_object_copy(src_entry->object.vm_object,
1812 src_entry->offset,
1813 (vm_size_t) (src_entry->end -
1814 src_entry->start),
1815 &dst_entry->object.vm_object,
1816 &dst_entry->offset,
1817 &src_needs_copy);
1818 /*

--- 10 unchanged lines hidden (view full) ---

1829 dst_entry->needs_copy = TRUE;
1830
1831 /*
1832 * Mark the entries copy-on-write, so that write-enabling the
1833 * entry won't make copy-on-write pages writable.
1834 */
1835 src_entry->copy_on_write = TRUE;
1836 dst_entry->copy_on_write = TRUE;
1793 vm_object_copy(src_entry->object.vm_object,
1794 src_entry->offset,
1795 (vm_size_t) (src_entry->end -
1796 src_entry->start),
1797 &dst_entry->object.vm_object,
1798 &dst_entry->offset,
1799 &src_needs_copy);
1800 /*

--- 10 unchanged lines hidden (view full) ---

1811 dst_entry->needs_copy = TRUE;
1812
1813 /*
1814 * Mark the entries copy-on-write, so that write-enabling the
1815 * entry won't make copy-on-write pages writable.
1816 */
1817 src_entry->copy_on_write = TRUE;
1818 dst_entry->copy_on_write = TRUE;
1837 /*
1838 * Get rid of the old object.
1839 */
1840 vm_object_deallocate(temp_object);
1841
1842 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
1843 dst_entry->end - dst_entry->start, src_entry->start);
1844 } else {
1845 /*
1846 * Of course, wired down pages can't be set copy-on-write.
1847 * Cause wired pages to be copied into the new map by
1848 * simulating faults (the new pages are pageable)
1849 */
1850 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
1851 }
1852}
1853
1854/*
1819
1820 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
1821 dst_entry->end - dst_entry->start, src_entry->start);
1822 } else {
1823 /*
1824 * Of course, wired down pages can't be set copy-on-write.
1825 * Cause wired pages to be copied into the new map by
1826 * simulating faults (the new pages are pageable)
1827 */
1828 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
1829 }
1830}
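/*
 * Illustrative sketch, not from vm_map.c: the observable effect of the
 * copy-on-write setup performed by vm_map_copy_entry() above.  After
 * fork(2), both processes initially share read-only pages; the first write
 * from either side faults and receives a private copy, so the other process
 * never sees the modification.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static int shared_before_fork = 1;

int
main(void)
{
    pid_t pid = fork();

    if (pid == -1) {
        perror("fork");
        return (1);
    }
    if (pid == 0) {
        shared_before_fork = 2;         /* write faults; child gets its own page */
        printf("child sees %d\n", shared_before_fork);   /* 2 */
        _exit(0);
    }
    waitpid(pid, NULL, 0);
    printf("parent sees %d\n", shared_before_fork);      /* still 1 */
    return (0);
}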
1831
1832/*
1855 * vm_map_copy:
1856 *
1857 * Perform a virtual memory copy from the source
1858 * address map/range to the destination map/range.
1859 *
1860 * If src_destroy or dst_alloc is requested,
1861 * the source and destination regions should be
1862 * disjoint, not only in the top-level map, but
1863 * in the sharing maps as well. [The best way
1864 * to guarantee this is to use a new intermediate
1865 * map to make copies. This also reduces map
1866 * fragmentation.]
1867 */
1868int
1869vm_map_copy(dst_map, src_map,
1870 dst_addr, len, src_addr,
1871 dst_alloc, src_destroy)
1872 vm_map_t dst_map;
1873 vm_map_t src_map;
1874 vm_offset_t dst_addr;
1875 vm_size_t len;
1876 vm_offset_t src_addr;
1877 boolean_t dst_alloc;
1878 boolean_t src_destroy;
1879{
1880 register
1881 vm_map_entry_t src_entry;
1882 register
1883 vm_map_entry_t dst_entry;
1884 vm_map_entry_t tmp_entry;
1885 vm_offset_t src_start;
1886 vm_offset_t src_end;
1887 vm_offset_t dst_start;
1888 vm_offset_t dst_end;
1889 vm_offset_t src_clip;
1890 vm_offset_t dst_clip;
1891 int result;
1892 boolean_t old_src_destroy;
1893
1894 /*
1895 * XXX While we figure out why src_destroy screws up, we'll do it by
1896 * explicitly vm_map_delete'ing at the end.
1897 */
1898
1899 old_src_destroy = src_destroy;
1900 src_destroy = FALSE;
1901
1902 /*
1903 * Compute start and end of region in both maps
1904 */
1905
1906 src_start = src_addr;
1907 src_end = src_start + len;
1908 dst_start = dst_addr;
1909 dst_end = dst_start + len;
1910
1911 /*
1912 * Check that the region can exist in both source and destination.
1913 */
1914
1915 if ((dst_end < dst_start) || (src_end < src_start))
1916 return (KERN_NO_SPACE);
1917
1918 /*
1919 * Lock the maps in question -- we avoid deadlock by ordering lock
1920 * acquisition by map value
1921 */
1922
1923 if (src_map == dst_map) {
1924 vm_map_lock(src_map);
1925 } else if ((int) src_map < (int) dst_map) {
1926 vm_map_lock(src_map);
1927 vm_map_lock(dst_map);
1928 } else {
1929 vm_map_lock(dst_map);
1930 vm_map_lock(src_map);
1931 }
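/*
 * Illustrative sketch, not from vm_map.c: the deadlock-avoidance rule used
 * just above, where two maps are always locked in a fixed (address) order.
 * This user-space analogue uses POSIX mutexes; the ordering rule, not the
 * lock type, is the point.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t map_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t map_b = PTHREAD_MUTEX_INITIALIZER;

static void
lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
    if (a == b) {
        pthread_mutex_lock(a);          /* same map: lock it only once */
    } else if ((uintptr_t)a < (uintptr_t)b) {
        pthread_mutex_lock(a);          /* lower address first ... */
        pthread_mutex_lock(b);
    } else {
        pthread_mutex_lock(b);          /* ... so two threads locking the same
                                         * pair never hold one lock each while
                                         * waiting forever for the other */
        pthread_mutex_lock(a);
    }
}

int
main(void)
{
    lock_pair(&map_b, &map_a);          /* acquired in address order regardless
                                         * of argument order */
    pthread_mutex_unlock(&map_a);
    pthread_mutex_unlock(&map_b);
    printf("ok\n");
    return (0);
}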
1932
1933 result = KERN_SUCCESS;
1934
1935 /*
1936 * Check protections... source must be completely readable and
1937 * destination must be completely writable. [Note that if we're
1938 * allocating the destination region, we don't have to worry about
1939 * protection, but instead about whether the region exists.]
1940 */
1941
1942 if (src_map->is_main_map && dst_map->is_main_map) {
1943 if (!vm_map_check_protection(src_map, src_start, src_end,
1944 VM_PROT_READ)) {
1945 result = KERN_PROTECTION_FAILURE;
1946 goto Return;
1947 }
1948 if (dst_alloc) {
1949 /* XXX Consider making this a vm_map_find instead */
1950 if ((result = vm_map_insert(dst_map, NULL,
1951 (vm_offset_t) 0, dst_start, dst_end)) != KERN_SUCCESS)
1952 goto Return;
1953 } else if (!vm_map_check_protection(dst_map, dst_start, dst_end,
1954 VM_PROT_WRITE)) {
1955 result = KERN_PROTECTION_FAILURE;
1956 goto Return;
1957 }
1958 }
1959 /*
1960 * Find the start entries and clip.
1961 *
1962 * Note that checking protection asserts that the lookup cannot fail.
1963 *
1964 * Also note that we wait to do the second lookup until we have done the
1965 * first clip, as the clip may affect which entry we get!
1966 */
1967
1968 (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
1969 src_entry = tmp_entry;
1970 vm_map_clip_start(src_map, src_entry, src_start);
1971
1972 (void) vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry);
1973 dst_entry = tmp_entry;
1974 vm_map_clip_start(dst_map, dst_entry, dst_start);
1975
1976 /*
1977 * If both source and destination entries are the same, retry the
1978 * first lookup, as it may have changed.
1979 */
1980
1981 if (src_entry == dst_entry) {
1982 (void) vm_map_lookup_entry(src_map, src_addr, &tmp_entry);
1983 src_entry = tmp_entry;
1984 }
1985 /*
1986 * If source and destination entries are still the same, a null copy
1987 * is being performed.
1988 */
1989
1990 if (src_entry == dst_entry)
1991 goto Return;
1992
1993 /*
1994 * Go through entries until we get to the end of the region.
1995 */
1996
1997 while (src_start < src_end) {
1998 /*
1999 * Clip the entries to the endpoint of the entire region.
2000 */
2001
2002 vm_map_clip_end(src_map, src_entry, src_end);
2003 vm_map_clip_end(dst_map, dst_entry, dst_end);
2004
2005 /*
2006 * Clip each entry to the endpoint of the other entry.
2007 */
2008
2009 src_clip = src_entry->start + (dst_entry->end - dst_entry->start);
2010 vm_map_clip_end(src_map, src_entry, src_clip);
2011
2012 dst_clip = dst_entry->start + (src_entry->end - src_entry->start);
2013 vm_map_clip_end(dst_map, dst_entry, dst_clip);
2014
2015 /*
2016 * Both entries now match in size and relative endpoints.
2017 *
2018 * If both entries refer to a VM object, we can deal with them
2019 * now.
2020 */
2021
2022 if (!src_entry->is_a_map && !dst_entry->is_a_map) {
2023 vm_map_copy_entry(src_map, dst_map, src_entry,
2024 dst_entry);
2025 } else {
2026 register vm_map_t new_dst_map;
2027 vm_offset_t new_dst_start;
2028 vm_size_t new_size;
2029 vm_map_t new_src_map;
2030 vm_offset_t new_src_start;
2031
2032 /*
2033 * We have to follow at least one sharing map.
2034 */
2035
2036 new_size = (dst_entry->end - dst_entry->start);
2037
2038 if (src_entry->is_a_map) {
2039 new_src_map = src_entry->object.share_map;
2040 new_src_start = src_entry->offset;
2041 } else {
2042 new_src_map = src_map;
2043 new_src_start = src_entry->start;
2044 lock_set_recursive(&src_map->lock);
2045 }
2046
2047 if (dst_entry->is_a_map) {
2048 vm_offset_t new_dst_end;
2049
2050 new_dst_map = dst_entry->object.share_map;
2051 new_dst_start = dst_entry->offset;
2052
2053 /*
2054 * Since the destination sharing entries will
2055 * be merely deallocated, we can do that now,
2056 * and replace the region with a null object.
2057 * [This prevents splitting the source map to
2058 * match the form of the destination map.]
2059 * Note that we can only do so if the source
2060 * and destination do not overlap.
2061 */
2062
2063 new_dst_end = new_dst_start + new_size;
2064
2065 if (new_dst_map != new_src_map) {
2066 vm_map_lock(new_dst_map);
2067 (void) vm_map_delete(new_dst_map,
2068 new_dst_start,
2069 new_dst_end);
2070 (void) vm_map_insert(new_dst_map,
2071 NULL,
2072 (vm_offset_t) 0,
2073 new_dst_start,
2074 new_dst_end);
2075 vm_map_unlock(new_dst_map);
2076 }
2077 } else {
2078 new_dst_map = dst_map;
2079 new_dst_start = dst_entry->start;
2080 lock_set_recursive(&dst_map->lock);
2081 }
2082
2083 /*
2084 * Recursively copy the sharing map.
2085 */
2086
2087 (void) vm_map_copy(new_dst_map, new_src_map,
2088 new_dst_start, new_size, new_src_start,
2089 FALSE, FALSE);
2090
2091 if (dst_map == new_dst_map)
2092 lock_clear_recursive(&dst_map->lock);
2093 if (src_map == new_src_map)
2094 lock_clear_recursive(&src_map->lock);
2095 }
2096
2097 /*
2098 * Update variables for next pass through the loop.
2099 */
2100
2101 src_start = src_entry->end;
2102 src_entry = src_entry->next;
2103 dst_start = dst_entry->end;
2104 dst_entry = dst_entry->next;
2105
2106 /*
2107 * If the source is to be destroyed, here is the place to do
2108 * it.
2109 */
2110
2111 if (src_destroy && src_map->is_main_map &&
2112 dst_map->is_main_map)
2113 vm_map_entry_delete(src_map, src_entry->prev);
2114 }
2115
2116 /*
2117 * Update the physical maps as appropriate
2118 */
2119
2120 if (src_map->is_main_map && dst_map->is_main_map) {
2121 if (src_destroy)
2122 pmap_remove(src_map->pmap, src_addr, src_addr + len);
2123 }
2124 /*
2125 * Unlock the maps
2126 */
2127
2128Return:;
2129
2130 if (old_src_destroy)
2131 vm_map_delete(src_map, src_addr, src_addr + len);
2132
2133 vm_map_unlock(src_map);
2134 if (src_map != dst_map)
2135 vm_map_unlock(dst_map);
2136
2137 return (result);
2138}
2139
2140/*
2141 * vmspace_fork:
2142 * Create a new process vmspace structure and vm_map
2143 * based on those of an existing process. The new map
2144 * is based on the old map, according to the inheritance
2145 * values on the regions in that map.
2146 *
2147 * The source map must not be locked.
2148 */

--- 24 unchanged lines hidden (view full) ---

2173 panic("vm_map_fork: encountered a submap");
2174
2175 switch (old_entry->inheritance) {
2176 case VM_INHERIT_NONE:
2177 break;
2178
2179 case VM_INHERIT_SHARE:
2180 /*
1833 * vmspace_fork:
1834 * Create a new process vmspace structure and vm_map
1835 * based on those of an existing process. The new map
1836 * is based on the old map, according to the inheritance
1837 * values on the regions in that map.
1838 *
1839 * The source map must not be locked.
1840 */

--- 24 unchanged lines hidden (view full) ---

1865 panic("vm_map_fork: encountered a submap");
1866
1867 switch (old_entry->inheritance) {
1868 case VM_INHERIT_NONE:
1869 break;
1870
1871 case VM_INHERIT_SHARE:
1872 /*
2181 * If we don't already have a sharing map:
2182 */
2183
2184 if (!old_entry->is_a_map) {
2185 vm_map_t new_share_map;
2186 vm_map_entry_t new_share_entry;
2187
2188 /*
2189 * Create a new sharing map
2190 */
2191
2192 new_share_map = vm_map_create(NULL,
2193 old_entry->start,
2194 old_entry->end,
2195 TRUE);
2196 new_share_map->is_main_map = FALSE;
2197
2198 /*
2199 * Create the only sharing entry from the old
2200 * task map entry.
2201 */
2202
2203 new_share_entry =
2204 vm_map_entry_create(new_share_map);
2205 *new_share_entry = *old_entry;
2206 new_share_entry->wired_count = 0;
2207
2208 /*
2209 * Insert the entry into the new sharing map
2210 */
2211
2212 vm_map_entry_link(new_share_map,
2213 new_share_map->header.prev,
2214 new_share_entry);
2215
2216 /*
2217 * Fix up the task map entry to refer to the
2218 * sharing map now.
2219 */
2220
2221 old_entry->is_a_map = TRUE;
2222 old_entry->object.share_map = new_share_map;
2223 old_entry->offset = old_entry->start;
2224 }
2225 /*
2226 * Clone the entry, referencing the sharing map.
2227 */
1873 * Clone the entry, referencing the sharing map.
1874 */
2228
2229 new_entry = vm_map_entry_create(new_map);
2230 *new_entry = *old_entry;
2231 new_entry->wired_count = 0;
1875 new_entry = vm_map_entry_create(new_map);
1876 *new_entry = *old_entry;
1877 new_entry->wired_count = 0;
2232 vm_map_reference(new_entry->object.share_map);
1878 ++new_entry->object.vm_object->ref_count;
2233
2234 /*
2235 * Insert the entry into the new map -- we know we're
2236 * inserting at the end of the new map.
2237 */
2238
2239 vm_map_entry_link(new_map, new_map->header.prev,
2240 new_entry);

--- 15 unchanged lines hidden (view full) ---

2256
2257 new_entry = vm_map_entry_create(new_map);
2258 *new_entry = *old_entry;
2259 new_entry->wired_count = 0;
2260 new_entry->object.vm_object = NULL;
2261 new_entry->is_a_map = FALSE;
2262 vm_map_entry_link(new_map, new_map->header.prev,
2263 new_entry);
1879
1880 /*
1881 * Insert the entry into the new map -- we know we're
1882 * inserting at the end of the new map.
1883 */
1884
1885 vm_map_entry_link(new_map, new_map->header.prev,
1886 new_entry);

--- 15 unchanged lines hidden (view full) ---

1902
1903 new_entry = vm_map_entry_create(new_map);
1904 *new_entry = *old_entry;
1905 new_entry->wired_count = 0;
1906 new_entry->object.vm_object = NULL;
1907 new_entry->is_a_map = FALSE;
1908 vm_map_entry_link(new_map, new_map->header.prev,
1909 new_entry);
2264 if (old_entry->is_a_map) {
2265 int check;
2266
2267 check = vm_map_copy(new_map,
2268 old_entry->object.share_map,
2269 new_entry->start,
2270 (vm_size_t) (new_entry->end -
2271 new_entry->start),
2272 old_entry->offset,
2273 FALSE, FALSE);
2274 if (check != KERN_SUCCESS)
2275 printf("vm_map_fork: copy in share_map region failed\n");
2276 } else {
2277 vm_map_copy_entry(old_map, new_map, old_entry,
2278 new_entry);
2279 }
1910 vm_map_copy_entry(old_map, new_map, old_entry, new_entry);
2280 break;
2281 }
2282 old_entry = old_entry->next;
2283 }
2284
2285 new_map->size = old_map->size;
2286 vm_map_unlock(old_map);
2287

--- 57 unchanged lines hidden (view full) ---

2345 return(why); \
2346 }
2347
2348 /*
2349 * If the map has an interesting hint, try it before calling full
2350 * blown lookup routine.
2351 */
2352
1911 break;
1912 }
1913 old_entry = old_entry->next;
1914 }
1915
1916 new_map->size = old_map->size;
1917 vm_map_unlock(old_map);
1918

--- 57 unchanged lines hidden (view full) ---
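/*
 * Illustrative sketch, not from vm_map.c: the observable difference between
 * the VM_INHERIT_SHARE and VM_INHERIT_COPY cases handled in the fork loop
 * above.  A MAP_SHARED mapping keeps referring to the same pages across
 * fork(2), so a write in the child is visible to the parent; ordinary
 * private memory is copy-on-write instead (see the earlier fork sketch).
 * The MAP_ANONYMOUS spelling varies by system.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int
main(void)
{
    int *shared = mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE,
        MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    pid_t pid;

    if (shared == MAP_FAILED)
        return (1);
    *shared = 1;
    pid = fork();
    if (pid == -1)
        return (1);
    if (pid == 0) {
        *shared = 2;                    /* same pages: parent will see this */
        _exit(0);
    }
    waitpid(pid, NULL, 0);
    printf("parent sees %d\n", *shared);   /* 2 */
    munmap(shared, sizeof(int));
    return (0);
}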

1976 return(why); \
1977 }
1978
1979 /*
1980 * If the map has an interesting hint, try it before calling full
1981 * blown lookup routine.
1982 */
1983
2353 simple_lock(&map->hint_lock);
2354 entry = map->hint;
1984 entry = map->hint;
2355 simple_unlock(&map->hint_lock);
2356
2357 *out_entry = entry;
2358
2359 if ((entry == &map->header) ||
2360 (vaddr < entry->start) || (vaddr >= entry->end)) {
2361 vm_map_entry_t tmp_entry;
2362
2363 /*

--- 114 unchanged lines hidden (view full) ---

2478 */
2479 if (entry->object.vm_object == NULL) {
2480
2481 if (lock_read_to_write(&share_map->lock)) {
2482 if (share_map != map)
2483 vm_map_unlock_read(map);
2484 goto RetryLookup;
2485 }
1985
1986 *out_entry = entry;
1987
1988 if ((entry == &map->header) ||
1989 (vaddr < entry->start) || (vaddr >= entry->end)) {
1990 vm_map_entry_t tmp_entry;
1991
1992 /*

--- 114 unchanged lines hidden (view full) ---

2107 */
2108 if (entry->object.vm_object == NULL) {
2109
2110 if (lock_read_to_write(&share_map->lock)) {
2111 if (share_map != map)
2112 vm_map_unlock_read(map);
2113 goto RetryLookup;
2114 }
2486 entry->object.vm_object = vm_object_allocate(
2115 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
2487 (vm_size_t) (entry->end - entry->start));
2488 entry->offset = 0;
2489 lock_write_to_read(&share_map->lock);
2490 }
2491 /*
2492 * Return the object/offset from this entry. If the entry was
2493 * copy-on-write or empty, it has been fixed up.
2494 */
2495
2496 *offset = (share_offset - entry->start) + entry->offset;
2497 *object = entry->object.vm_object;
2498
2499 /*
2500 * Return whether this is the only map sharing this data.
2501 */
2502
2503 if (!su) {
2116 (vm_size_t) (entry->end - entry->start));
2117 entry->offset = 0;
2118 lock_write_to_read(&share_map->lock);
2119 }
2120 /*
2121 * Return the object/offset from this entry. If the entry was
2122 * copy-on-write or empty, it has been fixed up.
2123 */
2124
2125 *offset = (share_offset - entry->start) + entry->offset;
2126 *object = entry->object.vm_object;
2127
2128 /*
2129 * Return whether this is the only map sharing this data.
2130 */
2131
2132 if (!su) {
2504 simple_lock(&share_map->ref_lock);
2505 su = (share_map->ref_count == 1);
2133 su = (share_map->ref_count == 1);
2506 simple_unlock(&share_map->ref_lock);
2507 }
2508 *out_prot = prot;
2509 *single_use = su;
2510
2511 return (KERN_SUCCESS);
2512
2513#undef RETURN
2514}

--- 156 unchanged lines hidden ---
2134 }
2135 *out_prot = prot;
2136 *single_use = su;
2137
2138 return (KERN_SUCCESS);
2139
2140#undef RETURN
2141}

--- 156 unchanged lines hidden ---