Lines matching refs:entry in /macosx-10.5.8/xnu-1228.15.4/osfmk/vm/

123 	vm_map_entry_t	*entry);
130 vm_map_entry_t entry);
142 vm_map_entry_t entry,
147 vm_map_entry_t entry,
152 vm_map_entry_t entry);
163 vm_map_entry_t entry,
198 vm_map_entry_t entry,
204 vm_map_entry_t entry,
268 vm_map_entry_t entry,
274 * map entry to the same memory - the wired count in the new entry
276 * entry that is identical to the old entry. This preserves the
367 * [That is, an entry is split into two, bordering at a
390 static zone_t vm_map_kentry_zone; /* zone for kernel entry structures */
419 * crypt_info contains entry points and session data for the crypt module.
511 * Map and entry structures are allocated from zones -- we must
522 * the kernel to allocate more memory to an entry zone when it became
524 * of a new entry.
566 * individually; we guess at most one entry per eight pages in the
637 * Allocates a VM map entry for insertion in the
651 register vm_map_entry_t entry;
658 entry = (vm_map_entry_t) zalloc(zone);
659 if (entry == VM_MAP_ENTRY_NULL)
662 return(entry);
674 #define vm_map_entry_dispose(map, entry) \
676 if((entry) == (map)->first_free) \
678 if((entry) == (map)->hint) \
680 _vm_map_entry_dispose(&(map)->hdr, (entry)); \
683 #define vm_map_copy_entry_dispose(map, entry) \
684 _vm_map_entry_dispose(&(copy)->cpy_hdr, (entry))
689 register vm_map_entry_t entry)
698 zfree(zone, entry);
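
The matched lines above (vm_map_entry_create and _vm_map_entry_dispose) show map entries being carved out of per-map zones with zalloc()/zfree(). A minimal free-list sketch of that allocate/dispose pattern follows; it is not the xnu implementation, and every name in it (my_zone, my_entry, entry_create, entry_dispose) is hypothetical.

```c
/* Sketch only: a free-list "zone" from which fixed-size map entries are
 * allocated and disposed, mirroring the zalloc()/zfree() pattern above. */
#include <stdlib.h>

struct my_entry {
    struct my_entry *next_free;        /* reused as the free-list link */
    unsigned long    vme_start, vme_end;
};

struct my_zone {
    struct my_entry *free_list;
};

struct my_entry *entry_create(struct my_zone *z)
{
    struct my_entry *e = z->free_list;
    if (e != NULL)
        z->free_list = e->next_free;   /* pop from the free list */
    else
        e = calloc(1, sizeof(*e));     /* grow the zone lazily */
    return e;                          /* may be NULL on exhaustion */
}

void entry_dispose(struct my_zone *z, struct my_entry *e)
{
    e->next_free = z->free_list;       /* push back onto the free list */
    z->free_list = e;
}
```
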
708 vm_map_entry_t entry, next;
713 entry = vm_map_to_entry(map);
714 next = entry->vme_next;
715 while (vm_map_trunc_page(next->vme_start) == vm_map_trunc_page(entry->vme_end) ||
716 (vm_map_trunc_page(next->vme_start) == vm_map_trunc_page(entry->vme_start) &&
718 entry = next;
719 next = entry->vme_next;
720 if (entry == vm_map_to_entry(map))
723 if (map->first_free != entry) {
725 map, map->first_free, entry);
736 * entry immediately before the first hole in the map.
766 #define vm_map_entry_link(map, after_where, entry) \
771 VMEL_entry = (entry); \
777 #define vm_map_copy_entry_link(copy, after_where, entry) \
778 _vm_map_entry_link(&(copy)->cpy_hdr, after_where, (entry))
780 #define _vm_map_entry_link(hdr, after_where, entry) \
783 (entry)->vme_prev = (after_where); \
784 (entry)->vme_next = (after_where)->vme_next; \
785 (entry)->vme_prev->vme_next = (entry)->vme_next->vme_prev = (entry); \
788 #define vm_map_entry_unlink(map, entry) \
794 VMEU_entry = (entry); \
803 #define vm_map_copy_entry_unlink(copy, entry) \
804 _vm_map_entry_unlink(&(copy)->cpy_hdr, (entry))
806 #define _vm_map_entry_unlink(hdr, entry) \
809 (entry)->vme_next->vme_prev = (entry)->vme_prev; \
810 (entry)->vme_prev->vme_next = (entry)->vme_next; \
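
The _vm_map_entry_link/_vm_map_entry_unlink macros above splice entries into and out of the map's circular, doubly linked entry list, which is headed by a sentinel so no NULL checks are needed. A minimal sketch of the same splice, assuming a simplified entry type (struct link_entry and both functions are hypothetical):

```c
/* Sketch of the link/unlink splice used by the matched macros. */
struct link_entry {
    struct link_entry *vme_prev, *vme_next;
};

/* Insert "entry" immediately after "after_where". */
void entry_link(struct link_entry *after_where, struct link_entry *entry)
{
    entry->vme_prev = after_where;
    entry->vme_next = after_where->vme_next;
    entry->vme_prev->vme_next = entry;
    entry->vme_next->vme_prev = entry;
}

/* Remove "entry"; its neighbors are re-joined around it. */
void entry_unlink(struct link_entry *entry)
{
    entry->vme_next->vme_prev = entry->vme_prev;
    entry->vme_prev->vme_next = entry->vme_next;
}
```
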
940 * -- If a map entry is a share map, then we hold both locks, in
976 register vm_map_entry_t entry;
1008 * We now operate upon each map entry. If the entry is a sub-
1010 * If the entry is an object, we call vm_object_res_reference
1016 entry = vm_map_first_entry(map);
1018 while (entry != vm_map_to_entry(map)) {
1019 if (entry->object.vm_object != VM_OBJECT_NULL) {
1020 if (entry->is_sub_map) {
1021 vm_map_t lmap = entry->object.sub_map;
1026 vm_object_t object = entry->object.vm_object;
1036 entry = entry->vme_next;
1044 register vm_map_entry_t entry;
1069 * We now operate upon each map entry. If the entry is a sub-
1071 * If the entry is an object, we call vm_object_res_deallocate
1077 entry = vm_map_first_entry(map);
1079 while (entry != vm_map_to_entry(map)) {
1080 if (entry->object.vm_object != VM_OBJECT_NULL) {
1081 if (entry->is_sub_map) {
1082 vm_map_t lmap = entry->object.sub_map;
1087 vm_object_t object = entry->object.vm_object;
1099 entry = entry->vme_next;
1111 * Saves the specified entry as the hint for
1126 * Saves the specified entry as the hint for
1140 * Finds the map entry containing (or
1142 * in the given map; the entry is returned
1143 * in the "entry" parameter. The boolean
1151 vm_map_entry_t *entry) /* OUT */
1170 * we are already looking at the entry we
1179 *entry = cur;
1203 *entry = cur;
1212 *entry = cur->vme_prev;
1213 SAVE_HINT_MAP_READ(map, *entry);
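
The fragments above come from the lookup path: find the entry containing the address, or the one immediately preceding it, return it through the out parameter, and save it as the map's hint so the next lookup can start nearby. A sketch of that hint-then-scan idea under simplified assumptions (no locking; struct map, struct map_entry and lookup_entry are hypothetical names):

```c
#include <stdbool.h>

struct map_entry {
    struct map_entry *vme_prev, *vme_next;
    unsigned long vme_start, vme_end;
};

struct map {
    struct map_entry  header;   /* sentinel: header.vme_next is the first entry */
    struct map_entry *hint;     /* last entry looked up, or NULL */
};

/* Returns true and *out = containing entry when addr is mapped;
 * otherwise returns false and *out = the entry preceding addr. */
bool lookup_entry(struct map *m, unsigned long addr, struct map_entry **out)
{
    struct map_entry *cur = m->hint ? m->hint : m->header.vme_next;

    /* If the hint is past the address (or is the sentinel), restart. */
    if (cur == &m->header || cur->vme_start > addr)
        cur = m->header.vme_next;

    for (; cur != &m->header; cur = cur->vme_next) {
        if (addr < cur->vme_start) {          /* fell into a hole before "cur" */
            *out = cur->vme_prev;
            return false;
        }
        if (addr < cur->vme_end) {            /* contained in "cur" */
            *out = m->hint = cur;             /* save the hint */
            return true;
        }
    }
    *out = m->header.vme_prev;                /* addr is beyond the last entry */
    return false;
}
```
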
1222 * returning the entry allocated for that range.
1228 * If an entry is allocated, the object/offset fields
1240 register vm_map_entry_t entry, new_entry;
1264 if ((entry = map->first_free) == vm_map_to_entry(map))
1267 start = entry->vme_end;
1270 * In any case, the "entry" always precedes
1307 next = entry->vme_next;
1312 * If there is another entry, it must be
1320 * Didn't fit -- move to the next entry.
1323 entry = next;
1324 start = entry->vme_end;
1331 * "entry" should refer to the region before the new
1372 * Insert the new entry into the list
1375 vm_map_entry_link(map, entry, new_entry);
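
The vm_map_find_space fragments show a first-fit scan: start at map->first_free, take each candidate start from the current entry's end, and accept it once the following entry begins at or beyond start + size. A sketch of that scan with hypothetical simplified types:

```c
#include <stdbool.h>

struct fs_entry {
    struct fs_entry *vme_next;
    unsigned long vme_start, vme_end;
};

struct fs_map {
    struct fs_entry  header;      /* circular list sentinel */
    struct fs_entry *first_free;  /* entry preceding the first hole */
    unsigned long    max_offset;
};

/* First fit: find "size" bytes of unmapped space; on success return true,
 * set *start_out and *prev_out = the entry that will precede the new one. */
bool find_space(struct fs_map *m, unsigned long size,
                unsigned long *start_out, struct fs_entry **prev_out)
{
    struct fs_entry *entry = m->first_free;
    unsigned long start = (entry == &m->header) ? 0 : entry->vme_end;

    for (;;) {
        struct fs_entry *next = entry->vme_next;
        unsigned long end = start + size;

        if (end < start || end > m->max_offset)
            return false;                       /* wrapped around or out of range */
        if (next == &m->header || next->vme_start >= end) {
            *start_out = start;                 /* the hole is big enough */
            *prev_out  = entry;                 /* new entry links after this one */
            return true;
        }
        entry = next;                           /* didn't fit: move to the next entry */
        start = entry->vme_end;
    }
}
```
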
1403 * The source map should not be locked on entry.
1517 vm_map_entry_t entry, new_entry;
1663 if ((entry = map->first_free) != vm_map_to_entry(map))
1664 start = entry->vme_end;
1669 entry = tmp_entry;
1673 * In any case, the "entry" always precedes
1712 next = entry->vme_next;
1717 * If there is another entry, it must be
1725 * Didn't fit -- move to the next entry.
1728 entry = next;
1729 start = entry->vme_end;
1771 if (vm_map_lookup_entry(map, start, &entry)) {
1780 if (entry->vme_start < start) {
1781 tmp_start -= start - entry->vme_start;
1782 tmp_offset -= start - entry->vme_start;
1785 for (; entry->vme_start < end;
1786 entry = entry->vme_next) {
1789 * match the existing map entry.
1791 if (entry == vm_map_to_entry(map) ||
1792 entry->vme_start != tmp_start ||
1793 entry->is_sub_map != is_submap ||
1794 entry->offset != tmp_offset ||
1795 entry->needs_copy != needs_copy ||
1796 entry->protection != cur_protection ||
1797 entry->max_protection != max_protection ||
1798 entry->inheritance != inheritance ||
1799 entry->alias != alias) {
1807 if (entry->object.sub_map !=
1813 if (entry->object.vm_object != object) {
1817 obj2 = entry->object.vm_object;
1833 tmp_offset += entry->vme_end - entry->vme_start;
1834 tmp_start += entry->vme_end - entry->vme_start;
1835 if (entry->vme_end >= end) {
1849 if ((entry->vme_next != vm_map_to_entry(map)) &&
1850 (entry->vme_next->vme_start < end))
1858 * "entry" should refer to the region before the new
1865 * See whether we can avoid creating a new entry (and object) by
1882 (entry != vm_map_to_entry(map)) &&
1883 (entry->vme_end == start) &&
1884 (!entry->is_shared) &&
1885 (!entry->is_sub_map) &&
1886 (entry->alias == alias) &&
1887 (entry->inheritance == inheritance) &&
1888 (entry->protection == cur_protection) &&
1889 (entry->max_protection == max_protection) &&
1890 (entry->behavior == VM_BEHAVIOR_DEFAULT) &&
1891 (entry->in_transition == 0) &&
1892 (entry->no_cache == no_cache) &&
1894 ((entry->vme_end - entry->vme_start) + size < NO_COALESCE_LIMIT)) &&
1895 (entry->wired_count == 0)) { /* implies user_wired_count == 0 */
1896 if (vm_object_coalesce(entry->object.vm_object,
1898 entry->offset,
1900 (vm_map_size_t)(entry->vme_end - entry->vme_start),
1901 (vm_map_size_t)(end - entry->vme_end))) {
1905 * the previous map entry to include the
1908 map->size += (end - entry->vme_end);
1909 entry->vme_end = end;
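
The long condition at source lines 1882-1895 lists when a new mapping can be absorbed by simply growing the previous entry instead of creating a new one: the previous entry must end exactly where the new range starts, be unshared, not a submap, unwired, not in transition, carry identical attributes, and the backing object must agree to be extended (vm_object_coalesce). A sketch of that gate, with hypothetical field names and an illustrative size threshold:

```c
#include <stdbool.h>

struct co_entry {
    unsigned long vme_start, vme_end;
    int is_shared, is_sub_map, in_transition;
    int alias, inheritance, protection, max_protection;
    unsigned wired_count;
};

#define NO_COALESCE_LIMIT (1024UL * 1024UL)   /* illustrative threshold only */

/* Can a new mapping [start, end) with the given attributes be absorbed by
 * simply growing "prev" (prev->vme_end = end), avoiding a new entry? */
bool can_coalesce(const struct co_entry *prev,
                  unsigned long start, unsigned long end,
                  int alias, int inheritance, int protection,
                  int max_protection)
{
    unsigned long size = end - start;

    return prev->vme_end == start &&
           !prev->is_shared &&
           !prev->is_sub_map &&
           !prev->in_transition &&
           prev->wired_count == 0 &&
           prev->alias == alias &&
           prev->inheritance == inheritance &&
           prev->protection == protection &&
           prev->max_protection == max_protection &&
           (prev->vme_end - prev->vme_start) + size < NO_COALESCE_LIMIT;
    /* The real path additionally asks the object layer whether the backing
     * object can be extended (vm_object_coalesce in the listing above). */
}
```
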
1916 * Create a new entry
1938 new_entry = vm_map_entry_insert(map, entry, tmp_start, tmp_end,
1983 entry = new_entry;
1994 /* Wire down the new entry if the user
2227 /* offset from beginning of named entry offset in object */
2297 /* create an extra ref for the named entry */
2747 vm_map_entry_t entry,
2751 assert(entry->is_sub_map);
2752 assert(entry->object.sub_map != NULL);
2754 if (entry->vme_start > start_unnest ||
2755 entry->vme_end < end_unnest) {
2757 "bad nested entry: start=0x%llx end=0x%llx\n",
2759 (long long)entry->vme_start, (long long)entry->vme_end);
2761 if (start_unnest > entry->vme_start) {
2763 entry,
2767 if (entry->vme_end > end_unnest) {
2769 entry,
2775 entry->vme_start,
2776 entry->vme_end - entry->vme_start);
2780 map, entry->vme_start,
2781 entry->vme_end,
2782 entry->object.sub_map,
2783 entry->offset);
2785 entry->use_pmap = FALSE;
2791 * Asserts that the given entry begins at or after
2793 * it splits the entry into two.
2798 vm_map_entry_t entry,
2802 if (entry->use_pmap &&
2803 startaddr >= entry->vme_start) {
2813 vm_map_clip_unnest(map, entry, start_unnest, end_unnest);
2816 if (startaddr > entry->vme_start) {
2817 if (entry->object.vm_object &&
2818 !entry->is_sub_map &&
2819 entry->object.vm_object->phys_contiguous) {
2821 (addr64_t)(entry->vme_start),
2822 (addr64_t)(entry->vme_end));
2824 _vm_map_clip_start(&map->hdr, entry, startaddr);
2830 #define vm_map_copy_clip_start(copy, entry, startaddr) \
2832 if ((startaddr) > (entry)->vme_start) \
2833 _vm_map_clip_start(&(copy)->cpy_hdr,(entry),(startaddr)); \
2838 * the entry must be split.
2843 register vm_map_entry_t entry,
2851 * entry BEFORE this one, so that
2852 * this entry has the specified starting
2857 vm_map_entry_copy_full(new_entry, entry);
2860 entry->offset += (start - entry->vme_start);
2861 entry->vme_start = start;
2863 _vm_map_entry_link(map_header, entry->vme_prev, new_entry);
2865 if (entry->is_sub_map)
2875 * Asserts that the given entry ends at or before
2877 * it splits the entry into two.
2882 vm_map_entry_t entry,
2885 if (endaddr > entry->vme_end) {
2888 * the end of this map entry...
2890 endaddr = entry->vme_end;
2893 if (entry->use_pmap) {
2897 * Make sure the range between the start of this entry and
2901 start_unnest = entry->vme_start;
2905 vm_map_clip_unnest(map, entry, start_unnest, end_unnest);
2908 if (endaddr < entry->vme_end) {
2909 if (entry->object.vm_object &&
2910 !entry->is_sub_map &&
2911 entry->object.vm_object->phys_contiguous) {
2913 (addr64_t)(entry->vme_start),
2914 (addr64_t)(entry->vme_end));
2916 _vm_map_clip_end(&map->hdr, entry, endaddr);
2922 #define vm_map_copy_clip_end(copy, entry, endaddr) \
2924 if ((endaddr) < (entry)->vme_end) \
2925 _vm_map_clip_end(&(copy)->cpy_hdr,(entry),(endaddr)); \
2930 * the entry must be split.
2935 register vm_map_entry_t entry,
2941 * Create a new entry and insert it
2942 * AFTER the specified entry
2946 vm_map_entry_copy_full(new_entry, entry);
2948 new_entry->vme_start = entry->vme_end = end;
2949 new_entry->offset += (end - entry->vme_start);
2951 _vm_map_entry_link(map_header, entry, new_entry);
2953 if (entry->is_sub_map)
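
The clip routines above split one entry into two at a boundary: a new entry is allocated, the original is copied into it in full, and the start/end addresses plus the object offset are adjusted on the two halves. A sketch of the clip-end case under simplified assumptions (hypothetical types; the real code also fixes up submap/object references and the per-map hint):

```c
#include <stdlib.h>

struct cl_entry {
    struct cl_entry *vme_prev, *vme_next;
    unsigned long vme_start, vme_end;
    unsigned long offset;              /* offset into the backing object */
};

/* Insert "e" immediately after "after". */
static void cl_link(struct cl_entry *after, struct cl_entry *e)
{
    e->vme_prev = after;
    e->vme_next = after->vme_next;
    e->vme_prev->vme_next = e;
    e->vme_next->vme_prev = e;
}

/* Split "entry" at "end" so it covers [vme_start, end) and a new entry
 * covers [end, old vme_end). Returns the new (second) entry. */
struct cl_entry *clip_end(struct cl_entry *entry, unsigned long end)
{
    struct cl_entry *new_entry = malloc(sizeof(*new_entry));

    if (new_entry == NULL)
        return NULL;
    *new_entry = *entry;                              /* copy the full entry */
    new_entry->vme_start = end;
    new_entry->offset   += end - entry->vme_start;    /* advance the object offset */
    entry->vme_end       = end;
    cl_link(entry, new_entry);                        /* new entry goes AFTER */
    return new_entry;
}
```
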
2981 * entry or set of adjacent map entries of the specified map,
2984 * Otherwise, TRUE is returned and if the output argument 'entry'
2985 * is not NULL it points to the map entry containing the start
2988 * The map is locked for reading on entry and is left locked.
2995 vm_map_entry_t *entry)
3015 * in a single map entry.
3017 if (entry != (vm_map_entry_t *) NULL)
3018 *entry = cur;
3024 * single entry, walk the entries looking for holes.
3067 vm_map_entry_t entry;
3073 if (! vm_map_lookup_entry(map, start, &entry)) {
3074 entry = entry->vme_next;
3077 if (entry == vm_map_to_entry(map) ||
3078 entry->is_sub_map) {
3083 assert(!entry->use_pmap); /* we don't want to unnest anything here */
3084 vm_map_clip_start(map, entry, start);
3085 vm_map_clip_end(map, entry, end);
3087 if ((entry->vme_start == start) && (entry->vme_end == end) &&
3088 (!entry->is_sub_map) &&
3089 ((object = entry->object.vm_object) == vm_submap_object) &&
3094 entry->offset = (vm_object_offset_t)offset;
3095 entry->object.vm_object = VM_OBJECT_NULL;
3097 entry->is_sub_map = TRUE;
3098 entry->object.sub_map = submap;
3113 (entry->object.sub_map)->pmap,
3119 entry->use_pmap = TRUE;
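
vm_map_submap, whose fragments end here, converts an exactly clipped entry into a submap entry: the VM object slot is cleared, is_sub_map is set, the offset into the submap is recorded, and use_pmap may be set so the submap's nested pmap is used directly. A sketch of that bookkeeping with hypothetical stand-in types (reference counting and pmap nesting omitted):

```c
#include <stdbool.h>
#include <stddef.h>

struct sm_map;                        /* opaque stand-in for vm_map_t */

struct sm_entry {
    unsigned long vme_start, vme_end;
    bool   is_sub_map;
    bool   use_pmap;
    void  *vm_object;                 /* stand-in for the backing object */
    struct sm_map *sub_map;
    unsigned long offset;
};

/* Point "entry" (already clipped to exactly [start, end)) at "submap"
 * starting at "offset" within it. Returns false if the entry does not
 * exactly cover the requested range. */
bool install_submap(struct sm_entry *entry, unsigned long start,
                    unsigned long end, struct sm_map *submap,
                    unsigned long offset, bool nest_pmap)
{
    if (entry->vme_start != start || entry->vme_end != end || entry->is_sub_map)
        return false;

    entry->vm_object  = NULL;         /* the real code releases this reference */
    entry->offset     = offset;
    entry->is_sub_map = true;
    entry->sub_map    = submap;       /* the real code takes a map reference */
    entry->use_pmap   = nest_pmap;    /* a nested pmap lets faults bypass the parent */
    return true;
}
```
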
3149 vm_map_entry_t entry;
3173 * Lookup the entry. If it doesn't start in a valid
3174 * entry, return an error.
3176 if (! vm_map_lookup_entry(map, start, &entry)) {
3186 current = entry;
3233 * the entry.
3236 current = entry;
3307 current = entry;
3333 register vm_map_entry_t entry;
3341 entry = temp_entry;
3345 entry = temp_entry;
3350 while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
3351 if(entry->is_sub_map) {
3358 entry = entry->vme_next;
3361 entry = temp_entry;
3362 if (entry != vm_map_to_entry(map)) {
3364 vm_map_clip_start(map, entry, start);
3367 while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
3368 vm_map_clip_end(map, entry, end);
3369 assert(!entry->use_pmap); /* clip did unnest if needed */
3371 entry->inheritance = new_inheritance;
3373 entry = entry->vme_next;
3388 vm_map_entry_t entry,
3397 * this map entry.
3400 if (entry->user_wired_count == 0) {
3401 size = entry->vme_end - entry->vme_start;
3404 * Since this is the first time the user is wiring this map entry, check to see if we're
3416 * The first time the user wires an entry, we also increment the wired_count and add this to
3420 if (entry->wired_count >= MAX_WIRE_COUNT)
3423 entry->wired_count++;
3427 if (entry->user_wired_count >= MAX_WIRE_COUNT)
3430 entry->user_wired_count++;
3438 if (entry->wired_count >= MAX_WIRE_COUNT)
3441 entry->wired_count++;
3448 * Update the memory wiring accounting now that the given map entry is being unwired.
3454 vm_map_entry_t entry,
3464 if (entry->user_wired_count == 1) {
3471 assert(entry->wired_count >= 1);
3472 entry->wired_count--;
3473 map->user_wire_size -= entry->vme_end - entry->vme_start;
3476 assert(entry->user_wired_count >= 1);
3477 entry->user_wired_count--;
3485 assert(entry->wired_count >= 1);
3486 entry->wired_count--;
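
add_wire_counts and subtract_wire_counts keep two counters per entry: wired_count for kernel wirings and user_wired_count for user (mlock-style) wirings. The first user wiring of an entry also bumps wired_count and charges the entry's size against the map's user-wire quota; the last user unwiring reverses both. A sketch of that bookkeeping with hypothetical types and an illustrative MAX_WIRE_COUNT:

```c
#include <stdbool.h>

#define MAX_WIRE_COUNT 0xFFFFu         /* illustrative limit only */

struct w_entry {
    unsigned long vme_start, vme_end;
    unsigned wired_count;              /* kernel wirings */
    unsigned user_wired_count;         /* user (mlock-style) wirings */
};

struct w_map {
    unsigned long user_wire_size, user_wire_limit;
};

bool add_wire(struct w_map *map, struct w_entry *e, bool user_wire)
{
    unsigned long size = e->vme_end - e->vme_start;

    if (user_wire) {
        if (e->user_wired_count == 0) {          /* first user wiring of this entry */
            if (map->user_wire_size + size > map->user_wire_limit ||
                e->wired_count >= MAX_WIRE_COUNT)
                return false;                    /* over the quota or count limit */
            map->user_wire_size += size;
            e->wired_count++;                    /* user wirings count once here */
        }
        if (e->user_wired_count >= MAX_WIRE_COUNT)
            return false;
        e->user_wired_count++;
    } else {
        if (e->wired_count >= MAX_WIRE_COUNT)
            return false;
        e->wired_count++;
    }
    return true;
}

void subtract_wire(struct w_map *map, struct w_entry *e, bool user_wire)
{
    if (user_wire) {
        if (e->user_wired_count == 1) {          /* last user wiring goes away */
            e->wired_count--;
            map->user_wire_size -= e->vme_end - e->vme_start;
        }
        e->user_wired_count--;
    } else {
        e->wired_count--;
    }
}
```
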
3513 register vm_map_entry_t entry;
3546 entry = first_entry;
3557 while ((entry != vm_map_to_entry(map)) && (s < end)) {
3562 * "entry" hasn't been clipped, so it could start before "s"
3566 /* "e" is how far we want to wire in this entry */
3567 e = entry->vme_end;
3572 * If another thread is wiring/unwiring this entry then
3575 if (entry->in_transition) {
3579 * We have not clipped the entry. Make sure that
3587 entry->needs_wakeup = TRUE;
3620 * The entry could have been clipped, look it up again.
3629 * entry. let vm_map_unwire worry about
3635 entry = first_entry;
3639 if (entry->is_sub_map) {
3646 vm_map_clip_start(map, entry, s);
3647 vm_map_clip_end(map, entry, end);
3649 sub_start = entry->offset;
3650 sub_end = entry->vme_end;
3651 sub_end += entry->offset - entry->vme_start;
3653 local_end = entry->vme_end;
3663 if(entry->use_pmap) {
3664 pmap = entry->object.sub_map->pmap;
3677 if (entry->wired_count) {
3678 if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS)
3684 * Just go directly to next entry.
3686 entry = entry->vme_next;
3687 s = entry->vme_start;
3695 local_start = entry->vme_start;
3727 * entry could have been "simplified",
3730 entry = local_entry;
3732 vm_map_clip_start(map, entry, s);
3733 vm_map_clip_end(map, entry, end);
3735 e = entry->vme_end;
3740 if (!entry->is_sub_map) {
3745 local_start = entry->vme_start;
3749 if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS)
3752 entry->in_transition = TRUE;
3755 rc = vm_map_wire_nested(entry->object.sub_map,
3762 * Find the entry again. It could have been clipped
3768 entry = first_entry;
3772 e = entry->vme_end;
3777 while ((entry != vm_map_to_entry(map)) &&
3778 (entry->vme_start < e)) {
3779 assert(entry->in_transition);
3780 entry->in_transition = FALSE;
3781 if (entry->needs_wakeup) {
3782 entry->needs_wakeup = FALSE;
3786 subtract_wire_counts(map, entry, user_wire);
3788 entry = entry->vme_next;
3795 s = entry->vme_start;
3800 * If this entry is already wired then increment
3803 if (entry->wired_count) {
3805 * entry is already wired down, get our reference
3808 vm_map_clip_start(map, entry, s);
3809 vm_map_clip_end(map, entry, end);
3811 if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS)
3815 entry = entry->vme_next;
3816 s = entry->vme_start;
3821 * Unwired entry or wire request transmitted via submap
3831 size = entry->vme_end - entry->vme_start;
3837 if (entry->needs_copy) {
3838 vm_object_shadow(&entry->object.vm_object,
3839 &entry->offset, size);
3840 entry->needs_copy = FALSE;
3841 } else if (entry->object.vm_object == VM_OBJECT_NULL) {
3842 entry->object.vm_object = vm_object_allocate(size);
3843 entry->offset = (vm_object_offset_t)0;
3846 vm_map_clip_start(map, entry, s);
3847 vm_map_clip_end(map, entry, end);
3850 e = entry->vme_end;
3856 * Holes: Next entry should be contiguous unless this
3861 if ((entry->vme_end < end) &&
3862 ((entry->vme_next == vm_map_to_entry(map)) ||
3863 (entry->vme_next->vme_start > entry->vme_end))) {
3868 if ((entry->protection & access_type) != access_type) {
3874 assert(entry->wired_count == 0 && entry->user_wired_count == 0);
3876 if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS)
3879 entry->in_transition = TRUE;
3882 * This entry might get split once we unlock the map.
3884 * defined by this entry. In order for this to work
3886 * temporary copy of this entry and use that for the
3890 tmp_entry = *entry;
3893 * The in_transition state guarantees that the entry
3919 * Find the entry again. It could have been clipped
3926 entry = first_entry;
3931 while ((entry != vm_map_to_entry(map)) &&
3932 (entry->vme_start < tmp_entry.vme_end)) {
3933 assert(entry->in_transition);
3934 entry->in_transition = FALSE;
3935 if (entry->needs_wakeup) {
3936 entry->needs_wakeup = FALSE;
3940 subtract_wire_counts(map, entry, user_wire);
3942 entry = entry->vme_next;
3949 s = entry->vme_start;
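
The wire loop above uses a pattern that recurs throughout this file: mark the entry in_transition, take a by-value snapshot (tmp_entry), drop the map lock for the slow work, then relock, re-find the range (the entry may have been clipped or simplified meanwhile), clear in_transition on every piece, and wake any waiters. A compressed sketch of that protocol; the fault step and helpers are no-op stand-ins and all names are hypothetical:

```c
#include <pthread.h>
#include <stdbool.h>

struct t_entry {
    struct t_entry *vme_next;
    unsigned long vme_start, vme_end;
    bool in_transition, needs_wakeup;
};

struct t_map {
    pthread_mutex_t lock;
    struct t_entry  header;            /* circular sentinel list */
};

/* No-op stand-ins for the slow work and for re-finding the range. */
static void wire_pages(unsigned long s, unsigned long e) { (void)s; (void)e; }
static void wakeup_waiters(struct t_map *m) { (void)m; }
static struct t_entry *lookup(struct t_map *m, unsigned long addr)
{ (void)addr; return m->header.vme_next; }

/* Called with m->lock held; returns with it held again. */
void wire_one_entry(struct t_map *m, struct t_entry *entry)
{
    struct t_entry tmp = *entry;       /* by-value snapshot survives the unlock */

    entry->in_transition = true;       /* other threads must wait on this range */
    pthread_mutex_unlock(&m->lock);

    wire_pages(tmp.vme_start, tmp.vme_end);   /* slow path: fault the pages in */

    pthread_mutex_lock(&m->lock);
    /* The entry may have been clipped while unlocked: clear in_transition
     * on every piece now covering the snapshotted range. */
    for (entry = lookup(m, tmp.vme_start);
         entry != &m->header && entry->vme_start < tmp.vme_end;
         entry = entry->vme_next) {
        entry->in_transition = false;
        if (entry->needs_wakeup) {
            entry->needs_wakeup = false;
            wakeup_waiters(m);
        }
    }
}
```
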
4029 register vm_map_entry_t entry;
4051 entry = first_entry;
4067 while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
4068 if (entry->in_transition) {
4071 * Another thread is wiring down this entry. Note
4073 * be unwiring an unwired entry. This is not
4078 * Another thread is unwiring this entry. We did not
4080 * entry will not be getting unwired now.
4090 * entry could get "simplified" after
4092 * we re-lookup the entry, so we would
4093 * have to re-clip the entry to avoid
4099 * count(s) on this entry. That should be fine
4104 panic("vm_map_unwire: in_transition entry");
4107 entry = entry->vme_next;
4111 if (entry->is_sub_map) {
4117 vm_map_clip_start(map, entry, start);
4118 vm_map_clip_end(map, entry, end);
4120 sub_start = entry->offset;
4121 sub_end = entry->vme_end - entry->vme_start;
4122 sub_end += entry->offset;
4123 local_end = entry->vme_end;
4125 if(entry->use_pmap) {
4126 pmap = entry->object.sub_map->pmap;
4132 if (entry->wired_count == 0 ||
4133 (user_wire && entry->user_wired_count == 0)) {
4135 panic("vm_map_unwire: entry is unwired");
4136 entry = entry->vme_next;
4142 * Holes: Next entry should be contiguous unless
4145 if (((entry->vme_end < end) &&
4146 ((entry->vme_next == vm_map_to_entry(map)) ||
4147 (entry->vme_next->vme_start
4148 > entry->vme_end)))) {
4152 entry = entry->vme_next;
4157 subtract_wire_counts(map, entry, user_wire);
4159 if (entry->wired_count != 0) {
4160 entry = entry->vme_next;
4164 entry->in_transition = TRUE;
4165 tmp_entry = *entry;/* see comment in vm_map_wire() */
4169 * guarantees existence of the entry.
4172 vm_map_unwire_nested(entry->object.sub_map,
4178 * Find the entry again. It could have been
4186 entry = first_entry->vme_next;
4188 entry = first_entry;
4194 * that were in the original entry (saved in
4197 while ((entry != vm_map_to_entry(map)) &&
4198 (entry->vme_start < tmp_entry.vme_end)) {
4199 assert(entry->in_transition);
4200 entry->in_transition = FALSE;
4201 if (entry->needs_wakeup) {
4202 entry->needs_wakeup = FALSE;
4205 entry = entry->vme_next;
4210 vm_map_unwire_nested(entry->object.sub_map,
4217 * Find the entry again. It could have been
4225 entry = first_entry->vme_next;
4227 entry = first_entry;
4234 if ((entry->wired_count == 0) ||
4235 (user_wire && entry->user_wired_count == 0)) {
4237 panic("vm_map_unwire: entry is unwired");
4239 entry = entry->vme_next;
4243 assert(entry->wired_count > 0 &&
4244 (!user_wire || entry->user_wired_count > 0));
4246 vm_map_clip_start(map, entry, start);
4247 vm_map_clip_end(map, entry, end);
4251 * Holes: Next entry should be contiguous unless
4254 if (((entry->vme_end < end) &&
4255 ((entry->vme_next == vm_map_to_entry(map)) ||
4256 (entry->vme_next->vme_start > entry->vme_end)))) {
4260 entry = entry->vme_next;
4264 subtract_wire_counts(map, entry, user_wire);
4266 if (entry->wired_count != 0) {
4267 entry = entry->vme_next;
4271 entry->in_transition = TRUE;
4272 tmp_entry = *entry; /* see comment in vm_map_wire() */
4276 * guarantees existence of the entry.
4291 * Find the entry again. It could have been clipped
4298 entry = first_entry->vme_next;
4300 entry = first_entry;
4306 * were in the original entry (saved in tmp_entry). Also
4309 while ((entry != vm_map_to_entry(map)) &&
4310 (entry->vme_start < tmp_entry.vme_end)) {
4311 assert(entry->in_transition);
4312 entry->in_transition = FALSE;
4313 if (entry->needs_wakeup) {
4314 entry->needs_wakeup = FALSE;
4317 entry = entry->vme_next;
4356 * Deallocate the given entry from the target map.
4361 register vm_map_entry_t entry)
4367 s = entry->vme_start;
4368 e = entry->vme_end;
4371 assert(entry->wired_count == 0);
4372 assert(entry->user_wired_count == 0);
4374 if (entry->is_sub_map) {
4376 submap = entry->object.sub_map;
4379 object = entry->object.vm_object;
4382 vm_map_entry_unlink(map, entry);
4385 vm_map_entry_dispose(map, entry);
4410 vm_map_entry_t entry;
4414 if(vm_map_lookup_entry(sub_map, offset, &entry)) {
4416 remove_size = (entry->vme_end - entry->vme_start);
4417 if(offset > entry->vme_start)
4418 remove_size -= offset - entry->vme_start;
4421 if(submap_end < entry->vme_end) {
4423 entry->vme_end - submap_end;
4425 if(entry->is_sub_map) {
4430 entry->object.sub_map,
4431 entry->offset);
4435 && (entry->object.vm_object != NULL)) {
4437 entry->object.vm_object,
4438 entry->offset,
4441 entry->vme_start,
4451 entry = entry->vme_next;
4453 while((entry != vm_map_to_entry(sub_map))
4454 && (entry->vme_start < submap_end)) {
4455 remove_size = (entry->vme_end - entry->vme_start);
4456 if(submap_end < entry->vme_end) {
4457 remove_size -= entry->vme_end - submap_end;
4459 if(entry->is_sub_map) {
4462 (start + entry->vme_start) - offset,
4463 ((start + entry->vme_start) - offset) + remove_size,
4464 entry->object.sub_map,
4465 entry->offset);
4468 && (entry->object.vm_object != NULL)) {
4470 entry->object.vm_object,
4471 entry->offset,
4474 entry->vme_start,
4478 (addr64_t)((start + entry->vme_start)
4480 (addr64_t)(((start + entry->vme_start)
4484 entry = entry->vme_next;
4508 vm_map_entry_t entry, next;
4536 entry = first_entry;
4537 if (start == entry->vme_start) {
4543 vm_map_clip_start(map, entry, start);
4550 SAVE_HINT_MAP_WRITE(map, entry->vme_prev);
4552 entry = first_entry->vme_next;
4559 s = entry->vme_start;
4560 while ((entry != vm_map_to_entry(map)) && (s < end)) {
4569 * with an entry after "s" (in vm_map_simplify_entry()), so
4573 if (entry->vme_start >= s) {
4575 * This entry starts on or after "s"
4580 * This entry has been re-assembled by a
4584 vm_map_clip_start(map, entry, s);
4586 if (entry->vme_end <= end) {
4588 * This entry is going away completely, so no need
4592 vm_map_clip_end(map, entry, end);
4594 if (entry->in_transition) {
4598 * Another thread is wiring/unwiring this entry.
4601 assert(s == entry->vme_start);
4602 entry->needs_wakeup = TRUE;
4626 * The entry could have been clipped or it
4631 (!entry->is_sub_map));
4633 * User: use the next entry
4635 entry = first_entry->vme_next;
4636 s = entry->vme_start;
4638 entry = first_entry;
4639 SAVE_HINT_MAP_WRITE(map, entry->vme_prev);
4645 if (entry->wired_count) {
4648 user_wire = entry->user_wired_count > 0;
4655 (entry->user_wired_count > 0))
4656 entry->wired_count--;
4659 entry->user_wired_count = 0;
4661 if (entry->wired_count != 0) {
4673 assert(s == entry->vme_start);
4674 entry->needs_wakeup = TRUE;
4691 * The entry could have been clipped or
4699 * User: use the next entry
4701 entry = first_entry->vme_next;
4702 s = entry->vme_start;
4704 entry = first_entry;
4705 SAVE_HINT_MAP_WRITE(map, entry->vme_prev);
4715 entry->in_transition = TRUE;
4717 * copy current entry. see comment in vm_map_wire()
4719 tmp_entry = *entry;
4720 assert(s == entry->vme_start);
4724 * state guarantees existence of the entry.
4761 * Find the entry again. It could have
4766 (!entry->is_sub_map));
4770 SAVE_HINT_MAP_WRITE(map, entry->vme_prev);
4773 SAVE_HINT_MAP_WRITE(map, entry->vme_prev);
4774 first_entry = entry;
4779 entry = first_entry;
4780 while ((entry != vm_map_to_entry(map)) &&
4781 (entry->vme_start < tmp_entry.vme_end)) {
4782 assert(entry->in_transition);
4783 entry->in_transition = FALSE;
4784 if (entry->needs_wakeup) {
4785 entry->needs_wakeup = FALSE;
4788 entry = entry->vme_next;
4791 * We have unwired the entry(s). Go back and
4794 entry = first_entry;
4798 /* entry is unwired */
4799 assert(entry->wired_count == 0);
4800 assert(entry->user_wired_count == 0);
4802 assert(s == entry->vme_start);
4815 } else if (entry->is_sub_map) {
4816 if (entry->use_pmap) {
4819 (addr64_t)entry->vme_start,
4820 entry->vme_end - entry->vme_start);
4825 map, entry->vme_start,
4826 entry->vme_end,
4827 entry->object.sub_map,
4828 entry->offset);
4832 map, entry->vme_start, entry->vme_end,
4833 entry->object.sub_map,
4834 entry->offset);
4836 } else if (entry->object.vm_object != kernel_object) {
4837 object = entry->object.vm_object;
4840 object, entry->offset,
4841 entry->vme_end - entry->vme_start,
4843 entry->vme_start,
4847 (addr64_t)entry->vme_start,
4848 (addr64_t)entry->vme_end);
4853 * All pmap mappings for this map entry must have been
4857 entry->vme_start,
4858 entry->vme_end));
4860 next = entry->vme_next;
4872 /* unlink the entry from "map" ... */
4873 vm_map_entry_unlink(map, entry);
4877 entry);
4878 entry_size = entry->vme_end - entry->vme_start;
4884 vm_map_entry_delete(map, entry);
4889 entry = next;
4891 if(entry == vm_map_to_entry(map)) {
4903 if (!vm_map_lookup_entry(map, s, &entry)){
4904 entry = entry->vme_next;
4905 s = entry->vme_start;
4907 SAVE_HINT_MAP_WRITE(map, entry->vme_prev);
4913 if(entry == vm_map_to_entry(map)) {
4977 vm_map_entry_t entry = vm_map_copy_first_entry(copy);
4979 vm_map_copy_entry_unlink(copy, entry);
4980 vm_object_deallocate(entry->object.vm_object);
4981 vm_map_copy_entry_dispose(copy, entry);
5036 * The links in the entry chain must be
5066 vm_map_entry_t entry;
5091 for (entry = tmp_entry;;) {
5094 next = entry->vme_next;
5095 while(entry->is_sub_map) {
5100 if (entry->in_transition) {
5102 * Say that we are waiting, and wait for entry.
5104 entry->needs_wakeup = TRUE;
5111 sub_start = entry->offset;
5113 if(entry->vme_end < dst_end)
5114 sub_end = entry->vme_end;
5117 sub_end -= entry->vme_start;
5118 sub_end += entry->offset;
5119 local_end = entry->vme_end;
5123 entry->object.sub_map,
5129 if (dst_end <= entry->vme_end)
5137 entry = tmp_entry;
5138 next = entry->vme_next;
5141 if ( ! (entry->protection & VM_PROT_WRITE)) {
5147 * If the entry is in transition, we must wait
5151 if (entry->in_transition) {
5154 * Say that we are waiting, and wait for entry.
5156 entry->needs_wakeup = TRUE;
5163 * our range is contained completely within this map entry
5165 if (dst_end <= entry->vme_end) {
5173 (next->vme_start != entry->vme_end)) {
5181 if ((entry->object.vm_object != VM_OBJECT_NULL) &&
5182 ((!entry->object.vm_object->internal) ||
5183 (entry->object.vm_object->true_share))) {
5191 entry = next;
5258 vm_map_entry_t entry;
5287 * Only works for entry lists at the moment. Will
5332 for (entry = tmp_entry;;) {
5333 vm_map_entry_t next = entry->vme_next;
5335 while(entry->is_sub_map) {
5340 if (entry->in_transition) {
5343 * Say that we are waiting, and wait for entry.
5345 entry->needs_wakeup = TRUE;
5351 local_end = entry->vme_end;
5352 if (!(entry->needs_copy)) {
5358 sub_start = entry->offset;
5360 if(entry->vme_end < dst_end)
5361 sub_end = entry->vme_end;
5364 sub_end -= entry->vme_start;
5365 sub_end += entry->offset;
5369 entry->object.sub_map,
5377 if (dst_end <= entry->vme_end)
5380 &entry)) {
5384 next = entry->vme_next;
5387 if ( ! (entry->protection & VM_PROT_WRITE)) {
5393 * If the entry is in transition, we must wait
5397 if (entry->in_transition) {
5400 * Say that we are waiting, and wait for entry.
5402 entry->needs_wakeup = TRUE;
5409 * our range is contained completely within this map entry
5411 if (dst_end <= entry->vme_end)
5417 (next->vme_start != entry->vme_end)) {
5426 if ((entry->object.vm_object != VM_OBJECT_NULL) &&
5427 ((!entry->object.vm_object->internal) ||
5428 (entry->object.vm_object->true_share))) {
5432 entry = next;
5450 * the next entry to be overwritten is "tmp_entry"
5452 * and the starting address expected in that entry
5480 for (entry = tmp_entry; copy_size == 0;) {
5483 next = entry->vme_next;
5487 /* entry can outpace tmp_entry, and the copy_size */
5489 /* if the current entry is found to be in transition */
5493 if (entry->in_transition) {
5495 * Say that we are waiting, and wait for entry.
5497 entry->needs_wakeup = TRUE;
5506 entry = tmp_entry;
5509 if(entry->is_sub_map) {
5514 if (entry->needs_copy) {
5517 /* anonymous entry */
5518 if(entry->vme_end < dst_end)
5519 sub_end = entry->vme_end;
5522 if(entry->vme_start < base_addr)
5525 sub_start = entry->vme_start;
5527 dst_map, entry, sub_end);
5529 dst_map, entry, sub_start);
5530 assert(!entry->use_pmap);
5531 entry->is_sub_map = FALSE;
5533 entry->object.sub_map);
5534 entry->object.sub_map = NULL;
5535 entry->is_shared = FALSE;
5536 entry->needs_copy = FALSE;
5537 entry->offset = 0;
5541 * of the submap entry here instead
5546 entry->protection = VM_PROT_ALL;
5547 entry->max_protection = VM_PROT_ALL;
5548 entry->wired_count = 0;
5549 entry->user_wired_count = 0;
5550 if(entry->inheritance
5552 entry->inheritance = VM_INHERIT_COPY;
5557 if(base_addr < entry->vme_start) {
5560 entry->vme_start - base_addr;
5563 sub_start = entry->offset;
5565 if(entry->vme_end < dst_end)
5566 sub_end = entry->vme_end;
5569 sub_end -= entry->vme_start;
5570 sub_end += entry->offset;
5571 local_end = entry->vme_end;
5621 if((entry->use_pmap) && (pmap == NULL)) {
5623 entry->object.sub_map,
5627 entry->object.sub_map->pmap);
5630 entry->object.sub_map,
5636 entry->object.sub_map,
5686 entry = tmp_entry;
5689 if (dst_end <= entry->vme_end) {
5695 (next->vme_start != entry->vme_end)) {
5700 entry = next;
5819 entry->needs_wakeup = TRUE;
5827 entry = tmp_entry;
5864 * (copy) object should be one map entry, the target range may be split
5869 * dst_map is locked on entry and is return locked on success,
5876 vm_map_entry_t entry,
5898 * unaligned so we never clipped this entry, we need the offset into
5903 if (entry == vm_map_to_entry(dst_map)) {
5908 /* "start" must be within the current map entry */
5909 assert ((start>=entry->vme_start) && (start<entry->vme_end));
5911 dst_offset = start - entry->vme_start;
5913 dst_size = entry->vme_end - start;
5921 * we have to get the next destination entry
5927 * we have to get the next source copy entry
5939 if (entry->needs_copy &&
5940 ((entry->protection & VM_PROT_WRITE) != 0))
5946 vm_object_shadow(&entry->object.vm_object,
5947 &entry->offset,
5948 (vm_map_size_t)(entry->vme_end
5949 - entry->vme_start));
5950 entry->needs_copy = FALSE;
5953 dst_object = entry->object.vm_object;
5964 entry->vme_end - entry->vme_start);
5965 entry->object.vm_object = dst_object;
5966 entry->offset = 0;
5970 * Take an object reference and unlock map. The "entry" may
5975 entry_offset = entry->offset;
5976 entry_end = entry->vme_end;
6008 * all done with this copy entry, dispose.
6035 entry = entry->vme_next;
6040 if (start != entry->vme_start) {
6048 * we must lookup the entry because somebody
6052 if (!vm_map_lookup_entry(dst_map, start, &entry))
6072 * and the source and destination map entry zones match,
6073 * and the destination map entry is not shared,
6094 vm_map_entry_t entry;
6101 entry = tmp_entry;
6102 assert(!entry->use_pmap); /* unnested when clipped earlier */
6103 if (entry == vm_map_to_entry(dst_map)) {
6107 size = (entry->vme_end - entry->vme_start);
6115 if ((entry->vme_start != start) || ((entry->is_sub_map)
6116 && !entry->needs_copy)) {
6120 assert(entry != vm_map_to_entry(dst_map));
6126 if ( ! (entry->protection & VM_PROT_WRITE)) {
6136 vm_map_clip_end(dst_map, entry, entry->vme_start + copy_size);
6150 assert((entry->vme_end - entry->vme_start) == size);
6160 object = entry->object.vm_object;
6161 if ((!entry->is_shared &&
6164 entry->needs_copy) {
6165 vm_object_t old_object = entry->object.vm_object;
6166 vm_object_offset_t old_offset = entry->offset;
6187 if(entry->is_sub_map) {
6188 if(entry->use_pmap) {
6191 (addr64_t)entry->vme_start,
6192 entry->vme_end - entry->vme_start);
6198 dst_map, entry->vme_start,
6199 entry->vme_end,
6200 entry->object.sub_map,
6201 entry->offset);
6205 dst_map, entry->vme_start,
6206 entry->vme_end,
6207 entry->object.sub_map,
6208 entry->offset);
6211 entry->object.sub_map);
6215 entry->object.vm_object,
6216 entry->offset,
6217 entry->vme_end
6218 - entry->vme_start,
6220 entry->vme_start,
6224 (addr64_t)(entry->vme_start),
6225 (addr64_t)(entry->vme_end));
6231 entry->is_sub_map = FALSE;
6232 entry->object = copy_entry->object;
6233 object = entry->object.vm_object;
6234 entry->needs_copy = copy_entry->needs_copy;
6235 entry->wired_count = 0;
6236 entry->user_wired_count = 0;
6237 offset = entry->offset = copy_entry->offset;
6255 * entry, and the next map entry should be
6263 vm_object_t dst_object = entry->object.vm_object;
6264 vm_object_offset_t dst_offset = entry->offset;
6545 vm_map_entry_t entry;
6654 entry = vm_map_copy_first_entry(copy);
6667 * Copy each entry.
6669 while (entry != vm_map_copy_to_entry(copy)) {
6671 vm_map_entry_copy_full(new, entry);
6676 next = entry->vme_next;
6677 zfree(old_zone, entry);
6678 entry = next;
6688 for (entry = vm_map_copy_first_entry(copy);
6689 entry != vm_map_copy_to_entry(copy);
6690 entry = entry->vme_next) {
6691 entry->vme_start += adjustment;
6692 entry->vme_end += adjustment;
6694 entry->inheritance = VM_INHERIT_DEFAULT;
6695 entry->protection = VM_PROT_DEFAULT;
6696 entry->max_protection = VM_PROT_ALL;
6697 entry->behavior = VM_BEHAVIOR_DEFAULT;
6700 * If the entry is now wired,
6703 if (entry->wired_count != 0) {
6710 object = entry->object.vm_object;
6711 offset = entry->offset;
6712 va = entry->vme_start;
6715 entry->vme_start,
6716 entry->vme_end,
6719 while (va < entry->vme_end) {
6733 * map entry; because the old map
6734 * entry was wired, all of the pages
6756 prot = entry->protection;
6758 if (override_nx(dst_map, entry->alias) && prot)
6842 * The source map should not be locked on entry.
6865 * entry contains the actual
6869 vm_map_entry_t new_entry = VM_MAP_ENTRY_NULL; /* Map entry for copy */
6871 vm_map_offset_t src_start; /* Start of current entry --
6979 vm_map_entry_t src_entry = tmp_entry; /* Top-level entry */
6981 * map entry (in both
6994 boolean_t new_entry_needs_copy; /* Will new entry be COW? */
7049 * Create a new address map entry to hold the result.
7052 * to allocate a map entry.
7108 * destroying the source entry will deallocate it.
7163 * to move to the next entry rather than doing
7286 * Start over with this top-level entry.
7312 * Link in the new copy entry.
7450 * New sharing code. New map entry
7516 * via the entry->needs_copy mechanism.
7518 * only one entry points to the source
7520 * a second entry pointing to the
7525 * works with one entry because occurs
7527 * entry to the object when handling
7534 * needs_copy set in our entry. (This
7538 * a single entry pointing to an object
7570 * outside of the entry we're working
7624 * new entry.
7641 * Clone the entry, using object ref from above.
7651 * Insert the entry into the new map -- we
7861 * Insert the entry at the end
7949 vm_map_entry_t entry;
7965 entry = map->hint;
7967 if ((entry == vm_map_to_entry(map)) ||
7968 (vaddr < entry->vme_start) || (vaddr >= entry->vme_end)) {
7973 * was not contained in the entry, so do a full lookup.
7984 entry = tmp_entry;
7987 old_start = entry->vme_start;
7988 old_end = entry->vme_end;
7997 if (entry->is_sub_map) {
8006 if ((entry->use_pmap && !(fault_type & VM_PROT_WRITE))) {
8011 *real_map = entry->object.sub_map;
8014 if(entry->needs_copy && (fault_type & VM_PROT_WRITE)) {
8018 /* XXX FBDP: entry still valid ? */
8019 if(*real_map == entry->object.sub_map)
8023 vm_map_lock_read(entry->object.sub_map);
8028 old_start = entry->vme_start;
8029 old_end = entry->vme_end;
8033 vm_map_lock_read(entry->object.sub_map);
8039 vm_map_lock_read(entry->object.sub_map);
8049 /* XXX FBDP: map has been unlocked, what protects "entry" !? */
8050 *var_map = map = entry->object.sub_map;
8053 local_vaddr = (local_vaddr - entry->vme_start) + entry->offset;
8072 /* region mapped by the entry or, may only fill a portion */
8076 /* to be as big as the portion of the underlying entry */
8078 start_delta = submap_entry->vme_start > entry->offset ?
8079 submap_entry->vme_start - entry->offset : 0;
8082 (entry->offset + start_delta + (old_end - old_start)) <=
8084 0 : (entry->offset +
8092 entry = submap_entry;
8133 /* an entry in our space to the underlying */
8175 * Adjust the fault offset to the submap entry.
8197 vaddr, &entry)) {
8231 vm_map_clip_start(map, entry, local_start);
8232 vm_map_clip_end(map, entry, local_end);
8234 assert(!entry->use_pmap);
8237 /* shared map entry */
8238 vm_map_deallocate(entry->object.sub_map);
8239 entry->is_sub_map = FALSE;
8240 entry->object.vm_object = copy_object;
8242 /* propagate the submap entry's protections */
8243 entry->protection |= submap_entry->protection;
8244 entry->max_protection |= submap_entry->max_protection;
8247 entry->offset = local_start - old_start;
8248 entry->needs_copy = FALSE;
8249 entry->is_shared = FALSE;
8251 entry->offset = copy_offset;
8252 entry->needs_copy = TRUE;
8253 if(entry->inheritance == VM_INHERIT_SHARE)
8254 entry->inheritance = VM_INHERIT_COPY;
8256 entry->is_shared = TRUE;
8258 if(entry->inheritance == VM_INHERIT_SHARE)
8259 entry->inheritance = VM_INHERIT_COPY;
8268 entry = submap_entry;
8278 prot = entry->protection;
8280 if (override_nx(map, entry->alias) && prot) {
8305 *wired = (entry->wired_count != 0);
8310 * If the entry was copy-on-write, we either ...
8313 if (entry->needs_copy) {
8334 vm_object_shadow(&entry->object.vm_object,
8335 &entry->offset,
8336 (vm_map_size_t) (entry->vme_end -
8337 entry->vme_start));
8339 entry->object.vm_object->shadowed = TRUE;
8340 entry->needs_copy = FALSE;
8356 if (entry->object.vm_object == VM_OBJECT_NULL) {
8363 entry->object.vm_object = vm_object_allocate(
8364 (vm_map_size_t)(entry->vme_end - entry->vme_start));
8365 entry->offset = 0;
8370 * Return the object/offset from this entry. If the entry
8375 *offset = (vaddr - entry->vme_start) + entry->offset;
8376 *object = entry->object.vm_object;
8383 fault_info->user_tag = entry->alias;
8384 fault_info->behavior = entry->behavior;
8385 fault_info->lo_offset = entry->offset;
8386 fault_info->hi_offset = (entry->vme_end - entry->vme_start) + entry->offset;
8387 fault_info->no_cache = entry->no_cache;
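
vm_map_lookup_locked ends by translating the faulting address into the backing object's coordinates: the object offset is the distance of vaddr from the entry's start plus the entry's own offset, and the fault-info window spans the entry. A tiny sketch of that arithmetic with hypothetical simplified types:

```c
struct lk_entry {
    unsigned long vme_start, vme_end;
    unsigned long offset;                 /* entry's offset into its object */
};

struct lk_fault_info {
    unsigned long lo_offset, hi_offset;   /* object-offset window for clustering */
};

/* Translate a mapped virtual address into an offset within the entry's
 * backing object, and report the object-offset range the entry covers. */
unsigned long object_offset_for(const struct lk_entry *e, unsigned long vaddr,
                                struct lk_fault_info *fi)
{
    fi->lo_offset = e->offset;
    fi->hi_offset = (e->vme_end - e->vme_start) + e->offset;
    return (vaddr - e->vme_start) + e->offset;
}
```
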
8465 * "curr_entry" is the VM map entry preceding or including the
8551 * map entry preceding the address. We want the next
8557 /* no next entry at this level: stop looking */
8571 * Is the next entry at this level closer to the address (or
8577 /* no next entry at this level */
8794 vm_map_entry_t entry;
8817 if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
8822 entry = tmp_entry;
8825 start = entry->vme_start;
8827 basic->offset = (uint32_t)entry->offset;
8828 basic->protection = entry->protection;
8829 basic->inheritance = entry->inheritance;
8830 basic->max_protection = entry->max_protection;
8831 basic->behavior = entry->behavior;
8832 basic->user_wired_count = entry->user_wired_count;
8833 basic->reserved = entry->is_sub_map;
8835 *size = (entry->vme_end - start);
8838 if (entry->is_sub_map) {
8841 basic->shared = entry->is_shared;
8862 if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
8867 entry = tmp_entry;
8870 start = entry->vme_start;
8872 basic->offset = entry->offset;
8873 basic->protection = entry->protection;
8874 basic->inheritance = entry->inheritance;
8875 basic->max_protection = entry->max_protection;
8876 basic->behavior = entry->behavior;
8877 basic->user_wired_count = entry->user_wired_count;
8878 basic->reserved = entry->is_sub_map;
8880 *size = (entry->vme_end - start);
8883 if (entry->is_sub_map) {
8886 basic->shared = entry->is_shared;
8906 if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
8911 entry = tmp_entry;
8913 start = entry->vme_start;
8915 extended->protection = entry->protection;
8916 extended->user_tag = entry->alias;
8924 vm_map_region_walk(map, start, entry, entry->offset, entry->vme_end - start, extended, TRUE);
8932 *size = (entry->vme_end - start);
8951 if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
8956 entry = tmp_entry;
8959 start = entry->vme_start;
8964 vm_map_region_top_walk(entry, top);
8969 *size = (entry->vme_end - start);
8983 vm_map_entry_t entry,
8987 if (entry->object.vm_object == 0 || entry->is_sub_map) {
8999 entry_size = (entry->vme_end - entry->vme_start) / PAGE_SIZE;
9001 obj = entry->object.vm_object;
9028 if (entry->needs_copy) {
9053 vm_map_entry_t entry,
9066 if ((entry->object.vm_object == 0) ||
9067 (entry->is_sub_map) ||
9068 (entry->object.vm_object->phys_contiguous)) {
9074 obj = entry->object.vm_object;
9109 if (extended->shadow_depth || entry->needs_copy)
9142 obj = entry->object.vm_object;
9160 /* object is locked on entry and locked on return */
9274 vm_map_entry_t entry,
9281 if (entry->object.vm_object == 0)
9284 if (entry->is_sub_map)
9289 chk_obj = entry->object.vm_object;
9316 * benefit from the "expand a map entry" technology
9317 * at allocation time because the adjacent entry
9399 vm_map_entry_t entry;
9413 if (!vm_map_lookup_entry(map, start, &entry)) {
9414 /* "start" is not mapped and "entry" ends before "start" */
9415 if (entry == vm_map_to_entry(map)) {
9416 /* start with first entry in the map */
9417 entry = vm_map_first_entry(map);
9419 /* start with next entry */
9420 entry = entry->vme_next;
9424 while (entry != vm_map_to_entry(map) &&
9425 entry->vme_start <= end) {
9426 /* try and coalesce "entry" with its previous entry */
9427 vm_map_simplify_entry(map, entry);
9428 entry = entry->vme_next;
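
vm_map_simplify_entry is the inverse of clipping: when an entry and its predecessor are back to back, reference the same object contiguously, and (in the real code) agree on every attribute, they are merged into one entry and the leftover entry is unlinked and disposed. A sketch of the merge step under those assumptions (hypothetical types):

```c
#include <stdbool.h>

struct si_entry {
    struct si_entry *vme_prev, *vme_next;
    unsigned long vme_start, vme_end;
    unsigned long offset;
    void *object;
};

/* Merge "entry" into its predecessor when they are contiguous both in the
 * address space and in the backing object; returns true if merged. */
bool simplify_with_prev(struct si_entry *header, struct si_entry *entry)
{
    struct si_entry *prev = entry->vme_prev;

    if (prev == header ||
        prev->vme_end != entry->vme_start ||             /* must touch */
        prev->object != entry->object ||                 /* same object... */
        prev->offset + (prev->vme_end - prev->vme_start)
            != entry->offset)                            /* ...contiguously */
        return false;
    /* The real code also insists that protections, inheritance, wiring,
     * aliases, and the other attributes match before merging. */

    prev->vme_end = entry->vme_end;                      /* absorb the range */
    entry->vme_prev->vme_next = entry->vme_next;         /* unlink "entry" */
    entry->vme_next->vme_prev = entry->vme_prev;
    /* the caller disposes "entry" and drops its extra object reference */
    return true;
}
```
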
9457 vm_map_entry_t entry;
9479 if (vm_map_lookup_entry(map, start, &entry)) {
9481 if((entry->vme_end - start) > sync_size) {
9485 sub_size = entry->vme_end - start;
9488 if(entry->is_sub_map) {
9492 sub_start = (start - entry->vme_start)
9493 + entry->offset;
9496 entry->object.sub_map,
9501 if(entry->object.vm_object) {
9510 offset = (start - entry->vme_start)
9511 + entry->offset;
9513 object = entry->object.vm_object;
9580 register vm_map_entry_t entry;
9607 * entry containing the start of the address range if
9611 entry = temp_entry;
9612 vm_map_clip_start(map, entry, start);
9619 while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
9620 vm_map_clip_end(map, entry, end);
9621 assert(!entry->use_pmap);
9623 entry->behavior = new_behavior;
9625 entry = entry->vme_next;
9650 vm_map_entry_t entry);
9653 vm_map_entry_t entry);
9690 vm_map_entry_t entry)
9694 iprintf("map entry %08X\n", entry);
9698 shadows = vm_follow_object(entry->object.vm_object);
9709 register vm_map_entry_t entry)
9716 iprintf("map entry %08X - prev = %08X next = %08X\n", entry, entry->vme_prev, entry->vme_next);
9720 vm_map_links_print(&entry->links);
9723 (unsigned long long)entry->vme_start,
9724 (unsigned long long)entry->vme_end,
9725 entry->protection,
9726 entry->max_protection,
9727 inheritance_name[(entry->inheritance & 0x3)]);
9730 behavior_name[(entry->behavior & 0x3)],
9731 entry->wired_count,
9732 entry->user_wired_count);
9734 (entry->in_transition ? "" : "!"),
9735 (entry->needs_wakeup ? "" : "!"));
9737 if (entry->is_sub_map) {
9739 entry->object.sub_map,
9740 (unsigned long long)entry->offset);
9743 entry->object.vm_object,
9744 (unsigned long long)entry->offset);
9746 (entry->is_shared ? "" : "!"),
9747 (entry->needs_copy ? "" : "!"));
9760 register vm_map_entry_t entry;
9766 for (entry = vm_map_first_entry(map);
9767 entry && entry != vm_map_to_entry(map);
9768 entry = entry->vme_next) {
9769 vm_follow_entry(entry);
9782 register vm_map_entry_t entry;
9824 for (entry = vm_map_first_entry(map);
9825 entry && entry != vm_map_to_entry(map);
9826 entry = entry->vme_next) {
9827 vm_map_entry_print(entry);
9844 vm_map_entry_t entry;
9877 for (entry = vm_map_copy_first_entry(copy);
9878 entry && entry != vm_map_copy_to_entry(copy);
9879 entry = entry->vme_next) {
9880 vm_map_entry_print(entry);
9907 vm_map_entry_t entry;
9915 for (entry = vm_map_first_entry(map);
9916 entry != vm_map_to_entry(map);
9917 entry = entry->vme_next) {
9918 total += entry->vme_end - entry->vme_start;
9978 * Insert the new entry into the list.
10017 vm_map_offset_t src_start; /* start of entry to map */
10143 vm_object_reference_locked(object); /* object ref. for new entry */
10211 * Throw away the old object reference of the new entry.
10255 * Throw away the old object reference of the new entry.
10347 vm_map_entry_t entry;
10389 for (entry = map_header.links.next;
10390 entry != (struct vm_map_entry *)&map_header.links;
10391 entry = new_entry) {
10392 new_entry = entry->vme_next;
10393 _vm_map_entry_unlink(&map_header, entry);
10395 entry->vme_start += *address;
10396 entry->vme_end += *address;
10397 vm_map_entry_link(target_map, insp_entry, entry);
10398 insp_entry = entry;
10400 if (!entry->is_sub_map) {
10401 vm_object_deallocate(entry->object.vm_object);
10403 vm_map_deallocate(entry->object.sub_map);
10405 _vm_map_entry_dispose(&map_header, entry);
10426 * returns the address and the map entry just before the allocated
10441 register vm_map_entry_t entry;
10468 if ((entry = map->first_free) != vm_map_to_entry(map))
10469 start = entry->vme_end;
10474 entry = tmp_entry;
10478 * In any case, the "entry" always precedes
10517 next = entry->vme_next;
10522 * If there is another entry, it must be
10530 * Didn't fit -- move to the next entry.
10533 entry = next;
10534 start = entry->vme_end;
10569 entry = temp_entry;
10576 if ((entry->vme_next != vm_map_to_entry(map)) &&
10577 (entry->vme_next->vme_start < end))
10580 *map_entry = entry;
10707 vm_map_entry_t entry;
10723 entry = tmp_entry;
10726 if (entry == vm_map_to_entry(map)) {
10735 if (start < entry->vme_start) {
10741 * Check protection associated with entry.
10744 if ((entry->protection & protection) != protection) {
10749 /* go to next entry */
10751 start = entry->vme_end;
10752 entry = entry->vme_next;
10765 vm_map_entry_t entry;
10787 if (!vm_map_lookup_entry(map, address, &entry) || entry->is_sub_map) {
10796 if ((entry->protection & VM_PROT_WRITE) == 0) {
10804 object = entry->object.vm_object;
10815 if (entry->offset != 0 ||
10816 entry->vme_end - entry->vme_start != object->size) {
10859 offset -= map_entry->vme_start; /* adjust to offset within entry */
11040 vm_map_entry_t entry;
11073 vm_map_trunc_page(address), &entry)) {
11085 if (entry == vm_map_to_entry(map) &&
11086 entry->vme_next == entry) {
11092 * we have at least one real map entry.
11095 (entry->vme_next->vme_start < address)) {
11100 * Move up to the next entry if needed
11102 skip = (entry->vme_next->vme_start - address);
11107 address = entry->vme_next->vme_start;
11112 offset = address - entry->vme_start;
11116 * entry ?
11118 if (amount_left + entry->vme_start + offset > entry->vme_end) {
11119 flush_size = entry->vme_end -
11120 (entry->vme_start + offset);
11127 if (entry->is_sub_map == TRUE) {
11131 local_map = entry->object.sub_map;
11132 local_offset = entry->offset;
11143 object = entry->object.vm_object;
11153 offset += entry->offset;
11154 modifiable = (entry->protection & VM_PROT_WRITE)
11163 if (object->ref_count == 1 && !entry->needs_copy && !object->shadow)
11203 * only send a m_o_s if we returned pages or if the entry
11250 * need to check for overlapping entry, if found, wait
11317 * Convert from a port specifying an entry or a task
11320 * port may be task or a named entry backed.
11376 * Convert from a port specifying a named entry to an