Lines matching refs:object in /macosx-10.5.8/xnu-1228.15.4/osfmk/vm/

445  * If IPC isn't used, the caller must deal with the vm_map_copy_t object
487 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
824 * Map some range of an object into an address space.
826 * The object can be one of several types of objects:
829 * or a range within a memory object
830 * a whole memory object
1663 /* Retrieve a UPL for an object underlying an address range in a map */
1728 vm_object_t object;
1782 object = parent_entry->backing.object;
1783 if(parent_is_object && object != VM_OBJECT_NULL)
1784 wimg_mode = object->wimg_bits;
1804 if(parent_is_object && object &&
1806 (!(object->nophyscache))) {
1807 if(object->wimg_bits != wimg_mode) {
1814 vm_object_lock(object);
1815 vm_object_paging_wait(object, THREAD_UNINT);
1816 object->wimg_bits = wimg_mode;
1817 queue_iterate(&object->memq,
1826 vm_object_unlock(object);
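
The matches at 1804-1826 outline the pattern for retargeting an object's cache attributes: quiesce paging under the object lock, set the new wimg_bits, then walk the resident-page queue so existing pages pick up the change. A minimal sketch of that pattern, reconstructed from the fragments above; the per-page pmap_disconnect call is an assumption, not visible in the matches:

    if (parent_is_object && object &&
        (object->wimg_bits != wimg_mode) &&
        !(object->nophyscache)) {
            vm_page_t p;

            /* quiesce paging before changing cacheability */
            vm_object_lock(object);
            vm_object_paging_wait(object, THREAD_UNINT);
            object->wimg_bits = wimg_mode;
            /* walk resident pages so mappings pick up the new mode */
            queue_iterate(&object->memq, p, vm_page_t, listq) {
                    if (!p->fictitious)
                            pmap_disconnect(p->phys_page);  /* assumed */
            }
            vm_object_unlock(object);
    }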
1841 * Force the creation of the VM object now.
1853 object = vm_object_allocate(map_size);
1854 assert(object != VM_OBJECT_NULL);
1859 vm_object_deallocate(object);
1863 object->purgable = VM_PURGABLE_NONVOLATILE;
1867 * The VM object is brand new and nobody else knows about it,
1871 wimg_mode = object->wimg_bits;
1882 object->wimg_bits = wimg_mode;
1884 /* the object has no pages, so no WIMG bits to update here */
1889 * nobody messes with the object (coalesce, for
1895 object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
1897 user_entry->backing.object = object;
1907 /* when the object field is filled in. */
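
Lines 1841-1907 cover the named-entry creation path that forces a fresh VM object into existence. A sketch assembled from those fragments; the purgable permission check and the error handling are assumptions:

    object = vm_object_allocate(map_size);
    assert(object != VM_OBJECT_NULL);
    if (purgable_requested) {                   /* flag name hypothetical */
            if (!writable) {                    /* can't purge read-only */
                    vm_object_deallocate(object);   /* cf. line 1859 */
                    return KERN_INVALID_ARGUMENT;   /* assumed */
            }
            object->purgable = VM_PURGABLE_NONVOLATILE;
    }
    /* the VM object is brand new and nobody else knows about it,
     * so its attributes can be set without taking the lock */
    object->wimg_bits = wimg_mode;
    /* make sure nobody messes with the object (coalesce, copy) */
    object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
    user_entry->backing.object = object;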
1917 /* Create a named object based on an address range within the task map */
1918 /* Go find the object at given address */
1927 /* get the object associated with the target address */
1933 &object, &obj_off, &prot, &wired,
1941 || (object == kernel_object)) {
1943 vm_object_unlock(object);
1947 if(object == kernel_object) {
1954 /* We have an object, now check to see if this object */
1958 * We have to unlock the VM object to avoid deadlocking with
1959 * a VM map lock (the lock ordering is map, the object), if we
1960 * need to modify the VM map to create a shadow object. Since
1965 * Take an extra reference on the VM object to make sure it's
1968 vm_object_reference_locked(object); /* extra ref to hold obj */
1969 vm_object_unlock(object);
1988 vm_object_deallocate(object); /* release extra ref */
1989 object = VM_OBJECT_NULL;
1993 if(map_entry->object.vm_object != object) {
1998 vm_object_deallocate(object); /* release extra ref */
1999 object = VM_OBJECT_NULL;
2006 local_map = map_entry->object.sub_map;
2018 * We found the VM map entry, lock the VM object again.
2020 vm_object_lock(object);
2023 object->true_share = TRUE;
2027 vm_object_unlock(object);
2031 vm_object_deallocate(object);
2032 object = VM_OBJECT_NULL;
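
The comments at 1958-1968 spell out the lock-ordering rule (map first, then object), and 1988-2032 show the bail-out when revalidation fails. Put together, the shape is roughly the sketch below; the map-locking calls and the error code are assumptions:

    /* lock order is map, then object: drop the object lock before
     * touching the map, but keep the object alive meanwhile */
    vm_object_reference_locked(object);     /* extra ref to hold obj */
    vm_object_unlock(object);

    vm_map_lock_read(target_map);           /* call assumed */
    /* revalidate: the entry may have changed while unlocked */
    if (map_entry->object.vm_object != object) {
            vm_map_unlock_read(target_map);
            vm_object_deallocate(object);   /* release extra ref */
            object = VM_OBJECT_NULL;
            return KERN_INVALID_ARGUMENT;   /* error code assumed */
    }

    /* we found the VM map entry: lock the VM object again */
    vm_object_lock(object);
    object->true_share = TRUE;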
2040 /* following are from the same object and are */
2044 /* pointing at this object and is contiguous */
2046 if((next_entry->object.vm_object == object) &&
2072 if(object->internal) {
2076 /* set up an object which will not be pulled from */
2079 if ((map_entry->needs_copy || object->shadowed ||
2080 (object->size > total_size))
2081 && !object->true_share) {
2083 * We have to unlock the VM object before
2085 * honor lock ordering (map then object).
2088 * is trying to acquire the VM object's lock.
2090 * VM object, guaranteeing that it won't
2093 vm_object_unlock(object);
2102 vm_object_deallocate(object); /* extra ref */
2106 vm_object_lock(object);
2109 * JMM - We need to avoid coming here when the object
2115 /* create a shadow object */
2116 vm_object_shadow(&map_entry->object.vm_object,
2118 shadow_object = map_entry->object.vm_object;
2119 vm_object_unlock(object);
2127 object, map_entry->offset,
2142 if(next_entry->object.vm_object == object) {
2144 next_entry->object.vm_object
2146 vm_object_deallocate(object);
2164 * shadow object.
2167 vm_object_deallocate(object); /* extra ref */
2168 object = shadow_object;
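
Lines 2072-2168 are the copy-on-write branch: when the entry needs a copy, or the object is shadowed or larger than the mapped range (and not already truly shared), a shadow object is interposed and adjacent entries pointing at the old object get repointed. A condensed sketch; the map-lock upgrade and the entry-fixup details are assumptions:

    if ((map_entry->needs_copy || object->shadowed ||
         (object->size > total_size)) && !object->true_share) {
            /* drop the object lock to honor map-then-object ordering */
            vm_object_unlock(object);
            /* ... upgrade the map lock to write, then relock ... */
            vm_object_lock(object);
            /* create a shadow object over the mapped range */
            vm_object_shadow(&map_entry->object.vm_object,
                             &map_entry->offset, total_size);
            shadow_object = map_entry->object.vm_object;
            vm_object_unlock(object);

            /* repoint contiguous entries still using the old object */
            if (next_entry->object.vm_object == object) {
                    next_entry->object.vm_object = shadow_object; /* assumed */
                    vm_object_deallocate(object);
            }

            /* switch to the shadow; give back the extra ref */
            vm_object_deallocate(object);   /* extra ref */
            object = shadow_object;
    }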
2178 /* memory object lists, this will better support */
2187 wimg_mode = object->wimg_bits;
2188 if(!(object->nophyscache)) {
2200 object->true_share = TRUE;
2201 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
2202 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
2205 * The memory entry now points to this VM object and we
2206 * need to hold a reference on the VM object. Use the extra
2207 * reference we took earlier to keep the object alive when we
2215 if(object->wimg_bits != wimg_mode) {
2218 vm_object_paging_wait(object, THREAD_UNINT);
2226 queue_iterate(&object->memq,
2235 object->wimg_bits = wimg_mode;
2254 parent_entry->backing.object == object &&
2255 parent_entry->internal == object->internal &&
2264 /* release our extra reference on object */
2265 vm_object_unlock(object);
2266 vm_object_deallocate(object);
2280 vm_object_unlock(object);
2283 /* release our unused reference on the object */
2284 vm_object_deallocate(object);
2288 user_entry->backing.object = object;
2289 user_entry->internal = object->internal;
2297 /* when the object field is filled in. */
2304 /* The new object will be based on an existing named object */
2347 object = parent_entry->backing.object;
2348 assert(object != VM_OBJECT_NULL);
2349 user_entry->backing.object = object;
2350 /* we now point to this object, hold on */
2351 vm_object_reference(object);
2352 vm_object_lock(object);
2353 object->true_share = TRUE;
2354 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
2355 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
2356 vm_object_unlock(object);
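
Lines 2347-2356 are nearly self-contained: a new entry built on an existing named object just takes a reference and marks the object truly shared, demoting a symmetric copy strategy to delayed copy so later copy operations stop assuming exclusive access. Reassembled from the matches:

    object = parent_entry->backing.object;
    assert(object != VM_OBJECT_NULL);
    user_entry->backing.object = object;
    /* we now point to this object, hold on */
    vm_object_reference(object);
    vm_object_lock(object);
    object->true_share = TRUE;
    if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
            object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
    vm_object_unlock(object);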
2559 vm_object_t object;
2583 object = mem_entry->backing.object;
2584 if (object == VM_OBJECT_NULL) {
2589 vm_object_lock(object);
2591 /* check that named entry covers entire object ? */
2592 if (mem_entry->offset != 0 || object->size != mem_entry->size) {
2593 vm_object_unlock(object);
2600 kr = vm_object_purgable_control(object, control, state);
2602 vm_object_unlock(object);
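
The purgable-control path at 2583-2602 insists that the named entry cover the whole object before applying a state change. A sketch; the error returns are assumptions:

    object = mem_entry->backing.object;
    if (object == VM_OBJECT_NULL)
            return KERN_INVALID_ARGUMENT;   /* assumed */
    vm_object_lock(object);
    /* the named entry must cover the entire object */
    if (mem_entry->offset != 0 || object->size != mem_entry->size) {
            vm_object_unlock(object);
            return KERN_INVALID_ARGUMENT;   /* assumed */
    }
    kr = vm_object_purgable_control(object, control, state);
    vm_object_unlock(object);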
2649 /* release the memory object we've been pointing to */
2650 vm_object_deallocate(named_entry->backing.object);
2673 vm_object_t object;
2690 object = mem_entry->backing.object;
2691 if (object == VM_OBJECT_NULL) {
2696 vm_object_reference(object);
2699 kr = vm_object_page_op(object, offset, ops, phys_entry, flags);
2701 vm_object_deallocate(object);
2725 vm_object_t object;
2742 object = mem_entry->backing.object;
2743 if (object == VM_OBJECT_NULL) {
2748 vm_object_reference(object);
2751 kr = vm_object_range_op(object,
2757 vm_object_deallocate(object);
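
The page-op (2690-2701) and range-op (2742-2757) entry points share one shape: resolve the backing object, hold a reference across the underlying call, then drop it. Sketched once for both; the range-op argument names are assumptions:

    object = mem_entry->backing.object;
    if (object == VM_OBJECT_NULL)
            return KERN_INVALID_ARGUMENT;   /* assumed */
    vm_object_reference(object);            /* hold across the call */
    kr = vm_object_page_op(object, offset, ops, phys_entry, flags);
    /* range variant: kr = vm_object_range_op(object, offset_beg,
     *                        offset_end, ops, range);   (args assumed) */
    vm_object_deallocate(object);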
2940 /* Create a named object based on a submap of specified size */
2968 vm_object_t object;
2978 if (entry->object.vm_object == VM_OBJECT_NULL) {
2984 vm_map_lock(entry->object.sub_map);
2986 map = entry->object.sub_map;
2991 if (entry->object.vm_object->phys_contiguous) {
2993 /* If they are not present in the object they will */
2996 if(entry->object.vm_object->shadow_offset == 0) {
3006 ((entry->object.vm_object->shadow_offset
3012 object = entry->object.vm_object;
3013 vm_object_lock(object);
3015 vm_page_t dst_page = vm_page_lookup(object,offset);
3017 if(object->shadow) {
3019 vm_object_lock(object->shadow);
3020 old_object = object;
3021 offset = offset + object->shadow_offset;
3022 object = object->shadow;
3025 vm_object_unlock(object);
3030 vm_object_unlock(object);
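
Lines 3012-3030 walk a shadow chain to find the resident page backing an offset: if the page is absent and the object has a shadow, descend into it, adjusting the offset by shadow_offset, with hand-over-hand locking. A sketch; the loop framing is assumed from the lock/unlock pairing in the matches:

    object = entry->object.vm_object;
    vm_object_lock(object);
    while (TRUE) {
            vm_page_t dst_page = vm_page_lookup(object, offset);
            if (dst_page == VM_PAGE_NULL && object->shadow) {
                    vm_object_t old_object;
                    /* hand-over-hand: lock child before unlocking parent */
                    vm_object_lock(object->shadow);
                    old_object = object;
                    offset = offset + object->shadow_offset;
                    object = object->shadow;
                    vm_object_unlock(old_object);
            } else {
                    /* found the page, or ran out of shadows */
                    vm_object_unlock(object);
                    break;
            }
    }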
3063 vm_object_t object;
3100 /* offset from the beginning of the named entry, plus the entry's offset in the object */
3109 object = vm_object_enter(named_entry->backing.pager,
3114 if (object == VM_OBJECT_NULL) {
3121 /* create an extra reference for the object */
3122 vm_object_lock(object);
3123 vm_object_reference_locked(object);
3124 named_entry->backing.object = object;
3128 /* wait for object (if any) to be ready */
3130 while (!object->pager_ready) {
3131 vm_object_wait(object,
3134 vm_object_lock(object);
3137 vm_object_unlock(object);
3141 /* on an already known object. If the object is */
3143 /* object cannot be mapped until it is ready */
3146 object = named_entry->backing.object;
3147 vm_object_reference(object);
3151 if (!object->private) {
3154 if (object->phys_contiguous) {
3163 ret = vm_object_iopl_request(object,
3170 vm_object_deallocate(object);
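
Finally, 3109-3137 materialize the VM object behind a pager-backed named entry and wait for its pager to come up; vm_object_wait drops the object lock, which is why the loop retakes it each pass. A sketch; the vm_object_enter argument list and the event name are assumptions:

    object = vm_object_enter(named_entry->backing.pager,
                             named_entry->size,          /* args assumed */
                             named_entry->internal,
                             FALSE, FALSE);
    if (object == VM_OBJECT_NULL)
            return KERN_INVALID_OBJECT;                  /* assumed */

    /* extra reference so the named entry keeps the object alive */
    vm_object_lock(object);
    vm_object_reference_locked(object);
    named_entry->backing.object = object;

    /* wait for object (if any) to be ready */
    while (!object->pager_ready) {
            vm_object_wait(object,
                           VM_OBJECT_EVENT_PAGER_READY,  /* event assumed */
                           THREAD_UNINT);
            vm_object_lock(object);  /* vm_object_wait drops the lock */
    }
    vm_object_unlock(object);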