
Lines Matching refs:offset in /macosx-10.9.5/xnu-2422.115.4/osfmk/vm/

2394  * Return true if the page at offset 'p' in the bit map has already been handled
2401 * Mark the page at offset 'p' in the bit map as having been processed.
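
The two comments above (refs 2394-2401) describe the bit map that deactivate_pages_in_object() uses to track which pages of a 64-page "chunk" have been dealt with as it descends a shadow chain. A minimal standalone sketch of such a bit map, assuming one 64-bit word with a set bit meaning "still pending" (the macro names mirror the kernel's, but their exact definitions here are an assumption):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t chunk_state_t;             /* one bit per page in a chunk */

    #define CHUNK_SIZE                  64      /* pages covered by one word */
    #define CHUNK_NOT_COMPLETE(cs)      ((cs) != 0)
    #define PAGE_ALREADY_HANDLED(cs, p) (((cs) & (1ULL << (p))) == 0)
    #define MARK_PAGE_HANDLED(cs, p)    ((cs) &= ~(1ULL << (p)))

    int main(void)
    {
        chunk_state_t cs = ~0ULL;               /* all 64 pages still pending */

        MARK_PAGE_HANDLED(cs, 3);               /* page 3 processed at this level */
        printf("page 3 handled: %d\n", PAGE_ALREADY_HANDLED(cs, 3)); /* 1 */
        printf("chunk complete: %d\n", !CHUNK_NOT_COMPLETE(cs));     /* 0 */
        return 0;
    }
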
2411 * Return true if the page at the given offset has been paged out. Object is
2418 vm_object_offset_t offset)
2430 if (vm_external_state_get(object->existence_map, offset)
2446 if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset)
2465 offset + object->paging_offset,
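
page_is_paged_out() (refs 2411-2465) answers "does a non-resident page still exist somewhere?" by consulting the object's existence map and, failing that, the compressor pager. A hedged sketch of that decision shape, with both queries stubbed out since the real ones need the full VM layer:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t vm_object_offset_t;

    enum ext_state { STATE_UNKNOWN, STATE_ABSENT, STATE_EXISTS };

    /* stand-ins for the existence-map and compressor-pager queries */
    static enum ext_state existence_map_get(vm_object_offset_t off) { (void)off; return STATE_UNKNOWN; }
    static enum ext_state compressor_get(vm_object_offset_t off)    { (void)off; return STATE_EXISTS; }

    /* true if a non-resident page at 'offset' still exists somewhere */
    static bool page_is_paged_out(vm_object_offset_t offset)
    {
        if (existence_map_get(offset) == STATE_EXISTS)
            return true;                /* the default pager holds a copy */
        if (compressor_get(offset) == STATE_EXISTS)
            return true;                /* the compressor holds a copy */
        return false;
    }

    int main(void)
    {
        printf("paged out: %d\n", page_is_paged_out(0x1000));
        return 0;
    }
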
2510 vm_object_offset_t offset,
2537 for (p = 0; size && CHUNK_NOT_COMPLETE(*chunk_state); p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64) {
2540 * If this offset has already been found and handled in a higher level object, then don't
2548 * See if the page at this offset is around. First check to see if the page is resident,
2552 if ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
2556 * so that we won't bother looking for a page at this offset again if there are more
2594 vm_external_state_clr(object->existence_map, offset);
2597 offset);
2634 * The page at this offset isn't memory resident, check to see if it's
2639 if (page_is_paged_out(object, offset)) {
2649 vm_external_state_clr(object->existence_map, offset);
2652 offset);
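
Putting the pieces together, the loop at refs 2537-2652 visits each page of a chunk once: pages already handled by a shallower object are skipped, resident pages are deactivated, paged-out copies are discarded, and absent pages keep their bit set so a deeper shadow object gets a turn. A condensed standalone model (per-page state is faked in state_of(); the kernel uses vm_page_lookup() and page_is_paged_out()):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t chunk_state_t;

    #define PAGE_SIZE_64                4096ULL /* assumed page size */
    #define CHUNK_NOT_COMPLETE(cs)      ((cs) != 0)
    #define PAGE_ALREADY_HANDLED(cs, p) (((cs) & (1ULL << (p))) == 0)
    #define MARK_PAGE_HANDLED(cs, p)    ((cs) &= ~(1ULL << (p)))

    enum pstate { ABSENT, RESIDENT, PAGED_OUT };

    /* fake per-page state in place of vm_page_lookup()/page_is_paged_out() */
    static enum pstate state_of(uint64_t off)
    {
        return (enum pstate)((off / PAGE_SIZE_64) % 3);
    }

    int main(void)
    {
        chunk_state_t cs = ~0ULL;           /* 64 pages, all pending */
        uint64_t offset = 0, size = 64 * PAGE_SIZE_64;

        for (int p = 0; size && CHUNK_NOT_COMPLETE(cs);
             p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64) {
            if (PAGE_ALREADY_HANDLED(cs, p))
                continue;                   /* a shallower object got it */
            switch (state_of(offset)) {
            case RESIDENT:                  /* deactivate / mark reusable */
            case PAGED_OUT:                 /* drop the backing copy */
                MARK_PAGE_HANDLED(cs, p);
                break;
            case ABSENT:                    /* leave for a deeper shadow */
                break;
            }
        }
        printf("still pending: 0x%llx\n", (unsigned long long)cs);
        return 0;
    }
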
2670 * Deactivate a "chunk" of the given range of the object starting at offset. A "chunk"
2681 vm_object_offset_t offset,
2719 deactivate_pages_in_object(object, offset, length, kill_page, reusable_page, all_reusable, &chunk_state, pfc);
2725 * there is, update the offset and lock the new object. We also turn off
2735 offset += object->vo_shadow_offset;
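
Refs 2725-2735 (and 3095 below) show the descent through a shadow chain: before moving to the backing object, the offset is translated by vo_shadow_offset so it names the same logical page one level down. A toy model of that translation, assuming a simple singly linked chain and no locking:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t vm_object_offset_t;

    /* toy model of a VM object with a backing shadow */
    struct vm_object {
        struct vm_object   *shadow;            /* backing object, or NULL */
        vm_object_offset_t  vo_shadow_offset;  /* our window into it */
    };

    /* walk to the bottom of the chain, accumulating the offset the way
     * deactivate_pages_in_object()/vm_object_pmap_protect() do */
    static vm_object_offset_t
    chain_offset(struct vm_object *obj, vm_object_offset_t offset,
                 struct vm_object **bottom)
    {
        while (obj->shadow != NULL) {
            offset += obj->vo_shadow_offset;   /* translate one level down */
            obj = obj->shadow;                 /* (the kernel re-locks here) */
        }
        *bottom = obj;
        return offset;
    }

    int main(void)
    {
        struct vm_object base = { 0, 0 };
        struct vm_object top  = { &base, 0x10000 };  /* top starts 64 KiB in */
        struct vm_object *o;
        printf("offset in base: 0x%llx\n",
               (unsigned long long)chain_offset(&top, 0x2000, &o)); /* 0x12000 */
        return 0;
    }

The truncated "We also turn off" at ref 2725 refers to kill_page: an object deeper in the chain may be shared with other mappings, so its pages can be deactivated but not destroyed outright.
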
2762 vm_object_offset_t offset,
2798 length = deactivate_a_chunk(object, offset, size, kill_page, reusable_page, all_reusable, &pmap_flush_context_storage);
2801 offset += length;
2862 if (m->offset < start_offset ||
2863 m->offset >= end_offset) {
2892 if (m->offset < start_offset ||
2893 m->offset >= end_offset) {
2933 register vm_object_offset_t offset,
2939 vm_object_pmap_protect_options(object, offset, size,
2946 register vm_object_offset_t offset,
2959 offset = vm_object_trunc_page(offset);
2975 phys_start = object->vo_shadow_offset + offset;
3022 end = offset + size;
3025 if (!p->fictitious && (offset <= p->offset) && (p->offset < end)) {
3028 start = pmap_start + p->offset - offset;
3053 end = offset + size;
3055 for (target_off = offset;
3063 start = pmap_start + (p->offset - offset);
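
Both loops above (refs 3022-3063) rely on the same address arithmetic: if pmap_start is the virtual address at which object offset 'offset' is mapped, then a resident page at p->offset is mapped at pmap_start + (p->offset - offset). In miniature:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t offset      = 0x4000;   /* object offset the mapping starts at */
        uint64_t pmap_start  = 0x7f0000; /* VA mapped to that offset */
        uint64_t page_offset = 0x6000;   /* p->offset of a resident page */

        /* the page falls inside [offset, offset + size), so its VA is: */
        uint64_t start = pmap_start + (page_offset - offset);
        printf("protect VA 0x%llx\n", (unsigned long long)start); /* 0x7f2000 */
        return 0;
    }
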
3095 offset += object->vo_shadow_offset;
3144 * new object, starting at a zero offset, are a copy
3181 * We fill the new object starting at offset 0,
3182 * regardless of the input offset.
3381 __unused vm_object_offset_t offset, /* IN */
3390 *_object, offset, size, 0, 0);
3407 * Leave object/offset unchanged.
3727 p->offset >= old_copy->vo_size &&
3728 p->offset < copy_size) {
3826 if (!p->fictitious && p->offset < copy_size) {
4009 * The new object and offset into that object
4017 vm_object_offset_t *offset, /* IN/OUT */
4082 * Store the offset into the source object,
4083 * and fix up the offset into the new object.
4086 result->vo_shadow_offset = *offset;
4092 *offset = 0;
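
Refs 4082-4092 capture the invariant behind shadow creation, and behind the copy refs at 3144-3182: a freshly created object is always filled from offset 0, and the caller's old offset is remembered as the new object's vo_shadow_offset. A hypothetical standalone version of that fixup:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef uint64_t vm_object_offset_t;

    struct vm_object {
        struct vm_object   *shadow;
        vm_object_offset_t  vo_shadow_offset;
    };

    /* create a shadow in front of *objp; the caller's offset becomes the
     * shadow offset and the caller restarts at 0 in the new object */
    static void shadow_object(struct vm_object **objp, vm_object_offset_t *offset)
    {
        struct vm_object *result = calloc(1, sizeof *result);
        result->shadow           = *objp;     /* back the new object by the old */
        result->vo_shadow_offset = *offset;   /* remember where we were */
        *objp   = result;
        *offset = 0;                          /* new object is filled from 0 */
    }

    int main(void)
    {
        struct vm_object base = { 0, 0 }, *obj = &base;
        vm_object_offset_t off = 0x3000;
        shadow_object(&obj, &off);
        printf("off=%llu shadow_off=0x%llx\n",
               (unsigned long long)off,
               (unsigned long long)obj->vo_shadow_offset); /* 0 and 0x3000 */
        return 0;
    }
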
4708 new_offset = (p->offset - backing_offset);
4720 if (p->offset < backing_offset || new_offset >= size) {
4831 * If the shadow offset is 0, then use the existence map from
4832 * the backing object if there is one. If the shadow offset is
4835 * XXX - If the shadow offset is not 0 then a bit copy is needed
4867 /* no shadow, therefore no shadow offset... */
4938 /* no shadow, therefore no shadow offset... */
5259 vm_object_offset_t offset;
5381 * to run the offset based 2nd pass. Because we may
5392 offset = (p->offset - backing_offset);
5394 if (offset < object->vo_size &&
5395 offset != hint_offset &&
5396 !EXISTS_IN_OBJECT(object, offset, rc)) {
5398 object->cow_hint = (vm_offset_t) offset; /* atomic */
5425 offset = hint_offset;
5427 while ((offset =
5428 (offset + PAGE_SIZE_64 < object->vo_size) ?
5429 (offset + PAGE_SIZE_64) : 0) != hint_offset) {
5431 if (EXISTS_IN_OBJECT(backing_object, offset +
5433 !EXISTS_IN_OBJECT(object, offset, rcount)) {
5435 object->cow_hint = (vm_offset_t) offset; /* atomic */
5439 if (offset != hint_offset) {
5463 /* reset the offset hint for any objects deeper in the chain */
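
Refs 5425-5439 are a wrap-around scan: start one page past the cached cow_hint, wrap to 0 at vo_size, and give up once the scan arrives back at the hint. The same control flow in a standalone sketch, with the EXISTS_IN_OBJECT() test replaced by a stub predicate:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE_64 4096ULL   /* assumed page size */

    /* stub for the EXISTS_IN_OBJECT() checks at refs 5431-5433 */
    static bool blocking_page_at(uint64_t off) { return off == 0x2000; }

    /* scan every page offset exactly once, starting after 'hint' and
     * wrapping at 'size'; returns the first blocking offset, or 'hint'
     * itself if the scan came all the way around empty-handed */
    static uint64_t scan_from_hint(uint64_t hint, uint64_t size)
    {
        uint64_t offset = hint;
        while ((offset = (offset + PAGE_SIZE_64 < size)
                             ? offset + PAGE_SIZE_64 : 0) != hint) {
            if (blocking_page_at(offset))
                return offset;          /* cache as the new cow_hint */
        }
        return hint;
    }

    int main(void)
    {
        printf("hint -> 0x%llx\n",
               (unsigned long long)scan_from_hint(0x8000, 0x10000)); /* 0x2000 */
        return 0;
    }
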
5533 if ((start <= p->offset) && (p->offset < end)) {
5671 vm_object_offset_t offset,
5674 vm_object_offset_t offset),
5685 for (i = 0; i < num_pages; i++, offset += PAGE_SIZE_64) {
5687 addr = (*map_fn)(map_fn_data, offset);
5693 if ((old_page = vm_page_lookup(object, offset))
5707 vm_page_insert(m, object, offset);
5717 vm_object_offset_t offset,
5735 if ((base_offset = trunc_page_64(offset)) != offset) {
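
Ref 5735 is the usual page-alignment guard: truncate the offset to a page boundary and reject the request if that changed anything. In miniature, assuming 4 KiB pages:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_MASK_64 4095ULL                    /* assumed 4 KiB pages */
    #define trunc_page_64(x) ((x) & ~PAGE_MASK_64)  /* round down */

    int main(void)
    {
        uint64_t offset = 0x1234;
        uint64_t base_offset = trunc_page_64(offset);
        if (base_offset != offset)
            printf("0x%llx is not page aligned, reject\n",
                   (unsigned long long)offset);
        return 0;
    }
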
5806 /* we therefore can use the offset field */
6124 vm_object_offset_t offset,
6136 object, offset, size,
6158 offset, size, NULL, NULL, should_return, flags, prot);
6556 vm_object_offset_t offset,
6574 cur_offset = offset;
6576 end_offset = offset + size;
6591 if (offset == 0 && (object->vo_size == size)) {
6602 if (p->offset >= cur_offset && p->offset < end_offset) {
6617 for (cur_offset = offset; cur_offset < end_offset; cur_offset += PAGE_SIZE_64) {
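
Refs 6591-6617 show a recurring VM iteration heuristic: when a request covers the entire object, walk the resident-page queue once and filter by offset; for a sub-range, probe each page offset with vm_page_lookup() instead. A sketch of that choice, with residency faked by a small array:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE_64 4096ULL   /* assumed page size */

    static void handle_page(uint64_t off)
    {
        printf("page at 0x%llx\n", (unsigned long long)off);
    }

    /* stub for vm_page_lookup(): is a page resident at this offset? */
    static bool lookup(const uint64_t res[], int n, uint64_t off)
    {
        for (int i = 0; i < n; i++)
            if (res[i] == off)
                return true;
        return false;
    }

    static void range_op(uint64_t obj_size, const uint64_t res[], int n,
                         uint64_t offset, uint64_t size)
    {
        if (offset == 0 && obj_size == size) {
            /* whole object: one pass over the resident-page queue */
            for (int i = 0; i < n; i++)
                handle_page(res[i]);
        } else {
            /* sub-range: probe each page offset individually; cheaper
             * when the range is much smaller than the resident set */
            for (uint64_t cur = offset; cur < offset + size; cur += PAGE_SIZE_64)
                if (lookup(res, n, cur))
                    handle_page(cur);
        }
    }

    int main(void)
    {
        uint64_t pages[] = { 0x0000, 0x2000, 0x5000 };
        range_op(0x8000, pages, 3, 0x2000, 0x4000);   /* sub-range path */
        return 0;
    }
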
6907 vm_page_rename(page, object1, page->offset, FALSE);
6918 vm_page_rename(page, object2, page->offset, FALSE);
6925 page_offset = page->offset;
6927 page->offset = page_offset;
6934 vm_page_rename(page, object1, page->offset, FALSE);
6942 vm_page_insert(page, object2, page->offset);
7113 * Inputs: *start == offset of page needed
7115 * Outputs: *start == beginning offset of cluster
7143 vm_object_offset_t offset;
7223 * starting offset
7349 * 'target_start' at this point represents the beginning offset
7386 * faulting offset... recalculate this in case
7392 for (offset = orig_start - PAGE_SIZE_64; head_size; offset -= PAGE_SIZE_64, head_size -= PAGE_SIZE) {
7394 * don't poke below the lowest offset
7396 if (offset < fault_info->lo_offset)
7403 if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_ABSENT) {
7411 if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset)
7415 if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
7421 *start = offset;
7426 for (offset = orig_start + PAGE_SIZE_64; tail_size; offset += PAGE_SIZE_64, tail_size -= PAGE_SIZE) {
7428 * don't poke above the highest offset
7430 if (offset >= fault_info->hi_offset)
7432 assert(offset < object_size);
7439 if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_ABSENT) {
7447 if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset)
7451 if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
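
Refs 7392-7451 grow a pagein cluster in two passes around the faulting offset: walk backward (the head) until lo_offset, a resident page, or a known-absent page stops the scan, then walk forward (the tail) toward hi_offset under the same rules. A condensed standalone model, with the three per-page stop conditions folded into one stub:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE_64 4096ULL   /* assumed page size */

    /* stub for "stop growing here": the kernel checks the existence map,
     * the compressor, and vm_page_lookup() at each candidate offset */
    static bool stop_at(uint64_t off) { return off == 0x3000 || off == 0x9000; }

    int main(void)
    {
        uint64_t orig_start = 0x6000, lo = 0x0000, hi = 0x10000;
        uint64_t head = 2 * PAGE_SIZE_64, tail = 3 * PAGE_SIZE_64;
        uint64_t start = orig_start, end = orig_start + PAGE_SIZE_64;

        /* head: don't poke below lo_offset */
        for (uint64_t off = orig_start - PAGE_SIZE_64;
             head; off -= PAGE_SIZE_64, head -= PAGE_SIZE_64) {
            if (off < lo || stop_at(off))
                break;
            start = off;
        }
        /* tail: don't poke above hi_offset */
        for (uint64_t off = orig_start + PAGE_SIZE_64;
             tail; off += PAGE_SIZE_64, tail -= PAGE_SIZE_64) {
            if (off >= hi || stop_at(off))
                break;
            end = off + PAGE_SIZE_64;
        }
        printf("cluster [0x%llx, 0x%llx)\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
    }
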
7480 vm_object_offset_t offset,
7508 if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) {
7645 vm_object_offset_t offset;
7670 offset = offset_beg & ~PAGE_MASK_64;
7672 while (offset < offset_end) {
7673 dst_page = vm_page_lookup(object, offset);
7705 offset += PAGE_SIZE;
7710 if (offset > offset_end)
7711 offset = offset_end;
7712 if (offset > offset_beg) {
7713 *range = (uint32_t) (offset - offset_beg);
7714 assert(*range == (offset - offset_beg));
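
Refs 7670-7714 measure how far a per-page condition holds starting at offset_beg: round the start down to a page boundary, advance page by page while the test passes, clamp to offset_end, and report the span through *range. A small standalone version with the page test stubbed:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE    4096ULL           /* assumed page size */
    #define PAGE_MASK_64 (PAGE_SIZE - 1)

    /* stub for the per-page test (vm_page_lookup() plus flag checks) */
    static bool page_qualifies(uint64_t off) { return off < 0x5000; }

    static uint32_t measure_range(uint64_t offset_beg, uint64_t offset_end)
    {
        uint64_t offset = offset_beg & ~PAGE_MASK_64;  /* page-align start */

        while (offset < offset_end) {
            if (!page_qualifies(offset))
                break;
            offset += PAGE_SIZE;
        }
        if (offset > offset_end)        /* don't report past the caller's end */
            offset = offset_end;
        if (offset > offset_beg)
            return (uint32_t)(offset - offset_beg);
        return 0;
    }

    int main(void)
    {
        printf("range = 0x%x\n", measure_range(0x1800, 0x8000)); /* 0x3800 */
        return 0;
    }
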
7724 * with a non-device vnode). Takes a virtual address, an offset, and a size. We currently
7729 memory_object_offset_t offset,
7754 retval = vm_object_populate_with_private(pager_object, offset, page_num, size);