Lines Matching refs:upl in /macosx-10.5.8/xnu-1228.15.4/osfmk/vm/

79 #include <mach/upl.h>
455 /* page. When the bit is on the upl commit code will */
2437 upl_t upl;
2440 int upl_size = sizeof(struct upl);
2453 upl = (upl_t)kalloc(upl_size + page_field_size);
2456 bzero((char *)upl + upl_size, page_field_size);
2458 upl->flags = upl_flags | flags;
2459 upl->src_object = NULL;
2460 upl->kaddr = (vm_offset_t)0;
2461 upl->size = 0;
2462 upl->map_object = NULL;
2463 upl->ref_count = 1;
2464 upl->highest_page = 0;
2465 upl_lock_init(upl);
2467 upl->ubc_alias1 = 0;
2468 upl->ubc_alias2 = 0;
2470 return(upl);
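
Lines 2437-2470 above are the body of upl_create(). Read together with the call sites at lines 2638-2656 and 4797-4805, the pattern is: a single kalloc() sized for the struct upl plus, for INTERNAL upls, the embedded upl_page_info_t array and, for LITE upls, a trailing per-page bit field; the bit field is zeroed and the bookkeeping fields are initialized with ref_count starting at 1. The following is a hedged reconstruction of that pattern; the UPL_CREATE_LITE / UPL_CREATE_INTERNAL sizing branches and the bit-field rounding are inferred from the call sites, not quoted from the matched lines.

/*
 * Sketch only: reconstructs the allocation pattern of upl_create() from
 * the matched lines; the sizing of the lite bit field and the page-info
 * array is inferred, not quoted.
 */
static upl_t
sketch_upl_create(int type, int flags, upl_size_t size)
{
        int     upl_size        = sizeof(struct upl);
        int     upl_flags       = 0;
        int     page_field_size = 0;
        upl_t   upl;

        if (type & UPL_CREATE_LITE) {
                /* one bit per page for the "lite" map (rounding assumed) */
                page_field_size = ((size / PAGE_SIZE) + 7) >> 3;
                upl_flags |= UPL_LITE;
        }
        if (type & UPL_CREATE_INTERNAL) {
                /* room for the embedded upl_page_info_t array (see line 2640) */
                upl_size += sizeof(struct upl_page_info) * (size / PAGE_SIZE);
                upl_flags |= UPL_INTERNAL;
        }
        upl = (upl_t)kalloc(upl_size + page_field_size);        /* line 2453 */
        if (page_field_size)
                bzero((char *)upl + upl_size, page_field_size); /* line 2456 */

        upl->flags        = upl_flags | flags;                  /* line 2458 */
        upl->src_object   = NULL;
        upl->kaddr        = (vm_offset_t)0;
        upl->size         = 0;
        upl->map_object   = NULL;
        upl->ref_count    = 1;          /* the creator's reference */
        upl->highest_page = 0;
        upl_lock_init(upl);
        upl->ubc_alias1   = 0;
        upl->ubc_alias2   = 0;

        return upl;
}
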
2474 upl_destroy(upl_t upl)
2483 if (upl->flags & UPL_SHADOWED) {
2484 object = upl->map_object->shadow;
2486 object = upl->map_object;
2489 queue_remove(&object->uplq, upl, upl_t, uplq);
2497 if (upl->flags & UPL_SHADOWED)
2498 vm_object_deallocate(upl->map_object);
2500 if (upl->flags & UPL_DEVICE_MEMORY)
2503 size = upl->size;
2506 if (upl->flags & UPL_LITE) {
2510 if (upl->flags & UPL_INTERNAL) {
2511 kfree(upl,
2512 sizeof(struct upl) +
2516 kfree(upl, sizeof(struct upl) + page_field_size);
2520 void uc_upl_dealloc(upl_t upl);
2522 uc_upl_dealloc(upl_t upl)
2524 if (--upl->ref_count == 0)
2525 upl_destroy(upl);
2529 upl_deallocate(upl_t upl)
2531 if (--upl->ref_count == 0)
2532 upl_destroy(upl);
2604 upl_t upl = NULL;
2638 upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE, 0, size);
2640 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
2645 upl = upl_create(UPL_CREATE_INTERNAL, 0, size);
2647 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
2652 upl = upl_create(UPL_CREATE_EXTERNAL | UPL_CREATE_LITE, 0, size);
2654 lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
2656 upl = upl_create(UPL_CREATE_EXTERNAL, 0, size);
2659 *upl_ptr = upl;
2665 upl->map_object = object;
2667 upl->map_object = vm_object_allocate(size);
2672 upl->map_object->shadow = object;
2673 upl->map_object->pageout = TRUE;
2674 upl->map_object->can_persist = FALSE;
2675 upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
2676 upl->map_object->shadow_offset = offset;
2677 upl->map_object->wimg_bits = object->wimg_bits;
2681 upl->flags |= UPL_SHADOWED;
2691 upl->flags |= UPL_ENCRYPTED;
2694 upl->flags |= UPL_PAGEOUT;
2702 upl->size = size;
2703 upl->offset = offset + object->paging_offset;
2706 queue_enter(&object->uplq, upl, upl_t, uplq);
2777 upl->flags |= UPL_PAGE_SYNC_DONE;
2946 if (dst_page->phys_page > upl->highest_page)
2947 upl->highest_page = dst_page->phys_page;
2970 vm_object_lock(upl->map_object);
2971 vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
2972 vm_object_unlock(upl->map_object);
3118 * of upl who encounter device or
3192 * upl for a clustered read/pagein
3269 vm_object_lock(upl->map_object);
3270 vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
3271 vm_object_unlock(upl->map_object);
3282 * a upl commit is done. The caller
3286 upl->flags |= UPL_CLEAR_DIRTY;
3319 if (dst_page->phys_page > upl->highest_page)
3320 upl->highest_page = dst_page->phys_page;
3393 if (upl->flags & UPL_INTERNAL)
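
Wherever an INTERNAL upl is consumed, the same layout is re-derived from nothing but sizeof(struct upl) and upl->size: lines 2640-2654 compute it at creation time, and lines 3748-3753, 3981-3985 and 4452-4458 repeat the arithmetic in the map, commit and abort paths. A minimal restatement of that pointer arithmetic follows; returning NULL for the page list of non-INTERNAL upls is an assumption (external callers supply their own upl_page_info_t array).

/*
 * Restates the embedded-layout arithmetic shown at lines 3748-3753:
 * the upl_page_info_t array sits directly after the struct upl, and the
 * "lite" wired-page bit array follows it (or follows the struct directly
 * when there is no embedded page list).
 */
static void
sketch_internal_upl_layout(upl_t upl, upl_page_info_t **page_list, wpl_array_t *lite_list)
{
        if (upl->flags & UPL_INTERNAL) {
                *page_list = (upl_page_info_t *)(((uintptr_t)upl) + sizeof(struct upl));
                *lite_list = (wpl_array_t)(((uintptr_t)upl) + sizeof(struct upl)
                                + ((upl->size / PAGE_SIZE) * sizeof(upl_page_info_t)));
        } else {
                *page_list = NULL;      /* assumption: external upls use a caller-supplied array */
                *lite_list = (wpl_array_t)(((uintptr_t)upl) + sizeof(struct upl));
        }
}
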
3471 upl_t *upl,
3508 return vm_object_upl_request(object, offset, size, upl, user_page_list, page_list_count, cntrl_flags);
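
Line 3508 spells out the full argument order of vm_object_upl_request(). Below is a hedged caller-side sketch of the round trip implied by the listing (create, use, commit, release); the flag choice, the zero-initialized page count and the omitted error handling are placeholders, not taken from the file.

/*
 * Illustrative round trip: argument order follows line 3508, the commit
 * follows the shape at line 4708, and the release follows lines 2531-2532.
 * Flags and page-count handling are assumptions.
 */
static kern_return_t
sketch_upl_round_trip(vm_object_t object, vm_object_offset_t offset, upl_size_t size)
{
        upl_t           upl = NULL;
        unsigned int    page_count = 0;
        boolean_t       empty = FALSE;
        kern_return_t   kr;

        kr = vm_object_upl_request(object, offset, size,
                                   &upl, NULL, &page_count,
                                   UPL_SET_INTERNAL | UPL_SET_LITE);    /* flag choice assumed */
        if (kr != KERN_SUCCESS)
                return kr;

        /* ... inspect the embedded page list or map the upl here ... */

        kr = upl_commit_range(upl, 0, size, 0, NULL, 0, &empty);
        upl_deallocate(upl);            /* drop the reference taken at creation */
        return kr;
}
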
3517 upl_t *upl,
3543 if (upl == NULL)
3626 upl_size, upl, page_list, count, flags);
3696 upl,
3718 upl_t upl,
3727 if (upl == UPL_NULL)
3730 upl_lock(upl);
3735 if (UPL_PAGE_LIST_MAPPED & upl->flags) {
3736 upl_unlock(upl);
3740 if ((!(upl->flags & UPL_SHADOWED)) && !((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) ||
3741 (upl->map_object->phys_contiguous))) {
3748 if (upl->flags & UPL_INTERNAL) {
3750 ((((uintptr_t)upl) + sizeof(struct upl))
3751 + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
3753 lite_list = (wpl_array_t)(((uintptr_t)upl) + sizeof(struct upl));
3755 object = upl->map_object;
3756 upl->map_object = vm_object_allocate(upl->size);
3758 vm_object_lock(upl->map_object);
3760 upl->map_object->shadow = object;
3761 upl->map_object->pageout = TRUE;
3762 upl->map_object->can_persist = FALSE;
3763 upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
3764 upl->map_object->shadow_offset = upl->offset - object->paging_offset;
3765 upl->map_object->wimg_bits = object->wimg_bits;
3766 offset = upl->map_object->shadow_offset;
3768 size = upl->size;
3770 upl->flags |= UPL_SHADOWED;
3795 * since m is a page in the upl it must
3824 vm_page_insert(alias_page, upl->map_object, new_offset);
3834 vm_object_unlock(upl->map_object);
3836 if ((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) || upl->map_object->phys_contiguous)
3837 offset = upl->offset - upl->map_object->paging_offset;
3840 size = upl->size;
3842 vm_object_reference(upl->map_object);
3849 VM_FLAGS_ANYWHERE, upl->map_object, offset, FALSE,
3853 upl_unlock(upl);
3856 vm_object_lock(upl->map_object);
3859 m = vm_page_lookup(upl->map_object, offset);
3872 vm_object_unlock(upl->map_object);
3877 upl->ref_count++;
3878 upl->flags |= UPL_PAGE_LIST_MAPPED;
3879 upl->kaddr = *dst_addr;
3880 upl_unlock(upl);
3898 upl_t upl)
3903 if (upl == UPL_NULL)
3906 upl_lock(upl);
3908 if (upl->flags & UPL_PAGE_LIST_MAPPED) {
3909 addr = upl->kaddr;
3910 size = upl->size;
3912 assert(upl->ref_count > 1);
3913 upl->ref_count--; /* removing mapping ref */
3915 upl->flags &= ~UPL_PAGE_LIST_MAPPED;
3916 upl->kaddr = (vm_offset_t) 0;
3917 upl_unlock(upl);
3926 upl_unlock(upl);
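
Lines 3877-3880 and 3908-3917 are the two halves of the mapping bookkeeping: mapping a upl takes an extra reference, sets UPL_PAGE_LIST_MAPPED and records the kernel address in upl->kaddr; unmapping reverses all three. A hedged usage sketch follows; the vm_upl_map()/vm_upl_unmap() signatures, including the kernel_map argument, are assumed rather than visible in the matched lines.

/*
 * Sketch of the map/unmap pairing; signatures assumed.  While mapped,
 * upl->kaddr holds the kernel virtual address of the pages and the upl
 * carries one extra reference (lines 3877-3879).
 */
static void
sketch_map_touch_unmap(upl_t upl)
{
        vm_offset_t     kaddr = 0;
        kern_return_t   kr;

        kr = vm_upl_map(kernel_map, upl, &kaddr);
        if (kr != KERN_SUCCESS)
                return;

        /* ... read or write the pages through kaddr ... */

        (void)vm_upl_unmap(kernel_map, upl);    /* clears kaddr, drops the mapping reference */
}
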
3933 upl_t upl,
3955 if (upl == UPL_NULL)
3961 if (upl->flags & UPL_DEVICE_MEMORY)
3963 else if ((offset + size) <= upl->size)
3968 upl_lock(upl);
3970 if (upl->flags & UPL_ACCESS_BLOCKED) {
3978 if (upl->flags & UPL_CLEAR_DIRTY)
3981 if (upl->flags & UPL_INTERNAL)
3982 lite_list = (wpl_array_t) ((((uintptr_t)upl) + sizeof(struct upl))
3983 + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
3985 lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
3987 object = upl->map_object;
3989 if (upl->flags & UPL_SHADOWED) {
4043 if (upl->flags & UPL_LITE) {
4051 m = vm_page_lookup(shadow_object, target_offset + (upl->offset - shadow_object->paging_offset));
4054 if (upl->flags & UPL_SHADOWED) {
4080 if (upl->flags & UPL_IO_WIRE) {
4213 if (upl->flags & UPL_PAGEOUT) {
4233 if (upl->flags & UPL_PAGEOUT) {
4298 if (upl->flags & UPL_PAGE_SYNC_DONE)
4365 if (upl->flags & UPL_DEVICE_MEMORY) {
4367 } else if (upl->flags & UPL_LITE) {
4371 pg_num = upl->size/PAGE_SIZE;
4382 if (queue_empty(&upl->map_object->memq))
4386 if (upl->flags & UPL_COMMIT_NOTIFY_EMPTY)
4408 upl_unlock(upl);
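
upl_commit_range() works on an (offset, size) window and reports back through a boolean out-parameter; line 4386 shows that the upl is only reported empty when it carries UPL_COMMIT_NOTIFY_EMPTY. A hedged sketch of the chunked-commit idiom this shape allows; the chunk size and the zero flags are illustrative choices, not taken from the file.

/*
 * Commit a upl one window at a time.  The call shape follows lines 4441
 * and 4708; chunking policy is illustrative.  "empty" is only set by the
 * kernel when the upl has UPL_COMMIT_NOTIFY_EMPTY (line 4386), so the
 * loop also terminates on offset.
 */
static void
sketch_commit_in_chunks(upl_t upl, upl_size_t total_size)
{
        upl_offset_t    offset = 0;
        upl_size_t      chunk  = 16 * PAGE_SIZE;       /* illustrative window */
        boolean_t       empty  = FALSE;

        while (offset < total_size && !empty) {
                if (total_size - offset < chunk)
                        chunk = total_size - offset;

                (void)upl_commit_range(upl, offset, chunk, 0, NULL, 0, &empty);
                offset += chunk;
        }
        upl_deallocate(upl);    /* drop the creation reference (lines 2531-2532) */
}
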
4419 upl_t upl,
4437 if (upl == UPL_NULL)
4440 if ( (upl->flags & UPL_IO_WIRE) && !(error & UPL_ABORT_DUMP_PAGES) )
4441 return upl_commit_range(upl, offset, size, 0, NULL, 0, empty);
4443 if (upl->flags & UPL_DEVICE_MEMORY)
4445 else if ((offset + size) <= upl->size)
4450 upl_lock(upl);
4452 if (upl->flags & UPL_INTERNAL) {
4454 ((((uintptr_t)upl) + sizeof(struct upl))
4455 + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
4458 (((uintptr_t)upl) + sizeof(struct upl));
4460 object = upl->map_object;
4462 if (upl->flags & UPL_SHADOWED) {
4499 if (upl->flags & UPL_LITE) {
4507 (upl->offset - shadow_object->paging_offset));
4510 if (upl->flags & UPL_SHADOWED) {
4639 if (upl->flags & UPL_DEVICE_MEMORY) {
4641 } else if (upl->flags & UPL_LITE) {
4645 pg_num = upl->size/PAGE_SIZE;
4656 if (queue_empty(&upl->map_object->memq))
4660 if (upl->flags & UPL_COMMIT_NOTIFY_EMPTY)
4682 upl_unlock(upl);
4690 upl_t upl,
4695 return upl_abort_range(upl, 0, upl->size, error, &empty);
4702 upl_t upl,
4708 return upl_commit_range(upl, 0, upl->size, 0, page_list, count, &empty);
4725 upl_t upl = NULL;
4797 upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE, UPL_IO_WIRE, psize);
4799 user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
4803 upl = upl_create(UPL_CREATE_LITE, UPL_IO_WIRE, psize);
4805 lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
4809 *upl_ptr = upl;
4811 upl->map_object = object;
4812 upl->size = size;
4819 upl->offset = offset + object->paging_offset;
4823 queue_enter(&object->uplq, upl, upl_t, uplq);
4832 upl->flags |= UPL_DEVICE_MEMORY;
4834 upl->highest_page = (offset + object->shadow_offset + size - 1)>>PAGE_SHIFT;
4841 if (upl->flags & UPL_INTERNAL)
4857 queue_enter(&object->uplq, upl, upl_t, uplq);
4865 upl->flags |= UPL_ACCESS_BLOCKED;
5068 if (dst_page->phys_page > upl->highest_page)
5069 upl->highest_page = dst_page->phys_page;
5104 if (upl->flags & UPL_INTERNAL)
5141 upl_destroy(upl);
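
Within vm_object_iopl_request(), lines 4832-4834 show the shortcut taken for physically contiguous objects: the upl is tagged UPL_DEVICE_MEMORY and its highest_page is computed arithmetically from the object's shadow_offset, instead of being raised page by page as at lines 5068-5069. A small restatement of that branch follows; the helper name and the phys_contiguous precondition are assumed context.

/*
 * Hypothetical helper restating lines 4832-4834 (device-memory case of
 * vm_object_iopl_request); the precondition that the object is physically
 * contiguous is assumed from context.
 */
static void
sketch_mark_device_memory_upl(upl_t upl, vm_object_t object,
                              vm_object_offset_t offset, upl_size_t size)
{
        upl->flags |= UPL_DEVICE_MEMORY;
        upl->highest_page = (ppnum_t)((offset + object->shadow_offset + size - 1) >> PAGE_SHIFT);
}
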
5984 upl_t upl,
6000 upl_object = upl->map_object;
6001 upl_offset = upl->offset;
6002 upl_size = upl->size;
6013 * accounted for in upl->offset. It possibly shouldn't be
6080 __unused upl_t upl,
6105 return sizeof(struct upl);
6110 upl_t upl,
6114 upl->flags |= UPL_CLEAR_DIRTY;
6116 upl->flags &= ~UPL_CLEAR_DIRTY;
6123 boolean_t upl_device_page(upl_page_info_t *upl)
6125 return(UPL_DEVICE_PAGE(upl));
6127 boolean_t upl_page_present(upl_page_info_t *upl, int index)
6129 return(UPL_PAGE_PRESENT(upl, index));
6131 boolean_t upl_speculative_page(upl_page_info_t *upl, int index)
6133 return(UPL_SPECULATIVE_PAGE(upl, index));
6135 boolean_t upl_dirty_page(upl_page_info_t *upl, int index)
6137 return(UPL_DIRTY_PAGE(upl, index));
6139 boolean_t upl_valid_page(upl_page_info_t *upl, int index)
6141 return(UPL_VALID_PAGE(upl, index));
6143 ppnum_t upl_phys_page(upl_page_info_t *upl, int index)
6145 return(UPL_PHYS_PAGE(upl, index));
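
Lines 6123-6145 are thin accessors over a upl_page_info_t array (note that their first argument is the page-info array, not the upl itself). A short sketch of walking an embedded page list with them; the array pointer and page count are assumed to come from the creation path shown earlier.

/*
 * Walk a upl's page list using the accessors above.  "pl" is assumed to be
 * the upl_page_info_t array belonging to the upl (e.g. the embedded list of
 * an INTERNAL upl) and "page_count" its length.
 */
static void
sketch_scan_page_list(upl_page_info_t *pl, int page_count)
{
        int i;

        for (i = 0; i < page_count; i++) {
                if (!upl_page_present(pl, i))
                        continue;                       /* hole in the upl */
                if (upl_valid_page(pl, i) && upl_dirty_page(pl, i)) {
                        ppnum_t pn = upl_phys_page(pl, i);      /* physical page number */
                        (void)pn;                       /* e.g. hand it to an I/O routine */
                }
        }
}
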
6237 upl_t upl)
6239 return upl->highest_page;
6243 kern_return_t upl_ubc_alias_set(upl_t upl, unsigned int alias1, unsigned int alias2)
6245 upl->ubc_alias1 = alias1;
6246 upl->ubc_alias2 = alias2;
6249 int upl_ubc_alias_get(upl_t upl, unsigned int * al, unsigned int * al2)
6252 *al = upl->ubc_alias1;
6254 *al2 = upl->ubc_alias2;
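
Lines 6243-6254 round the interface out with the two alias words stored in the upl at creation time (lines 2467-2468); callers can use them to tag a upl for later identification. A trivial usage sketch; the tag values are arbitrary placeholders.

/*
 * Tag a upl with two caller-chosen words and read them back; the values
 * here are placeholders.
 */
static void
sketch_tag_upl(upl_t upl)
{
        unsigned int a1, a2;

        (void)upl_ubc_alias_set(upl, 0x1234, 0x5678);
        (void)upl_ubc_alias_get(upl, &a1, &a2);
}
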