Lines Matching defs:upl
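
The entries below index upl (universal page list) references in XNU's bsd/vfs/vfs_cluster.c,
the cluster I/O layer that hands file pages between the UBC and the pager. As orientation for
reading them, here is a minimal, hedged sketch of the lifecycle the entries keep repeating:
create a UPL over a page-aligned range of a vnode's ubc object, do I/O against it, then either
commit or abort the pages. The helper name and error handling are illustrative only; the KPIs
used (ubc_create_upl, ubc_upl_commit_range, ubc_upl_abort_range) are the same ones shown in the
listing.

    #include <sys/errno.h>
    #include <sys/ubc.h>
    #include <sys/vnode.h>

    /* hypothetical helper: 'size' is assumed to be page-aligned, as it always is
     * in vfs_cluster.c */
    static int
    example_upl_lifecycle(vnode_t vp, off_t f_offset, int size)
    {
            upl_t            upl;
            upl_page_info_t *pl;
            kern_return_t    kret;
            int              error = 0;

            /* create a UPL covering [f_offset, f_offset + size) of the vnode's pages */
            kret = ubc_create_upl(vp, f_offset, size, &upl, &pl, UPL_SET_LITE);
            if (kret != KERN_SUCCESS)
                    return EINVAL;
            (void)pl;       /* page list not inspected in this sketch */

            /* ... I/O against the UPL would be issued here (cluster_io in this file) ... */

            if (error == 0)
                    /* pages are valid: commit them and free the UPL once empty */
                    ubc_upl_commit_range(upl, 0, size, UPL_COMMIT_FREE_ON_EMPTY);
            else
                    /* give the pages back without committing them */
                    ubc_upl_abort_range(upl, 0, size, UPL_ABORT_FREE_ON_EMPTY);
            return error;
    }
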

85 #include <mach/upl.h>
162 static int cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size,
165 static int cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags);
172 static void cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int take_reference);
548 cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags)
558 ubc_upl_commit_range(upl, upl_offset, abort_size, UPL_COMMIT_FREE_ON_EMPTY);
581 ubc_upl_abort_range(upl, upl_offset, abort_size, upl_abort_code);
600 upl_t upl;
661 upl = cbp->b_upl;
706 cluster_zero(upl, zero_offset, PAGE_SIZE - (zero_offset & PAGE_MASK), real_bp);
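
A worked instance of the tail-zeroing arithmetic in the cluster_zero() call above (file line
706), assuming a 4 KB page and, purely for illustration, an EOF that falls at offset 10000
within the upl:

    zero_offset               = 10000
    zero_offset & PAGE_MASK   = 10000 & 4095 = 1808     offset of EOF within its page
    PAGE_SIZE - 1808          = 2288                    bytes to zero
    cluster_zero(upl, 10000, 2288, real_bp)             zeroes bytes 10000..12287,
                                                        i.e. through the end of that page
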
744 upl_flags = cluster_ioerror(upl, upl_offset - pg_offset, commit_size, error, b_flags);
754 ubc_upl_commit_range(upl, upl_offset - pg_offset, commit_size, upl_flags);
767 upl, upl_offset - pg_offset, commit_size, (error << 24) | upl_flags, 0);
785 cluster_zero(upl_t upl, upl_offset_t upl_offset, int size, buf_t bp)
795 pl = ubc_upl_pageinfo(upl);
911 cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size,
1123 if (upl_get_internal_vectorupl(upl))
1228 if (vnode_pageout(vp, upl, trunc_page(upl_offset), trunc_page_64(f_offset), PAGE_SIZE, pageout_flags, NULL) != PAGER_SUCCESS) {
1289 * out the affected area in the upl
1293 * if this upl contains the EOF and it is not a multiple of PAGE_SIZE
1298 * last page in this upl... we need to zero both the hole and the tail
1311 cluster_zero(upl, upl_offset, bytes_to_zero, real_bp);
1361 ubc_upl_commit_range(upl, commit_offset, pg_count * PAGE_SIZE,
1474 if (buf_setupl(cbp, upl, upl_offset))
1593 * Rewind the upl offset to the beginning of the
1642 upl_flags = cluster_ioerror(upl, upl_offset - pg_offset, abort_size, error, io_flags);
1645 upl, upl_offset - pg_offset, abort_size, (error << 24) | upl_flags, 0);
1796 cluster_pageout(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
1799 return cluster_pageout_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
1805 cluster_pageout_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
1831 * big the upl really is
1838 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
1850 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
1864 ubc_upl_abort_range(upl, upl_offset + rounded_size, size - rounded_size,
1867 return (cluster_io(vp, upl, upl_offset, f_offset, io_size,
1873 cluster_pagein(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
1876 return cluster_pagein_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
1881 cluster_pagein_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
1890 if (upl == NULL || size < 0)
1891 panic("cluster_pagein: NULL upl or negative size passed in");
1915 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
1928 ubc_upl_abort_range(upl, upl_offset + rounded_size,
1931 retval = cluster_io(vp, upl, upl_offset, f_offset, io_size,
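
cluster_pagein / cluster_pagein_ext (file lines 1873-1931 above) are the exported entry points
that wrap this machinery for a filesystem's pagein path. A minimal sketch of the usual caller,
assuming only the documented VNOP argument structure and the ubc_getsize() KPI; the filesystem
name is hypothetical:

    #include <sys/ubc.h>
    #include <sys/vnode.h>
    #include <sys/vnode_if.h>

    /* hypothetical vnop_pagein handler that forwards straight to cluster_pagein() */
    static int
    examplefs_vnop_pagein(struct vnop_pagein_args *ap)
    {
            off_t filesize = ubc_getsize(ap->a_vp);      /* current EOF known to the ubc */

            return cluster_pagein(ap->a_vp, ap->a_pl, ap->a_pl_offset, ap->a_f_offset,
                                  (int)ap->a_size, filesize, ap->a_flags);
    }
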
2117 upl_t upl;
2286 &upl,
2304 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2316 * needed... release this upl and try again
2318 ubc_upl_abort(upl, 0);
2347 ubc_upl_abort(upl, 0);
2393 ubc_upl_abort(upl, 0);
2402 retval = cluster_io(vp, upl, upl_offset, uio->uio_offset,
2412 vector_upl_set_subupl(vector_upl, upl, upl_size);
2413 vector_upl_set_iostate(vector_upl, upl, vector_upl_size, upl_size);
2512 upl_t upl[MAX_VECTS];
2563 &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, 0);
2584 pl = ubc_upl_pageinfo(upl[cur_upl]);
2651 error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset,
2698 ubc_upl_abort(upl[n], 0);
2715 cluster_zero_range(upl_t upl, upl_page_info_t *pl, int flags, int io_offset, off_t zero_off, off_t upl_f_offset, int bytes_to_zero)
2734 cluster_zero(upl, io_offset, bytes_to_zero, NULL);
2745 upl_t upl;
2919 * compute the size of the upl needed to encompass
2948 &upl,
2955 upl, (int)upl_f_offset, start_offset, 0, 0);
2961 * we're starting in the middle of the first page of the upl
2970 retval = cluster_io(vp, upl, 0, upl_f_offset, read_size,
2976 * to release the rest of the pages in the upl without modifying
2979 ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES|UPL_ABORT_FREE_ON_EMPTY);
2982 ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
2985 upl, 0, 0, retval, 0);
2991 * the last offset we're writing to in this upl does not end on a page
3006 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, read_size,
3012 * need to release the rest of the pages in the upl without
3015 ubc_upl_abort_range(upl, upl_offset, PAGE_SIZE, UPL_ABORT_DUMP_PAGES|UPL_ABORT_FREE_ON_EMPTY);
3018 ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
3021 upl, 0, 0, retval, 0);
3036 bytes_to_zero = cluster_zero_range(upl, pl, flags, io_offset, zero_off, upl_f_offset, bytes_to_zero);
3049 retval = cluster_copy_upl_data(uio, upl, io_offset, (int *)&io_requested);
3052 ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
3055 upl, 0, 0, retval, 0);
3069 bytes_to_zero = cluster_zero_range(upl, pl, flags, io_offset, zero_off1, upl_f_offset, bytes_to_zero);
3089 cluster_zero(upl, io_size, upl_size - io_size, NULL);
3092 * release the upl now if we hold one since...
3096 * this upl, a deadlock may result on page BUSY
3109 ubc_upl_commit_range(upl, 0, upl_size,
3366 * we've already dropped the current upl, so pick it back up with COPYOUT_FROM set
3470 cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int take_reference)
3479 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, range * PAGE_SIZE, abort_flags);
3488 upl_t upl;
3705 * compute the size of the upl needed to encompass
3730 upl, (int)upl_f_offset, upl_size, start_offset, 0);
3735 &upl,
3742 upl, (int)upl_f_offset, upl_size, start_offset, 0);
3745 * scan from the beginning of the upl looking for the first
3757 * page before the end of the upl is reached, if we
3788 error = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset,
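
The "scan from the beginning of the upl" comments above (file lines 3745 and 3757) describe how
the cached-read path narrows the I/O to the pages that are not already valid. A minimal sketch of
that scan, factored into a hypothetical helper; it assumes only the existing ubc_upl_pageinfo()
and upl_valid_page() KPIs:

    #include <mach/memory_object_types.h>
    #include <sys/ubc.h>

    /* hypothetical helper: find the run [*first, *last) of pages in 'upl' that are
     * not already valid and therefore still need to be read */
    static void
    example_find_invalid_run(upl_t upl, int pages_in_upl, int *first, int *last)
    {
            upl_page_info_t *pl = ubc_upl_pageinfo(upl);
            int pg;

            for (pg = 0; pg < pages_in_upl; pg++)
                    if (!upl_valid_page(pl, pg))
                            break;          /* first page that needs I/O */
            *first = pg;

            for (; pg < pages_in_upl; pg++)
                    if (upl_valid_page(pl, pg))
                            break;          /* a valid page ends the run */
            *last = pg;
    }
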
3808 * pages that were present in the upl when we acquired it.
3823 ubc_upl_abort_range(upl, uio_last * PAGE_SIZE,
3890 retval = cluster_copy_upl_data(uio, upl, start_offset, (int *)&io_requested);
3907 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START, upl, start_pg * PAGE_SIZE, io_size, error, 0);
3910 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, io_size,
3920 ubc_upl_commit_range(upl, start_pg * PAGE_SIZE, io_size, commit_flags);
3922 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, upl, start_pg * PAGE_SIZE, io_size, error, 0);
3927 * the entire upl... so just release these without modifying
3931 ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
3935 upl, -1, pages_in_upl - (last_pg - start_pg), 0, 0);
3939 * the upl... release these appropriately
3941 cluster_read_upl_release(upl, 0, start_pg, take_reference);
3947 cluster_read_upl_release(upl, last_pg, uio_last, take_reference);
3949 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, upl, -1, -1, 0, 0);
4013 upl_t upl;
4342 &upl_size, &upl, NULL, &pages_in_pl, &upl_flags);
4357 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
4366 ubc_upl_abort(upl, 0);
4384 ubc_upl_abort(upl, 0);
4419 ubc_upl_abort(upl, 0);
4424 upl, (int)upl_offset, (int)uio->uio_offset, io_size, 0);
4433 retval = cluster_io(vp, upl, upl_offset, uio->uio_offset, io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
4443 vector_upl_set_subupl(vector_upl, upl, upl_size);
4444 vector_upl_set_iostate(vector_upl, upl, vector_upl_size, upl_size);
4477 upl, (int)uio->uio_offset, io_req_size, retval, 0);
4536 upl_t upl[MAX_VECTS];
4606 &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, 0);
4627 pl = ubc_upl_pageinfo(upl[cur_upl]);
4692 error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset, xsize,
4739 ubc_upl_abort(upl[n], 0);
4750 upl_t upl;
4780 &upl_size, &upl, NULL, NULL, &upl_flags, 0)) != KERN_SUCCESS) {
4824 upl_t upl;
4864 * compute the size of the upl needed to encompass
4914 upl, (int)upl_f_offset, upl_size, start_offset, 0);
4919 &upl,
4929 * upl
4939 upl, (int)upl_f_offset, upl_size, start_offset, 0);
4944 * scan from the beginning of the upl looking for the first
4956 * page before the end of the upl is reached, if we
4981 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
4988 ubc_upl_abort(upl, 0);
5337 upl_t upl;
5410 &upl,
5416 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END, upl, upl_f_offset, 0, 0, 0);
5423 * end up working with a freed upl due to the FREE_ON_EMPTY semantics
5433 ubc_upl_abort(upl, 0);
5454 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
5489 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
5601 upl_t upl;
5638 &upl,
5649 error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE,
5652 ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
5677 error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE,
5688 ubc_upl_abort_range(upl, 0, PAGE_SIZE, abort_flags);
5696 cluster_copy_upl_data(struct uio *uio, upl_t upl, int upl_offset, int *io_resid)
5735 pl = ubc_upl_pageinfo(upl);
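
cluster_copy_upl_data() (file line 5696 above) is the final step of the cached read/write paths:
it moves bytes between the caller's uio and the pages held in the upl. From a filesystem's point
of view all of this is reached through the exported cluster KPIs; a minimal sketch of the usual
consumer, with the filesystem name hypothetical and ubc_getsize() standing in for however the
filesystem tracks EOF:

    #include <sys/ubc.h>
    #include <sys/vnode.h>
    #include <sys/vnode_if.h>

    /* hypothetical vnop_read handler: cluster_read() drives the upl machinery
     * indexed above on the filesystem's behalf */
    static int
    examplefs_vnop_read(struct vnop_read_args *ap)
    {
            off_t filesize = ubc_getsize(ap->a_vp);      /* current EOF known to the ubc */

            return cluster_read(ap->a_vp, ap->a_uio, filesize, ap->a_ioflag);
    }
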