Lines Matching refs:upl (only in /macosx-10.5.8/xnu-1228.15.4/bsd/vfs/)

84 #include <mach/upl.h>
137 static int cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size,
140 static int cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags);
145 static void cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int flags);
435 cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags)
445 ubc_upl_commit_range(upl, upl_offset, abort_size, UPL_COMMIT_FREE_ON_EMPTY);
468 ubc_upl_abort_range(upl, upl_offset, abort_size, upl_abort_code);
487 upl_t upl;
527 upl = cbp->b_upl;
572 cluster_zero(upl, zero_offset, PAGE_SIZE - (zero_offset & PAGE_MASK), real_bp);
610 upl_flags = cluster_ioerror(upl, upl_offset - pg_offset, commit_size, error, b_flags);
620 ubc_upl_commit_range(upl, upl_offset - pg_offset, commit_size, upl_flags);
633 (int)upl, upl_offset - pg_offset, commit_size, (error << 24) | upl_flags, 0);
640 cluster_zero(upl_t upl, vm_offset_t upl_offset, int size, buf_t bp)
650 pl = ubc_upl_pageinfo(upl);
749 cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size,
1049 if (vnode_pageout(vp, upl, trunc_page(upl_offset), trunc_page_64(f_offset), PAGE_SIZE, pageout_flags, NULL) != PAGER_SUCCESS) {
1106 * out the affected area in the upl
1110 * if this upl contains the EOF and it is not a multiple of PAGE_SIZE
1115 * last page in this upl... we need to zero both the hole and the tail
1128 cluster_zero(upl, upl_offset, bytes_to_zero, real_bp);
1178 ubc_upl_commit_range(upl, commit_offset, pg_count * PAGE_SIZE,
1289 if (buf_setupl(cbp, upl, upl_offset))
1402 * Rewind the upl offset to the beginning of the
1451 upl_flags = cluster_ioerror(upl, upl_offset - pg_offset, abort_size, error, io_flags);
1454 (int)upl, upl_offset - pg_offset, abort_size, (error << 24) | upl_flags, 0);
1579 cluster_pageout(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset,
1582 return cluster_pageout_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
1588 cluster_pageout_ext(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset,
1625 * big the upl really is
1632 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
1644 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
1658 ubc_upl_abort_range(upl, upl_offset + rounded_size, size - rounded_size,
1661 return (cluster_io(vp, upl, upl_offset, f_offset, io_size,
1667 cluster_pagein(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset,
1670 return cluster_pagein_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
1675 cluster_pagein_ext(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset,
1684 if (upl == NULL || size < 0)
1685 panic("cluster_pagein: NULL upl passed in");
1707 ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
1720 ubc_upl_abort_range(upl, upl_offset + rounded_size,
1723 retval = cluster_io(vp, upl, upl_offset, f_offset, io_size,
1896 upl_t upl;
2007 &upl,
2025 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2037 * needed... release this upl and try again
2039 ubc_upl_abort(upl, 0);
2068 ubc_upl_abort(upl, 0);
2114 ubc_upl_abort(upl, 0);
2123 retval = cluster_io(vp, upl, upl_offset, uio->uio_offset,
2200 upl_t upl[MAX_VECTS];
2249 &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, 0);
2270 pl = ubc_upl_pageinfo(upl[cur_upl]);
2350 error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset,
2406 ubc_upl_abort(upl[n], 0);
2417 upl_t upl;
2557 * compute the size of the upl needed to encompass
2586 &upl,
2593 (int)upl, (int)upl_f_offset, start_offset, 0, 0);
2599 * we're starting in the middle of the first page of the upl
2608 retval = cluster_io(vp, upl, 0, upl_f_offset, read_size,
2614 * to release the rest of the pages in the upl without modifying
2617 ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES|UPL_ABORT_FREE_ON_EMPTY);
2620 ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
2623 (int)upl, 0, 0, retval, 0);
2629 * the last offset we're writing to in this upl does not end on a page
2644 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, read_size,
2650 * need to release the rest of the pages in the upl without
2653 ubc_upl_abort_range(upl, upl_offset, PAGE_SIZE, UPL_ABORT_DUMP_PAGES|UPL_ABORT_FREE_ON_EMPTY);
2656 ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
2659 (int)upl, 0, 0, retval, 0);
2675 cluster_zero(upl, io_offset, bytes_to_zero, NULL);
2683 cluster_zero(upl, io_offset, bytes_to_zero, NULL);
2687 cluster_zero(upl, io_offset, bytes_to_zero, NULL);
2701 retval = cluster_copy_upl_data(uio, upl, io_offset, (int *)&io_requested);
2705 ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
2708 (int)upl, 0, 0, retval, 0);
2723 cluster_zero(upl, io_offset, bytes_to_zero, NULL);
2731 cluster_zero(upl, io_offset, bytes_to_zero, NULL);
2734 cluster_zero(upl, io_offset, bytes_to_zero, NULL);
2756 cluster_zero(upl, io_size, upl_size - io_size, NULL);
2759 * release the upl now if we hold one since...
2763 * this upl, a deadlock may result on page BUSY
2776 ubc_upl_commit_range(upl, 0, upl_size,
3022 * we've already dropped the current upl, so pick it back up with COPYOUT_FROM set
3109 cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int flags)
3118 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, range * PAGE_SIZE, abort_flags);
3127 upl_t upl;
3219 * compute the size of the upl needed to encompass
3332 (int)upl, (int)upl_f_offset, upl_size, start_offset, 0);
3337 &upl,
3344 (int)upl, (int)upl_f_offset, upl_size, start_offset, 0);
3347 * scan from the beginning of the upl looking for the first
3359 * page before the end of the upl is reached, if we
3389 error = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset,
3397 * pages that were present in the upl when we acquired it.
3412 ubc_upl_abort_range(upl, uio_last * PAGE_SIZE,
3490 retval = cluster_copy_upl_data(uio, upl, start_offset, (int *)&io_requested);
3504 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START, (int)upl, start_pg * PAGE_SIZE, io_size, error, 0);
3507 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, io_size,
3510 ubc_upl_commit_range(upl, start_pg * PAGE_SIZE, io_size,
3513 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, (int)upl, start_pg * PAGE_SIZE, io_size, error, 0);
3518 * the entire upl... so just release these without modifying
3522 ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY);
3526 (int)upl, -1, pages_in_upl - (last_pg - start_pg), 0, 0);
3530 * the upl... release these appropriately
3532 cluster_read_upl_release(upl, 0, start_pg, flags);
3538 cluster_read_upl_release(upl, last_pg, uio_last, flags);
3540 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, (int)upl, -1, -1, 0, 0);
3585 upl_t upl;
3805 &upl_size, &upl, NULL, &pages_in_pl, &upl_flags);
3820 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
3829 ubc_upl_abort(upl, abort_flag);
3847 ubc_upl_abort(upl, abort_flag);
3881 ubc_upl_abort(upl, abort_flag);
3886 (int)upl, (int)upl_offset, (int)uio->uio_offset, io_size, 0);
3893 retval = cluster_io(vp, upl, upl_offset, uio->uio_offset, io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
3903 (int)upl, (int)uio->uio_offset, io_req_size, retval, 0);
3965 upl_t upl[MAX_VECTS];
4030 &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, 0);
4051 pl = ubc_upl_pageinfo(upl[cur_upl]);
4128 error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset, xsize,
4184 ubc_upl_abort(upl[n], 0);
4195 upl_t upl;
4225 &upl_size, &upl, NULL, NULL, &upl_flags, 0)) != KERN_SUCCESS) {
4269 upl_t upl;
4299 * compute the size of the upl needed to encompass
4349 (int)upl, (int)upl_f_offset, upl_size, start_offset, 0);
4354 &upl,
4364 * upl
4374 (int)upl, (int)upl_f_offset, upl_size, start_offset, 0);
4379 * scan from the beginning of the upl looking for the first
4391 * page before the end of the upl is reached, if we
4416 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
4423 ubc_upl_abort(upl, 0);
4705 upl_t upl;
4778 &upl,
4784 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END, (int)upl, upl_f_offset, 0, 0, 0);
4791 * end up working with a freed upl due to the FREE_ON_EMPTY semantics
4801 ubc_upl_abort(upl, 0);
4822 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
4851 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
4970 upl_t upl;
5004 &upl,
5015 error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE,
5018 ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY);
5043 error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE,
5054 ubc_upl_abort_range(upl, 0, PAGE_SIZE, abort_flags);
5062 cluster_copy_upl_data(struct uio *uio, upl_t upl, int upl_offset, int *io_resid)
5108 pl = ubc_upl_pageinfo(upl);
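
The entries for cluster_pagein and cluster_pagein_ext above (lines 1667-1723) show the UPL being handed to cluster_io() and aborted with ubc_upl_abort_range() on error or for pages past the rounded size. As a point of reference, below is a minimal sketch, not taken from this file, of how a filesystem's vnop_pagein handler typically passes its UPL down to cluster_pagein(); the myfs_* names and the myfs_filesize() EOF accessor are hypothetical.

    /*
     * Minimal sketch (assumptions, not from vfs_cluster code): a filesystem
     * vnop_pagein handler forwarding its UPL to cluster_pagein().
     * myfs_vnop_pagein and myfs_filesize() are hypothetical names.
     */
    #include <sys/param.h>
    #include <sys/vnode.h>
    #include <sys/vnode_if.h>
    #include <sys/ubc.h>

    extern off_t myfs_filesize(vnode_t vp);  /* hypothetical: returns the file's current EOF */

    static int
    myfs_vnop_pagein(struct vnop_pagein_args *ap)
    {
            vnode_t vp = ap->a_vp;

            /*
             * cluster_pagein() (lines 1667-1723 above) trims the request to the
             * file's EOF, aborts any pages past the rounded size back to the VM
             * with ubc_upl_abort_range(), and issues the read through cluster_io();
             * on error the affected range is aborted rather than committed.
             */
            return cluster_pagein(vp, ap->a_pl, ap->a_pl_offset, ap->a_f_offset,
                ap->a_size, myfs_filesize(vp), ap->a_flags);
    }
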