Lines matching defs:vp (references to the vnode_t vp parameter throughout the cluster I/O routines)

163 static int cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size,
166 static int cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags, vnode_t vp);
167 static int cluster_is_throttled(vnode_t vp);
171 static void cluster_syncup(vnode_t vp, off_t newEOF, int (*)(buf_t, void *), void *callback_arg, int flags);
174 static int cluster_copy_ubc_data_internal(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty, int take_reference);
176 static int cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t filesize, int flags,
178 static int cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
180 static int cluster_read_contig(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
183 static int cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t oldEOF, off_t newEOF,
185 static int cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF,
187 static int cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF,
190 static int cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, u_int32_t xsize, int flags, int (*)(buf_t, void *), void *callback_arg);
192 static int cluster_read_prefetch(vnode_t vp, off_t f_offset, u_int size, off_t filesize, int (*callback)(buf_t, void *), void *callback_arg, int bflag);
193 static void cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *ra, int (*callback)(buf_t, void *), void *callback_arg, int bflag);
195 static int cluster_push_now(vnode_t vp, struct cl_extent *, off_t EOF, int flags, int (*)(buf_t, void *), void *callback_arg);
197 static int cluster_try_push(struct cl_writebehind *, vnode_t vp, off_t EOF, int push_flag, int flags, int (*)(buf_t, void *), void *callback_arg);
199 static void sparse_cluster_switch(struct cl_writebehind *, vnode_t vp, off_t EOF, int (*)(buf_t, void *), void *callback_arg);
200 static void sparse_cluster_push(void **cmapp, vnode_t vp, off_t EOF, int push_flag, int io_flags, int (*)(buf_t, void *), void *callback_arg);
201 static void sparse_cluster_add(void **cmapp, vnode_t vp, struct cl_extent *, off_t EOF, int (*)(buf_t, void *), void *callback_arg);
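Most of these prototypes thread the same trailing pair of arguments: an optional completion hook of type int (*)(buf_t, void *) and an opaque callback_arg that is handed back to it. A minimal sketch of such a hook, assuming a caller that only wants to log failures; the function name and body are hypothetical, only the signature is taken from the prototypes above:

    #include <sys/buf.h>
    #include <sys/systm.h>

    /* Hypothetical completion hook matching int (*)(buf_t, void *).
     * The cluster layer calls it as the I/O it built completes; callback_arg is
     * the opaque pointer that was passed to the originating cluster_* call. */
    static int
    my_cluster_iodone(buf_t bp, void *callback_arg)
    {
            int error = buf_error(bp);              /* errno of the completed I/O */

            (void)callback_arg;                     /* unused in this sketch */
            if (error)
                    printf("cluster I/O failed: %d\n", error);
            return (0);
    }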
252 #define IO_SCALE(vp, base) (vp->v_mount->mnt_ioscale * (base))
253 #define MAX_CLUSTER_SIZE(vp) (cluster_max_io_size(vp->v_mount, CL_WRITE))
254 #define MAX_PREFETCH(vp, size, is_ssd) (size * IO_SCALE(vp, ((is_ssd && !ignore_is_ssd) ? PREFETCH_SSD : PREFETCH)))
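Expanding these three macros shows the scaling chain: IO_SCALE multiplies a base value by the mount's mnt_ioscale, MAX_CLUSTER_SIZE is the per-mount write I/O ceiling, and MAX_PREFETCH applies IO_SCALE to a read-ahead multiplier chosen by SSD-ness. A worked expansion, with the concrete numbers being illustrative assumptions rather than values from the source:

    /* MAX_PREFETCH(vp, size, is_ssd)
     *   == size * IO_SCALE(vp, (is_ssd && !ignore_is_ssd) ? PREFETCH_SSD : PREFETCH)
     *   == size * vp->v_mount->mnt_ioscale * (PREFETCH or PREFETCH_SSD)
     *
     * Assuming size == 1 MB, mnt_ioscale == 1, and a rotational disk whose
     * multiplier PREFETCH is, say, 3:
     *   MAX_PREFETCH(vp, 1048576, 0) == 1048576 * 1 * 3 == a 3 MB read-ahead window. */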
363 cluster_get_rap(vnode_t vp)
368 ubc = vp->v_ubcinfo;
377 vnode_lock(vp);
386 vnode_unlock(vp);
409 cluster_get_wbp(vnode_t vp, int flags)
414 ubc = vp->v_ubcinfo;
426 vnode_lock(vp);
435 vnode_unlock(vp);
445 cluster_syncup(vnode_t vp, off_t newEOF, int (*callback)(buf_t, void *), void *callback_arg, int flags)
449 if ((wbp = cluster_get_wbp(vp, 0)) != NULL) {
454 cluster_try_push(wbp, vp, newEOF, PUSH_ALL | flags, 0, callback, callback_arg);
463 cluster_io_present_in_BC(vnode_t vp, off_t f_offset)
470 if (VNOP_BLOCKMAP(vp, f_offset, PAGE_SIZE, &blkno, &io_size, NULL, VNODE_READ, NULL))
476 if (bootcache_check_fn(vp->v_mount->mnt_devvp->v_rdev, blkno))
484 cluster_is_throttled(vnode_t vp)
486 return (throttle_io_will_be_throttled(-1, vp->v_mount));
512 cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags, vnode_t vp)
535 else if (page_out && ((error != ENXIO) || vnode_isswap(vp)))
569 vnode_t vp;
625 vp = cbp->b_vp;
710 upl_flags = cluster_ioerror(upl, upl_offset - pg_offset, commit_size, error, b_flags, vp);
740 cluster_throttle_io_limit(vnode_t vp, uint32_t *limit)
742 if (cluster_is_throttled(vp)) {
884 cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size,
918 mp = vp->v_mount;
990 if ( !(flags & CL_PAGEOUT) && cluster_is_throttled(vp)) {
996 async_throttle = IO_SCALE(vp, VNODE_ASYNC_THROTTLE);
1002 max_cluster_size = MAX_CLUSTER_SIZE(vp);
1012 if ((vp->v_mount->mnt_kern_flag & MNTK_SSD) && !ignore_is_ssd)
1020 async_throttle = min(IO_SCALE(vp, VNODE_ASYNC_THROTTLE), ((scale * max_cluster_size) / max_cluster) - 1);
1040 if (vp->v_flag & VSYSTEM)
1067 if ((error = VNOP_BLOCKMAP(vp, f_offset, io_size, &blkno, &io_size_tmp, NULL, bmap_flags, NULL)))
1201 if (vnode_pageout(vp, upl, trunc_page(upl_offset), trunc_page_64(f_offset), PAGE_SIZE, pageout_flags, NULL) != PAGER_SUCCESS) {
1424 cbp = alloc_io_buf(vp, priv);
1430 if (buf_invalblkno(vp, lblkno + i, 0) == EBUSY)
1531 (void)vnode_waitforwrites(vp, async_throttle, 0, 0, "cluster_io");
1534 vnode_startwrite(vp);
1617 upl_flags = cluster_ioerror(upl, upl_offset - pg_offset, abort_size, error, io_flags, vp);
1648 vector_cluster_io(vnode_t vp, upl_t vector_upl, vm_offset_t vector_upl_offset, off_t v_upl_uio_offset, int vector_upl_iosize,
1659 return (cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, real_bp, iostate, callback, callback_arg));
1664 cluster_read_prefetch(vnode_t vp, off_t f_offset, u_int size, off_t filesize, int (*callback)(buf_t, void *), void *callback_arg, int bflag)
1680 advisory_read_ext(vp, filesize, f_offset, size, callback, callback_arg, bflag);
1691 cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *rap, int (*callback)(buf_t, void *), void *callback_arg,
1717 max_prefetch = MAX_PREFETCH(vp, cluster_max_io_size(vp->v_mount, CL_READ), (vp->v_mount->mnt_kern_flag & MNTK_SSD));
1740 ubc_range_op(vp, f_offset, f_offset + PAGE_SIZE_64, UPL_ROP_PRESENT, &size_of_prefetch);
1760 size_of_prefetch = cluster_read_prefetch(vp, f_offset, rap->cl_ralen * PAGE_SIZE, filesize, callback, callback_arg, bflag);
1771 cluster_pageout(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
1774 return cluster_pageout_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
1780 cluster_pageout_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
1811 if (vp->v_mount->mnt_flag & MNT_RDONLY) {
1842 return (cluster_io(vp, upl, upl_offset, f_offset, io_size,
1848 cluster_pagein(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
1851 return cluster_pagein_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
1856 cluster_pagein_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset,
1906 retval = cluster_io(vp, upl, upl_offset, f_offset, io_size,
1944 cluster_write(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t headOff, off_t tailOff, int xflags)
1946 return cluster_write_ext(vp, uio, oldEOF, newEOF, headOff, tailOff, xflags, NULL, NULL);
1951 cluster_write_ext(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t headOff, off_t tailOff,
1969 if (vp->v_flag & VNOCACHE_DATA){
1978 retval = cluster_write_copy(vp, NULL, (u_int32_t)0, oldEOF, newEOF, headOff, tailOff, flags, callback, callback_arg);
2029 retval = cluster_write_copy(vp, uio, write_length, oldEOF, newEOF, headOff, tailOff, zflags, callback, callback_arg);
2041 retval = cluster_write_copy(vp, (struct uio *)0, (u_int32_t)0, (off_t)0, uio->uio_offset,
2046 retval = cluster_write_contig(vp, uio, newEOF, &write_type, &write_length, callback, callback_arg, bflag);
2055 retval = cluster_write_copy(vp, (struct uio *)0, (u_int32_t)0, (off_t)0, tailOff, uio->uio_offset,
2064 retval = cluster_write_direct(vp, uio, oldEOF, newEOF, &write_type, &write_length, flags, callback, callback_arg);
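cluster_write (lines 1944-1946) is a thin wrapper around cluster_write_ext with no callback, and cluster_write_ext then dispatches to the copy, contig, or direct variants shown above. A hedged sketch of how a filesystem's buffered write path might drive it; myfs_write, its locking, and its EOF bookkeeping are hypothetical simplifications, the cluster_write signature is from line 1944, and ubc_setsize is a UBC KPI assumed here for the EOF update:

    #include <sys/vnode.h>
    #include <sys/uio.h>
    #include <sys/ubc.h>

    /* Hypothetical buffered-write path built on the cluster layer.
     * oldEOF/newEOF bracket any growth of the file; headOff/tailOff of 0
     * mean no zero-fill is requested around the written range. */
    static int
    myfs_write(vnode_t vp, struct uio *uio, off_t old_size, int ioflag)
    {
            off_t   new_size = uio_offset(uio) + uio_resid(uio);
            int     error;

            if (new_size < old_size)
                    new_size = old_size;            /* write does not extend the file */

            error = cluster_write(vp, uio, old_size, new_size,
                                  (off_t)0, (off_t)0, ioflag);
            if (error == 0 && new_size > old_size)
                    ubc_setsize(vp, new_size);      /* publish the new EOF to the UBC */
            return (error);
    }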
2089 cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, int *write_type, u_int32_t *write_length,
2133 max_upl_size = cluster_max_io_size(vp->v_mount, CL_WRITE);
2153 mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
2154 devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
2196 if ( (throttle_type = cluster_is_throttled(vp)) ) {
2210 throttle_info_update_by_mount(vp->v_mount);
2223 cluster_syncup(vp, newEOF, callback, callback_arg, callback ? PUSH_SYNC : 0);
2239 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
2352 ubc_range_op(vp, uio->uio_offset, uio->uio_offset + io_size, UPL_ROP_DUMP, NULL);
2360 cluster_iostate_wait(&iostate, max_upl_size * IO_SCALE(vp, 2), "cluster_write_direct");
2379 retval = cluster_io(vp, upl, upl_offset, uio->uio_offset,
2396 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
2442 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
2470 retval = cluster_write_copy(vp, uio, io_req_size, oldEOF, newEOF, (off_t)0, (off_t)0, flags, callback, callback_arg);
2482 cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF, int *write_type, u_int32_t *write_length,
2511 cluster_syncup(vp, newEOF, callback, callback_arg, callback ? PUSH_SYNC : 0);
2513 devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
2514 mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
2571 error = cluster_align_phys_io(vp, uio, src_paddr, head_size, 0, callback, callback_arg);
2611 cluster_iostate_wait(&iostate, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), "cluster_write_contig");
2625 error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset,
2664 error = cluster_align_phys_io(vp, uio, src_paddr, tail_size, 0, callback, callback_arg);
2714 cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t oldEOF, off_t newEOF, off_t headOff,
2772 max_cluster_pgcount = MAX_CLUSTER_SIZE(vp) / PAGE_SIZE;
2773 max_io_size = cluster_max_io_size(vp->v_mount, CL_WRITE);
2867 retval = cluster_copy_ubc_data_internal(vp, uio, &xfer_resid, 1, 1);
2921 kret = ubc_create_upl(vp,
2946 retval = cluster_io(vp, upl, 0, upl_f_offset, read_size,
2982 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, read_size,
3106 wbp = cluster_get_wbp(vp, CLW_ALLOCATE | CLW_RETURNLOCKED);
3115 sparse_cluster_add(&(wbp->cl_scmap), vp, &cl, newEOF, callback, callback_arg);
3130 sparse_cluster_push(&(wbp->cl_scmap), vp, newEOF, PUSH_ALL, 0, callback, callback_arg);
3264 if (!((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE) &&
3269 if (vp->v_mount->mnt_kern_flag & MNTK_SSD)
3275 cluster_try_push(wbp, vp, newEOF, 0, 0, callback, callback_arg);
3300 if (!((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE)) {
3302 ret_cluster_try_push = cluster_try_push(wbp, vp, newEOF, (flags & IO_NOCACHE) ? 0 : PUSH_DELAY, 0, callback, callback_arg);
3314 sparse_cluster_switch(wbp, vp, newEOF, callback, callback_arg);
3315 sparse_cluster_add(&(wbp->cl_scmap), vp, &cl, newEOF, callback, callback_arg);
3348 retval = cluster_push_now(vp, &cl, newEOF, flags, callback, callback_arg);
3359 cluster_read(vnode_t vp, struct uio *uio, off_t filesize, int xflags)
3361 return cluster_read_ext(vp, uio, filesize, xflags, NULL, NULL);
3366 cluster_read_ext(vnode_t vp, struct uio *uio, off_t filesize, int xflags, int (*callback)(buf_t, void *), void *callback_arg)
3377 if (vp->v_flag & VNOCACHE_DATA)
3379 if ((vp->v_flag & VRAOFF) || speculative_reads_disabled)
3427 retval = cluster_read_copy(vp, uio, io_size, filesize, flags, callback, callback_arg);
3431 retval = cluster_read_direct(vp, uio, filesize, &read_type, &read_length, flags, callback, callback_arg);
3435 retval = cluster_read_contig(vp, uio, filesize, &read_type, &read_length, callback, callback_arg, flags);
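Symmetrically, cluster_read (lines 3359-3361) wraps cluster_read_ext, which selects the copy, direct, or contig path based on the uio and the vnode's cache flags (VNOCACHE_DATA, VRAOFF). A hedged sketch of a read path built on it; myfs_read is hypothetical and locking is elided, with the filesize argument taken from ubc_getsize as the cluster_push paths below also do:

    #include <sys/vnode.h>
    #include <sys/ubc.h>

    /* Hypothetical buffered-read path: the cluster layer needs only the uio
     * and the current file size, which it uses to clip the transfer at EOF
     * and to bound read-ahead. */
    static int
    myfs_read(vnode_t vp, struct uio *uio, int ioflag)
    {
            off_t filesize = ubc_getsize(vp);       /* EOF as known to the UBC */

            return cluster_read(vp, uio, filesize, ioflag);
    }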
3464 cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
3521 max_io_size = cluster_max_io_size(vp->v_mount, CL_READ);
3522 max_prefetch = MAX_PREFETCH(vp, max_io_size, (vp->v_mount->mnt_kern_flag & MNTK_SSD));
3534 if (cluster_is_throttled(vp)) {
3545 if ((rap = cluster_get_rap(vp)) == NULL)
3603 size_of_prefetch = cluster_read_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, callback, callback_arg, bflag);
3623 retval = cluster_copy_ubc_data_internal(vp, uio, (int *)&io_resid, 0, take_reference);
3643 cluster_read_ahead(vp, &extent, filesize, rap, callback, callback_arg, bflag);
3669 if (cluster_is_throttled(vp) == THROTTLE_NOW) {
3670 if ( !cluster_io_present_in_BC(vp, uio->uio_offset)) {
3678 throttle_info_update_by_mount(vp->v_mount);
3719 kret = ubc_create_upl(vp,
3775 error = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset,
3843 size_of_prefetch = cluster_read_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, callback, callback_arg, bflag);
3859 cluster_read_ahead(vp, &extent, filesize, rap, callback, callback_arg, bflag);
3943 if (cluster_is_throttled(vp)) {
3995 cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
4043 max_upl_size = cluster_max_io_size(vp->v_mount, CL_READ);
4046 max_rd_ahead = max_rd_size * IO_SCALE(vp, 2);
4071 devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
4072 mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
4091 strict_uncached_IO = ubc_strict_uncached_IO(vp);
4141 if (cluster_is_throttled(vp)) {
4152 max_rd_ahead = max_rd_size * IO_SCALE(vp, 2);
4168 retval = cluster_copy_ubc_data_internal(vp, uio, (int *)&io_size, 0, 0);
4186 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
4281 ubc_range_op(vp, uio->uio_offset, uio->uio_offset + xsize, UPL_ROP_ABSENT, (int *)&io_size);
4293 if (cluster_is_throttled(vp) == THROTTLE_NOW) {
4294 if ( !cluster_io_present_in_BC(vp, uio->uio_offset)) {
4302 throttle_info_update_by_mount(vp->v_mount);
4427 retval = cluster_io(vp, upl, upl_offset, uio->uio_offset, io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
4444 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
4493 retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
4524 retval = cluster_read_copy(vp, uio, io_req_size, filesize, flags, callback, callback_arg);
4536 cluster_read_contig(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length,
4575 cluster_syncup(vp, filesize, callback, callback_arg, PUSH_SYNC);
4577 devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize;
4578 mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask;
4643 error = cluster_align_phys_io(vp, uio, dst_paddr, head_size, CL_READ, callback, callback_arg);
4684 cluster_iostate_wait(&iostate, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), "cluster_read_contig");
4695 error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset, xsize,
4734 error = cluster_align_phys_io(vp, uio, dst_paddr, tail_size, CL_READ, callback, callback_arg);
4817 advisory_read(vnode_t vp, off_t filesize, off_t f_offset, int resid)
4819 return advisory_read_ext(vp, filesize, f_offset, resid, NULL, NULL, CL_PASSIVE);
4823 advisory_read_ext(vnode_t vp, off_t filesize, off_t f_offset, int resid, int (*callback)(buf_t, void *), void *callback_arg, int bflag)
4843 if ( !UBCINFOEXISTS(vp))
4849 max_io_size = cluster_max_io_size(vp->v_mount, CL_READ);
4851 if ((vp->v_mount->mnt_kern_flag & MNTK_SSD) && !ignore_is_ssd) {
4886 ubc_range_op(vp, upl_f_offset, upl_f_offset + upl_size, UPL_ROP_PRESENT, &skip_range);
4913 kret = ubc_create_upl(vp,
4978 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
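advisory_read (line 4817) is the read-ahead-only entry point: it pulls a byte range into the UBC without copying anything to a uio, and the UPL_ROP_PRESENT probe at line 4886 lets it skip ranges that are already resident. A hedged usage sketch; myfs_prime_cache and its trigger are hypothetical, the advisory_read signature is from line 4817:

    /* Prime the cache for a range we expect to need soon, for example the
     * next extent of a file being scanned sequentially. Best effort only. */
    static void
    myfs_prime_cache(vnode_t vp, off_t offset, int length)
    {
            off_t filesize = ubc_getsize(vp);

            (void)advisory_read(vp, filesize, offset, length);
    }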
5003 cluster_push(vnode_t vp, int flags)
5005 return cluster_push_ext(vp, flags, NULL, NULL);
5010 cluster_push_ext(vnode_t vp, int flags, int (*callback)(buf_t, void *), void *callback_arg)
5016 if ( !UBCINFOEXISTS(vp)) {
5017 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, vp, flags, 0, -1, 0);
5021 if (((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE) && (flags & IO_DEFWRITE)) {
5024 if ((wbp = cluster_get_wbp(vp, CLW_RETURNLOCKED)) == NULL) {
5025 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, vp, flags, 0, -2, 0);
5031 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, vp, flags, 0, -3, 0);
5045 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_START, vp, 0, 0, 0, 0);
5049 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_END, vp, 0, 0, 0, 0);
5062 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 98)) | DBG_FUNC_START, vp, 0, 0, 0, 0);
5066 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 98)) | DBG_FUNC_END, vp, 0, 0, 0, 0);
5081 sparse_cluster_push(&scmap, vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg);
5090 sparse_cluster_push(&(wbp->cl_scmap), vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg);
5094 retval = cluster_try_push(wbp, vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg);
5099 (void)vnode_waitforwrites(vp, 0, 0, 0, "cluster_push");
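cluster_push (line 5003) is what a filesystem calls from its fsync path to flush whatever delayed-write or sparse clusters the write-behind state is still holding; with IO_SYNC it also waits for the writes via the vnode_waitforwrites call at line 5099. A hedged sketch; myfs_fsync and the surrounding fsync logic are hypothetical:

    /* Hypothetical fsync path: push any dirty clusters still held for this
     * vnode and wait for the resulting I/O to drain. */
    static int
    myfs_fsync(vnode_t vp)
    {
            if (UBCINFOEXISTS(vp))
                    (void)cluster_push(vp, IO_SYNC);
            return (0);
    }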
5155 cluster_try_push(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int push_flag, int io_flags, int (*callback)(buf_t, void *), void *callback_arg)
5166 max_cluster_pgcount = MAX_CLUSTER_SIZE(vp) / PAGE_SIZE;
5255 cluster_push_now(vp, &cl, EOF, flags, callback, callback_arg);
5281 sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg);
5301 sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg);
5331 cluster_push_now(vnode_t vp, struct cl_extent *cl, off_t EOF, int flags, int (*callback)(buf_t, void *), void *callback_arg)
5402 if ((vp->v_flag & VNOCACHE_DATA) || (flags & IO_NOCACHE))
5407 kret = ubc_create_upl(vp,
5489 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
5507 sparse_cluster_switch(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int (*callback)(buf_t, void *), void *callback_arg)
5511 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_START, vp, wbp->cl_scmap, 0, 0, 0);
5519 if (ubc_page_op(vp, (off_t)(cl.b_addr * PAGE_SIZE_64), 0, NULL, &flags) == KERN_SUCCESS) {
5523 sparse_cluster_add(&(wbp->cl_scmap), vp, &cl, EOF, callback, callback_arg);
5530 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_END, vp, wbp->cl_scmap, 0, 0, 0);
5540 sparse_cluster_push(void **scmap, vnode_t vp, off_t EOF, int push_flag, int io_flags, int (*callback)(buf_t, void *), void *callback_arg)
5546 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_START, vp, (*scmap), 0, push_flag, 0);
5558 cluster_push_now(vp, &cl, EOF, io_flags & (IO_PASSIVE|IO_CLOSE), callback, callback_arg);
5563 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_END, vp, (*scmap), 0, 0, 0);
5571 sparse_cluster_add(void **scmap, vnode_t vp, struct cl_extent *cl, off_t EOF, int (*callback)(buf_t, void *), void *callback_arg)
5588 sparse_cluster_push(scmap, vp, EOF, 0, 0, callback, callback_arg);
5593 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 80)) | DBG_FUNC_END, vp, (*scmap), 0, 0, 0);
5598 cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, u_int32_t xsize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
5635 kret = ubc_create_upl(vp,
5649 error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE,
5677 error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE,
5765 cluster_copy_ubc_data(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty)
5768 return (cluster_copy_ubc_data_internal(vp, uio, io_resid, mark_dirty, 1));
5773 cluster_copy_ubc_data_internal(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty, int take_reference)
5787 control = ubc_getobject(vp, UBC_FLAGS_NONE);
5839 is_file_clean(vnode_t vp, off_t filesize)
5846 if (ubc_page_op(vp, f_offset, 0, NULL, &flags) == KERN_SUCCESS) {