Lines Matching defs:flags

158 static void cluster_complete_transaction(buf_t *cbp_head, void *callback_arg, int *retval, int flags, int needwait);
163 int flags, buf_t real_bp, struct clios *iostate, int (*)(buf_t, void *), void *callback_arg);
175 static int cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t filesize, int flags,
178 int flags, int (*)(buf_t, void *), void *callback_arg);
180 int (*)(buf_t, void *), void *callback_arg, int flags);
183 off_t headOff, off_t tailOff, int flags, int (*)(buf_t, void *), void *callback_arg);
185 int *write_type, u_int32_t *write_length, int flags, int (*)(buf_t, void *), void *callback_arg);
189 static int cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, u_int32_t xsize, int flags, int (*)(buf_t, void *), void *callback_arg);
194 static int cluster_push_now(vnode_t vp, struct cl_extent *, off_t EOF, int flags, int (*)(buf_t, void *), void *callback_arg);
196 static int cluster_try_push(struct cl_writebehind *, vnode_t vp, off_t EOF, int push_flag, int flags, int (*)(buf_t, void *), void *callback_arg);
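The unnamed parameter "int (*)(buf_t, void *)" that recurs in these prototypes is the per-I/O completion callback, always paired with the opaque "void *callback_arg" that is handed back to it (the definition at line 912 names the pair "callback"/"callback_arg"). A minimal compilable sketch of a matching callback, with buf_t mocked as an opaque pointer since the real type lives in the kernel's <sys/buf.h>:

    #include <stdio.h>

    typedef struct buf *buf_t;              /* mock; opaque in this sketch */

    /* Matches the unnamed int (*)(buf_t, void *) slot in the prototypes. */
    static int sample_io_callback(buf_t bp, void *callback_arg)
    {
            printf("transaction complete: bp=%p arg=%p\n",
                   (void *)bp, callback_arg);
            return 0;
    }

    int main(void)
    {
            /* A caller would thread the pair through, shape-wise:
             *   cluster_io(vp, upl, upl_offset, f_offset, size, flags,
             *              real_bp, iostate, sample_io_callback, arg);
             * here we just invoke it directly. */
            return sample_io_callback((buf_t)0, (void *)0);
    }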
427 cluster_get_wbp(vnode_t vp, int flags)
436 if ( !(flags & CLW_ALLOCATE))
455 if (flags & CLW_RETURNLOCKED)
878 cluster_complete_transaction(buf_t *cbp_head, void *callback_arg, int *retval, int flags, int needwait)
902 if ( !(flags & CL_ASYNC) && error && *retval == 0) {
903 if (((flags & (CL_PAGEOUT | CL_KEEPCACHED)) != CL_PAGEOUT) || (error != ENXIO))
912 int flags, buf_t real_bp, struct clios *iostate, int (*callback)(buf_t, void *), void *callback_arg)
953 if (mp->mnt_devblocksize > 1 && !(flags & (CL_DEV_MEMORY | CL_DIRECT_IO))) {
979 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_START, (int)f_offset, size, upl_offset, flags, 0);
986 if (flags & CL_DEV_MEMORY)
989 if (flags & CL_READ) {
1016 if (flags & CL_THROTTLE) {
1017 if ( !(flags & CL_PAGEOUT) && cluster_hard_throttle_on(vp, 1)) {
1022 if ( (flags & CL_DEV_MEMORY) )
1044 if (flags & CL_CLOSE)
1051 if (flags & CL_AGE)
1053 if (flags & (CL_PAGEIN | CL_PAGEOUT))
1055 if (flags & (CL_IOSTREAMING))
1057 if (flags & CL_COMMIT)
1059 if (flags & CL_DIRECT_IO)
1061 if (flags & (CL_PRESERVE | CL_KEEPCACHED))
1063 if (flags & CL_PASSIVE)
1065 if (flags & CL_ENCRYPTED)
1070 if ((flags & CL_READ) && ((upl_offset + non_rounded_size) & PAGE_MASK) && (!(flags & CL_NOZERO))) {
1119 if ( !(flags & CL_READ) && blkno == -1) {
1128 if (flags & CL_PAGEOUT) {
1165 if ( !(flags & CL_ASYNC))
1167 if ( !(flags & CL_COMMIT))
1179 cluster_wait_IO(cbp_head, (flags & CL_ASYNC));
1223 cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 0);
1260 flags &= ~CL_COMMIT;
1273 if (flags & CL_DEV_MEMORY) {
1281 if ((flags & CL_READ) && blkno == -1) {
1302 if (!(flags & CL_NOZERO))
1360 if ( (flags & CL_COMMIT) && pg_count) {
1386 cluster_wait_IO(cbp_head, (flags & CL_ASYNC));
1390 cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 0);
1414 if ( !(flags & CL_DEV_MEMORY) && trans_count >= max_trans_count &&
1443 else if ((flags & CL_ASYNC) && !(flags & CL_PAGEOUT))
1453 if (flags & CL_PAGEOUT) {
1461 if (flags & CL_ASYNC) {
1467 if (flags & CL_NOCACHE)
1486 if (flags & CL_READ) {
1535 } else if ( ((flags & CL_DEV_MEMORY) || (upl_offset & PAGE_MASK) == 0) &&
1536 ((flags & CL_ASYNC) || trans_count > max_trans_count) ) {
1555 if (flags & CL_THROTTLE)
1561 if (flags & CL_RAW_ENCRYPTED) {
1572 if ( !(flags & CL_ASYNC))
1573 cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 1);
1590 cluster_wait_IO(cbp_head, (flags & CL_ASYNC));
1636 if (flags & CL_COMMIT) {
1797 int size, off_t filesize, int flags)
1799 return cluster_pageout_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
1806 int size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
1815 if ((flags & UPL_IOSYNC) == 0)
1817 if ((flags & UPL_NOCOMMIT) == 0)
1819 if ((flags & UPL_KEEPCACHED))
1821 if (flags & UPL_PAGING_ENCRYPTED)
1874 int size, off_t filesize, int flags)
1876 return cluster_pagein_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL);
1882 int size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
1893 if ((flags & UPL_IOSYNC) == 0)
1895 if ((flags & UPL_NOCOMMIT) == 0)
1897 if (flags & UPL_IOSTREAMING)
1899 if (flags & UPL_PAGING_ENCRYPTED)
1949 int flags;
1955 flags = CL_ASYNC | CL_READ;
1957 flags = CL_ASYNC;
1959 flags |= CL_PASSIVE;
1963 return (cluster_io(bp->b_vp, bp->b_upl, 0, f_offset, bp->b_bcount, flags, bp, (struct clios *)NULL, callback, callback_arg));
1981 int flags;
1987 flags = xflags;
1989 if (flags & IO_PASSIVE)
1995 flags |= IO_NOCACHE;
2003 retval = cluster_write_copy(vp, NULL, (u_int32_t)0, oldEOF, newEOF, headOff, tailOff, flags, callback, callback_arg);
2014 if ( ((flags & (IO_NOCACHE | IO_NODIRECT)) == IO_NOCACHE) && UIO_SEG_IS_USER_SPACE(uio->uio_segflg) )
2017 if ( (flags & (IO_TAILZEROFILL | IO_HEADZEROFILL)) && write_type == IO_DIRECT)
2042 zflags = flags & ~IO_TAILZEROFILL;
2043 flags &= ~IO_HEADZEROFILL;
2050 zflags = flags;
2058 zflags = flags & ~(IO_TAILZEROFILL | IO_HEADZEROFILL);
2060 if (flags & IO_HEADZEROFILL) {
2064 flags &= ~IO_HEADZEROFILL;
2073 if (retval == 0 && (flags & IO_TAILZEROFILL) && uio_resid(uio) == 0) {
2089 retval = cluster_write_direct(vp, uio, oldEOF, newEOF, &write_type, &write_length, flags, callback, callback_arg);
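Lines 2042-2073 show how cluster_write() splits a zero-fill request across phases: the head-fill pass runs with zflags = flags & ~IO_TAILZEROFILL so it cannot also do the tail, and once the head is zeroed, IO_HEADZEROFILL is stripped from flags so no later phase repeats it. A self-checking sketch of that masking, with mock bit values:

    #define IO_HEADZEROFILL 0x01    /* mock values; the real ones are in */
    #define IO_TAILZEROFILL 0x02    /* <sys/vnode.h>                     */

    int main(void)
    {
            int flags = IO_HEADZEROFILL | IO_TAILZEROFILL;
            int zflags;

            /* Head phase: keep the head-fill bit, hide the tail fill. */
            zflags = flags & ~IO_TAILZEROFILL;

            /* The head fill happens exactly once; strip it afterwards. */
            flags &= ~IO_HEADZEROFILL;

            return (zflags == IO_HEADZEROFILL &&
                    flags  == IO_TAILZEROFILL) ? 0 : 1;
    }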
2115 int flags, int (*callback)(buf_t, void *), void *callback_arg)
2162 if (flags & IO_PASSIVE)
2165 if (flags & IO_NOCACHE)
2224 if ( (flags & IO_RETURN_ON_THROTTLE) && throttle_type == 2) {
2489 * note that flags will never have IO_HEADZEROFILL or IO_TAILZEROFILL set
2495 retval = cluster_write_copy(vp, uio, io_req_size, oldEOF, newEOF, (off_t)0, (off_t)0, flags, callback, callback_arg);
2715 cluster_zero_range(upl_t upl, upl_page_info_t *pl, int flags, int io_offset, off_t zero_off, off_t upl_f_offset, int bytes_to_zero)
2720 if ((flags & (IO_NOZEROVALID | IO_NOZERODIRTY))) {
2742 off_t tailOff, int flags, int (*callback)(buf_t, void *), void *callback_arg)
2784 if (flags & IO_PASSIVE)
2788 if (flags & IO_NOCACHE)
2799 if (flags & IO_HEADZEROFILL) {
2823 flags |= IO_HEADZEROFILL;
2827 if (flags & IO_TAILZEROFILL) {
2841 flags |= IO_TAILZEROFILL;
2882 if (uio && ((flags & (IO_SYNC | IO_HEADZEROFILL | IO_TAILZEROFILL)) == 0)) {
3036 bytes_to_zero = cluster_zero_range(upl, pl, flags, io_offset, zero_off, upl_f_offset, bytes_to_zero);
3069 bytes_to_zero = cluster_zero_range(upl, pl, flags, io_offset, zero_off1, upl_f_offset, bytes_to_zero);
3101 * UPL has been automatically set to clear the dirty flags (both software and hardware)
3118 if (flags & IO_SYNC) {
3134 if ( !(flags & IO_NOCACHE)) {
3326 ret_cluster_try_push = cluster_try_push(wbp, vp, newEOF, (flags & IO_NOCACHE) ? 0 : PUSH_DELAY, 0, callback, callback_arg);
3351 if (flags & IO_NOCACHE)
3372 retval = cluster_push_now(vp, &cl, newEOF, flags, callback, callback_arg);
3393 int flags;
3399 flags = xflags;
3402 flags |= IO_NOCACHE;
3404 flags |= IO_RAOFF;
3411 if (flags & IO_ENCRYPTED) {
3429 if (((flags & IO_NOCACHE) || (flags & IO_ENCRYPTED)) && UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) {
3448 retval = cluster_read_copy(vp, uio, io_size, filesize, flags, callback, callback_arg);
3452 retval = cluster_read_direct(vp, uio, filesize, &read_type, &read_length, flags, callback, callback_arg);
3456 retval = cluster_read_contig(vp, uio, filesize, &read_type, &read_length, callback, callback_arg, flags);
3485 cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
3520 (int)uio->uio_offset, io_req_size, (int)filesize, flags, 0);
3522 if (flags & IO_ENCRYPTED) {
3528 if (policy == IOPOL_THROTTLE || policy == IOPOL_UTILITY || (flags & IO_NOCACHE))
3531 if (flags & IO_PASSIVE)
3536 if (flags & IO_NOCACHE)
3548 if ((flags & (IO_RAOFF|IO_NOCACHE)) || ((last_request_offset & ~PAGE_MASK_64) == (uio->uio_offset & ~PAGE_MASK_64))) {
3596 if (!(flags & IO_NOCACHE)) {
3686 if ( (flags & IO_RETURN_ON_THROTTLE) ) {
3720 if (flags & IO_NOCACHE) {
3909 if (error || (flags & IO_NOCACHE))
4011 int flags, int (*callback)(buf_t, void *), void *callback_arg)
4061 if (flags & IO_PASSIVE)
4064 if (flags & IO_ENCRYPTED) {
4068 if (flags & IO_NOCACHE) {
4134 if ((flags & IO_ENCRYPTED) && (misaligned)) {
4172 if ((strict_uncached_IO == FALSE) && ((flags & IO_ENCRYPTED) == 0)) {
4239 if (flags & IO_ENCRYPTED) {
4279 if ((strict_uncached_IO == FALSE) && ((flags & IO_ENCRYPTED) == 0)) {
4297 if ( (flags & IO_RETURN_ON_THROTTLE) ) {
4457 if ((flags & IO_ENCRYPTED) && (max_io_size < io_size)) {
4469 if ((flags & IO_ENCRYPTED) && (io_size > io_req_size)) {
4520 retval = cluster_read_copy(vp, uio, io_req_size, filesize, flags, callback, callback_arg);
4533 int (*callback)(buf_t, void *), void *callback_arg, int flags)
4558 if (flags & IO_PASSIVE)
4563 if (flags & IO_NOCACHE)
5006 cluster_push(vnode_t vp, int flags)
5008 return cluster_push_ext(vp, flags, NULL, NULL);
5013 cluster_push_ext(vnode_t vp, int flags, int (*callback)(buf_t, void *), void *callback_arg)
5020 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, vp, flags, 0, -1, 0);
5024 if (((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE) && (flags & IO_DEFWRITE)) {
5028 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, vp, flags, 0, -2, 0);
5034 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, vp, flags, 0, -3, 0);
5038 wbp->cl_scmap, wbp->cl_number, flags, 0, 0);
5054 if (flags & IO_SYNC) {
5084 sparse_cluster_push(&scmap, vp, ubc_getsize(vp), PUSH_ALL, flags | IO_PASSIVE, callback, callback_arg);
5093 sparse_cluster_push(&(wbp->cl_scmap), vp, ubc_getsize(vp), PUSH_ALL, flags | IO_PASSIVE, callback, callback_arg);
5097 retval = cluster_try_push(wbp, vp, ubc_getsize(vp), PUSH_ALL, flags | IO_PASSIVE, callback, callback_arg);
5101 if (flags & IO_SYNC)
5238 int flags;
5241 flags = io_flags & (IO_PASSIVE|IO_CLOSE);
5247 flags |= IO_NOCACHE;
5250 flags |= IO_PASSIVE;
5253 flags |= IO_SYNC;
5258 cluster_push_now(vp, &cl, EOF, flags, callback, callback_arg);
5334 cluster_push_now(vnode_t vp, struct cl_extent *cl, off_t EOF, int flags, int (*callback)(buf_t, void *), void *callback_arg)
5353 if (flags & IO_PASSIVE)
5359 (int)cl->b_addr, (int)cl->e_addr, (int)EOF, flags, 0);
5402 if ((vp->v_flag & VNOCACHE_DATA) || (flags & IO_NOCACHE))
5480 if ( !(flags & IO_SYNC))
5483 if (flags & IO_CLOSE)
5486 if (flags & IO_NOCACHE)
5514 int flags;
5519 if (ubc_page_op(vp, (off_t)(cl.b_addr * PAGE_SIZE_64), 0, NULL, &flags) == KERN_SUCCESS) {
5520 if (flags & UPL_POP_DIRTY) {
5598 cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, u_int32_t xsize, int flags, int (*callback)(buf_t, void *), void *callback_arg)
5610 if (flags & IO_PASSIVE)
5615 if (flags & IO_NOCACHE)
5620 if ( !(flags & CL_READ) ) {
5666 if (flags & CL_READ)
5673 if ( !(flags & CL_READ) || (upl_valid_page(pl, 0) && upl_dirty_page(pl, 0))) {
5842 int flags;
5846 if (ubc_page_op(vp, f_offset, 0, NULL, &flags) == KERN_SUCCESS) {
5847 if (flags & UPL_POP_DIRTY) {