Lines matching refs: io_size

484 	size_t	  io_size;
488 if (VNOP_BLOCKMAP(vp, f_offset, PAGE_SIZE, &blkno, &io_size, NULL, VNODE_READ, NULL))
491 if (io_size == 0)
916 u_int io_size;
1087 io_size = max_iosize;
1089 io_size = size;
1091 io_size_wanted = io_size;
1092 io_size_tmp = (size_t)io_size;
1094 if ((error = VNOP_BLOCKMAP(vp, f_offset, io_size, &blkno, &io_size_tmp, NULL, bmap_flags, NULL)))
1098 io_size = io_size_wanted;
1100 io_size = (u_int)io_size_tmp;
1106 (int)f_offset, (int)(blkno>>32), (int)blkno, io_size, 0);
1108 if (io_size == 0) {
1113 * file would be returned as a blkno of -1 with a non-zero io_size
1114 * a real extent is returned with a blkno != -1 and a non-zero io_size
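
The comments at 1113-1114 (and the one-page probe at 484-491) state the VNOP_BLOCKMAP contract that the io_size handling above depends on. A minimal sketch of how a caller interprets the result, assuming only what those comments say:

    daddr64_t blkno;
    size_t    run = 0;

    if (VNOP_BLOCKMAP(vp, f_offset, io_size, &blkno, &run, NULL, VNODE_READ, NULL) == 0) {
            if (run == 0) {
                    /* no mapping at this offset (e.g. past EOF) */
            } else if (blkno == -1) {
                    /* a hole: 'run' bytes with no backing store */
            } else {
                    /* a real extent: 'run' bytes starting at device block 'blkno' */
            }
    }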
1232 io_size = e_offset - f_offset;
1234 f_offset += io_size;
1235 upl_offset += io_size;
1237 if (size >= io_size)
1238 size -= io_size;
1247 non_rounded_size -= io_size;
1267 * we have now figured out how much I/O we can do - this is in 'io_size'
1269 * pg_count is the number of full and partial pages that 'io_size' encompasses
1279 pg_count = (io_size + pg_offset + (PAGE_SIZE - 1)) / PAGE_SIZE;
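
The expression at 1279 is a ceiling division over the pages the transfer touches. A worked example, assuming a 4 KB PAGE_SIZE:

    /* PAGE_SIZE = 4096, pg_offset = 512, io_size = 9000:
     * pg_count = (9000 + 512 + 4095) / 4096 = 13607 / 4096 = 3
     * i.e. the transfer straddles three pages, the first and last partial */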
1291 if (io_size >= (u_int)non_rounded_size) {
1296 * (indicated by the io_size finishing off the I/O request for this UPL)
1303 bytes_to_zero = (((upl_offset + io_size) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - upl_offset;
1307 bytes_to_zero = io_size;
1337 if ((int)io_size >= non_rounded_size)
1353 if ((int)io_size >= non_rounded_size)
1364 upl_offset += io_size;
1365 f_offset += io_size;
1366 size -= io_size;
1374 non_rounded_size -= io_size;
1397 if (((pg_count - max_vectors) * PAGE_SIZE) > io_size) {
1398 io_size = PAGE_SIZE - pg_offset;
1401 io_size -= (pg_count - max_vectors) * PAGE_SIZE;
1415 ((upl_offset + io_size) & PAGE_MASK)) {
1418 aligned_ofs = (upl_offset + io_size) & ~PAGE_MASK;
1420 * If the io_size does not actually finish off even a
1430 io_size = aligned_ofs - upl_offset;
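
Hits 1415-1430 trim a transfer so it ends on a page boundary rather than leaving a dangling partial page. The arithmetic, worked through with assumed 4 KB pages:

    /* upl_offset = 1024, io_size = 10000:
     * aligned_ofs = (1024 + 10000) & ~4095 = 8192
     * io_size     = 8192 - 1024 = 7168   -- tail is now page-aligned */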
1472 cbp->b_bcount = io_size;
1484 iostate->io_issued += io_size;
1488 (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0);
1492 (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0);
1509 upl_offset += io_size;
1510 f_offset += io_size;
1511 size -= io_size;
1518 non_rounded_size -= io_size;
1583 io_size = 0;
1602 io_size += cbp->b_bcount;
1621 iostate->io_issued -= io_size;
1808 int io_size;
1856 io_size = size;
1858 io_size = max_size;
1860 rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
1867 return (cluster_io(vp, upl, upl_offset, f_offset, io_size,
1884 u_int io_size;
1921 io_size = size;
1923 io_size = max_size;
1925 rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
1931 retval = cluster_io(vp, upl, upl_offset, f_offset, io_size,
2124 u_int32_t io_size;
2248 io_size = io_req_size & ~PAGE_MASK;
2251 if (io_size > max_io_size)
2252 io_size = max_io_size;
2272 upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;
2275 (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);
2339 io_size = upl_size;
2341 io_size = 0;
2344 (int)upl_offset, upl_size, (int)iov_base, io_size, 0);
2346 if (io_size == 0) {
2357 vm_offset_t end_off = ((iov_base + io_size) & PAGE_MASK);
2372 * io_size is a multiple of PAGE_SIZE
2374 ubc_range_op(vp, uio->uio_offset, uio->uio_offset + io_size, UPL_ROP_DUMP, NULL);
2399 (int)upl_offset, (int)uio->uio_offset, io_size, io_flag, 0);
2403 io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
2415 vector_upl_iosize += io_size;
2428 uio_update(uio, (user_size_t)io_size);
2442 io_req_size -= io_size;
2515 u_int32_t io_size;
2549 io_size = *write_length;
2554 upl_needed_size = upl_offset + io_size;
2588 while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
2593 if (head_size > io_size)
2594 head_size = io_size;
2603 io_size -= head_size;
2619 tail_size = io_size & (devblocksize - 1);
2620 io_size -= tail_size;
2622 while (io_size && error == 0) {
2624 if (io_size > MAX_IO_CONTIG_SIZE)
2627 xsize = io_size;
2663 io_size -= xsize;
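
The loop at 2588-2663 (mirrored on the read side at 4631-4704) peels an unaligned head off the request, strips the sub-block tail, and issues the devblocksize-aligned middle. A hypothetical helper capturing the same split; the name and signature are illustrative, not from the source, and devblocksize is assumed to be a power of two:

    static void
    split_request(off_t offset, u_int32_t len, u_int32_t devblocksize,
        u_int32_t *head, u_int32_t *middle, u_int32_t *tail)
    {
            *head = (u_int32_t)(-offset & (devblocksize - 1)); /* bytes to next block boundary */
            if (*head > len)
                    *head = len;
            len -= *head;
            *tail = len & (devblocksize - 1);   /* remainder past the last full block */
            *middle = len - *tail;              /* block-multiple body, possibly 0 */
    }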
2752 int io_size;
2922 * this exceeds the maximum io_size for the device,
2932 io_size = upl_size - start_offset;
2934 if ((long long)io_size > total_size)
2935 io_size = total_size;
2937 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, io_size, total_size, 0, 0);
2989 if ((start_offset == 0 || upl_size > PAGE_SIZE) && ((start_offset + io_size) & PAGE_MASK)) {
2997 if ((upl_f_offset + start_offset + io_size) < oldEOF &&
3026 xfer_resid = io_size;
3080 io_size += start_offset;
3082 if ((upl_f_offset + io_size) >= newEOF && (u_int)io_size < upl_size) {
3089 cluster_zero(upl, io_size, upl_size - io_size, NULL);
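
Hits 3080-3089 handle a write whose last page reaches or passes the new EOF: the unwritten remainder of that page is zeroed in the UPL so stale memory never becomes visible past end-of-file. Schematically, with io_size now counted from the start of the UPL (start_offset folded in at 3080):

    /* valid data ends inside the UPL's last page: scrub the rest */
    if ((upl_f_offset + io_size) >= newEOF && (u_int)io_size < upl_size)
            cluster_zero(upl, io_size, upl_size - io_size, NULL);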
3395 u_int32_t io_size;
3444 io_size = MAX_IO_REQUEST_SIZE;
3446 io_size = (u_int32_t)cur_resid;
3448 retval = cluster_read_copy(vp, uio, io_size, filesize, flags, callback, callback_arg);
3505 u_int32_t io_size;
3592 io_size = io_req_size;
3594 io_size = max_size;
3598 while (io_size) {
3634 if (last_ioread_offset && io_size > (max_io_size / 4))
3637 io_resid = io_size;
3645 io_size -= xsize;
3656 if (rd_ahead_enabled && (io_size == 0 || last_ioread_offset == last_request_offset)) {
3666 if (io_size == 0) {
3708 * this exceeds the maximum io_size for the device,
3715 if (io_size > max_rd_size)
3716 io_size = max_rd_size;
3718 upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
3779 io_size = (last_pg - start_pg) * PAGE_SIZE;
3781 if ((off_t)(upl_f_offset + upl_offset + io_size) > filesize)
3782 io_size = filesize - (upl_f_offset + upl_offset);
3789 io_size, CL_READ | CL_ASYNC | bflag, (buf_t)NULL, &iostate, callback, callback_arg);
3905 io_size = (last_pg - start_pg) * PAGE_SIZE;
3907 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START, upl, start_pg * PAGE_SIZE, io_size, error, 0);
3910 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, io_size,
3920 ubc_upl_commit_range(upl, start_pg * PAGE_SIZE, io_size, commit_flags);
3922 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, upl, start_pg * PAGE_SIZE, io_size, error, 0);
4033 u_int32_t io_size;
4160 io_start = io_size = io_req_size;
4170 * in io_size
4173 retval = cluster_copy_ubc_data_internal(vp, uio, (int *)&io_size, 0, 0);
4179 xsize = io_start - io_size;
4228 * however, if io_size isn't a multiple of devblocksize we
4238 if (io_size & (devblocksize - 1)) {
4251 io_size = ((io_size + devblocksize) & ~(devblocksize - 1));
4252 io_min = io_size;
4259 io_size &= ~PAGE_MASK;
4264 if (retval || io_size < io_min) {
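
The rounding at 4251 bumps a misaligned direct-read size up to the next devblocksize multiple; the guard at 4238 ensures it only runs when io_size is misaligned, so the `+ devblocksize` never overshoots an already-aligned size by a full block. For example:

    /* devblocksize = 512, io_size = 1000:
     * (1000 + 512) & ~511 = 1512 & ~511 = 1024   -- next 512-byte multiple */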
4281 if ((xsize = io_size) > max_rd_size)
4284 io_size = 0;
4286 ubc_range_op(vp, uio->uio_offset, uio->uio_offset + xsize, UPL_ROP_ABSENT, (int *)&io_size);
4288 if (io_size == 0) {
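
At 4284-4288, io_size is reused as the out-parameter of ubc_range_op: with UPL_ROP_ABSENT it appears to come back holding the length of the initial run of pages absent from the cache (this reading is inferred from the surrounding hits, so treat it as an assumption). A result of zero means the first page is resident and the direct path bails out:

    int absent = 0;

    /* how many bytes starting at uio_offset have no cached pages? */
    ubc_range_op(vp, uio->uio_offset, uio->uio_offset + xsize, UPL_ROP_ABSENT, &absent);
    if (absent == 0) {
            /* first page is cached -- fall back to the copy path */
    }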
4314 if (io_size > max_rd_size)
4315 io_size = max_rd_size;
4320 upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;
4323 (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);
4325 if (upl_offset == 0 && ((io_size & PAGE_MASK) == 0))
4346 (int)upl_offset, upl_size, io_size, kret, 0);
4370 (int)upl_offset, upl_size, io_size, kret, 0);
4379 io_size = upl_size;
4381 io_size = 0;
4383 if (io_size == 0) {
4388 (int)upl_offset, upl_size, io_size, kret, 0);
4391 vm_offset_t end_off = ((iov_base + io_size) & PAGE_MASK);
4424 upl, (int)upl_offset, (int)uio->uio_offset, io_size, 0);
4433 retval = cluster_io(vp, upl, upl_offset, uio->uio_offset, io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
4447 vector_upl_iosize += io_size;
4457 if ((flags & IO_ENCRYPTED) && (max_io_size < io_size)) {
4461 uio_update(uio, (user_size_t)io_size);
4464 * Under normal circumstances, the io_size should not be
4469 if ((flags & IO_ENCRYPTED) && (io_size > io_req_size)) {
4473 io_req_size -= io_size;
4552 u_int32_t io_size;
4584 io_size = *read_length;
4588 if (io_size > max_size)
4589 io_size = max_size;
4594 upl_needed_size = upl_offset + io_size;
4602 (int)upl_offset, (int)upl_size, (int)iov_base, io_size, 0);
4609 (int)upl_offset, upl_size, io_size, kret, 0);
4631 while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
4636 if (head_size > io_size)
4637 head_size = io_size;
4646 io_size -= head_size;
4662 tail_size = io_size & (devblocksize - 1);
4664 io_size -= tail_size;
4666 while (io_size && error == 0) {
4668 if (io_size > MAX_IO_CONTIG_SIZE)
4671 xsize = io_size;
4704 io_size -= xsize;
4833 int io_size;
4867 * this exceeds the maximum io_size for the device,
4876 io_size = resid;
4878 io_size = max_size;
4880 upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
4895 io_size = skip_range - start_offset;
4897 f_offset += io_size;
4898 resid -= io_size;
4973 io_size = (last_pg - start_pg) * PAGE_SIZE;
4975 if ((off_t)(upl_f_offset + upl_offset + io_size) > filesize)
4976 io_size = filesize - (upl_f_offset + upl_offset);
4981 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
4990 io_size = upl_size - start_offset;
4992 if (io_size > resid)
4993 io_size = resid;
4994 f_offset += io_size;
4995 resid -= io_size;
5344 int io_size;
5476 io_size = min(size, (last_pg - start_pg) * PAGE_SIZE);
5489 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
5495 size -= io_size;
5776 int io_size;
5782 io_size = *io_resid;
5785 (int)uio->uio_offset, io_size, mark_dirty, take_reference, 0);
5791 (int)uio->uio_offset, io_size, retval, 3, 0);
5819 if ( (io_size = *io_resid) ) {
5824 start_offset, io_size, mark_dirty, take_reference);
5826 io_size -= xsize;
5829 *io_resid = io_size;
5832 (int)uio->uio_offset, io_size, retval, 0x80000000 | segflg, 0);
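
The final hits (5776-5832) show the in/out contract on *io_resid: it carries the requested byte count in and the bytes that could not be satisfied from the UBC out. A usage sketch, assuming the public cluster_copy_ubc_data() wrapper preserves that contract:

    int resid = (int)uio_resid(uio);

    /* copy whatever the unified buffer cache already holds */
    cluster_copy_ubc_data(vp, uio, &resid, 1 /* mark_dirty */);
    if (resid != 0) {
            /* 'resid' bytes were not resident -- fetch them with real I/O */
    }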