Lines matching refs: io_size

466 	size_t	  io_size;
470 if (VNOP_BLOCKMAP(vp, f_offset, PAGE_SIZE, &blkno, &io_size, NULL, VNODE_READ, NULL))
473 if (io_size == 0)
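The three hits above (lines 466-473) probe a single page through VNOP_BLOCKMAP to decide whether it is backed by real storage: an io_size of 0 means the offset lies beyond the last extent, and (per the comments at lines 1086-1087 below) a hole in a sparse file comes back as blkno == -1. A minimal userspace sketch of that decision, with a hypothetical blockmap() stub standing in for the VNOP (the stub, its behavior, and PAGE_SIZE_DEMO are assumptions, not kernel API):

	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <sys/types.h>

	#define PAGE_SIZE_DEMO 4096

	/* hypothetical stand-in for VNOP_BLOCKMAP: maps (offset, len) to a
	 * device block number and the length of the extent backing it */
	static int blockmap(off_t f_offset, size_t len, int64_t *blkno, size_t *io_size)
	{
		if (f_offset >= 8192) {                 /* pretend the file ends at 8KB */
			*io_size = 0;
			return 0;
		}
		*blkno = (f_offset < 4096) ? 100 : -1;  /* second page is a hole */
		*io_size = len;
		return 0;
	}

	int main(void)
	{
		for (off_t off = 0; off <= 8192; off += PAGE_SIZE_DEMO) {
			int64_t blkno;
			size_t io_size;

			if (blockmap(off, PAGE_SIZE_DEMO, &blkno, &io_size))
				continue;                       /* mapping failed, as at line 470 */
			if (io_size == 0)
				printf("%lld: beyond EOF\n", (long long)off);
			else if (blkno == -1)
				printf("%lld: hole (no backing store)\n", (long long)off);
			else
				printf("%lld: extent at block %lld\n", (long long)off, (long long)blkno);
		}
		return 0;
	}
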
889 u_int io_size;
1060 io_size = max_iosize;
1062 io_size = size;
1064 io_size_wanted = io_size;
1065 io_size_tmp = (size_t)io_size;
1067 if ((error = VNOP_BLOCKMAP(vp, f_offset, io_size, &blkno, &io_size_tmp, NULL, bmap_flags, NULL)))
1071 io_size = io_size_wanted;
1073 io_size = (u_int)io_size_tmp;
1079 (int)f_offset, (int)(blkno>>32), (int)blkno, io_size, 0);
1081 if (io_size == 0) {
1086 * file would be returned as a blkno of -1 with a non-zero io_size
1087 * a real extent is returned with a blkno != -1 and a non-zero io_size
1205 io_size = e_offset - f_offset;
1207 f_offset += io_size;
1208 upl_offset += io_size;
1210 if (size >= io_size)
1211 size -= io_size;
1220 non_rounded_size -= io_size;
1240 * we have now figured out how much I/O we can do - this is in 'io_size'
1242 * pg_count is the number of full and partial pages that 'io_size' encompasses
1252 pg_count = (io_size + pg_offset + (PAGE_SIZE - 1)) / PAGE_SIZE;
1264 if (io_size >= (u_int)non_rounded_size) {
1269 * (indicated by the io_size finishing off the I/O request for this UPL)
1276 bytes_to_zero = (((upl_offset + io_size) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - upl_offset;
1280 bytes_to_zero = io_size;
1310 if ((int)io_size >= non_rounded_size)
1326 if ((int)io_size >= non_rounded_size)
1337 upl_offset += io_size;
1338 f_offset += io_size;
1339 size -= io_size;
1347 non_rounded_size -= io_size;
1370 if (((pg_count - max_vectors) * PAGE_SIZE) > io_size) {
1371 io_size = PAGE_SIZE - pg_offset;
1374 io_size -= (pg_count - max_vectors) * PAGE_SIZE;
1388 ((upl_offset + io_size) & PAGE_MASK)) {
1391 aligned_ofs = (upl_offset + io_size) & ~PAGE_MASK;
1393 * If the io_size does not actually finish off even a
1403 io_size = aligned_ofs - upl_offset;
1445 cbp->b_bcount = io_size;
1450 upl_set_blkno(upl, upl_offset, io_size, blkno);
1459 iostate->io_issued += io_size;
1463 (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0);
1467 (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0);
1484 upl_offset += io_size;
1485 f_offset += io_size;
1486 size -= io_size;
1493 non_rounded_size -= io_size;
1558 io_size = 0;
1577 io_size += cbp->b_bcount;
1596 iostate->io_issued -= io_size;
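The cluster_io hits above (lines 889-1596) lean on a few page-arithmetic idioms: clipping io_size to the device's max_iosize (line 1060), counting the full and partial pages a transfer touches (line 1252), and rounding a zero-fill range out to the next page boundary (line 1276). A small self-contained demo of that arithmetic, assuming 4KB pages (PAGE_SIZE/PAGE_MASK redefined locally for userspace):

	#include <stdio.h>

	#define PAGE_SIZE 4096
	#define PAGE_MASK (PAGE_SIZE - 1)

	int main(void)
	{
		unsigned int upl_offset = 512;   /* transfer starts mid-page */
		unsigned int io_size    = 9000;  /* spans three pages */
		unsigned int pg_offset  = upl_offset & PAGE_MASK;

		/* number of full and partial pages 'io_size' encompasses (line 1252) */
		unsigned int pg_count = (io_size + pg_offset + (PAGE_SIZE - 1)) / PAGE_SIZE;

		/* zero-fill to the end of the last page touched (line 1276) */
		unsigned int bytes_to_zero =
		    (((upl_offset + io_size) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - upl_offset;

		printf("pg_offset=%u pg_count=%u bytes_to_zero=%u\n",
		    pg_offset, pg_count, bytes_to_zero);
		/* 512 + 9000 = 9512 -> 3 pages; rounded end 12288 - 512 = 11776 */
		return 0;
	}
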
1783 int io_size;
1831 io_size = size;
1833 io_size = max_size;
1835 rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
1842 return (cluster_io(vp, upl, upl_offset, f_offset, io_size,
1859 u_int io_size;
1896 io_size = size;
1898 io_size = max_size;
1900 rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
1906 retval = cluster_io(vp, upl, upl_offset, f_offset, io_size,
2099 u_int32_t io_size;
2226 io_size = io_req_size & ~PAGE_MASK;
2229 if (io_size > max_io_size)
2230 io_size = max_io_size;
2250 upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;
2253 (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);
2317 io_size = upl_size;
2319 io_size = 0;
2322 (int)upl_offset, upl_size, (int)iov_base, io_size, 0);
2324 if (io_size == 0) {
2335 vm_offset_t end_off = ((iov_base + io_size) & PAGE_MASK);
2350 * io_size is a multiple of PAGE_SIZE
2352 ubc_range_op(vp, uio->uio_offset, uio->uio_offset + io_size, UPL_ROP_DUMP, NULL);
2376 (int)upl_offset, (int)uio->uio_offset, io_size, io_flag, 0);
2380 io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
2392 vector_upl_iosize += io_size;
2405 uio_update(uio, (user_size_t)io_size);
2419 io_req_size -= io_size;
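In the direct-write hits (lines 2099-2419), each pass trims the request down to whole pages (io_size = io_req_size & ~PAGE_MASK, line 2226), clamps it to max_io_size (lines 2229-2230), and sizes the UPL to cover the user buffer's page span (line 2250). A sketch of that trimming, with illustrative values standing in for the real uio state:

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SIZE 4096
	#define PAGE_MASK (PAGE_SIZE - 1)

	int main(void)
	{
		uint32_t io_req_size = 70000;          /* bytes left in the request */
		uint32_t max_io_size = 65536;          /* per-pass ceiling (assumed value) */
		uintptr_t iov_base   = 0x100200;       /* user buffer, not page aligned */

		uint32_t io_size = io_req_size & ~PAGE_MASK;   /* whole pages only (line 2226) */
		if (io_size > max_io_size)
			io_size = max_io_size;

		uintptr_t upl_offset = iov_base & PAGE_MASK;
		uint32_t upl_needed_size =
		    (upl_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;   /* line 2250 */

		printf("io_size=%u upl_offset=%lu upl_needed_size=%u\n",
		    io_size, (unsigned long)upl_offset, upl_needed_size);
		return 0;
	}
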
2490 u_int32_t io_size;
2524 io_size = *write_length;
2529 upl_needed_size = upl_offset + io_size;
2563 while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
2568 if (head_size > io_size)
2569 head_size = io_size;
2578 io_size -= head_size;
2594 tail_size = io_size & (devblocksize - 1);
2595 io_size -= tail_size;
2597 while (io_size && error == 0) {
2599 if (io_size > MAX_IO_CONTIG_SIZE)
2602 xsize = io_size;
2637 io_size -= xsize;
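The contig-write hits (lines 2490-2637) split a transfer on device-block boundaries: a head that brings uio_offset up to alignment (lines 2563-2578), a body issued in chunks of at most MAX_IO_CONTIG_SIZE (lines 2597-2637), and a tail of io_size & (devblocksize - 1) bytes (lines 2594-2595). A runnable sketch of the same split, with assumed sizes:

	#include <stdio.h>
	#include <stdint.h>

	#define MAX_IO_CONTIG_SIZE (1024 * 1024)   /* assumed chunk ceiling */

	int main(void)
	{
		uint32_t devblocksize = 512;
		uint64_t uio_offset   = 1000;      /* not block aligned */
		uint32_t io_size      = 2100000;

		/* head: bytes needed to reach the next device-block boundary */
		if (uio_offset & (devblocksize - 1)) {
			uint32_t head_size = devblocksize - (uint32_t)(uio_offset & (devblocksize - 1));
			if (head_size > io_size)
				head_size = io_size;       /* lines 2568-2569 */
			printf("head: %u bytes\n", head_size);
			io_size -= head_size;
			uio_offset += head_size;
		}
		/* tail: whatever no longer fills a device block (lines 2594-2595) */
		uint32_t tail_size = io_size & (devblocksize - 1);
		io_size -= tail_size;

		/* body: aligned middle, chunked like the loop at lines 2597-2637 */
		while (io_size) {
			uint32_t xsize = (io_size > MAX_IO_CONTIG_SIZE) ? MAX_IO_CONTIG_SIZE : io_size;
			printf("body chunk: %u bytes\n", xsize);
			io_size -= xsize;
		}
		if (tail_size)
			printf("tail: %u bytes\n", tail_size);
		return 0;
	}
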
2725 int io_size;
2898 * this exceeds the maximum io_size for the device,
2908 io_size = upl_size - start_offset;
2910 if ((long long)io_size > total_size)
2911 io_size = total_size;
2913 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, io_size, total_size, 0, 0);
2965 if ((start_offset == 0 || upl_size > PAGE_SIZE) && ((start_offset + io_size) & PAGE_MASK)) {
2973 if ((upl_f_offset + start_offset + io_size) < oldEOF &&
3002 xfer_resid = io_size;
3056 io_size += start_offset;
3058 if ((upl_f_offset + io_size) >= newEOF && (u_int)io_size < upl_size) {
3065 cluster_zero(upl, io_size, upl_size - io_size, NULL);
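In the copy-write hits (lines 2725-3065), when the valid data ends inside the UPL's last page (line 3058), the remainder of the UPL is zeroed so stale bytes never land in the page cache (line 3065: cluster_zero(upl, io_size, upl_size - io_size, NULL)). The range arithmetic, demonstrated with assumed values:

	#include <stdio.h>

	#define PAGE_SIZE 4096
	#define PAGE_MASK (PAGE_SIZE - 1)

	int main(void)
	{
		long long upl_f_offset = 40960;     /* UPL maps file offset 40960.. */
		unsigned int start_offset = 300;    /* write begins 300 bytes in */
		unsigned int io_size = 5000;        /* bytes actually written */
		long long newEOF = 46260;           /* new end of file */

		unsigned int upl_size =
		    (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;

		/* fold the leading gap into io_size, as line 3056 does */
		io_size += start_offset;

		/* past-EOF tail of the UPL gets zero filled (lines 3058/3065) */
		if ((upl_f_offset + io_size) >= newEOF && io_size < upl_size)
			printf("zero %u bytes at upl offset %u\n", upl_size - io_size, io_size);
		return 0;
	}
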
3371 u_int32_t io_size;
3423 io_size = MAX_IO_REQUEST_SIZE;
3425 io_size = (u_int32_t)cur_resid;
3427 retval = cluster_read_copy(vp, uio, io_size, filesize, flags, callback, callback_arg);
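The cluster_read hits (lines 3371-3427) simply carve the caller's residual into MAX_IO_REQUEST_SIZE pieces before handing each to cluster_read_copy. The chunking shape, as a sketch (the constant's value here is an assumption for the demo):

	#include <stdio.h>
	#include <stdint.h>

	#define MAX_IO_REQUEST_SIZE (2 * 1024 * 1024)   /* assumed */

	int main(void)
	{
		int64_t cur_resid = 5000000;    /* bytes the caller still wants */

		while (cur_resid > 0) {
			uint32_t io_size = (cur_resid > MAX_IO_REQUEST_SIZE)
			    ? MAX_IO_REQUEST_SIZE : (uint32_t)cur_resid;    /* lines 3423-3425 */
			printf("read chunk of %u bytes\n", io_size);
			cur_resid -= io_size;       /* cluster_read_copy consumes io_size */
		}
		return 0;
	}
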
3484 u_int32_t io_size;
3574 io_size = io_req_size;
3576 io_size = max_size;
3580 while (io_size) {
3616 if (last_ioread_offset && io_size > (max_io_size / 4))
3619 io_resid = io_size;
3627 io_size -= xsize;
3638 if (rd_ahead_enabled && (io_size == 0 || last_ioread_offset == last_request_offset)) {
3648 if (io_size == 0) {
3690 * this exceeds the maximum io_size for the device,
3697 if (io_size > max_rd_size)
3698 io_size = max_rd_size;
3700 upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
3766 io_size = (last_pg - start_pg) * PAGE_SIZE;
3768 if ((off_t)(upl_f_offset + upl_offset + io_size) > filesize)
3769 io_size = filesize - (upl_f_offset + upl_offset);
3776 io_size, CL_READ | CL_ASYNC | bflag, (buf_t)NULL, &iostate, callback, callback_arg);
3892 io_size = (last_pg - start_pg) * PAGE_SIZE;
3894 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START, upl, start_pg * PAGE_SIZE, io_size, error, 0);
3897 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, io_size,
3907 ubc_upl_commit_range(upl, start_pg * PAGE_SIZE, io_size, commit_flags);
3909 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, upl, start_pg * PAGE_SIZE, io_size, error, 0);
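In cluster_read_copy (lines 3484-3909), an I/O is sized from a run of non-resident pages, io_size = (last_pg - start_pg) * PAGE_SIZE (line 3766), then clipped so the read never extends past EOF (lines 3768-3769). That clip, demonstrated:

	#include <stdio.h>

	#define PAGE_SIZE 4096

	int main(void)
	{
		long long upl_f_offset = 81920;   /* file offset of the UPL */
		int start_pg = 1, last_pg = 4;    /* pages 1..3 need I/O */
		long long filesize = 94000;

		long long upl_offset = (long long)start_pg * PAGE_SIZE;
		unsigned int io_size = (last_pg - start_pg) * PAGE_SIZE;   /* line 3766 */

		/* don't read past EOF (lines 3768-3769) */
		if (upl_f_offset + upl_offset + io_size > filesize)
			io_size = (unsigned int)(filesize - (upl_f_offset + upl_offset));

		printf("issue %u bytes at file offset %lld\n",
		    io_size, upl_f_offset + upl_offset);
		return 0;
	}
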
4018 u_int32_t io_size;
4155 io_start = io_size = io_req_size;
4165 * in io_size
4168 retval = cluster_copy_ubc_data_internal(vp, uio, (int *)&io_size, 0, 0);
4174 xsize = io_start - io_size;
4223 * however, if io_size isn't a multiple of devblocksize we
4233 if (io_size & (devblocksize - 1)) {
4246 io_size = ((io_size + devblocksize) & ~(devblocksize - 1));
4247 io_min = io_size;
4254 io_size &= ~PAGE_MASK;
4259 if (retval || io_size < io_min) {
4276 if ((xsize = io_size) > max_rd_size)
4279 io_size = 0;
4281 ubc_range_op(vp, uio->uio_offset, uio->uio_offset + xsize, UPL_ROP_ABSENT, (int *)&io_size);
4283 if (io_size == 0) {
4309 if (io_size > max_rd_size)
4310 io_size = max_rd_size;
4315 upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;
4318 (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);
4320 if (upl_offset == 0 && ((io_size & PAGE_MASK) == 0))
4341 (int)upl_offset, upl_size, io_size, kret, 0);
4365 (int)upl_offset, upl_size, io_size, kret, 0);
4374 io_size = upl_size;
4376 io_size = 0;
4378 if (io_size == 0) {
4383 (int)upl_offset, upl_size, io_size, kret, 0);
4386 vm_offset_t end_off = ((iov_base + io_size) & PAGE_MASK);
4418 upl, (int)upl_offset, (int)uio->uio_offset, io_size, 0);
4427 retval = cluster_io(vp, upl, upl_offset, uio->uio_offset, io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
4441 vector_upl_iosize += io_size;
4448 last_iov_base = iov_base + io_size;
4453 if ((flags & IO_ENCRYPTED) && (max_io_size < io_size)) {
4457 uio_update(uio, (user_size_t)io_size);
4460 * Under normal circumstances, the io_size should not be
4465 if ((flags & IO_ENCRYPTED) && (io_size > io_req_size)) {
4469 io_req_size -= io_size;
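The direct-read hits (lines 4018-4469) show the alignment dance around devblocksize: if io_size isn't a block multiple it is rounded up to one (line 4246) and remembered as io_min (line 4247), then trimmed back down to whole pages (line 4254); if the trimmed size falls below io_min, the pass bails out (line 4259). The rounding portion, as a standalone demo:

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SIZE 4096
	#define PAGE_MASK (PAGE_SIZE - 1)

	int main(void)
	{
		uint32_t devblocksize = 512;
		uint32_t io_size = 10000;     /* neither block- nor page-aligned */
		uint32_t io_min  = io_size;

		if (io_size & (devblocksize - 1)) {
			/* round up to a device-block multiple (line 4246) */
			io_size = (io_size + devblocksize) & ~(devblocksize - 1);
			io_min  = io_size;
		}
		/* then keep only whole pages (line 4254) */
		io_size &= ~PAGE_MASK;

		printf("io_size=%u io_min=%u short=%s\n",
		    io_size, io_min, io_size < io_min ? "yes" : "no");
		return 0;
	}
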
4556 u_int32_t io_size;
4588 io_size = *read_length;
4592 if (io_size > max_size)
4593 io_size = max_size;
4598 upl_needed_size = upl_offset + io_size;
4606 (int)upl_offset, (int)upl_size, (int)iov_base, io_size, 0);
4613 (int)upl_offset, upl_size, io_size, kret, 0);
4635 while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
4640 if (head_size > io_size)
4641 head_size = io_size;
4650 io_size -= head_size;
4666 tail_size = io_size & (devblocksize - 1);
4668 io_size -= tail_size;
4670 while (io_size && error == 0) {
4672 if (io_size > MAX_IO_CONTIG_SIZE)
4675 xsize = io_size;
4707 io_size -= xsize;
4835 int io_size;
4864 * this exceeds the maximum io_size for the device,
4873 io_size = resid;
4875 io_size = max_size;
4877 upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
4892 io_size = skip_range - start_offset;
4894 f_offset += io_size;
4895 resid -= io_size;
4970 io_size = (last_pg - start_pg) * PAGE_SIZE;
4972 if ((off_t)(upl_f_offset + upl_offset + io_size) > filesize)
4973 io_size = filesize - (upl_f_offset + upl_offset);
4978 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
4987 io_size = upl_size - start_offset;
4989 if (io_size > resid)
4990 io_size = resid;
4991 f_offset += io_size;
4992 resid -= io_size;
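The advisory_read hits (lines 4835-4992) show the prefetch loop stepping over ranges that are already resident: when the front of the window is present, io_size = skip_range - start_offset is consumed without issuing I/O (lines 4892-4895). A sketch of that skip, with a hypothetical is_resident() in place of the real residency check:

	#include <stdio.h>

	#define PAGE_SIZE 4096

	/* hypothetical residency check: pretend even-numbered pages are cached */
	static int is_resident(long long f_offset)
	{
		return ((f_offset / PAGE_SIZE) & 1) == 0;
	}

	int main(void)
	{
		long long f_offset = 0;
		long long resid = 4 * PAGE_SIZE;

		while (resid > 0) {
			long long io_size = PAGE_SIZE;
			if (io_size > resid)
				io_size = resid;
			printf("%s %lld bytes at %lld\n",
			    is_resident(f_offset) ? "skip" : "prefetch",
			    io_size, f_offset);
			f_offset += io_size;   /* lines 4894-4895 */
			resid -= io_size;
		}
		return 0;
	}
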
5341 int io_size;
5476 io_size = min(size, (last_pg - start_pg) * PAGE_SIZE);
5489 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
5495 size -= io_size;
5776 int io_size;
5782 io_size = *io_resid;
5785 (int)uio->uio_offset, io_size, mark_dirty, take_reference, 0);
5791 (int)uio->uio_offset, io_size, retval, 3, 0);
5819 if ( (io_size = *io_resid) ) {
5824 start_offset, io_size, mark_dirty, take_reference);
5826 io_size -= xsize;
5829 *io_resid = io_size;
5832 (int)uio->uio_offset, io_size, retval, 0x80000000 | segflg, 0);
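Finally, the cluster_copy_ubc_data hits (lines 5776-5832) walk the copy in page-bounded steps: each pass moves xsize bytes, bounded by the distance to the next page boundary, and the leftover is handed back through *io_resid (lines 5826/5829). The stepping pattern, sketched with memcpy standing in for the UBC-backed copy:

	#include <stdio.h>
	#include <string.h>

	#define PAGE_SIZE 4096

	int main(void)
	{
		static char src[3 * PAGE_SIZE], dst[3 * PAGE_SIZE];
		long long uio_offset = 1000;          /* copy starts mid-page */
		int io_resid = 2 * PAGE_SIZE;         /* bytes requested */

		int io_size = io_resid;
		while (io_size > 0) {
			int start_offset = (int)(uio_offset & (PAGE_SIZE - 1));
			int xsize = PAGE_SIZE - start_offset;   /* stop at page boundary */
			if (xsize > io_size)
				xsize = io_size;
			memcpy(dst + uio_offset, src + uio_offset, xsize);
			uio_offset += xsize;
			io_size -= xsize;                       /* line 5826 */
		}
		io_resid = io_size;                             /* line 5829 */
		printf("io_resid=%d\n", io_resid);
		return 0;
	}
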