Lines Matching defs:io_size in /macosx-10.5.8/xnu-1228.15.4/bsd/vfs/

754 	u_int	io_size;
913 io_size = max_iosize;
915 io_size = size;
917 io_size_wanted = io_size;
919 if ((error = VNOP_BLOCKMAP(vp, f_offset, io_size, &blkno, (size_t *)&io_size, NULL, bmap_flags, NULL)))
922 if (io_size > io_size_wanted)
923 io_size = io_size_wanted;
929 (int)f_offset, (int)(blkno>>32), (int)blkno, io_size, 0);
931 if (io_size == 0) {
936 * file would be returned as a blkno of -1 with a non-zero io_size
937 * a real extent is returned with a blkno != -1 and a non-zero io_size
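
A minimal sketch of how these two cases are told apart, modeled on the
VNOP_BLOCKMAP call at line 919 above (the separate 'run' variable is an
illustrative assumption; the source passes io_size itself as the in/out
length):

    daddr64_t blkno;
    size_t    run = io_size;    /* in: bytes wanted, out: bytes actually mapped */

    if (VNOP_BLOCKMAP(vp, f_offset, io_size, &blkno, &run, NULL,
                      bmap_flags, NULL) == 0 && run != 0) {
            if (blkno == -1) {
                    /* a hole: 'run' bytes at f_offset have no backing store */
            } else {
                    /* a real extent: 'run' bytes starting at device block blkno */
            }
    }
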
1054 io_size = e_offset - f_offset;
1056 f_offset += io_size;
1057 upl_offset += io_size;
1059 if (size >= io_size)
1060 size -= io_size;
1069 non_rounded_size -= io_size;
1084 * we have now figured out how much I/O we can do - this is in 'io_size'
1086 * pg_count is the number of full and partial pages that 'io_size' encompasses
1096 pg_count = (io_size + pg_offset + (PAGE_SIZE - 1)) / PAGE_SIZE;
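
In other words, line 1096 is a ceiling division: pg_count counts every page the
transfer touches, including a partial first page (the I/O starts pg_offset
bytes into it) and a partial last page. A worked example, assuming
PAGE_SIZE = 4096, pg_offset = 512, io_size = 8192:
(8192 + 512 + 4095) / 4096 = 12799 / 4096 = 3 pages, i.e. a partial leading
page, one full page, and a partial trailing page.
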
1108 if (io_size >= (u_int)non_rounded_size) {
1113 * (indicated by the io_size finishing off the I/O request for this UPL)
1120 bytes_to_zero = (((upl_offset + io_size) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - upl_offset;
1124 bytes_to_zero = io_size;
1154 if ((int)io_size >= non_rounded_size)
1170 if ((int)io_size >= non_rounded_size)
1181 upl_offset += io_size;
1182 f_offset += io_size;
1183 size -= io_size;
1191 non_rounded_size -= io_size;
1214 if (((pg_count - max_vectors) * PAGE_SIZE) > io_size) {
1215 io_size = PAGE_SIZE - pg_offset;
1218 io_size -= (pg_count - max_vectors) * PAGE_SIZE;
1232 ((upl_offset + io_size) & PAGE_MASK)) {
1235 aligned_ofs = (upl_offset + io_size) & ~PAGE_MASK;
1237 * If the io_size does not actually finish off even a
1247 io_size = aligned_ofs - upl_offset;
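
Lines 1120, 1235 and 1247 use the two standard power-of-two alignment idioms
(PAGE_MASK == PAGE_SIZE - 1): adding PAGE_SIZE - 1 before masking rounds up to
the next page boundary, while masking alone rounds down. A minimal sketch,
assuming PAGE_SIZE = 4096:

    /* round x up / down to a page boundary */
    up   = (x + PAGE_SIZE - 1) & ~PAGE_MASK;    /* x = 5000  ->  up   = 8192 */
    down = x & ~PAGE_MASK;                      /* x = 5000  ->  down = 4096 */

So line 1247 shrinks io_size until the transfer ends exactly on aligned_ofs,
the rounded-down page boundary computed at line 1235.
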
1287 cbp->b_bcount = io_size;
1299 iostate->io_issued += io_size;
1303 (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0);
1307 (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0);
1326 upl_offset += io_size;
1327 f_offset += io_size;
1328 size -= io_size;
1335 non_rounded_size -= io_size;
1392 io_size = 0;
1411 io_size += cbp->b_bcount;
1430 iostate->io_issued -= io_size;
1591 int io_size;
1650 io_size = size;
1652 io_size = max_size;
1654 rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
1661 return (cluster_io(vp, upl, upl_offset, f_offset, io_size,
1678 u_int io_size;
1713 io_size = size;
1715 io_size = max_size;
1717 rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
1723 retval = cluster_io(vp, upl, upl_offset, f_offset, io_size,
1902 u_int32_t io_size;
1986 io_size = io_req_size & ~PAGE_MASK;
1989 if (io_size > max_upl_size)
1990 io_size = max_upl_size;
1993 upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;
1996 (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);
2060 io_size = upl_size;
2062 io_size = 0;
2065 (int)upl_offset, upl_size, (int)iov_base, io_size, 0);
2067 if (io_size == 0) {
2081 * io_size is a multiple of PAGE_SIZE
2083 ubc_range_op(vp, uio->uio_offset, uio->uio_offset + io_size, UPL_ROP_DUMP, NULL);
2121 (int)upl_offset, (int)uio->uio_offset, io_size, io_flag, 0);
2124 io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
2130 uio_update(uio, (user_size_t)io_size);
2132 io_req_size -= io_size;
2203 u_int32_t io_size;
2235 io_size = *write_length;
2240 upl_needed_size = upl_offset + io_size;
2274 while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
2279 if (head_size > io_size)
2280 head_size = io_size;
2289 io_size -= head_size;
2305 tail_size = io_size & (devblocksize - 1);
2306 io_size -= tail_size;
2308 while (io_size && error == 0) {
2310 if (io_size > MAX_IO_CONTIG_SIZE)
2313 xsize = io_size;
2362 io_size -= xsize;
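
Lines 2274-2306 (and the matching read-path lines 4055-4088 below) trim a
direct transfer to device-block alignment: a misaligned head is peeled off
first, then the tail remainder is masked away so the loop at line 2308 moves
only whole device blocks, in chunks of at most MAX_IO_CONTIG_SIZE. A worked
example, assuming devblocksize = 512 (a power of two):

    tail_size = io_size & (devblocksize - 1);   /* io_size = 1300: 1300 & 511 = 276 */
    io_size  -= tail_size;                      /* 1024, i.e. two full 512-byte blocks */

The 276-byte tail is then transferred separately, after the aligned middle.
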
2424 int io_size;
2560 * this exceeds the maximum io_size for the device,
2570 io_size = upl_size - start_offset;
2572 if ((long long)io_size > total_size)
2573 io_size = total_size;
2575 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, io_size, total_size, 0, 0);
2627 if ((start_offset == 0 || upl_size > PAGE_SIZE) && ((start_offset + io_size) & PAGE_MASK)) {
2635 if ((upl_f_offset + start_offset + io_size) < oldEOF &&
2664 xfer_resid = io_size;
2747 io_size += start_offset;
2749 if ((upl_f_offset + io_size) >= newEOF && (u_int)io_size < upl_size) {
2756 cluster_zero(upl, io_size, upl_size - io_size, NULL);
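
Read together, lines 2747-2756 appear to guard the partial last page of a
write: once start_offset has been folded into io_size, if the request reaches
newEOF without filling the whole UPL, the leftover bytes are zeroed so stale
page contents never reach disk. For example, with upl_size = 8192 and
io_size = 5000, cluster_zero(upl, 5000, 3192, NULL) clears bytes 5000-8191.
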
3051 u_int32_t io_size;
3083 io_size = MAX_IO_REQUEST_SIZE;
3085 io_size = (u_int32_t)cur_resid;
3087 retval = cluster_read_copy(vp, uio, io_size, filesize, flags, callback, callback_arg);
3144 u_int32_t io_size;
3222 * this exceeds the maximum io_size for the device,
3231 io_size = io_req_size;
3233 io_size = max_size;
3237 while (io_size) {
3273 if (last_ioread_offset && io_size > (max_io_size / 4))
3276 io_resid = io_size;
3284 io_size -= xsize;
3295 if ((io_size == 0 || last_ioread_offset == last_request_offset) && rd_ahead_enabled) {
3305 if (io_size == 0) {
3317 if (io_size > max_rd_size)
3318 io_size = max_rd_size;
3320 upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
3380 io_size = (last_pg - start_pg) * PAGE_SIZE;
3382 if ((upl_f_offset + upl_offset + io_size) > filesize)
3383 io_size = filesize - (upl_f_offset + upl_offset);
3390 io_size, CL_READ | CL_ASYNC | bflag, (buf_t)NULL, &iostate, callback, callback_arg);
3502 io_size = (last_pg - start_pg) * PAGE_SIZE;
3504 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START, (int)upl, start_pg * PAGE_SIZE, io_size, error, 0);
3507 ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, io_size,
3510 ubc_upl_commit_range(upl, start_pg * PAGE_SIZE, io_size,
3513 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, (int)upl, start_pg * PAGE_SIZE, io_size, error, 0);
3607 u_int32_t io_size;
3697 io_start = io_size = io_req_size;
3704 * in io_size
3706 retval = cluster_copy_ubc_data_internal(vp, uio, (int *)&io_size, 0, 0);
3712 xsize = io_start - io_size;
3735 * however, if io_size isn't a multiple of devblocksize we
3745 if (io_size & (devblocksize - 1)) {
3750 io_size &= ~PAGE_MASK;
3753 if (retval || io_size < io_min) {
3763 if ((xsize = io_size) > max_rd_size)
3766 io_size = 0;
3768 ubc_range_op(vp, uio->uio_offset, uio->uio_offset + xsize, UPL_ROP_ABSENT, (int *)&io_size);
3770 if (io_size == 0) {
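
A hedged reading of lines 3763-3770: the candidate size is capped at
max_rd_size, io_size is cleared, and ubc_range_op() with UPL_ROP_ABSENT then
reports back, through the final out parameter, how much of the range is not
already resident in the unified buffer cache:

    xsize = io_size;        /* capped at max_rd_size, per line 3763 */
    io_size = 0;
    ubc_range_op(vp, uio->uio_offset, uio->uio_offset + xsize,
                 UPL_ROP_ABSENT, (int *)&io_size);
    if (io_size == 0) {
            /* everything is cached already; take the copy path instead */
    }
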
3781 upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;
3784 (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);
3786 if (upl_offset == 0 && ((io_size & PAGE_MASK) == 0)) {
3809 (int)upl_offset, upl_size, io_size, kret, 0);
3833 (int)upl_offset, upl_size, io_size, kret, 0);
3842 io_size = upl_size;
3844 io_size = 0;
3846 if (io_size == 0) {
3851 (int)upl_offset, upl_size, io_size, kret, 0);
3886 (int)upl, (int)upl_offset, (int)uio->uio_offset, io_size, 0);
3893 retval = cluster_io(vp, upl, upl_offset, uio->uio_offset, io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg);
3898 uio_update(uio, (user_size_t)io_size);
3900 io_req_size -= io_size;
3981 u_int32_t io_size;
4008 io_size = *read_length;
4012 if (io_size > max_size)
4013 io_size = max_size;
4018 upl_needed_size = upl_offset + io_size;
4026 (int)upl_offset, (int)upl_size, (int)iov_base, io_size, 0);
4033 (int)upl_offset, upl_size, io_size, kret, 0);
4055 while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) {
4060 if (head_size > io_size)
4061 head_size = io_size;
4070 io_size -= head_size;
4086 tail_size = io_size & (devblocksize - 1);
4088 io_size -= tail_size;
4090 while (io_size && error == 0) {
4092 if (io_size > MAX_IO_CONTIG_SIZE)
4095 xsize = io_size;
4140 io_size -= xsize;
4278 int io_size;
4302 * this exceeds the maximum io_size for the device,
4311 io_size = resid;
4313 io_size = max_size;
4315 upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
4330 io_size = skip_range - start_offset;
4332 f_offset += io_size;
4333 resid -= io_size;
4408 io_size = (last_pg - start_pg) * PAGE_SIZE;
4410 if ((upl_f_offset + upl_offset + io_size) > filesize)
4411 io_size = filesize - (upl_f_offset + upl_offset);
4416 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
4425 io_size = upl_size - start_offset;
4427 if (io_size > resid)
4428 io_size = resid;
4429 f_offset += io_size;
4430 resid -= io_size;
4712 int io_size;
4844 io_size = min(size, (last_pg - start_pg) * PAGE_SIZE);
4851 retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size,
4857 size -= io_size;
5149 int io_size;
5155 io_size = *io_resid;
5158 (int)uio->uio_offset, 0, io_size, 0, 0);
5164 (int)uio->uio_offset, io_size, retval, 3, 0);
5200 if ( (io_size = *io_resid) ) {
5205 start_offset, io_size, mark_dirty, take_reference);
5207 io_size -= xsize;
5210 *io_resid = io_size;
5213 (int)uio->uio_offset, io_size, retval, 0x80000000 | segflg, 0);