int zfs_vdev_min_pending = 4;

/*
 * The deadlines are grouped into buckets based on zfs_vdev_time_shift:
 * deadline = pri + (gethrtime() >> time_shift)
 */
int zfs_vdev_time_shift = 29; /* each bucket is 0.537 seconds */

/* exponential I/O issue ramp-up rate */
int zfs_vdev_ramp_rate = 2;

/*
 * To reduce IOPs, we aggregate small adjacent I/Os into one large I/O.
 * For read I/Os, we also aggregate across small adjacency gaps; for writes
 * we include spans of optional I/Os to aid aggregation at the disk even when
 * they aren't able to help us aggregate at this level.
 */
int zfs_vdev_aggregation_limit = SPA_MAXBLOCKSIZE;
int zfs_vdev_read_gap_limit = 32 << 10;
int zfs_vdev_write_gap_limit = 4 << 10;

SYSCTL_DECL(_vfs_zfs_vdev);
TUNABLE_INT("vfs.zfs.vdev.max_pending", &zfs_vdev_max_pending);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, max_pending, CTLFLAG_RW,
    &zfs_vdev_max_pending, 0, "Maximum I/O requests pending on each device");
TUNABLE_INT("vfs.zfs.vdev.min_pending", &zfs_vdev_min_pending);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, min_pending, CTLFLAG_RW,
    &zfs_vdev_min_pending, 0,
    "Initial number of I/O requests pending to each device");
TUNABLE_INT("vfs.zfs.vdev.time_shift", &zfs_vdev_time_shift);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, time_shift, CTLFLAG_RW,
    &zfs_vdev_time_shift, 0, "Used for calculating I/O request deadline");
TUNABLE_INT("vfs.zfs.vdev.ramp_rate", &zfs_vdev_ramp_rate);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, ramp_rate, CTLFLAG_RW,
    &zfs_vdev_ramp_rate, 0, "Exponential I/O issue ramp-up rate");
TUNABLE_INT("vfs.zfs.vdev.aggregation_limit", &zfs_vdev_aggregation_limit);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, aggregation_limit, CTLFLAG_RW,
    &zfs_vdev_aggregation_limit, 0,
    "I/O requests are aggregated up to this size");
TUNABLE_INT("vfs.zfs.vdev.read_gap_limit", &zfs_vdev_read_gap_limit);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, read_gap_limit, CTLFLAG_RW,
    &zfs_vdev_read_gap_limit, 0,
    "Acceptable gap between two reads being aggregated");
TUNABLE_INT("vfs.zfs.vdev.write_gap_limit", &zfs_vdev_write_gap_limit);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, write_gap_limit, CTLFLAG_RW,
    &zfs_vdev_write_gap_limit, 0,
    "Acceptable gap between two writes being aggregated");
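/*
 * Worked example of the deadline bucketing defined above (illustrative;
 * the scenario is not from the original source): with zfs_vdev_time_shift
 * set to 29, each bucket spans 2^29 ns ~= 0.537 s.  Two I/Os issued within
 * the same bucket at the same priority get equal deadlines, so
 * vdev_queue_deadline_compare() below falls through to ordering them by
 * offset -- in effect an elevator pass within each bucket.  Each unit of
 * io_priority pushes an I/O one bucket (~0.537 s) later, so lower-valued
 * (more urgent) I/Os jump ahead without indefinitely starving the rest.
 */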
/*
 * Virtual device vector for disk I/O scheduling.
 */
int
vdev_queue_deadline_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_deadline < z2->io_deadline)
		return (-1);
	if (z1->io_deadline > z2->io_deadline)
		return (1);

	if (z1->io_offset < z2->io_offset)
		return (-1);
	if (z1->io_offset > z2->io_offset)
		return (1);

	if (z1 < z2)
		return (-1);
	if (z1 > z2)
		return (1);

	return (0);
}

int
vdev_queue_offset_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_offset < z2->io_offset)
		return (-1);
	if (z1->io_offset > z2->io_offset)
		return (1);

	if (z1 < z2)
		return (-1);
	if (z1 > z2)
		return (1);

	return (0);
}

void
vdev_queue_init(vdev_t *vd)
{
	vdev_queue_t *vq = &vd->vdev_queue;

	mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&vq->vq_deadline_tree, vdev_queue_deadline_compare,
	    sizeof (zio_t), offsetof(struct zio, io_deadline_node));

	avl_create(&vq->vq_read_tree, vdev_queue_offset_compare,
	    sizeof (zio_t), offsetof(struct zio, io_offset_node));

	avl_create(&vq->vq_write_tree, vdev_queue_offset_compare,
	    sizeof (zio_t), offsetof(struct zio, io_offset_node));

	avl_create(&vq->vq_pending_tree, vdev_queue_offset_compare,
	    sizeof (zio_t), offsetof(struct zio, io_offset_node));
}

void
vdev_queue_fini(vdev_t *vd)
{
	vdev_queue_t *vq = &vd->vdev_queue;

	avl_destroy(&vq->vq_deadline_tree);
	avl_destroy(&vq->vq_read_tree);
	avl_destroy(&vq->vq_write_tree);
	avl_destroy(&vq->vq_pending_tree);

	mutex_destroy(&vq->vq_lock);
}

static void
vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
{
	avl_add(&vq->vq_deadline_tree, zio);
	avl_add(zio->io_vdev_tree, zio);
}

static void
vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
{
	avl_remove(&vq->vq_deadline_tree, zio);
	avl_remove(zio->io_vdev_tree, zio);
}

static void
vdev_queue_agg_io_done(zio_t *aio)
{
	zio_t *pio;

	while ((pio = zio_walk_parents(aio)) != NULL)
		if (aio->io_type == ZIO_TYPE_READ)
			bcopy((char *)aio->io_data + (pio->io_offset -
			    aio->io_offset), pio->io_data, pio->io_size);

	zio_buf_free(aio->io_data, aio->io_size);
}

/*
 * Compute the range spanned by two i/os, which is the endpoint of the last
 * (lio->io_offset + lio->io_size) minus start of the first (fio->io_offset).
 * Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio);
 * thus fio and lio are adjacent if and only if IO_SPAN(lio, fio) == 0.
 */
#define	IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
#define	IO_GAP(fio, lio) (-IO_SPAN(lio, fio))
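/*
 * Illustrative example of the macros above (values invented for the
 * example): let fio sit at offset 0x10000 with size 0x2000, and lio at
 * offset 0x13000 with size 0x1000.  Then
 *	IO_SPAN(fio, lio) = 0x13000 + 0x1000 - 0x10000 = 0x4000
 *	IO_GAP(fio, lio)  = 0x13000 - (0x10000 + 0x2000) = 0x1000
 * i.e. the aggregate would cover 16KB and the hole between the two I/Os
 * is 4KB.  Because io_offset and io_size are unsigned, an overlapping
 * pair produces a "gap" that wraps to a huge value and therefore fails
 * the IO_GAP() <= maxgap checks below, conservatively blocking
 * aggregation.
 */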
static zio_t *
vdev_queue_io_to_issue(vdev_queue_t *vq, uint64_t pending_limit)
{
	zio_t *fio, *lio, *aio, *dio, *nio, *mio;
	avl_tree_t *t;
	int flags;
	uint64_t maxspan = zfs_vdev_aggregation_limit;
	uint64_t maxgap;
	int stretch;

again:
	ASSERT(MUTEX_HELD(&vq->vq_lock));

	if (avl_numnodes(&vq->vq_pending_tree) >= pending_limit ||
	    avl_numnodes(&vq->vq_deadline_tree) == 0)
		return (NULL);

	fio = lio = avl_first(&vq->vq_deadline_tree);

	t = fio->io_vdev_tree;
	flags = fio->io_flags & ZIO_FLAG_AGG_INHERIT;
	maxgap = (t == &vq->vq_read_tree) ? zfs_vdev_read_gap_limit : 0;

	if (!(flags & ZIO_FLAG_DONT_AGGREGATE)) {
		/*
		 * We can aggregate I/Os that are sufficiently adjacent and of
		 * the same flavor, as expressed by the AGG_INHERIT flags.
		 * The latter requirement is necessary so that certain
		 * attributes of the I/O, such as whether it's a normal I/O
		 * or a scrub/resilver, can be preserved in the aggregate.
		 * We can include optional I/Os, but don't allow them
		 * to begin a range as they add no benefit in that situation.
		 */

		/*
		 * We keep track of the last non-optional I/O.
		 */
		mio = (fio->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : fio;

		/*
		 * Walk backwards through sufficiently contiguous I/Os
		 * recording the last non-optional I/O.
		 */
		while ((dio = AVL_PREV(t, fio)) != NULL &&
		    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
		    IO_SPAN(dio, lio) <= maxspan &&
		    IO_GAP(dio, fio) <= maxgap) {
			fio = dio;
			if (mio == NULL && !(fio->io_flags & ZIO_FLAG_OPTIONAL))
				mio = fio;
		}

		/*
		 * Skip any initial optional I/Os.
		 */
		while ((fio->io_flags & ZIO_FLAG_OPTIONAL) && fio != lio) {
			fio = AVL_NEXT(t, fio);
			ASSERT(fio != NULL);
		}

		/*
		 * Walk forward through sufficiently contiguous I/Os.
		 */
		while ((dio = AVL_NEXT(t, lio)) != NULL &&
		    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
		    IO_SPAN(fio, dio) <= maxspan &&
		    IO_GAP(lio, dio) <= maxgap) {
			lio = dio;
			if (!(lio->io_flags & ZIO_FLAG_OPTIONAL))
				mio = lio;
		}

		/*
		 * Now that we've established the range of the I/O aggregation
		 * we must decide what to do with trailing optional I/Os.
		 * For reads, there's nothing to do.  While we are unable to
		 * aggregate further, it's possible that a trailing optional
		 * I/O would allow the underlying device to aggregate with
		 * subsequent I/Os.  We must therefore determine if the next
		 * non-optional I/O is close enough to make aggregation
		 * worthwhile.
		 */
		stretch = B_FALSE;
		if (t != &vq->vq_read_tree && mio != NULL) {
			nio = lio;
			while ((dio = AVL_NEXT(t, nio)) != NULL &&
			    IO_GAP(nio, dio) == 0 &&
			    IO_GAP(mio, dio) <= zfs_vdev_write_gap_limit) {
				nio = dio;
				if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
					stretch = B_TRUE;
					break;
				}
			}
		}
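		/*
		 * Sketch of the decision below (illustrative scenario, not
		 * from the original source): suppose the range ends at an
		 * optional write lio, followed in the tree by back-to-back
		 * optional writes and then a mandatory write within
		 * zfs_vdev_write_gap_limit of mio.  The walk above found
		 * that mandatory write, so stretch is set: the trailing
		 * optional I/Os stay in the aggregate (padding it out
		 * toward the mandatory write so the device can merge them)
		 * and the first I/O past lio is made non-optional so it
		 * cannot later be discarded.  Had the walk hit a gap or the
		 * limit first, stretch stays false and the trailing optional
		 * I/Os are trimmed back to mio, the last mandatory I/O in
		 * the range.
		 */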
		if (stretch) {
			/* This may be a no-op. */
			VERIFY((dio = AVL_NEXT(t, lio)) != NULL);
			dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
		} else {
			while (lio != mio && lio != fio) {
				ASSERT(lio->io_flags & ZIO_FLAG_OPTIONAL);
				lio = AVL_PREV(t, lio);
				ASSERT(lio != NULL);
			}
		}
	}

	if (fio != lio) {
		uint64_t size = IO_SPAN(fio, lio);
		ASSERT(size <= zfs_vdev_aggregation_limit);

		aio = zio_vdev_delegated_io(fio->io_vd, fio->io_offset,
		    zio_buf_alloc(size), size, fio->io_type, ZIO_PRIORITY_AGG,
		    flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
		    vdev_queue_agg_io_done, NULL);
		aio->io_timestamp = fio->io_timestamp;

		nio = fio;
		do {
			dio = nio;
			nio = AVL_NEXT(t, dio);
			ASSERT(dio->io_type == aio->io_type);
			ASSERT(dio->io_vdev_tree == t);

			if (dio->io_flags & ZIO_FLAG_NODATA) {
				ASSERT(dio->io_type == ZIO_TYPE_WRITE);
				bzero((char *)aio->io_data + (dio->io_offset -
				    aio->io_offset), dio->io_size);
			} else if (dio->io_type == ZIO_TYPE_WRITE) {
				bcopy(dio->io_data, (char *)aio->io_data +
				    (dio->io_offset - aio->io_offset),
				    dio->io_size);
			}

			zio_add_child(dio, aio);
			vdev_queue_io_remove(vq, dio);
			zio_vdev_io_bypass(dio);
			zio_execute(dio);
		} while (dio != lio);

		avl_add(&vq->vq_pending_tree, aio);

		return (aio);
	}

	ASSERT(fio->io_vdev_tree == t);
	vdev_queue_io_remove(vq, fio);

	/*
	 * If the I/O is or was optional and therefore has no data, we need to
	 * simply discard it.  We need to drop the vdev queue's lock to avoid a
	 * deadlock that we could encounter since this I/O will complete
	 * immediately.
	 */
	if (fio->io_flags & ZIO_FLAG_NODATA) {
		mutex_exit(&vq->vq_lock);
		zio_vdev_io_bypass(fio);
		zio_execute(fio);
		mutex_enter(&vq->vq_lock);
		goto again;
	}

	avl_add(&vq->vq_pending_tree, fio);

	return (fio);
}

zio_t *
vdev_queue_io(zio_t *zio)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
	zio_t *nio;

	ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);

	if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
		return (zio);

	zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;

	if (zio->io_type == ZIO_TYPE_READ)
		zio->io_vdev_tree = &vq->vq_read_tree;
	else
		zio->io_vdev_tree = &vq->vq_write_tree;

	mutex_enter(&vq->vq_lock);

	zio->io_timestamp = gethrtime();
	zio->io_deadline = (zio->io_timestamp >> zfs_vdev_time_shift) +
	    zio->io_priority;

	vdev_queue_io_add(vq, zio);

	nio = vdev_queue_io_to_issue(vq, zfs_vdev_min_pending);

	mutex_exit(&vq->vq_lock);

	if (nio == NULL)
		return (NULL);

	if (nio->io_done == vdev_queue_agg_io_done) {
		zio_nowait(nio);
		return (NULL);
	}

	return (nio);
}
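/*
 * Note on the two issue paths (editorial summary of the code above and
 * below, not from the original source): vdev_queue_io() returns NULL when
 * the zio was merely queued or was absorbed into an aggregate; in the
 * aggregate case the aggregate has no parent pipeline of its own, so it
 * is kicked off directly with zio_nowait().  A non-aggregate zio returned
 * here is issued by the caller's own pipeline instead.
 * vdev_queue_io_done() below must additionally call zio_vdev_io_reissue()
 * on non-aggregate I/Os so that their pipelines repeat the vdev I/O start
 * stage when re-executed.
 */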
void
vdev_queue_io_done(zio_t *zio)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;

	if (zio_injection_enabled)
		delay(SEC_TO_TICK(zio_handle_io_delay(zio)));

	mutex_enter(&vq->vq_lock);

	avl_remove(&vq->vq_pending_tree, zio);

	vq->vq_io_complete_ts = gethrtime();

	for (int i = 0; i < zfs_vdev_ramp_rate; i++) {
		zio_t *nio = vdev_queue_io_to_issue(vq, zfs_vdev_max_pending);
		if (nio == NULL)
			break;
		mutex_exit(&vq->vq_lock);
		if (nio->io_done == vdev_queue_agg_io_done) {
			zio_nowait(nio);
		} else {
			zio_vdev_io_reissue(nio);
			zio_execute(nio);
		}
		mutex_enter(&vq->vq_lock);
	}

	mutex_exit(&vq->vq_lock);
}
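/*
 * Example tuning (illustrative): the knobs above are exposed as
 * read/write sysctls and loader tunables on FreeBSD, e.g.
 *
 *	sysctl vfs.zfs.vdev.max_pending=10
 *	sysctl vfs.zfs.vdev.aggregation_limit=131072
 *
 * or, at boot time, vfs.zfs.vdev.max_pending="10" in /boot/loader.conf.
 */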