#ifdef __FreeBSD__
#ifdef _KERNEL
SYSCTL_DECL(_vfs_zfs_vdev);

static int sysctl_zfs_async_write_active_min_dirty_percent(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vfs_zfs_vdev, OID_AUTO, async_write_active_min_dirty_percent,
    CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RWTUN, 0, sizeof(int),
    sysctl_zfs_async_write_active_min_dirty_percent, "I",
    "Percentage of async write dirty data below which "
    "async_write_min_active is used.");

static int sysctl_zfs_async_write_active_max_dirty_percent(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vfs_zfs_vdev, OID_AUTO, async_write_active_max_dirty_percent,
    CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RWTUN, 0, sizeof(int),
    sysctl_zfs_async_write_active_max_dirty_percent, "I",
    "Percentage of async write dirty data above which "
    "async_write_max_active is used.");

SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, max_active, CTLFLAG_RWTUN,
    &zfs_vdev_max_active, 0,
    "The maximum number of I/Os of all types active for each device.");

#define	ZFS_VDEV_QUEUE_KNOB_MIN(name)					\
SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, name ## _min_active, CTLFLAG_RWTUN,\
    &zfs_vdev_ ## name ## _min_active, 0,				\
    "Initial number of I/O requests of type " #name			\
    " active for each device");

#define	ZFS_VDEV_QUEUE_KNOB_MAX(name)					\
SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, name ## _max_active, CTLFLAG_RWTUN,\
    &zfs_vdev_ ## name ## _max_active, 0,				\
    "Maximum number of I/O requests of type " #name			\
    " active for each device");

ZFS_VDEV_QUEUE_KNOB_MIN(sync_read);
ZFS_VDEV_QUEUE_KNOB_MAX(sync_read);
ZFS_VDEV_QUEUE_KNOB_MIN(sync_write);
ZFS_VDEV_QUEUE_KNOB_MAX(sync_write);
ZFS_VDEV_QUEUE_KNOB_MIN(async_read);
ZFS_VDEV_QUEUE_KNOB_MAX(async_read);
ZFS_VDEV_QUEUE_KNOB_MIN(async_write);
ZFS_VDEV_QUEUE_KNOB_MAX(async_write);
ZFS_VDEV_QUEUE_KNOB_MIN(scrub);
ZFS_VDEV_QUEUE_KNOB_MAX(scrub);
ZFS_VDEV_QUEUE_KNOB_MIN(trim);
ZFS_VDEV_QUEUE_KNOB_MAX(trim);

#undef ZFS_VDEV_QUEUE_KNOB_MIN
#undef ZFS_VDEV_QUEUE_KNOB_MAX

SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, aggregation_limit, CTLFLAG_RWTUN,
    &zfs_vdev_aggregation_limit, 0,
    "I/O requests are aggregated up to this size");
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, read_gap_limit, CTLFLAG_RWTUN,
    &zfs_vdev_read_gap_limit, 0,
    "Acceptable gap between two reads being aggregated");
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, write_gap_limit, CTLFLAG_RWTUN,
    &zfs_vdev_write_gap_limit, 0,
    "Acceptable gap between two writes being aggregated");
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, queue_depth_pct, CTLFLAG_RWTUN,
    &zfs_vdev_queue_depth_pct, 0,
    "Queue depth percentage for each top-level vdev");

static int
sysctl_zfs_async_write_active_min_dirty_percent(SYSCTL_HANDLER_ARGS)
{
	int val, err;

	val = zfs_vdev_async_write_active_min_dirty_percent;
	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0 || req->newptr == NULL)
		return (err);

	if (val < 0 || val > 100 ||
	    val >= zfs_vdev_async_write_active_max_dirty_percent)
		return (EINVAL);

	zfs_vdev_async_write_active_min_dirty_percent = val;

	return (0);
}

static int
sysctl_zfs_async_write_active_max_dirty_percent(SYSCTL_HANDLER_ARGS)
{
	int val, err;

	val = zfs_vdev_async_write_active_max_dirty_percent;
	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0 || req->newptr == NULL)
		return (err);

	if (val < 0 || val > 100 ||
	    val <= zfs_vdev_async_write_active_min_dirty_percent)
		return (EINVAL);

	zfs_vdev_async_write_active_max_dirty_percent = val;

	return (0);
}
#endif
#endif
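
/*
 * Illustrative usage (userland, not part of this file): the knobs above
 * appear under the vfs.zfs.vdev sysctl node on FreeBSD and can be read or
 * tuned with sysctl(8), e.g.:
 *
 *	# sysctl vfs.zfs.vdev.max_active
 *	# sysctl vfs.zfs.vdev.async_write_active_min_dirty_percent=30
 *
 * The two PROC handlers above validate writes: a value outside [0, 100],
 * or one that would make min >= max, is rejected with EINVAL.
 */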

int
vdev_queue_offset_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_offset < z2->io_offset)
		return (-1);
	if (z1->io_offset > z2->io_offset)
		return (1);

	if (z1 < z2)
		return (-1);
	if (z1 > z2)
		return (1);

	return (0);
}

static inline avl_tree_t *
vdev_queue_class_tree(vdev_queue_t *vq, zio_priority_t p)
{
	return (&vq->vq_class[p].vqc_queued_tree);
}

static inline avl_tree_t *
vdev_queue_type_tree(vdev_queue_t *vq, zio_type_t t)
{
	if (t == ZIO_TYPE_READ)
		return (&vq->vq_read_offset_tree);
	else if (t == ZIO_TYPE_WRITE)
		return (&vq->vq_write_offset_tree);
	else
		return (NULL);
}

int
vdev_queue_timestamp_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_timestamp < z2->io_timestamp)
		return (-1);
	if (z1->io_timestamp > z2->io_timestamp)
		return (1);

	if (z1->io_offset < z2->io_offset)
		return (-1);
	if (z1->io_offset > z2->io_offset)
		return (1);

	if (z1 < z2)
		return (-1);
	if (z1 > z2)
		return (1);

	return (0);
}
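
/*
 * Worked example (illustrative): the comparators impose a strict total
 * order, which the AVL trees require even for "equal" keys.  Given two
 * queued zios A and B with A->io_offset == B->io_offset,
 * vdev_queue_offset_compare() falls back to comparing the zio pointers
 * themselves, so A and B still sort deterministically and both can coexist
 * in the same tree.  vdev_queue_timestamp_compare() applies the same
 * tie-break after ordering first by io_timestamp, then by io_offset.
 */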

void
vdev_queue_init(vdev_t *vd)
{
	vdev_queue_t *vq = &vd->vdev_queue;

	mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);
	vq->vq_vdev = vd;

	avl_create(&vq->vq_active_tree, vdev_queue_offset_compare,
	    sizeof (zio_t), offsetof(struct zio, io_queue_node));
	avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_READ),
	    vdev_queue_offset_compare, sizeof (zio_t),
	    offsetof(struct zio, io_offset_node));
	avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE),
	    vdev_queue_offset_compare, sizeof (zio_t),
	    offsetof(struct zio, io_offset_node));

	for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
		int (*compfn) (const void *, const void *);

		/*
		 * The synchronous i/o queues are dispatched in FIFO rather
		 * than LBA order. This provides more consistent latency for
		 * these i/os.
		 */
		if (p == ZIO_PRIORITY_SYNC_READ || p == ZIO_PRIORITY_SYNC_WRITE)
			compfn = vdev_queue_timestamp_compare;
		else
			compfn = vdev_queue_offset_compare;

		avl_create(vdev_queue_class_tree(vq, p), compfn,
		    sizeof (zio_t), offsetof(struct zio, io_queue_node));
	}

	vq->vq_lastoffset = 0;
}

void
vdev_queue_fini(vdev_t *vd)
{
	vdev_queue_t *vq = &vd->vdev_queue;

	for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++)
		avl_destroy(vdev_queue_class_tree(vq, p));
	avl_destroy(&vq->vq_active_tree);
	avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_READ));
	avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE));

	mutex_destroy(&vq->vq_lock);
}

static void
vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	avl_tree_t *qtt;

	ASSERT(MUTEX_HELD(&vq->vq_lock));
	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
	qtt = vdev_queue_type_tree(vq, zio->io_type);
	if (qtt)
		avl_add(qtt, zio);

#ifdef illumos
	mutex_enter(&spa->spa_iokstat_lock);
	spa->spa_queue_stats[zio->io_priority].spa_queued++;
	if (spa->spa_iokstat != NULL)
		kstat_waitq_enter(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
#endif
}

static void
vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	avl_tree_t *qtt;

	ASSERT(MUTEX_HELD(&vq->vq_lock));
	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
	qtt = vdev_queue_type_tree(vq, zio->io_type);
	if (qtt)
		avl_remove(qtt, zio);

#ifdef illumos
	mutex_enter(&spa->spa_iokstat_lock);
	ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_queued, >, 0);
	spa->spa_queue_stats[zio->io_priority].spa_queued--;
	if (spa->spa_iokstat != NULL)
		kstat_waitq_exit(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
#endif
}

static void
vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	ASSERT(MUTEX_HELD(&vq->vq_lock));
	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	vq->vq_class[zio->io_priority].vqc_active++;
	avl_add(&vq->vq_active_tree, zio);

#ifdef illumos
	mutex_enter(&spa->spa_iokstat_lock);
	spa->spa_queue_stats[zio->io_priority].spa_active++;
	if (spa->spa_iokstat != NULL)
		kstat_runq_enter(spa->spa_iokstat->ks_data);
	mutex_exit(&spa->spa_iokstat_lock);
#endif
}

static void
vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	ASSERT(MUTEX_HELD(&vq->vq_lock));
	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	vq->vq_class[zio->io_priority].vqc_active--;
	avl_remove(&vq->vq_active_tree, zio);

#ifdef illumos
	mutex_enter(&spa->spa_iokstat_lock);
	ASSERT3U(spa->spa_queue_stats[zio->io_priority].spa_active, >, 0);
	spa->spa_queue_stats[zio->io_priority].spa_active--;
	if (spa->spa_iokstat != NULL) {
		kstat_io_t *ksio = spa->spa_iokstat->ks_data;

		kstat_runq_exit(spa->spa_iokstat->ks_data);
		if (zio->io_type == ZIO_TYPE_READ) {
			ksio->reads++;
			ksio->nread += zio->io_size;
		} else if (zio->io_type == ZIO_TYPE_WRITE) {
			ksio->writes++;
			ksio->nwritten += zio->io_size;
		}
	}
	mutex_exit(&spa->spa_iokstat_lock);
#endif
}
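
/*
 * Illustrative sketch (hypothetical, compiled out): how the helpers above
 * pair up over a zio's life.  The trees are created by vdev_queue_init()
 * and must be empty before vdev_queue_fini(); every add/remove happens
 * under vq_lock.
 */
#if 0
static void
example_vdev_queue_lifecycle(vdev_t *vd, zio_t *zio)
{
	vdev_queue_t *vq = &vd->vdev_queue;

	vdev_queue_init(vd);			/* create trees and vq_lock */
	mutex_enter(&vq->vq_lock);
	vdev_queue_io_add(vq, zio);		/* zio waits in its class tree */
	vdev_queue_io_remove(vq, zio);		/* selected for issue */
	vdev_queue_pending_add(vq, zio);	/* now active on the device */
	vdev_queue_pending_remove(vq, zio);	/* completed */
	mutex_exit(&vq->vq_lock);
	vdev_queue_fini(vd);			/* trees must be empty here */
}
#endif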

static void
vdev_queue_agg_io_done(zio_t *aio)
{
	if (aio->io_type == ZIO_TYPE_READ) {
		zio_t *pio;
		zio_link_t *zl = NULL;
		while ((pio = zio_walk_parents(aio, &zl)) != NULL) {
			abd_copy_off(pio->io_abd, aio->io_abd,
			    0, pio->io_offset - aio->io_offset, pio->io_size);
		}
	}

	abd_free(aio->io_abd);
}

static int
vdev_queue_class_min_active(zio_priority_t p)
{
	switch (p) {
	case ZIO_PRIORITY_SYNC_READ:
		return (zfs_vdev_sync_read_min_active);
	case ZIO_PRIORITY_SYNC_WRITE:
		return (zfs_vdev_sync_write_min_active);
	case ZIO_PRIORITY_ASYNC_READ:
		return (zfs_vdev_async_read_min_active);
	case ZIO_PRIORITY_ASYNC_WRITE:
		return (zfs_vdev_async_write_min_active);
	case ZIO_PRIORITY_SCRUB:
		return (zfs_vdev_scrub_min_active);
	case ZIO_PRIORITY_TRIM:
		return (zfs_vdev_trim_min_active);
	case ZIO_PRIORITY_REMOVAL:
		return (zfs_vdev_removal_min_active);
	default:
		panic("invalid priority %u", p);
		return (0);
	}
}

static __noinline int
vdev_queue_max_async_writes(spa_t *spa)
{
	int writes;
	uint64_t dirty = spa->spa_dsl_pool->dp_dirty_total;
	uint64_t min_bytes = zfs_dirty_data_max *
	    zfs_vdev_async_write_active_min_dirty_percent / 100;
	uint64_t max_bytes = zfs_dirty_data_max *
	    zfs_vdev_async_write_active_max_dirty_percent / 100;

	/*
	 * Sync tasks correspond to interactive user actions. To reduce the
	 * execution time of those actions we push data out as fast as
	 * possible.
	 */
	if (spa_has_pending_synctask(spa)) {
		return (zfs_vdev_async_write_max_active);
	}

	if (dirty < min_bytes)
		return (zfs_vdev_async_write_min_active);
	if (dirty > max_bytes)
		return (zfs_vdev_async_write_max_active);

	/*
	 * linear interpolation:
	 * slope = (max_writes - min_writes) / (max_bytes - min_bytes)
	 * move right by min_bytes
	 * move up by min_writes
	 */
	writes = (dirty - min_bytes) *
	    (zfs_vdev_async_write_max_active -
	    zfs_vdev_async_write_min_active) /
	    (max_bytes - min_bytes) +
	    zfs_vdev_async_write_min_active;
	ASSERT3U(writes, >=, zfs_vdev_async_write_min_active);
	ASSERT3U(writes, <=, zfs_vdev_async_write_max_active);
	return (writes);
}
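
/*
 * Worked example (toy numbers, for illustration only): assume
 * zfs_dirty_data_max = 100, min/max dirty percent = 30/60, and min/max
 * active = 1/10, so min_bytes = 30 and max_bytes = 60.  With dirty = 45,
 * i.e. halfway between the thresholds:
 *
 *	writes = (45 - 30) * (10 - 1) / (60 - 30) + 1
 *	       = 15 * 9 / 30 + 1
 *	       = 4 + 1 = 5		(integer division truncates 4.5)
 *
 * so roughly half of the range above the minimum is granted, and the
 * result always stays within [min_active, max_active].
 */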

static int
vdev_queue_class_max_active(spa_t *spa, zio_priority_t p)
{
	switch (p) {
	case ZIO_PRIORITY_SYNC_READ:
		return (zfs_vdev_sync_read_max_active);
	case ZIO_PRIORITY_SYNC_WRITE:
		return (zfs_vdev_sync_write_max_active);
	case ZIO_PRIORITY_ASYNC_READ:
		return (zfs_vdev_async_read_max_active);
	case ZIO_PRIORITY_ASYNC_WRITE:
		return (vdev_queue_max_async_writes(spa));
	case ZIO_PRIORITY_SCRUB:
		return (zfs_vdev_scrub_max_active);
	case ZIO_PRIORITY_TRIM:
		return (zfs_vdev_trim_max_active);
	case ZIO_PRIORITY_REMOVAL:
		return (zfs_vdev_removal_max_active);
	default:
		panic("invalid priority %u", p);
		return (0);
	}
}

/*
 * Return the i/o class to issue from, or ZIO_PRIORITY_NUM_QUEUEABLE if
 * there is no eligible class.
 */
static zio_priority_t
vdev_queue_class_to_issue(vdev_queue_t *vq)
{
	spa_t *spa = vq->vq_vdev->vdev_spa;
	zio_priority_t p;

	ASSERT(MUTEX_HELD(&vq->vq_lock));

	if (avl_numnodes(&vq->vq_active_tree) >= zfs_vdev_max_active)
		return (ZIO_PRIORITY_NUM_QUEUEABLE);

	/* find a queue that has not reached its minimum # outstanding i/os */
	for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
		if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
		    vq->vq_class[p].vqc_active <
		    vdev_queue_class_min_active(p))
			return (p);
	}

	/*
	 * If we haven't found a queue, look for one that hasn't reached its
	 * maximum # outstanding i/os.
	 */
	for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
		if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
		    vq->vq_class[p].vqc_active <
		    vdev_queue_class_max_active(spa, p))
			return (p);
	}

	/* No eligible queued i/os */
	return (ZIO_PRIORITY_NUM_QUEUEABLE);
}

/*
 * Compute the range spanned by two i/os, which is the endpoint of the last
 * (lio->io_offset + lio->io_size) minus start of the first (fio->io_offset).
 * Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio);
 * thus fio and lio are adjacent if and only if IO_SPAN(lio, fio) == 0.
 */
#define	IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
#define	IO_GAP(fio, lio) (-IO_SPAN(lio, fio))
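
/*
 * Worked example (illustrative): let fio be a 4 KB i/o at offset 0 and lio
 * a 4 KB i/o at offset 8192.  Then
 *
 *	IO_SPAN(fio, lio) = 8192 + 4096 - 0 = 12288	(total range covered)
 *	IO_GAP(fio, lio)  = -(0 + 4096 - 8192) = 4096	(hole between them)
 *
 * If lio instead started at offset 4096, the gap would be 0 and the two
 * i/os would be adjacent.
 */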

static zio_t *
vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
{
	zio_t *first, *last, *aio, *dio, *mandatory, *nio;
	uint64_t maxgap = 0;
	uint64_t size;
	boolean_t stretch;
	avl_tree_t *t;
	enum zio_flag flags;

	ASSERT(MUTEX_HELD(&vq->vq_lock));

	if (zio->io_flags & ZIO_FLAG_DONT_AGGREGATE)
		return (NULL);

	first = last = zio;

	if (zio->io_type == ZIO_TYPE_READ)
		maxgap = zfs_vdev_read_gap_limit;

	/*
	 * We can aggregate I/Os that are sufficiently adjacent and of
	 * the same flavor, as expressed by the AGG_INHERIT flags.
	 * The latter requirement is necessary so that certain
	 * attributes of the I/O, such as whether it's a normal I/O
	 * or a scrub/resilver, can be preserved in the aggregate.
	 * We can include optional I/Os, but don't allow them
	 * to begin a range as they add no benefit in that situation.
	 */

	/*
	 * We keep track of the last non-optional I/O.
	 */
	mandatory = (first->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : first;

	/*
	 * Walk backwards through sufficiently contiguous I/Os
	 * recording the last non-optional I/O.
	 */
	flags = zio->io_flags & ZIO_FLAG_AGG_INHERIT;
	t = vdev_queue_type_tree(vq, zio->io_type);
	while (t != NULL && (dio = AVL_PREV(t, first)) != NULL &&
	    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
	    IO_SPAN(dio, last) <= zfs_vdev_aggregation_limit &&
	    IO_GAP(dio, first) <= maxgap &&
	    dio->io_type == zio->io_type) {
		first = dio;
		if (mandatory == NULL && !(first->io_flags & ZIO_FLAG_OPTIONAL))
			mandatory = first;
	}

	/*
	 * Skip any initial optional I/Os.
	 */
	while ((first->io_flags & ZIO_FLAG_OPTIONAL) && first != last) {
		first = AVL_NEXT(t, first);
		ASSERT(first != NULL);
	}

	/*
	 * Walk forward through sufficiently contiguous I/Os.
	 * The aggregation limit does not apply to optional i/os, so that
	 * we can issue contiguous writes even if they are larger than the
	 * aggregation limit.
	 */
	while ((dio = AVL_NEXT(t, last)) != NULL &&
	    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
	    (IO_SPAN(first, dio) <= zfs_vdev_aggregation_limit ||
	    (dio->io_flags & ZIO_FLAG_OPTIONAL)) &&
	    IO_GAP(last, dio) <= maxgap &&
	    dio->io_type == zio->io_type) {
		last = dio;
		if (!(last->io_flags & ZIO_FLAG_OPTIONAL))
			mandatory = last;
	}

	/*
	 * Now that we've established the range of the I/O aggregation
	 * we must decide what to do with trailing optional I/Os.
	 * For reads, there's nothing to do. While we are unable to
	 * aggregate further, it's possible that a trailing optional
	 * I/O would allow the underlying device to aggregate with
	 * subsequent I/Os. We must therefore determine if the next
	 * non-optional I/O is close enough to make aggregation
	 * worthwhile.
	 */
	stretch = B_FALSE;
	if (zio->io_type == ZIO_TYPE_WRITE && mandatory != NULL) {
		zio_t *nio = last;
		while ((dio = AVL_NEXT(t, nio)) != NULL &&
		    IO_GAP(nio, dio) == 0 &&
		    IO_GAP(mandatory, dio) <= zfs_vdev_write_gap_limit) {
			nio = dio;
			if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
				stretch = B_TRUE;
				break;
			}
		}
	}

	if (stretch) {
		/*
		 * We are going to include an optional i/o in our aggregated
		 * span, thus closing the write gap. Only mandatory i/os can
		 * start aggregated spans, so make sure that the next i/o
		 * after our span is mandatory.
		 */
		dio = AVL_NEXT(t, last);
		dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
	} else {
		/* do not include the optional i/o */
		while (last != mandatory && last != first) {
			ASSERT(last->io_flags & ZIO_FLAG_OPTIONAL);
			last = AVL_PREV(t, last);
			ASSERT(last != NULL);
		}
	}

	if (first == last)
		return (NULL);

	size = IO_SPAN(first, last);
	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);

	aio = zio_vdev_delegated_io(first->io_vd, first->io_offset,
	    abd_alloc_for_io(size, B_TRUE), size, first->io_type,
	    zio->io_priority, flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
	    vdev_queue_agg_io_done, NULL);
	aio->io_timestamp = first->io_timestamp;

	nio = first;
	do {
		dio = nio;
		nio = AVL_NEXT(t, dio);
		ASSERT3U(dio->io_type, ==, aio->io_type);

		if (dio->io_flags & ZIO_FLAG_NODATA) {
			ASSERT3U(dio->io_type, ==, ZIO_TYPE_WRITE);
			abd_zero_off(aio->io_abd,
			    dio->io_offset - aio->io_offset, dio->io_size);
		} else if (dio->io_type == ZIO_TYPE_WRITE) {
			abd_copy_off(aio->io_abd, dio->io_abd,
			    dio->io_offset - aio->io_offset, 0, dio->io_size);
		}

		zio_add_child(dio, aio);
		vdev_queue_io_remove(vq, dio);
		zio_vdev_io_bypass(dio);
		zio_execute(dio);
	} while (dio != last);

	return (aio);
}
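
/*
 * Worked example (illustrative): suppose three mandatory 8 KB writes are
 * queued at offsets 0, 8192 and 16384 with compatible AGG_INHERIT flags.
 * Starting from the middle zio, the backward walk extends "first" to
 * offset 0 and the forward walk extends "last" to offset 16384, so a
 * single 24 KB aggregate i/o is delegated to the vdev.  Each original zio
 * becomes a parent of the aggregate, is bypassed in the vdev stage, and
 * completes once the aggregate does (for reads, after
 * vdev_queue_agg_io_done() copies the data back out).
 */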

static zio_t *
vdev_queue_io_to_issue(vdev_queue_t *vq)
{
	zio_t *zio, *aio;
	zio_priority_t p;
	avl_index_t idx;
	avl_tree_t *tree;
	zio_t search;

again:
	ASSERT(MUTEX_HELD(&vq->vq_lock));

	p = vdev_queue_class_to_issue(vq);

	if (p == ZIO_PRIORITY_NUM_QUEUEABLE) {
		/* No eligible queued i/os */
		return (NULL);
	}

	/*
	 * For LBA-ordered queues (async / scrub), issue the i/o which follows
	 * the most recently issued i/o in LBA (offset) order.
	 *
	 * For FIFO queues (sync), issue the i/o with the lowest timestamp.
	 */
	tree = vdev_queue_class_tree(vq, p);
	search.io_timestamp = 0;
	search.io_offset = vq->vq_last_offset + 1;
	VERIFY3P(avl_find(tree, &search, &idx), ==, NULL);
	zio = avl_nearest(tree, idx, AVL_AFTER);
	if (zio == NULL)
		zio = avl_first(tree);
	ASSERT3U(zio->io_priority, ==, p);

	aio = vdev_queue_aggregate(vq, zio);
	if (aio != NULL)
		zio = aio;
	else
		vdev_queue_io_remove(vq, zio);

	/*
	 * If the I/O is or was optional and therefore has no data, we need to
	 * simply discard it. We need to drop the vdev queue's lock to avoid a
	 * deadlock that we could encounter since this I/O will complete
	 * immediately.
	 */
	if (zio->io_flags & ZIO_FLAG_NODATA) {
		mutex_exit(&vq->vq_lock);
		zio_vdev_io_bypass(zio);
		zio_execute(zio);
		mutex_enter(&vq->vq_lock);
		goto again;
	}

	vdev_queue_pending_add(vq, zio);
	vq->vq_last_offset = zio->io_offset;

	return (zio);
}
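
/*
 * Illustrative note: for the LBA-sorted classes the lookup above acts like
 * a one-way elevator.  With vq_last_offset == 100 and zios queued at
 * offsets 50, 120 and 200, the AVL_AFTER search finds the zio at offset
 * 120, then 200; only when nothing remains past the last offset does
 * avl_first() wrap the scan back around to offset 50.  For the FIFO
 * (timestamp-sorted) classes, searching from io_timestamp == 0 always
 * lands on the oldest queued zio.
 */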

zio_t *
vdev_queue_io(zio_t *zio)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
	zio_t *nio;

	if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
		return (zio);

	/*
	 * Children i/os inherit their parent's priority, which might
	 * not match the child's i/o type. Fix it up here.
	 */
	if (zio->io_type == ZIO_TYPE_READ) {
		if (zio->io_priority != ZIO_PRIORITY_SYNC_READ &&
		    zio->io_priority != ZIO_PRIORITY_ASYNC_READ &&
		    zio->io_priority != ZIO_PRIORITY_SCRUB &&
		    zio->io_priority != ZIO_PRIORITY_REMOVAL)
			zio->io_priority = ZIO_PRIORITY_ASYNC_READ;
	} else if (zio->io_type == ZIO_TYPE_WRITE) {
		if (zio->io_priority != ZIO_PRIORITY_SYNC_WRITE &&
		    zio->io_priority != ZIO_PRIORITY_ASYNC_WRITE &&
		    zio->io_priority != ZIO_PRIORITY_REMOVAL)
			zio->io_priority = ZIO_PRIORITY_ASYNC_WRITE;
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_FREE);
		zio->io_priority = ZIO_PRIORITY_TRIM;
	}

	zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;

	mutex_enter(&vq->vq_lock);
	zio->io_timestamp = gethrtime();
	vdev_queue_io_add(vq, zio);
	nio = vdev_queue_io_to_issue(vq);
	mutex_exit(&vq->vq_lock);

	if (nio == NULL)
		return (NULL);

	if (nio->io_done == vdev_queue_agg_io_done) {
		zio_nowait(nio);
		return (NULL);
	}

	return (nio);
}

void
vdev_queue_io_done(zio_t *zio)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
	zio_t *nio;

	mutex_enter(&vq->vq_lock);

	vdev_queue_pending_remove(vq, zio);

	vq->vq_io_complete_ts = gethrtime();

	while ((nio = vdev_queue_io_to_issue(vq)) != NULL) {
		mutex_exit(&vq->vq_lock);
		if (nio->io_done == vdev_queue_agg_io_done) {
			zio_nowait(nio);
		} else {
			zio_vdev_io_reissue(nio);
			zio_execute(nio);
		}
		mutex_enter(&vq->vq_lock);
	}

	mutex_exit(&vq->vq_lock);
}

void
vdev_queue_change_io_priority(zio_t *zio, zio_priority_t priority)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
	avl_tree_t *tree;

	ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
	ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);

	if (zio->io_type == ZIO_TYPE_READ) {
		if (priority != ZIO_PRIORITY_SYNC_READ &&
		    priority != ZIO_PRIORITY_ASYNC_READ &&
		    priority != ZIO_PRIORITY_SCRUB)
			priority = ZIO_PRIORITY_ASYNC_READ;
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);
		if (priority != ZIO_PRIORITY_SYNC_WRITE &&
		    priority != ZIO_PRIORITY_ASYNC_WRITE)
			priority = ZIO_PRIORITY_ASYNC_WRITE;
	}

	mutex_enter(&vq->vq_lock);

	/*
	 * If the zio is in none of the queues we can simply change
	 * the priority. If the zio is waiting to be submitted we must
	 * remove it from the queue and re-insert it with the new priority.
	 * Otherwise, the zio is currently active and we cannot change its
	 * priority.
	 */
	tree = vdev_queue_class_tree(vq, zio->io_priority);
	if (avl_find(tree, zio, NULL) == zio) {
		avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
		zio->io_priority = priority;
		avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
	} else if (avl_find(&vq->vq_active_tree, zio, NULL) != zio) {
		zio->io_priority = priority;
	}

	mutex_exit(&vq->vq_lock);
}

/*
 * These three functions are only used for load calculations, so we are not
 * concerned about readers seeing a stale value on 32-bit platforms; rather
 * than take vq_lock here, we keep them lock-free for performance.
 */
int
vdev_queue_length(vdev_t *vd)
{
	return (avl_numnodes(&vd->vdev_queue.vq_active_tree));
}

uint64_t
vdev_queue_lastoffset(vdev_t *vd)
{
	return (vd->vdev_queue.vq_lastoffset);
}

void
vdev_queue_register_lastoffset(vdev_t *vd, zio_t *zio)
{
	vd->vdev_queue.vq_lastoffset = zio->io_offset + zio->io_size;
}
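
/*
 * Illustrative consumer (hedged sketch; "penalty" is a hypothetical
 * weight): a caller such as mirror child-selection logic might weigh
 * children by their current load and locality along these lines:
 *
 *	load = vdev_queue_length(vd);
 *	if (zio->io_offset != vdev_queue_lastoffset(vd))
 *		load += penalty;	(penalize non-sequential access)
 *	...
 *	vdev_queue_register_lastoffset(vd, zio);
 *
 * A momentarily stale answer merely skews the heuristic, which is why
 * these accessors deliberately avoid taking vq_lock.
 */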