ctl_backend_block.c (267641) | ctl_backend_block.c (267877) |
---|---|
1/*- 2 * Copyright (c) 2003 Silicon Graphics International Corp. 3 * Copyright (c) 2009-2011 Spectra Logic Corporation 4 * Copyright (c) 2012 The FreeBSD Foundation 5 * All rights reserved. 6 * 7 * Portions of this software were developed by Edward Tomasz Napierala 8 * under sponsorship from the FreeBSD Foundation. --- 26 unchanged lines hidden (view full) --- 35 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $ 36 */ 37/* 38 * CAM Target Layer driver backend for block devices. 39 * 40 * Author: Ken Merry <ken@FreeBSD.org> 41 */ 42#include <sys/cdefs.h> | 1/*- 2 * Copyright (c) 2003 Silicon Graphics International Corp. 3 * Copyright (c) 2009-2011 Spectra Logic Corporation 4 * Copyright (c) 2012 The FreeBSD Foundation 5 * All rights reserved. 6 * 7 * Portions of this software were developed by Edward Tomasz Napierala 8 * under sponsorship from the FreeBSD Foundation. --- 26 unchanged lines hidden (view full) --- 35 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $ 36 */ 37/* 38 * CAM Target Layer driver backend for block devices. 39 * 40 * Author: Ken Merry <ken@FreeBSD.org> 41 */ 42#include <sys/cdefs.h> |
43__FBSDID("$FreeBSD: head/sys/cam/ctl/ctl_backend_block.c 267641 2014-06-19 12:43:41Z mav $"); | 43__FBSDID("$FreeBSD: head/sys/cam/ctl/ctl_backend_block.c 267877 2014-06-25 17:54:36Z mav $"); |
44 45#include <sys/param.h> 46#include <sys/systm.h> 47#include <sys/kernel.h> 48#include <sys/types.h> 49#include <sys/kthread.h> 50#include <sys/bio.h> 51#include <sys/fcntl.h> --- 103 unchanged lines hidden (view full) --- 155 char lunname[32]; 156 char *dev_path; 157 ctl_be_block_type dev_type; 158 struct vnode *vn; 159 union ctl_be_block_bedata backend; 160 cbb_dispatch_t dispatch; 161 cbb_dispatch_t lun_flush; 162 cbb_dispatch_t unmap; | 44 45#include <sys/param.h> 46#include <sys/systm.h> 47#include <sys/kernel.h> 48#include <sys/types.h> 49#include <sys/kthread.h> 50#include <sys/bio.h> 51#include <sys/fcntl.h> --- 103 unchanged lines hidden (view full) --- 155 char lunname[32]; 156 char *dev_path; 157 ctl_be_block_type dev_type; 158 struct vnode *vn; 159 union ctl_be_block_bedata backend; 160 cbb_dispatch_t dispatch; 161 cbb_dispatch_t lun_flush; 162 cbb_dispatch_t unmap; |
163 struct mtx lock; | |
164 uma_zone_t lun_zone; 165 uint64_t size_blocks; 166 uint64_t size_bytes; 167 uint32_t blocksize; 168 int blocksize_shift; 169 uint16_t pblockexp; 170 uint16_t pblockoff; 171 struct ctl_be_block_softc *softc; 172 struct devstat *disk_stats; 173 ctl_be_block_lun_flags flags; 174 STAILQ_ENTRY(ctl_be_block_lun) links; 175 struct ctl_be_lun ctl_be_lun; 176 struct taskqueue *io_taskqueue; 177 struct task io_task; 178 int num_threads; 179 STAILQ_HEAD(, ctl_io_hdr) input_queue; 180 STAILQ_HEAD(, ctl_io_hdr) config_write_queue; 181 STAILQ_HEAD(, ctl_io_hdr) datamove_queue; | 163 uma_zone_t lun_zone; 164 uint64_t size_blocks; 165 uint64_t size_bytes; 166 uint32_t blocksize; 167 int blocksize_shift; 168 uint16_t pblockexp; 169 uint16_t pblockoff; 170 struct ctl_be_block_softc *softc; 171 struct devstat *disk_stats; 172 ctl_be_block_lun_flags flags; 173 STAILQ_ENTRY(ctl_be_block_lun) links; 174 struct ctl_be_lun ctl_be_lun; 175 struct taskqueue *io_taskqueue; 176 struct task io_task; 177 int num_threads; 178 STAILQ_HEAD(, ctl_io_hdr) input_queue; 179 STAILQ_HEAD(, ctl_io_hdr) config_write_queue; 180 STAILQ_HEAD(, ctl_io_hdr) datamove_queue; |
181 struct mtx_padalign io_lock; 182 struct mtx_padalign queue_lock; |
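The per-LUN `struct mtx lock` removed above is replaced in r267877 by two cache-line-padded mutexes: `queue_lock`, which guards the three STAILQ request queues, and `io_lock`, which guards the bio completion counters and the devstat accounting. A minimal sketch of that split, assuming an abbreviated per-LUN structure (the lock names and init calls are taken from the diff; everything else is simplified):

```c
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>

struct queued_io {
	STAILQ_ENTRY(queued_io) links;
};

/* Abbreviated per-LUN state showing only the fields the lock split touches. */
struct lun_state {
	STAILQ_HEAD(, queued_io) input_queue;	/* guarded by queue_lock */
	struct mtx_padalign	 io_lock;	/* bio counters, devstat */
	struct mtx_padalign	 queue_lock;	/* request queues */
};

/* Initialization mirroring the ctl_be_block_create() hunk further down. */
static void
lun_state_init(struct lun_state *ls)
{
	STAILQ_INIT(&ls->input_queue);
	mtx_init(&ls->io_lock, "cblk io lock", NULL, MTX_DEF);
	mtx_init(&ls->queue_lock, "cblk queue lock", NULL, MTX_DEF);
}
```

Giving each lock its own name also keeps WITNESS lock-order reports for the two classes distinct.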
|
182}; 183 184/* 185 * Overall softc structure for the block backend module. 186 */ 187struct ctl_be_block_softc { 188 struct mtx lock; 189 int num_disks; --- 141 unchanged lines hidden (view full) --- 331 } 332 333 uma_zfree(beio_zone, beio); 334} 335 336static void 337ctl_complete_beio(struct ctl_be_block_io *beio) 338{ | 183}; 184 185/* 186 * Overall softc structure for the block backend module. 187 */ 188struct ctl_be_block_softc { 189 struct mtx lock; 190 int num_disks; --- 141 unchanged lines hidden (view full) --- 332 } 333 334 uma_zfree(beio_zone, beio); 335} 336 337static void 338ctl_complete_beio(struct ctl_be_block_io *beio) 339{ |
339 union ctl_io *io; 340 int io_len; | 340 union ctl_io *io = beio->io; |
341 | 341 |
342 io = beio->io; 343 344 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) 345 io_len = beio->io_len; 346 else 347 io_len = 0; 348 349 devstat_end_transaction(beio->lun->disk_stats, 350 /*bytes*/ io_len, 351 beio->ds_tag_type, 352 beio->ds_trans_type, 353 /*now*/ NULL, 354 /*then*/&beio->ds_t0); 355 | |
356 if (beio->beio_cont != NULL) { 357 beio->beio_cont(beio); 358 } else { 359 ctl_free_beio(beio); 360 ctl_data_submit_done(io); 361 } 362} 363 --- 80 unchanged lines hidden (view full) --- 444 /* 445 * At this point, we have a write and the DMA completed 446 * successfully. We now have to queue it to the task queue to 447 * execute the backend I/O. That is because we do blocking 448 * memory allocations, and in the file backing case, blocking I/O. 449 * This move done routine is generally called in the SIM's 450 * interrupt context, and therefore we cannot block. 451 */ | 342 if (beio->beio_cont != NULL) { 343 beio->beio_cont(beio); 344 } else { 345 ctl_free_beio(beio); 346 ctl_data_submit_done(io); 347 } 348} 349 --- 80 unchanged lines hidden (view full) --- 430 /* 431 * At this point, we have a write and the DMA completed 432 * successfully. We now have to queue it to the task queue to 433 * execute the backend I/O. That is because we do blocking 434 * memory allocations, and in the file backing case, blocking I/O. 435 * This move done routine is generally called in the SIM's 436 * interrupt context, and therefore we cannot block. 437 */ |
452 mtx_lock(&be_lun->lock); | 438 mtx_lock(&be_lun->queue_lock); |
453 /* 454 * XXX KDM make sure that links is okay to use at this point. 455 * Otherwise, we either need to add another field to ctl_io_hdr, 456 * or deal with resource allocation here. 457 */ 458 STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links); | 439 /* 440 * XXX KDM make sure that links is okay to use at this point. 441 * Otherwise, we either need to add another field to ctl_io_hdr, 442 * or deal with resource allocation here. 443 */ 444 STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links); |
459 mtx_unlock(&be_lun->lock); | 445 mtx_unlock(&be_lun->queue_lock); |
460 461 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 462 463 return (0); 464} 465 466static void 467ctl_be_block_biodone(struct bio *bio) --- 5 unchanged lines hidden (view full) --- 473 474 beio = bio->bio_caller1; 475 be_lun = beio->lun; 476 io = beio->io; 477 478 DPRINTF("entered\n"); 479 480 error = bio->bio_error; | 446 447 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 448 449 return (0); 450} 451 452static void 453ctl_be_block_biodone(struct bio *bio) --- 5 unchanged lines hidden (view full) --- 459 460 beio = bio->bio_caller1; 461 be_lun = beio->lun; 462 io = beio->io; 463 464 DPRINTF("entered\n"); 465 466 error = bio->bio_error; |
481 mtx_lock(&be_lun->lock); | 467 mtx_lock(&be_lun->io_lock); |
482 if (error != 0) 483 beio->num_errors++; 484 485 beio->num_bios_done++; 486 487 /* 488 * XXX KDM will this cause WITNESS to complain? Holding a lock 489 * during the free might cause it to complain. 490 */ 491 g_destroy_bio(bio); 492 493 /* 494 * If the send complete bit isn't set, or we aren't the last I/O to 495 * complete, then we're done. 496 */ 497 if ((beio->send_complete == 0) 498 || (beio->num_bios_done < beio->num_bios_sent)) { | 468 if (error != 0) 469 beio->num_errors++; 470 471 beio->num_bios_done++; 472 473 /* 474 * XXX KDM will this cause WITNESS to complain? Holding a lock 475 * during the free might cause it to complain. 476 */ 477 g_destroy_bio(bio); 478 479 /* 480 * If the send complete bit isn't set, or we aren't the last I/O to 481 * complete, then we're done. 482 */ 483 if ((beio->send_complete == 0) 484 || (beio->num_bios_done < beio->num_bios_sent)) { |
499 mtx_unlock(&be_lun->lock); | 485 mtx_unlock(&be_lun->io_lock); |
500 return; 501 } 502 503 /* 504 * At this point, we've verified that we are the last I/O to 505 * complete, so it's safe to drop the lock. 506 */ | 486 return; 487 } 488 489 /* 490 * At this point, we've verified that we are the last I/O to 491 * complete, so it's safe to drop the lock. 492 */ |
507 mtx_unlock(&be_lun->lock); | 493 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 494 beio->ds_tag_type, beio->ds_trans_type, 495 /*now*/ NULL, /*then*/&beio->ds_t0); 496 mtx_unlock(&be_lun->io_lock); |
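Note how devstat_end_transaction() moves out of ctl_complete_beio() (the deleted hunk earlier) and into the biodone path: the last bio to complete now closes out the devstat transaction while still holding io_lock. A rough sketch of that completion pattern, assuming a hypothetical, reduced stand-in for struct ctl_be_block_io:

```c
#include <sys/param.h>
#include <sys/time.h>
#include <sys/bio.h>
#include <sys/devicestat.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <geom/geom.h>

/* Hypothetical, reduced stand-in for struct ctl_be_block_io. */
struct beio_state {
	struct mtx_padalign	*io_lock;	/* the owning LUN's io_lock */
	struct devstat		*stats;
	struct bintime		 ds_t0;
	devstat_tag_type	 ds_tag_type;
	devstat_trans_type	 ds_trans_type;
	uint64_t		 io_len;
	int			 num_bios_sent;
	int			 num_bios_done;
	int			 send_complete;
};

/* bio_done callback: only the final completion ends the devstat transaction. */
static void
beio_biodone(struct bio *bio)
{
	struct beio_state *bs = bio->bio_caller1;

	mtx_lock(bs->io_lock);
	bs->num_bios_done++;
	g_destroy_bio(bio);
	if (bs->send_complete == 0 || bs->num_bios_done < bs->num_bios_sent) {
		mtx_unlock(bs->io_lock);
		return;
	}
	/* Last bio in: account the whole transfer before dropping the lock. */
	devstat_end_transaction(bs->stats, bs->io_len, bs->ds_tag_type,
	    bs->ds_trans_type, /*now*/ NULL, /*then*/ &bs->ds_t0);
	mtx_unlock(bs->io_lock);
	/* ...hand the finished I/O back to CTL here... */
}
```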
508 509 /* 510 * If there are any errors from the backing device, we fail the 511 * entire I/O with a medium error. 512 */ 513 if (beio->num_errors > 0) { 514 if (error == EOPNOTSUPP) { 515 ctl_set_invalid_opcode(&io->scsiio); --- 25 unchanged lines hidden (view full) --- 541 ctl_datamove(io); 542 } 543} 544 545static void 546ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, 547 struct ctl_be_block_io *beio) 548{ | 497 498 /* 499 * If there are any errors from the backing device, we fail the 500 * entire I/O with a medium error. 501 */ 502 if (beio->num_errors > 0) { 503 if (error == EOPNOTSUPP) { 504 ctl_set_invalid_opcode(&io->scsiio); --- 25 unchanged lines hidden (view full) --- 530 ctl_datamove(io); 531 } 532} 533 534static void 535ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, 536 struct ctl_be_block_io *beio) 537{ |
549 union ctl_io *io; | 538 union ctl_io *io = beio->io; |
550 struct mount *mountpoint; 551 int error, lock_flags; 552 553 DPRINTF("entered\n"); 554 | 539 struct mount *mountpoint; 540 int error, lock_flags; 541 542 DPRINTF("entered\n"); 543 |
555 io = beio->io; | 544 binuptime(&beio->ds_t0); 545 mtx_lock(&be_lun->io_lock); 546 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 547 mtx_unlock(&be_lun->io_lock); |
556 | 548 |
557 (void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT); | 549 (void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT); |
558 559 if (MNT_SHARED_WRITES(mountpoint) 560 || ((mountpoint == NULL) 561 && MNT_SHARED_WRITES(be_lun->vn->v_mount))) 562 lock_flags = LK_SHARED; 563 else 564 lock_flags = LK_EXCLUSIVE; 565 566 vn_lock(be_lun->vn, lock_flags | LK_RETRY); 567 | 550 551 if (MNT_SHARED_WRITES(mountpoint) 552 || ((mountpoint == NULL) 553 && MNT_SHARED_WRITES(be_lun->vn->v_mount))) 554 lock_flags = LK_SHARED; 555 else 556 lock_flags = LK_EXCLUSIVE; 557 558 vn_lock(be_lun->vn, lock_flags | LK_RETRY); 559 |
568 binuptime(&beio->ds_t0); 569 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 570 | |
571 error = VOP_FSYNC(be_lun->vn, MNT_WAIT, curthread); 572 VOP_UNLOCK(be_lun->vn, 0); 573 574 vn_finished_write(mountpoint); 575 | 560 error = VOP_FSYNC(be_lun->vn, MNT_WAIT, curthread); 561 VOP_UNLOCK(be_lun->vn, 0); 562 563 vn_finished_write(mountpoint); 564 |
565 mtx_lock(&be_lun->io_lock); 566 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 567 beio->ds_tag_type, beio->ds_trans_type, 568 /*now*/ NULL, /*then*/&beio->ds_t0); 569 mtx_unlock(&be_lun->io_lock); 570 |
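In the file-backed flush path the devstat transaction now brackets the entire vnode operation, and both the start and end calls are made under io_lock rather than the old single LUN lock. A condensed sketch of that bracketing, under the assumption that lun_io_lock, lun_stats and t0 are parameters standing in for the LUN and beio fields:

```c
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/devicestat.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/mount.h>
#include <sys/vnode.h>

static int
flush_with_stats(struct vnode *vp, struct mtx_padalign *lun_io_lock,
    struct devstat *lun_stats, struct bintime *t0)
{
	int error;

	/* Start accounting before any blocking vnode work. */
	binuptime(t0);
	mtx_lock(lun_io_lock);
	devstat_start_transaction(lun_stats, t0);
	mtx_unlock(lun_io_lock);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_FSYNC(vp, MNT_WAIT, curthread);
	VOP_UNLOCK(vp, 0);

	/* Close the transaction once the flush is done; no data was moved. */
	mtx_lock(lun_io_lock);
	devstat_end_transaction(lun_stats, /*bytes*/ 0, DEVSTAT_TAG_SIMPLE,
	    DEVSTAT_NO_DATA, /*now*/ NULL, /*then*/ t0);
	mtx_unlock(lun_io_lock);

	return (error);
}
```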
|
576 if (error == 0) 577 ctl_set_success(&io->scsiio); 578 else { 579 /* XXX KDM is there is a better error here? */ 580 ctl_set_internal_failure(&io->scsiio, 581 /*sks_valid*/ 1, 582 /*retry_count*/ 0xbad1); 583 } --- 38 unchanged lines hidden (view full) --- 622 xuio.uio_iovcnt = beio->num_segs; 623 xuio.uio_td = curthread; 624 625 for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) { 626 xiovec->iov_base = beio->sg_segs[i].addr; 627 xiovec->iov_len = beio->sg_segs[i].len; 628 } 629 | 571 if (error == 0) 572 ctl_set_success(&io->scsiio); 573 else { 574 /* XXX KDM is there is a better error here? */ 575 ctl_set_internal_failure(&io->scsiio, 576 /*sks_valid*/ 1, 577 /*retry_count*/ 0xbad1); 578 } --- 38 unchanged lines hidden (view full) --- 617 xuio.uio_iovcnt = beio->num_segs; 618 xuio.uio_td = curthread; 619 620 for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) { 621 xiovec->iov_base = beio->sg_segs[i].addr; 622 xiovec->iov_len = beio->sg_segs[i].len; 623 } 624 |
625 binuptime(&beio->ds_t0); 626 mtx_lock(&be_lun->io_lock); 627 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 628 mtx_unlock(&be_lun->io_lock); 629 |
|
630 if (beio->bio_cmd == BIO_READ) { 631 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); 632 | 630 if (beio->bio_cmd == BIO_READ) { 631 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); 632 |
633 binuptime(&beio->ds_t0); 634 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 635 | |
636 /* 637 * UFS pays attention to IO_DIRECT for reads. If the 638 * DIRECTIO option is configured into the kernel, it calls 639 * ffs_rawread(). But that only works for single-segment 640 * uios with user space addresses. In our case, with a 641 * kernel uio, it still reads into the buffer cache, but it 642 * will just try to release the buffer from the cache later 643 * on in ffs_read(). --- 24 unchanged lines hidden (view full) --- 668 || ((mountpoint == NULL) 669 && MNT_SHARED_WRITES(be_lun->vn->v_mount))) 670 lock_flags = LK_SHARED; 671 else 672 lock_flags = LK_EXCLUSIVE; 673 674 vn_lock(be_lun->vn, lock_flags | LK_RETRY); 675 | 633 /* 634 * UFS pays attention to IO_DIRECT for reads. If the 635 * DIRECTIO option is configured into the kernel, it calls 636 * ffs_rawread(). But that only works for single-segment 637 * uios with user space addresses. In our case, with a 638 * kernel uio, it still reads into the buffer cache, but it 639 * will just try to release the buffer from the cache later 640 * on in ffs_read(). --- 24 unchanged lines hidden (view full) --- 665 || ((mountpoint == NULL) 666 && MNT_SHARED_WRITES(be_lun->vn->v_mount))) 667 lock_flags = LK_SHARED; 668 else 669 lock_flags = LK_EXCLUSIVE; 670 671 vn_lock(be_lun->vn, lock_flags | LK_RETRY); 672 |
676 binuptime(&beio->ds_t0); 677 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 678 | |
679 /* 680 * UFS pays attention to IO_DIRECT for writes. The write 681 * is done asynchronously. (Normally the write would just 682 * get put into cache. 683 * 684 * UFS pays attention to IO_SYNC for writes. It will 685 * attempt to write the buffer out synchronously if that 686 * flag is set. --- 10 unchanged lines hidden (view full) --- 697 error = VOP_WRITE(be_lun->vn, &xuio, (flags & BIO_ORDERED) ? 698 IO_SYNC : 0, file_data->cred); 699 VOP_UNLOCK(be_lun->vn, 0); 700 701 vn_finished_write(mountpoint); 702 SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0); 703 } 704 | 673 /* 674 * UFS pays attention to IO_DIRECT for writes. The write 675 * is done asynchronously. (Normally the write would just 676 * get put into cache. 677 * 678 * UFS pays attention to IO_SYNC for writes. It will 679 * attempt to write the buffer out synchronously if that 680 * flag is set. --- 10 unchanged lines hidden (view full) --- 691 error = VOP_WRITE(be_lun->vn, &xuio, (flags & BIO_ORDERED) ? 692 IO_SYNC : 0, file_data->cred); 693 VOP_UNLOCK(be_lun->vn, 0); 694 695 vn_finished_write(mountpoint); 696 SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0); 697 } 698 |
699 mtx_lock(&be_lun->io_lock); 700 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 701 beio->ds_tag_type, beio->ds_trans_type, 702 /*now*/ NULL, /*then*/&beio->ds_t0); 703 mtx_unlock(&be_lun->io_lock); 704 |
|
705 /* 706 * If we got an error, set the sense data to "MEDIUM ERROR" and 707 * return the I/O to the user. 708 */ 709 if (error != 0) { 710 char path_str[32]; 711 712 ctl_scsi_path_string(io, path_str, sizeof(path_str)); --- 53 unchanged lines hidden (view full) --- 766 * We don't need to acquire the LUN lock here, because we are only 767 * sending one bio, and so there is no other context to synchronize 768 * with. 769 */ 770 beio->num_bios_sent = 1; 771 beio->send_complete = 1; 772 773 binuptime(&beio->ds_t0); | 705 /* 706 * If we got an error, set the sense data to "MEDIUM ERROR" and 707 * return the I/O to the user. 708 */ 709 if (error != 0) { 710 char path_str[32]; 711 712 ctl_scsi_path_string(io, path_str, sizeof(path_str)); --- 53 unchanged lines hidden (view full) --- 766 * We don't need to acquire the LUN lock here, because we are only 767 * sending one bio, and so there is no other context to synchronize 768 * with. 769 */ 770 beio->num_bios_sent = 1; 771 beio->send_complete = 1; 772 773 binuptime(&beio->ds_t0); |
774 mtx_lock(&be_lun->io_lock); |
|
774 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); | 775 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); |
776 mtx_unlock(&be_lun->io_lock); |
|
775 776 (*dev_data->csw->d_strategy)(bio); 777} 778 779static void 780ctl_be_block_unmap_dev_range(struct ctl_be_block_lun *be_lun, 781 struct ctl_be_block_io *beio, 782 uint64_t off, uint64_t len, int last) --- 14 unchanged lines hidden (view full) --- 797 bio->bio_data = 0; 798 bio->bio_done = ctl_be_block_biodone; 799 bio->bio_caller1 = beio; 800 bio->bio_pblkno = off / be_lun->blocksize; 801 802 off += bio->bio_length; 803 len -= bio->bio_length; 804 | 777 778 (*dev_data->csw->d_strategy)(bio); 779} 780 781static void 782ctl_be_block_unmap_dev_range(struct ctl_be_block_lun *be_lun, 783 struct ctl_be_block_io *beio, 784 uint64_t off, uint64_t len, int last) --- 14 unchanged lines hidden (view full) --- 799 bio->bio_data = 0; 800 bio->bio_done = ctl_be_block_biodone; 801 bio->bio_caller1 = beio; 802 bio->bio_pblkno = off / be_lun->blocksize; 803 804 off += bio->bio_length; 805 len -= bio->bio_length; 806 |
805 mtx_lock(&be_lun->lock); | 807 mtx_lock(&be_lun->io_lock); |
806 beio->num_bios_sent++; 807 if (last && len == 0) 808 beio->send_complete = 1; | 808 beio->num_bios_sent++; 809 if (last && len == 0) 810 beio->send_complete = 1; |
809 mtx_unlock(&be_lun->lock); | 811 mtx_unlock(&be_lun->io_lock); |
810 811 (*dev_data->csw->d_strategy)(bio); 812 } 813} 814 815static void 816ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, 817 struct ctl_be_block_io *beio) --- 5 unchanged lines hidden (view full) --- 823 uint64_t len; 824 825 dev_data = &be_lun->backend.dev; 826 io = beio->io; 827 828 DPRINTF("entered\n"); 829 830 binuptime(&beio->ds_t0); | 812 813 (*dev_data->csw->d_strategy)(bio); 814 } 815} 816 817static void 818ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, 819 struct ctl_be_block_io *beio) --- 5 unchanged lines hidden (view full) --- 825 uint64_t len; 826 827 dev_data = &be_lun->backend.dev; 828 io = beio->io; 829 830 DPRINTF("entered\n"); 831 832 binuptime(&beio->ds_t0); |
833 mtx_lock(&be_lun->io_lock); |
|
831 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); | 834 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); |
835 mtx_unlock(&be_lun->io_lock); |
|
832 833 if (beio->io_offset == -1) { 834 beio->io_len = 0; 835 ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 836 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 837 end = buf + ptrlen->len / sizeof(*buf); 838 for (; buf < end; buf++) { 839 len = (uint64_t)scsi_4btoul(buf->length) * --- 7 unchanged lines hidden (view full) --- 847 ctl_be_block_unmap_dev_range(be_lun, beio, 848 beio->io_offset, beio->io_len, TRUE); 849} 850 851static void 852ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, 853 struct ctl_be_block_io *beio) 854{ | 836 837 if (beio->io_offset == -1) { 838 beio->io_len = 0; 839 ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 840 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 841 end = buf + ptrlen->len / sizeof(*buf); 842 for (; buf < end; buf++) { 843 len = (uint64_t)scsi_4btoul(buf->length) * --- 7 unchanged lines hidden (view full) --- 851 ctl_be_block_unmap_dev_range(be_lun, beio, 852 beio->io_offset, beio->io_len, TRUE); 853} 854 855static void 856ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, 857 struct ctl_be_block_io *beio) 858{ |
859 TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue); |
|
855 int i; 856 struct bio *bio; 857 struct ctl_be_block_devdata *dev_data; 858 off_t cur_offset; 859 int max_iosize; 860 861 DPRINTF("entered\n"); 862 --- 4 unchanged lines hidden (view full) --- 867 * backend device. Hopefully it is MAXPHYS. If the driver doesn't 868 * set it properly, use DFLTPHYS. 869 */ 870 max_iosize = dev_data->cdev->si_iosize_max; 871 if (max_iosize < PAGE_SIZE) 872 max_iosize = DFLTPHYS; 873 874 cur_offset = beio->io_offset; | 860 int i; 861 struct bio *bio; 862 struct ctl_be_block_devdata *dev_data; 863 off_t cur_offset; 864 int max_iosize; 865 866 DPRINTF("entered\n"); 867 --- 4 unchanged lines hidden (view full) --- 872 * backend device. Hopefully it is MAXPHYS. If the driver doesn't 873 * set it properly, use DFLTPHYS. 874 */ 875 max_iosize = dev_data->cdev->si_iosize_max; 876 if (max_iosize < PAGE_SIZE) 877 max_iosize = DFLTPHYS; 878 879 cur_offset = beio->io_offset; |
875 876 /* 877 * XXX KDM need to accurately reflect the number of I/Os outstanding 878 * to a device. 879 */ 880 binuptime(&beio->ds_t0); 881 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 882 | |
883 for (i = 0; i < beio->num_segs; i++) { 884 size_t cur_size; 885 uint8_t *cur_ptr; 886 887 cur_size = beio->sg_segs[i].len; 888 cur_ptr = beio->sg_segs[i].addr; 889 890 while (cur_size > 0) { --- 11 unchanged lines hidden (view full) --- 902 bio->bio_data = cur_ptr; 903 bio->bio_done = ctl_be_block_biodone; 904 bio->bio_pblkno = cur_offset / be_lun->blocksize; 905 906 cur_offset += bio->bio_length; 907 cur_ptr += bio->bio_length; 908 cur_size -= bio->bio_length; 909 | 880 for (i = 0; i < beio->num_segs; i++) { 881 size_t cur_size; 882 uint8_t *cur_ptr; 883 884 cur_size = beio->sg_segs[i].len; 885 cur_ptr = beio->sg_segs[i].addr; 886 887 while (cur_size > 0) { --- 11 unchanged lines hidden (view full) --- 899 bio->bio_data = cur_ptr; 900 bio->bio_done = ctl_be_block_biodone; 901 bio->bio_pblkno = cur_offset / be_lun->blocksize; 902 903 cur_offset += bio->bio_length; 904 cur_ptr += bio->bio_length; 905 cur_size -= bio->bio_length; 906 |
910 /* 911 * Make sure we set the complete bit just before we 912 * issue the last bio so we don't wind up with a 913 * race. 914 * 915 * Use the LUN mutex here instead of a combination 916 * of atomic variables for simplicity. 917 * 918 * XXX KDM we could have a per-IO lock, but that 919 * would cause additional per-IO setup and teardown 920 * overhead. Hopefully there won't be too much 921 * contention on the LUN lock. 922 */ 923 mtx_lock(&be_lun->lock); 924 | 907 TAILQ_INSERT_TAIL(&queue, bio, bio_queue); |
925 beio->num_bios_sent++; | 908 beio->num_bios_sent++; |
926 927 if ((i == beio->num_segs - 1) 928 && (cur_size == 0)) 929 beio->send_complete = 1; 930 931 mtx_unlock(&be_lun->lock); 932 933 (*dev_data->csw->d_strategy)(bio); | |
934 } 935 } | 909 } 910 } |
911 binuptime(&beio->ds_t0); 912 mtx_lock(&be_lun->io_lock); 913 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 914 beio->send_complete = 1; 915 mtx_unlock(&be_lun->io_lock); 916 917 /* 918 * Fire off all allocated requests! 919 */ 920 while ((bio = TAILQ_FIRST(&queue)) != NULL) { 921 TAILQ_REMOVE(&queue, bio, bio_queue); 922 (*dev_data->csw->d_strategy)(bio); 923 } |
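This is the most substantial change in the revision: ctl_be_block_dispatch_dev() no longer takes the LUN lock once per bio to bump num_bios_sent and race-proof send_complete. Instead, every bio is first linked onto a function-local TAILQ, io_lock is then taken a single time to start the devstat transaction and set send_complete, and only afterwards are the bios handed to the driver. A reduced sketch of that collect-then-fire pattern, reusing the hypothetical beio_state from the earlier sketch, with dev_strategy() standing in for the cdevsw d_strategy call:

```c
#include <sys/param.h>
#include <sys/time.h>
#include <sys/bio.h>
#include <sys/devicestat.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <geom/geom.h>

static void
dispatch_batched(struct beio_state *bs, int num_segs,
    void (*dev_strategy)(struct bio *))
{
	TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue);
	struct bio *bio;
	int i;

	/* Phase 1: build every bio, but do not issue anything yet. */
	for (i = 0; i < num_segs; i++) {
		bio = g_alloc_bio();
		/* ...fill in bio_cmd, bio_offset, bio_data, bio_done... */
		TAILQ_INSERT_TAIL(&queue, bio, bio_queue);
		bs->num_bios_sent++;
	}

	/* Phase 2: one short locked section for stats and the final flag. */
	binuptime(&bs->ds_t0);
	mtx_lock(bs->io_lock);
	devstat_start_transaction(bs->stats, &bs->ds_t0);
	bs->send_complete = 1;
	mtx_unlock(bs->io_lock);

	/* Phase 3: fire everything; completions may already run in parallel. */
	while ((bio = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, bio, bio_queue);
		dev_strategy(bio);
	}
}
```

Because send_complete is set before any bio is issued, the per-bio locking that the removed XXX comment worried about (contention on the LUN lock) is no longer needed.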
|
936} 937 938static void 939ctl_be_block_cw_done_ws(struct ctl_be_block_io *beio) 940{ 941 union ctl_io *io; 942 943 io = beio->io; --- 246 unchanged lines hidden (view full) --- 1190 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 1191 ctl_data_submit_done(io); 1192 return; 1193 } 1194 1195 io->io_hdr.status &= ~CTL_STATUS_MASK; 1196 io->io_hdr.status |= CTL_STATUS_NONE; 1197 | 924} 925 926static void 927ctl_be_block_cw_done_ws(struct ctl_be_block_io *beio) 928{ 929 union ctl_io *io; 930 931 io = beio->io; --- 246 unchanged lines hidden (view full) --- 1178 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 1179 ctl_data_submit_done(io); 1180 return; 1181 } 1182 1183 io->io_hdr.status &= ~CTL_STATUS_MASK; 1184 io->io_hdr.status |= CTL_STATUS_NONE; 1185 |
1198 mtx_lock(&be_lun->lock); | 1186 mtx_lock(&be_lun->queue_lock); |
1199 /* 1200 * XXX KDM make sure that links is okay to use at this point. 1201 * Otherwise, we either need to add another field to ctl_io_hdr, 1202 * or deal with resource allocation here. 1203 */ 1204 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); | 1187 /* 1188 * XXX KDM make sure that links is okay to use at this point. 1189 * Otherwise, we either need to add another field to ctl_io_hdr, 1190 * or deal with resource allocation here. 1191 */ 1192 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); |
1205 mtx_unlock(&be_lun->lock); | 1193 mtx_unlock(&be_lun->queue_lock); |
1206 1207 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 1208} 1209 1210static void 1211ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, 1212 union ctl_io *io) 1213{ --- 131 unchanged lines hidden (view full) --- 1345 struct ctl_be_block_softc *softc; 1346 union ctl_io *io; 1347 1348 be_lun = (struct ctl_be_block_lun *)context; 1349 softc = be_lun->softc; 1350 1351 DPRINTF("entered\n"); 1352 | 1194 1195 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 1196} 1197 1198static void 1199ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, 1200 union ctl_io *io) 1201{ --- 131 unchanged lines hidden (view full) --- 1333 struct ctl_be_block_softc *softc; 1334 union ctl_io *io; 1335 1336 be_lun = (struct ctl_be_block_lun *)context; 1337 softc = be_lun->softc; 1338 1339 DPRINTF("entered\n"); 1340 |
1353 mtx_lock(&be_lun->lock); | 1341 mtx_lock(&be_lun->queue_lock); |
1354 for (;;) { 1355 io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue); 1356 if (io != NULL) { 1357 struct ctl_be_block_io *beio; 1358 1359 DPRINTF("datamove queue\n"); 1360 1361 STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr, 1362 ctl_io_hdr, links); 1363 | 1342 for (;;) { 1343 io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue); 1344 if (io != NULL) { 1345 struct ctl_be_block_io *beio; 1346 1347 DPRINTF("datamove queue\n"); 1348 1349 STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr, 1350 ctl_io_hdr, links); 1351 |
1364 mtx_unlock(&be_lun->lock); | 1352 mtx_unlock(&be_lun->queue_lock); |
1365 1366 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1367 1368 be_lun->dispatch(be_lun, beio); 1369 | 1353 1354 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1355 1356 be_lun->dispatch(be_lun, beio); 1357 |
1370 mtx_lock(&be_lun->lock); | 1358 mtx_lock(&be_lun->queue_lock); |
1371 continue; 1372 } 1373 io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue); 1374 if (io != NULL) { 1375 1376 DPRINTF("config write queue\n"); 1377 1378 STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr, 1379 ctl_io_hdr, links); 1380 | 1359 continue; 1360 } 1361 io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue); 1362 if (io != NULL) { 1363 1364 DPRINTF("config write queue\n"); 1365 1366 STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr, 1367 ctl_io_hdr, links); 1368 |
1381 mtx_unlock(&be_lun->lock); | 1369 mtx_unlock(&be_lun->queue_lock); |
1382 1383 ctl_be_block_cw_dispatch(be_lun, io); 1384 | 1370 1371 ctl_be_block_cw_dispatch(be_lun, io); 1372 |
1385 mtx_lock(&be_lun->lock); | 1373 mtx_lock(&be_lun->queue_lock); |
1386 continue; 1387 } 1388 io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue); 1389 if (io != NULL) { 1390 DPRINTF("input queue\n"); 1391 1392 STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr, 1393 ctl_io_hdr, links); | 1374 continue; 1375 } 1376 io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue); 1377 if (io != NULL) { 1378 DPRINTF("input queue\n"); 1379 1380 STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr, 1381 ctl_io_hdr, links); |
1394 mtx_unlock(&be_lun->lock); | 1382 mtx_unlock(&be_lun->queue_lock); |
1395 1396 /* 1397 * We must drop the lock, since this routine and 1398 * its children may sleep. 1399 */ 1400 ctl_be_block_dispatch(be_lun, io); 1401 | 1383 1384 /* 1385 * We must drop the lock, since this routine and 1386 * its children may sleep. 1387 */ 1388 ctl_be_block_dispatch(be_lun, io); 1389 |
1402 mtx_lock(&be_lun->lock); | 1390 mtx_lock(&be_lun->queue_lock); |
1403 continue; 1404 } 1405 1406 /* 1407 * If we get here, there is no work left in the queues, so 1408 * just break out and let the task queue go to sleep. 1409 */ 1410 break; 1411 } | 1391 continue; 1392 } 1393 1394 /* 1395 * If we get here, there is no work left in the queues, so 1396 * just break out and let the task queue go to sleep. 1397 */ 1398 break; 1399 } |
1412 mtx_unlock(&be_lun->lock); | 1400 mtx_unlock(&be_lun->queue_lock); |
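Throughout the worker thread above, queue_lock now protects only the STAILQ manipulation itself and is always dropped before dispatching, since the dispatch routines may allocate memory or sleep on file I/O. A condensed sketch of that drain loop with generic names, where handle_one() is a hypothetical stand-in for the dispatch and config-write routines:

```c
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>

struct work_item {
	STAILQ_ENTRY(work_item) links;
};
STAILQ_HEAD(work_queue, work_item);

/* Drain one queue; the lock is never held across handle_one(). */
static void
drain_queue(struct work_queue *q, struct mtx_padalign *queue_lock,
    void (*handle_one)(struct work_item *))
{
	struct work_item *wi;

	mtx_lock(queue_lock);
	while ((wi = STAILQ_FIRST(q)) != NULL) {
		STAILQ_REMOVE_HEAD(q, links);
		mtx_unlock(queue_lock);
		handle_one(wi);			/* may sleep or allocate */
		mtx_lock(queue_lock);
	}
	mtx_unlock(queue_lock);
}
```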
1413} 1414 1415/* 1416 * Entry point from CTL to the backend for I/O. We queue everything to a 1417 * work thread, so this just puts the I/O on a queue and wakes up the 1418 * thread. 1419 */ 1420static int --- 11 unchanged lines hidden (view full) --- 1432 /* 1433 * Make sure we only get SCSI I/O. 1434 */ 1435 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type " 1436 "%#x) encountered", io->io_hdr.io_type)); 1437 1438 PRIV(io)->len = 0; 1439 | 1401} 1402 1403/* 1404 * Entry point from CTL to the backend for I/O. We queue everything to a 1405 * work thread, so this just puts the I/O on a queue and wakes up the 1406 * thread. 1407 */ 1408static int --- 11 unchanged lines hidden (view full) --- 1420 /* 1421 * Make sure we only get SCSI I/O. 1422 */ 1423 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type " 1424 "%#x) encountered", io->io_hdr.io_type)); 1425 1426 PRIV(io)->len = 0; 1427 |
1440 mtx_lock(&be_lun->lock); | 1428 mtx_lock(&be_lun->queue_lock); |
1441 /* 1442 * XXX KDM make sure that links is okay to use at this point. 1443 * Otherwise, we either need to add another field to ctl_io_hdr, 1444 * or deal with resource allocation here. 1445 */ 1446 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); | 1429 /* 1430 * XXX KDM make sure that links is okay to use at this point. 1431 * Otherwise, we either need to add another field to ctl_io_hdr, 1432 * or deal with resource allocation here. 1433 */ 1434 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); |
1447 mtx_unlock(&be_lun->lock); | 1435 mtx_unlock(&be_lun->queue_lock); |
1448 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 1449 1450 return (CTL_RETVAL_COMPLETE); 1451} 1452 1453static int 1454ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, 1455 int flag, struct thread *td) --- 407 unchanged lines hidden (view full) --- 1863 1864 be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK); 1865 1866 be_lun->softc = softc; 1867 STAILQ_INIT(&be_lun->input_queue); 1868 STAILQ_INIT(&be_lun->config_write_queue); 1869 STAILQ_INIT(&be_lun->datamove_queue); 1870 sprintf(be_lun->lunname, "cblk%d", softc->num_luns); | 1436 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 1437 1438 return (CTL_RETVAL_COMPLETE); 1439} 1440 1441static int 1442ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, 1443 int flag, struct thread *td) --- 407 unchanged lines hidden (view full) --- 1851 1852 be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK); 1853 1854 be_lun->softc = softc; 1855 STAILQ_INIT(&be_lun->input_queue); 1856 STAILQ_INIT(&be_lun->config_write_queue); 1857 STAILQ_INIT(&be_lun->datamove_queue); 1858 sprintf(be_lun->lunname, "cblk%d", softc->num_luns); |
1871 mtx_init(&be_lun->lock, be_lun->lunname, NULL, MTX_DEF); | 1859 mtx_init(&be_lun->io_lock, "cblk io lock", NULL, MTX_DEF); 1860 mtx_init(&be_lun->queue_lock, "cblk queue lock", NULL, MTX_DEF); |
1872 ctl_init_opts(&be_lun->ctl_be_lun, req); 1873 1874 be_lun->lun_zone = uma_zcreate(be_lun->lunname, CTLBLK_MAX_SEG, 1875 NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0); 1876 1877 if (be_lun->lun_zone == NULL) { 1878 snprintf(req->error_str, sizeof(req->error_str), 1879 "%s: error allocating UMA zone", __func__); --- 230 unchanged lines hidden (view full) --- 2110 if (be_lun->io_taskqueue != NULL) 2111 taskqueue_free(be_lun->io_taskqueue); 2112 ctl_be_block_close(be_lun); 2113 if (be_lun->dev_path != NULL) 2114 free(be_lun->dev_path, M_CTLBLK); 2115 if (be_lun->lun_zone != NULL) 2116 uma_zdestroy(be_lun->lun_zone); 2117 ctl_free_opts(&be_lun->ctl_be_lun); | 1861 ctl_init_opts(&be_lun->ctl_be_lun, req); 1862 1863 be_lun->lun_zone = uma_zcreate(be_lun->lunname, CTLBLK_MAX_SEG, 1864 NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0); 1865 1866 if (be_lun->lun_zone == NULL) { 1867 snprintf(req->error_str, sizeof(req->error_str), 1868 "%s: error allocating UMA zone", __func__); --- 230 unchanged lines hidden (view full) --- 2099 if (be_lun->io_taskqueue != NULL) 2100 taskqueue_free(be_lun->io_taskqueue); 2101 ctl_be_block_close(be_lun); 2102 if (be_lun->dev_path != NULL) 2103 free(be_lun->dev_path, M_CTLBLK); 2104 if (be_lun->lun_zone != NULL) 2105 uma_zdestroy(be_lun->lun_zone); 2106 ctl_free_opts(&be_lun->ctl_be_lun); |
2118 mtx_destroy(&be_lun->lock); | 2107 mtx_destroy(&be_lun->queue_lock); 2108 mtx_destroy(&be_lun->io_lock); |
2119 free(be_lun, M_CTLBLK); 2120 2121 return (retval); 2122} 2123 2124static int 2125ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 2126{ --- 71 unchanged lines hidden (view full) --- 2198 2199 if (be_lun->disk_stats != NULL) 2200 devstat_remove_entry(be_lun->disk_stats); 2201 2202 uma_zdestroy(be_lun->lun_zone); 2203 2204 ctl_free_opts(&be_lun->ctl_be_lun); 2205 free(be_lun->dev_path, M_CTLBLK); | 2109 free(be_lun, M_CTLBLK); 2110 2111 return (retval); 2112} 2113 2114static int 2115ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 2116{ --- 71 unchanged lines hidden (view full) --- 2188 2189 if (be_lun->disk_stats != NULL) 2190 devstat_remove_entry(be_lun->disk_stats); 2191 2192 uma_zdestroy(be_lun->lun_zone); 2193 2194 ctl_free_opts(&be_lun->ctl_be_lun); 2195 free(be_lun->dev_path, M_CTLBLK); |
2206 | 2196 mtx_destroy(&be_lun->queue_lock); 2197 mtx_destroy(&be_lun->io_lock); |
2207 free(be_lun, M_CTLBLK); 2208 2209 req->status = CTL_LUN_OK; 2210 2211 return (0); 2212 2213bailout_error: 2214 --- 230 unchanged lines hidden (view full) --- 2445 /* 2446 * The upper level CTL code will filter out any CDBs with 2447 * the immediate bit set and return the proper error. 2448 * 2449 * We don't really need to worry about what LBA range the 2450 * user asked to be synced out. When they issue a sync 2451 * cache command, we'll sync out the whole thing. 2452 */ | 2198 free(be_lun, M_CTLBLK); 2199 2200 req->status = CTL_LUN_OK; 2201 2202 return (0); 2203 2204bailout_error: 2205 --- 230 unchanged lines hidden (view full) --- 2436 /* 2437 * The upper level CTL code will filter out any CDBs with 2438 * the immediate bit set and return the proper error. 2439 * 2440 * We don't really need to worry about what LBA range the 2441 * user asked to be synced out. When they issue a sync 2442 * cache command, we'll sync out the whole thing. 2443 */ |
2453 mtx_lock(&be_lun->lock); | 2444 mtx_lock(&be_lun->queue_lock); |
2454 STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr, 2455 links); | 2445 STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr, 2446 links); |
2456 mtx_unlock(&be_lun->lock); | 2447 mtx_unlock(&be_lun->queue_lock); |
2457 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 2458 break; 2459 case START_STOP_UNIT: { 2460 struct scsi_start_stop_unit *cdb; 2461 2462 cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb; 2463 2464 if (cdb->how & SSS_START) --- 74 unchanged lines hidden (view full) --- 2539ctl_be_block_init(void) 2540{ 2541 struct ctl_be_block_softc *softc; 2542 int retval; 2543 2544 softc = &backend_block_softc; 2545 retval = 0; 2546 | 2448 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 2449 break; 2450 case START_STOP_UNIT: { 2451 struct scsi_start_stop_unit *cdb; 2452 2453 cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb; 2454 2455 if (cdb->how & SSS_START) --- 74 unchanged lines hidden (view full) --- 2530ctl_be_block_init(void) 2531{ 2532 struct ctl_be_block_softc *softc; 2533 int retval; 2534 2535 softc = &backend_block_softc; 2536 retval = 0; 2537 |
2547 mtx_init(&softc->lock, "ctlblk", NULL, MTX_DEF); | 2538 mtx_init(&softc->lock, "ctlblock", NULL, MTX_DEF); |
2548 beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io), 2549 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 2550 STAILQ_INIT(&softc->disk_list); 2551 STAILQ_INIT(&softc->lun_list); 2552 2553 return (retval); 2554} | 2539 beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io), 2540 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 2541 STAILQ_INIT(&softc->disk_list); 2542 STAILQ_INIT(&softc->lun_list); 2543 2544 return (retval); 2545} |