/*-
 * Copyright (c) 2003 Silicon Graphics International Corp.
 * Copyright (c) 2009-2011 Spectra Logic Corporation
 * Copyright (c) 2012 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $
 */
/*
 * CAM Target Layer driver backend for block devices.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <opt_kdtrace.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/endian.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/disk.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/pcpu.h>
#include <sys/module.h>
#include <sys/sdt.h>
#include <sys/devicestat.h>
#include <sys/sysctl.h>

#include <geom/geom.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

/*
 * The idea here is that we'll allocate enough S/G space to hold a 16MB
 * I/O.  If we get an I/O larger than that, we'll reject it.
 */
#define	CTLBLK_MAX_IO_SIZE	(16 * 1024 * 1024)
#define	CTLBLK_MAX_SEGS		((CTLBLK_MAX_IO_SIZE / MAXPHYS) + 1)
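/*
 * Worked example of the segment math (illustrative, assuming the common
 * MAXPHYS of 128KB): a maximal 16MB I/O needs 16MB / 128KB = 128
 * segments, and the +1 leaves slack for sizes that don't divide evenly.
 */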
#ifdef CTLBLK_DEBUG
#define DPRINTF(fmt, args...) \
	printf("cbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
#else
#define DPRINTF(fmt, args...) do {} while(0)
#endif

SDT_PROVIDER_DEFINE(cbb);

typedef enum {
	CTL_BE_BLOCK_LUN_UNCONFIGURED	= 0x01,
	CTL_BE_BLOCK_LUN_CONFIG_ERR	= 0x02,
	CTL_BE_BLOCK_LUN_WAITING	= 0x04,
	CTL_BE_BLOCK_LUN_MULTI_THREAD	= 0x08
} ctl_be_block_lun_flags;

typedef enum {
	CTL_BE_BLOCK_NONE,
	CTL_BE_BLOCK_DEV,
	CTL_BE_BLOCK_FILE
} ctl_be_block_type;

struct ctl_be_block_devdata {
	struct cdev *cdev;
	struct cdevsw *csw;
	int dev_ref;
};

struct ctl_be_block_filedata {
	struct ucred *cred;
};

union ctl_be_block_bedata {
	struct ctl_be_block_devdata dev;
	struct ctl_be_block_filedata file;
};

struct ctl_be_block_io;
struct ctl_be_block_lun;

typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun,
			       struct ctl_be_block_io *beio);

/*
 * Backend LUN structure.  There is a 1:1 mapping between a block device
 * and a backend block LUN, and between a backend block LUN and a CTL LUN.
 */
struct ctl_be_block_lun {
	struct ctl_block_disk *disk;
	char lunname[32];
	char *dev_path;
	ctl_be_block_type dev_type;
	struct vnode *vn;
	union ctl_be_block_bedata backend;
	cbb_dispatch_t dispatch;
	cbb_dispatch_t lun_flush;
	struct mtx lock;
	uma_zone_t lun_zone;
	uint64_t size_blocks;
	uint64_t size_bytes;
	uint32_t blocksize;
	int blocksize_shift;
	uint16_t pblockexp;
	uint16_t pblockoff;
	struct ctl_be_block_softc *softc;
	struct devstat *disk_stats;
	ctl_be_block_lun_flags flags;
	STAILQ_ENTRY(ctl_be_block_lun) links;
	struct ctl_be_lun ctl_be_lun;
	struct taskqueue *io_taskqueue;
	struct task io_task;
	int num_threads;
	STAILQ_HEAD(, ctl_io_hdr) input_queue;
	STAILQ_HEAD(, ctl_io_hdr) config_write_queue;
	STAILQ_HEAD(, ctl_io_hdr) datamove_queue;
};

/*
 * Overall softc structure for the block backend module.
 */
struct ctl_be_block_softc {
	struct mtx			 lock;
	int				 num_disks;
	STAILQ_HEAD(, ctl_block_disk)	 disk_list;
	int				 num_luns;
	STAILQ_HEAD(, ctl_be_block_lun)	 lun_list;
};

static struct ctl_be_block_softc backend_block_softc;
/*
 * Per-I/O information.
 */
struct ctl_be_block_io {
	union ctl_io			*io;
	struct ctl_sg_entry		sg_segs[CTLBLK_MAX_SEGS];
	struct iovec			xiovecs[CTLBLK_MAX_SEGS];
	int				bio_cmd;
	int				bio_flags;
	int				num_segs;
	int				num_bios_sent;
	int				num_bios_done;
	int				send_complete;
	int				num_errors;
	struct bintime			ds_t0;
	devstat_tag_type		ds_tag_type;
	devstat_trans_flags		ds_trans_type;
	uint64_t			io_len;
	uint64_t			io_offset;
	struct ctl_be_block_softc	*softc;
	struct ctl_be_block_lun		*lun;
};

static int cbb_num_threads = 14;
TUNABLE_INT("kern.cam.ctl.block.num_threads", &cbb_num_threads);
SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0,
	    "CAM Target Layer Block Backend");
SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RW,
	   &cbb_num_threads, 0, "Number of threads per backing file");
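/*
 * For example, the default thread count can be overridden at boot in
 * loader.conf(5) with kern.cam.ctl.block.num_threads="8", or adjusted
 * on a running system with sysctl(8):
 *
 *	sysctl kern.cam.ctl.block.num_threads=8
 */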
static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc);
static void ctl_free_beio(struct ctl_be_block_io *beio);
static void ctl_complete_beio(struct ctl_be_block_io *beio);
static int ctl_be_block_move_done(union ctl_io *io);
static void ctl_be_block_biodone(struct bio *bio);
static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
				    struct ctl_be_block_io *beio);
static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
				       struct ctl_be_block_io *beio);
static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
				   struct ctl_be_block_io *beio);
static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
				      struct ctl_be_block_io *beio);
static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
				     union ctl_io *io);
static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
				  union ctl_io *io);
static void ctl_be_block_worker(void *context, int pending);
static int ctl_be_block_submit(union ctl_io *io);
static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
			      int flag, struct thread *td);
static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun,
				  struct ctl_lun_req *req);
static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun,
				 struct ctl_lun_req *req);
static int ctl_be_block_close(struct ctl_be_block_lun *be_lun);
static int ctl_be_block_open(struct ctl_be_block_softc *softc,
			     struct ctl_be_block_lun *be_lun,
			     struct ctl_lun_req *req);
static int ctl_be_block_create(struct ctl_be_block_softc *softc,
			       struct ctl_lun_req *req);
static int ctl_be_block_rm(struct ctl_be_block_softc *softc,
			   struct ctl_lun_req *req);
static int ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun,
				    struct ctl_lun_req *req);
static int ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun,
				   struct ctl_lun_req *req);
static int ctl_be_block_modify(struct ctl_be_block_softc *softc,
			       struct ctl_lun_req *req);
static void ctl_be_block_lun_shutdown(void *be_lun);
static void ctl_be_block_lun_config_status(void *be_lun,
					   ctl_lun_config_status status);
static int ctl_be_block_config_write(union ctl_io *io);
static int ctl_be_block_config_read(union ctl_io *io);
static int ctl_be_block_lun_info(void *be_lun, struct sbuf *sb);
int ctl_be_block_init(void);

static struct ctl_backend_driver ctl_be_block_driver =
{
	.name = "block",
	.flags = CTL_BE_FLAG_HAS_CONFIG,
	.init = ctl_be_block_init,
	.data_submit = ctl_be_block_submit,
	.data_move_done = ctl_be_block_move_done,
	.config_read = ctl_be_block_config_read,
	.config_write = ctl_be_block_config_write,
	.ioctl = ctl_be_block_ioctl,
	.lun_info = ctl_be_block_lun_info
};

MALLOC_DEFINE(M_CTLBLK, "ctlblk", "Memory used for CTL block backend");
CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver);

static uma_zone_t beio_zone;

static struct ctl_be_block_io *
ctl_alloc_beio(struct ctl_be_block_softc *softc)
{
	struct ctl_be_block_io *beio;

	beio = uma_zalloc(beio_zone, M_WAITOK | M_ZERO);
	beio->softc = softc;
	return (beio);
}

static void
ctl_free_beio(struct ctl_be_block_io *beio)
{
	int duplicate_free;
	int i;

	duplicate_free = 0;

	for (i = 0; i < beio->num_segs; i++) {
		if (beio->sg_segs[i].addr == NULL)
			duplicate_free++;

		uma_zfree(beio->lun->lun_zone, beio->sg_segs[i].addr);
		beio->sg_segs[i].addr = NULL;
	}

	if (duplicate_free > 0) {
		printf("%s: %d duplicate frees out of %d segments\n", __func__,
		       duplicate_free, beio->num_segs);
	}

	uma_zfree(beio_zone, beio);
}

static void
ctl_complete_beio(struct ctl_be_block_io *beio)
{
	union ctl_io *io;
	int io_len;

	io = beio->io;

	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)
		io_len = beio->io_len;
	else
		io_len = 0;

	devstat_end_transaction(beio->lun->disk_stats,
				/*bytes*/ io_len,
				beio->ds_tag_type,
				beio->ds_trans_type,
				/*now*/ NULL,
				/*then*/&beio->ds_t0);

	ctl_free_beio(beio);
	ctl_done(io);
}
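/*
 * Completion callback for ctl_datamove().  For a read, the data has now
 * been moved to the initiator, so the I/O can be completed.  For a
 * write, the data has just arrived from the initiator, and the actual
 * backend I/O still has to be issued; that work is handed off to the
 * worker thread because this routine may not block.
 */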
static int
ctl_be_block_move_done(union ctl_io *io)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_lun *be_lun;
#ifdef CTL_TIME_IO
	struct bintime cur_bt;
#endif

	beio = (struct ctl_be_block_io *)
		io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr;

	be_lun = beio->lun;

	DPRINTF("entered\n");

#ifdef CTL_TIME_IO
	getbintime(&cur_bt);
	bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
	bintime_add(&io->io_hdr.dma_bt, &cur_bt);
	io->io_hdr.num_dmas++;
#endif

	/*
	 * We set status at this point for read commands, and write
	 * commands with errors.
	 */
	if ((beio->bio_cmd == BIO_READ)
	 && (io->io_hdr.port_status == 0)
	 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
	 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
		ctl_set_success(&io->scsiio);
	else if ((io->io_hdr.port_status != 0)
	      && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
	      && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
		/*
		 * For hardware error sense keys, the sense key
		 * specific value is defined to be a retry count,
		 * but we use it to pass back an internal FETD
		 * error code.  XXX KDM Hopefully the FETD is only
		 * using 16 bits for an error code, since that's
		 * all the space we have in the sks field.
		 */
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/
					 io->io_hdr.port_status);
	}

	/*
	 * If this is a read, or a write with errors, it is done.
	 */
	if ((beio->bio_cmd == BIO_READ)
	 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)
	 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) {
		ctl_complete_beio(beio);
		return (0);
	}

	/*
	 * At this point, we have a write and the DMA completed
	 * successfully.  We now have to queue it to the task queue to
	 * execute the backend I/O.  That is because we do blocking
	 * memory allocations, and in the file backing case, blocking I/O.
	 * This move done routine is generally called in the SIM's
	 * interrupt context, and therefore we cannot block.
	 */
	mtx_lock(&be_lun->lock);
	/*
	 * XXX KDM make sure that links is okay to use at this point.
	 * Otherwise, we either need to add another field to ctl_io_hdr,
	 * or deal with resource allocation here.
	 */
	STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links);
	mtx_unlock(&be_lun->lock);

	taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);

	return (0);
}
static void
ctl_be_block_biodone(struct bio *bio)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_lun *be_lun;
	union ctl_io *io;
	int error;

	beio = bio->bio_caller1;
	be_lun = beio->lun;
	io = beio->io;

	DPRINTF("entered\n");

	error = bio->bio_error;
	mtx_lock(&be_lun->lock);
	if (error != 0)
		beio->num_errors++;

	beio->num_bios_done++;

	/*
	 * XXX KDM will this cause WITNESS to complain?  Holding a lock
	 * during the free might cause it to complain.
	 */
	g_destroy_bio(bio);

	/*
	 * If the send complete bit isn't set, or we aren't the last I/O to
	 * complete, then we're done.
	 */
	if ((beio->send_complete == 0)
	 || (beio->num_bios_done < beio->num_bios_sent)) {
		mtx_unlock(&be_lun->lock);
		return;
	}

	/*
	 * At this point, we've verified that we are the last I/O to
	 * complete, so it's safe to drop the lock.
	 */
	mtx_unlock(&be_lun->lock);

	/*
	 * If there are any errors from the backing device, we fail the
	 * entire I/O with a medium error.
	 */
	if (beio->num_errors > 0) {
		if (error == EOPNOTSUPP) {
			ctl_set_invalid_opcode(&io->scsiio);
		} else if (beio->bio_cmd == BIO_FLUSH) {
			/* XXX KDM is there a better error here? */
			ctl_set_internal_failure(&io->scsiio,
						 /*sks_valid*/ 1,
						 /*retry_count*/ 0xbad2);
		} else
			ctl_set_medium_error(&io->scsiio);
		ctl_complete_beio(beio);
		return;
	}

	/*
	 * If this is a write or a flush, we're all done.
	 * If this is a read, we can now send the data to the user.
	 */
	if ((beio->bio_cmd == BIO_WRITE)
	 || (beio->bio_cmd == BIO_FLUSH)) {
		ctl_set_success(&io->scsiio);
		ctl_complete_beio(beio);
	} else {
		io->scsiio.be_move_done = ctl_be_block_move_done;
		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
		io->scsiio.kern_data_len = beio->io_len;
		io->scsiio.kern_total_len = beio->io_len;
		io->scsiio.kern_rel_offset = 0;
		io->scsiio.kern_data_resid = 0;
		io->scsiio.kern_sg_entries = beio->num_segs;
		io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
#ifdef CTL_TIME_IO
		getbintime(&io->io_hdr.dma_start_bt);
#endif
		ctl_datamove(io);
	}
}
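/*
 * For file-backed LUNs, SYNCHRONIZE CACHE is implemented by fsyncing
 * the backing vnode: VOP_FSYNC() with MNT_WAIT does not return until
 * the dirty data has been written to stable storage.
 */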
static void
ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
			struct ctl_be_block_io *beio)
{
	union ctl_io *io;
	struct mount *mountpoint;
	int vfs_is_locked, error, lock_flags;

	DPRINTF("entered\n");

	io = beio->io;

	vfs_is_locked = VFS_LOCK_GIANT(be_lun->vn->v_mount);

	(void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT);

	if (MNT_SHARED_WRITES(mountpoint)
	 || ((mountpoint == NULL)
	  && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
		lock_flags = LK_SHARED;
	else
		lock_flags = LK_EXCLUSIVE;

	vn_lock(be_lun->vn, lock_flags | LK_RETRY);

	binuptime(&beio->ds_t0);
	devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);

	error = VOP_FSYNC(be_lun->vn, MNT_WAIT, curthread);
	VOP_UNLOCK(be_lun->vn, 0);

	vn_finished_write(mountpoint);

	VFS_UNLOCK_GIANT(vfs_is_locked);

	if (error == 0)
		ctl_set_success(&io->scsiio);
	else {
		/* XXX KDM is there a better error here? */
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/ 0xbad1);
	}

	ctl_complete_beio(beio);
}

SDT_PROBE_DEFINE1(cbb, kernel, read, file_start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, file_start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, read, file_done, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, file_done, "uint64_t");

static void
ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
			   struct ctl_be_block_io *beio)
{
	struct ctl_be_block_filedata *file_data;
	union ctl_io *io;
	struct uio xuio;
	struct iovec *xiovec;
	int vfs_is_locked, flags;
	int error, i;

	DPRINTF("entered\n");

	file_data = &be_lun->backend.file;
	io = beio->io;
	flags = beio->bio_flags;

	if (beio->bio_cmd == BIO_READ) {
		SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0);
	} else {
		SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0);
	}

	bzero(&xuio, sizeof(xuio));
	if (beio->bio_cmd == BIO_READ)
		xuio.uio_rw = UIO_READ;
	else
		xuio.uio_rw = UIO_WRITE;

	xuio.uio_offset = beio->io_offset;
	xuio.uio_resid = beio->io_len;
	xuio.uio_segflg = UIO_SYSSPACE;
	xuio.uio_iov = beio->xiovecs;
	xuio.uio_iovcnt = beio->num_segs;
	xuio.uio_td = curthread;

	for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) {
		xiovec->iov_base = beio->sg_segs[i].addr;
		xiovec->iov_len = beio->sg_segs[i].len;
	}

	vfs_is_locked = VFS_LOCK_GIANT(be_lun->vn->v_mount);
	if (beio->bio_cmd == BIO_READ) {
		vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);

		binuptime(&beio->ds_t0);
		devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);

		/*
		 * UFS pays attention to IO_DIRECT for reads.  If the
		 * DIRECTIO option is configured into the kernel, it calls
		 * ffs_rawread().  But that only works for single-segment
		 * uios with user space addresses.  In our case, with a
		 * kernel uio, it still reads into the buffer cache, but it
		 * will just try to release the buffer from the cache later
		 * on in ffs_read().
		 *
		 * ZFS does not pay attention to IO_DIRECT for reads.
		 *
		 * UFS does not pay attention to IO_SYNC for reads.
		 *
		 * ZFS pays attention to IO_SYNC (which translates into the
		 * Solaris define FRSYNC for zfs_read()) for reads.  It
		 * attempts to sync the file before reading.
		 *
		 * So, to attempt to provide some barrier semantics in the
		 * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC.
		 */
		error = VOP_READ(be_lun->vn, &xuio, (flags & BIO_ORDERED) ?
				 (IO_DIRECT|IO_SYNC) : 0, file_data->cred);

		VOP_UNLOCK(be_lun->vn, 0);
	} else {
		struct mount *mountpoint;
		int lock_flags;

		(void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT);

		if (MNT_SHARED_WRITES(mountpoint)
		 || ((mountpoint == NULL)
		  && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
			lock_flags = LK_SHARED;
		else
			lock_flags = LK_EXCLUSIVE;

		vn_lock(be_lun->vn, lock_flags | LK_RETRY);

		binuptime(&beio->ds_t0);
		devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);

		/*
		 * UFS pays attention to IO_DIRECT for writes.  The write
		 * is done asynchronously.  (Normally the write would just
		 * get put into the cache.)
		 *
		 * UFS pays attention to IO_SYNC for writes.  It will
		 * attempt to write the buffer out synchronously if that
		 * flag is set.
		 *
		 * ZFS does not pay attention to IO_DIRECT for writes.
		 *
		 * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC)
		 * for writes.  It will flush the transaction from the
		 * cache before returning.
		 *
		 * So if we've got the BIO_ORDERED flag set, we want
		 * IO_SYNC in either the UFS or ZFS case.
		 */
		error = VOP_WRITE(be_lun->vn, &xuio, (flags & BIO_ORDERED) ?
				  IO_SYNC : 0, file_data->cred);
		VOP_UNLOCK(be_lun->vn, 0);

		vn_finished_write(mountpoint);
	}
	VFS_UNLOCK_GIANT(vfs_is_locked);

	/*
	 * If we got an error, set the sense data to "MEDIUM ERROR" and
	 * return the I/O to the user.
	 */
	if (error != 0) {
		char path_str[32];

		ctl_scsi_path_string(io, path_str, sizeof(path_str));
		/*
		 * XXX KDM ZFS returns ENOSPC when the underlying
		 * filesystem fills up.  What kind of SCSI error should we
		 * return for that?
		 */
		printf("%s%s command returned errno %d\n", path_str,
		       (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", error);
		ctl_set_medium_error(&io->scsiio);
		ctl_complete_beio(beio);
		return;
	}

	/*
	 * If this is a write, we're all done.
	 * If this is a read, we can now send the data to the user.
	 */
	if (beio->bio_cmd == BIO_WRITE) {
		ctl_set_success(&io->scsiio);
		SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0);
		ctl_complete_beio(beio);
	} else {
		SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0);
		io->scsiio.be_move_done = ctl_be_block_move_done;
		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
		io->scsiio.kern_data_len = beio->io_len;
		io->scsiio.kern_total_len = beio->io_len;
		io->scsiio.kern_rel_offset = 0;
		io->scsiio.kern_data_resid = 0;
		io->scsiio.kern_sg_entries = beio->num_segs;
		io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
#ifdef CTL_TIME_IO
		getbintime(&io->io_hdr.dma_start_bt);
#endif
		ctl_datamove(io);
	}
}
static void
ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
		       struct ctl_be_block_io *beio)
{
	struct bio *bio;
	union ctl_io *io;
	struct ctl_be_block_devdata *dev_data;

	dev_data = &be_lun->backend.dev;
	io = beio->io;

	DPRINTF("entered\n");

	/* This can't fail, it's a blocking allocation. */
	bio = g_alloc_bio();

	bio->bio_cmd = BIO_FLUSH;
	bio->bio_flags |= BIO_ORDERED;
	bio->bio_dev = dev_data->cdev;
	bio->bio_offset = 0;
	bio->bio_data = 0;
	bio->bio_done = ctl_be_block_biodone;
	bio->bio_caller1 = beio;
	bio->bio_pblkno = 0;

	/*
	 * We don't need to acquire the LUN lock here, because we are only
	 * sending one bio, and so there is no other context to synchronize
	 * with.
	 */
	beio->num_bios_sent = 1;
	beio->send_complete = 1;

	binuptime(&beio->ds_t0);
	devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);

	(*dev_data->csw->d_strategy)(bio);
}
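/*
 * Worked example for the splitting loop below (illustrative values):
 * with si_iosize_max of 64KB, a single 128KB S/G segment is issued as
 * two 64KB bios.  send_complete is set along with the last bio so that
 * ctl_be_block_biodone() can tell when the whole beio has finished.
 */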
static void
ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
			  struct ctl_be_block_io *beio)
{
	int i;
	struct bio *bio;
	struct ctl_be_block_devdata *dev_data;
	off_t cur_offset;
	int max_iosize;

	DPRINTF("entered\n");

	dev_data = &be_lun->backend.dev;

	/*
	 * We have to limit our I/O size to the maximum supported by the
	 * backend device.  Hopefully it is MAXPHYS.  If the driver doesn't
	 * set it properly, use DFLTPHYS.
	 */
	max_iosize = dev_data->cdev->si_iosize_max;
	if (max_iosize < PAGE_SIZE)
		max_iosize = DFLTPHYS;

	cur_offset = beio->io_offset;

	/*
	 * XXX KDM need to accurately reflect the number of I/Os outstanding
	 * to a device.
	 */
	binuptime(&beio->ds_t0);
	devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);

	for (i = 0; i < beio->num_segs; i++) {
		size_t cur_size;
		uint8_t *cur_ptr;

		cur_size = beio->sg_segs[i].len;
		cur_ptr = beio->sg_segs[i].addr;

		while (cur_size > 0) {
			/* This can't fail, it's a blocking allocation. */
			bio = g_alloc_bio();

			KASSERT(bio != NULL, ("g_alloc_bio() failed!\n"));

			bio->bio_cmd = beio->bio_cmd;
			bio->bio_flags |= beio->bio_flags;
			bio->bio_dev = dev_data->cdev;
			bio->bio_caller1 = beio;
			bio->bio_length = min(cur_size, max_iosize);
			bio->bio_offset = cur_offset;
			bio->bio_data = cur_ptr;
			bio->bio_done = ctl_be_block_biodone;
			bio->bio_pblkno = cur_offset / be_lun->blocksize;

			cur_offset += bio->bio_length;
			cur_ptr += bio->bio_length;
			cur_size -= bio->bio_length;

			/*
			 * Make sure we set the complete bit just before we
			 * issue the last bio so we don't wind up with a
			 * race.
			 *
			 * Use the LUN mutex here instead of a combination
			 * of atomic variables for simplicity.
			 *
			 * XXX KDM we could have a per-IO lock, but that
			 * would cause additional per-IO setup and teardown
			 * overhead.  Hopefully there won't be too much
			 * contention on the LUN lock.
			 */
			mtx_lock(&be_lun->lock);

			beio->num_bios_sent++;

			if ((i == beio->num_segs - 1)
			 && (cur_size == 0))
				beio->send_complete = 1;

			mtx_unlock(&be_lun->lock);

			(*dev_data->csw->d_strategy)(bio);
		}
	}
}
static void
ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
			 union ctl_io *io)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_softc *softc;

	DPRINTF("entered\n");

	softc = be_lun->softc;
	beio = ctl_alloc_beio(softc);
	KASSERT(beio != NULL, ("ctl_alloc_beio() failed"));

	beio->io = io;
	beio->softc = softc;
	beio->lun = be_lun;
	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = beio;

	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		beio->bio_cmd = BIO_FLUSH;
		beio->ds_trans_type = DEVSTAT_NO_DATA;
		beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
		beio->io_len = 0;
		be_lun->lun_flush(be_lun, beio);
		break;
	default:
		panic("Unhandled CDB type %#x", io->scsiio.cdb[0]);
		break;
	}
}

SDT_PROBE_DEFINE1(cbb, kernel, read, start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, read, alloc_done, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, alloc_done, "uint64_t");
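/*
 * Example of the LBA-to-byte math done below (illustrative values):
 * with a 512 byte blocksize, a request for 16 blocks at LBA 8 becomes
 * io_offset = 8 * 512 = 4096 and io_len = 16 * 512 = 8192, which is
 * then carved into MAXPHYS-sized S/G segments.
 */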
static void
ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
		      union ctl_io *io)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_softc *softc;
	struct ctl_lba_len lbalen;
	uint64_t len_left, io_size_bytes;
	int i;

	softc = be_lun->softc;

	DPRINTF("entered\n");

	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) {
		SDT_PROBE(cbb, kernel, read, start, 0, 0, 0, 0, 0);
	} else {
		SDT_PROBE(cbb, kernel, write, start, 0, 0, 0, 0, 0);
	}

	memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
	       sizeof(lbalen));

	io_size_bytes = lbalen.len * be_lun->blocksize;

	/*
	 * XXX KDM this is temporary, until we implement chaining of beio
	 * structures and multiple datamove calls to move all the data in
	 * or out.
	 */
	if (io_size_bytes > CTLBLK_MAX_IO_SIZE) {
		printf("%s: IO length %ju > max io size %u\n", __func__,
		       (uintmax_t)io_size_bytes, CTLBLK_MAX_IO_SIZE);
		ctl_set_invalid_field(&io->scsiio,
				      /*sks_valid*/ 0,
				      /*command*/ 1,
				      /*field*/ 0,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done(io);
		return;
	}

	beio = ctl_alloc_beio(softc);
	KASSERT(beio != NULL, ("ctl_alloc_beio() failed"));

	beio->io = io;
	beio->softc = softc;
	beio->lun = be_lun;
	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = beio;

	/*
	 * If the I/O came down with an ordered or head of queue tag, set
	 * the BIO_ORDERED attribute.  For head of queue tags, that's
	 * pretty much the best we can do.
	 *
	 * XXX KDM we don't have a great way to easily know about the FUA
	 * bit right now (it is decoded in ctl_read_write(), but we don't
	 * pass that knowledge to the backend), and in any case we would
	 * need to determine how to handle it.
	 */
	if ((io->scsiio.tag_type == CTL_TAG_ORDERED)
	 || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE))
		beio->bio_flags = BIO_ORDERED;

	switch (io->scsiio.tag_type) {
	case CTL_TAG_ORDERED:
		beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
		break;
	case CTL_TAG_HEAD_OF_QUEUE:
		beio->ds_tag_type = DEVSTAT_TAG_HEAD;
		break;
	case CTL_TAG_UNTAGGED:
	case CTL_TAG_SIMPLE:
	case CTL_TAG_ACA:
	default:
		beio->ds_tag_type = DEVSTAT_TAG_SIMPLE;
		break;
	}

	/*
	 * This path handles read and write only.  The config write path
	 * handles flush operations.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) {
		beio->bio_cmd = BIO_READ;
		beio->ds_trans_type = DEVSTAT_READ;
	} else {
		beio->bio_cmd = BIO_WRITE;
		beio->ds_trans_type = DEVSTAT_WRITE;
	}

	beio->io_len = lbalen.len * be_lun->blocksize;
	beio->io_offset = lbalen.lba * be_lun->blocksize;

	DPRINTF("%s at LBA %jx len %u\n",
	       (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE",
	       (uintmax_t)lbalen.lba, lbalen.len);

	for (i = 0, len_left = io_size_bytes; i < CTLBLK_MAX_SEGS &&
	     len_left > 0; i++) {

		/*
		 * Setup the S/G entry for this chunk.
		 */
		beio->sg_segs[i].len = min(MAXPHYS, len_left);
		beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK);

		DPRINTF("segment %d addr %p len %zd\n", i,
			beio->sg_segs[i].addr, beio->sg_segs[i].len);

		beio->num_segs++;
		len_left -= beio->sg_segs[i].len;
	}

	/*
	 * For the read case, we need to read the data into our buffers and
	 * then we can send it back to the user.  For the write case, we
	 * need to get the data from the user first.
	 */
	if (beio->bio_cmd == BIO_READ) {
		SDT_PROBE(cbb, kernel, read, alloc_done, 0, 0, 0, 0, 0);
		be_lun->dispatch(be_lun, beio);
	} else {
		SDT_PROBE(cbb, kernel, write, alloc_done, 0, 0, 0, 0, 0);
		io->scsiio.be_move_done = ctl_be_block_move_done;
		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
		io->scsiio.kern_data_len = beio->io_len;
		io->scsiio.kern_total_len = beio->io_len;
		io->scsiio.kern_rel_offset = 0;
		io->scsiio.kern_data_resid = 0;
		io->scsiio.kern_sg_entries = beio->num_segs;
		io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
#ifdef CTL_TIME_IO
		getbintime(&io->io_hdr.dma_start_bt);
#endif
		ctl_datamove(io);
	}
}
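/*
 * The worker drains the three queues in priority order: datamove
 * completions first (these already have a beio and buffers allocated),
 * then config writes, then new I/O from the input queue.  The LUN lock
 * is dropped around each dispatch because the dispatch routines can
 * sleep.
 */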
static void
ctl_be_block_worker(void *context, int pending)
{
	struct ctl_be_block_lun *be_lun;
	struct ctl_be_block_softc *softc;
	union ctl_io *io;

	be_lun = (struct ctl_be_block_lun *)context;
	softc = be_lun->softc;

	DPRINTF("entered\n");

	mtx_lock(&be_lun->lock);
	for (;;) {
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue);
		if (io != NULL) {
			struct ctl_be_block_io *beio;

			DPRINTF("datamove queue\n");

			STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr,
				      ctl_io_hdr, links);

			mtx_unlock(&be_lun->lock);

			beio = (struct ctl_be_block_io *)
			    io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr;

			be_lun->dispatch(be_lun, beio);

			mtx_lock(&be_lun->lock);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue);
		if (io != NULL) {

			DPRINTF("config write queue\n");

			STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr,
				      ctl_io_hdr, links);

			mtx_unlock(&be_lun->lock);

			ctl_be_block_cw_dispatch(be_lun, io);

			mtx_lock(&be_lun->lock);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue);
		if (io != NULL) {
			DPRINTF("input queue\n");

			STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr,
				      ctl_io_hdr, links);
			mtx_unlock(&be_lun->lock);

			/*
			 * We must drop the lock, since this routine and
			 * its children may sleep.
			 */
			ctl_be_block_dispatch(be_lun, io);

			mtx_lock(&be_lun->lock);
			continue;
		}

		/*
		 * If we get here, there is no work left in the queues, so
		 * just break out and let the task queue go to sleep.
		 */
		break;
	}
	mtx_unlock(&be_lun->lock);
}
/*
 * Entry point from CTL to the backend for I/O.  We queue everything to a
 * work thread, so this just puts the I/O on a queue and wakes up the
 * thread.
 */
static int
ctl_be_block_submit(union ctl_io *io)
{
	struct ctl_be_block_lun *be_lun;
	struct ctl_be_lun *ctl_be_lun;
	int retval;

	DPRINTF("entered\n");

	retval = CTL_RETVAL_COMPLETE;

	ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
		CTL_PRIV_BACKEND_LUN].ptr;
	be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun;

	/*
	 * Make sure we only get SCSI I/O.
	 */
	KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type "
		"%#x) encountered", io->io_hdr.io_type));

	mtx_lock(&be_lun->lock);
	/*
	 * XXX KDM make sure that links is okay to use at this point.
	 * Otherwise, we either need to add another field to ctl_io_hdr,
	 * or deal with resource allocation here.
	 */
	STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links);
	mtx_unlock(&be_lun->lock);

	taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);

	return (retval);
}

static int
ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
			int flag, struct thread *td)
{
	struct ctl_be_block_softc *softc;
	int error;

	softc = &backend_block_softc;

	error = 0;

	switch (cmd) {
	case CTL_LUN_REQ: {
		struct ctl_lun_req *lun_req;

		lun_req = (struct ctl_lun_req *)addr;

		switch (lun_req->reqtype) {
		case CTL_LUNREQ_CREATE:
			error = ctl_be_block_create(softc, lun_req);
			break;
		case CTL_LUNREQ_RM:
			error = ctl_be_block_rm(softc, lun_req);
			break;
		case CTL_LUNREQ_MODIFY:
			error = ctl_be_block_modify(softc, lun_req);
			break;
		default:
			lun_req->status = CTL_LUN_ERROR;
			snprintf(lun_req->error_str, sizeof(lun_req->error_str),
				 "%s: invalid LUN request type %d", __func__,
				 lun_req->reqtype);
			break;
		}
		break;
	}
	default:
		error = ENOTTY;
		break;
	}

	return (error);
}
static int
ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
{
	struct ctl_be_block_filedata *file_data;
	struct ctl_lun_create_params *params;
	struct vattr vattr;
	int error;

	error = 0;
	file_data = &be_lun->backend.file;
	params = &req->reqdata.create;

	be_lun->dev_type = CTL_BE_BLOCK_FILE;
	be_lun->dispatch = ctl_be_block_dispatch_file;
	be_lun->lun_flush = ctl_be_block_flush_file;

	error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred);
	if (error != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "error calling VOP_GETATTR() for file %s",
			 be_lun->dev_path);
		return (error);
	}

	/*
	 * Verify that we have the ability to upgrade to exclusive
	 * access on this file so we can trap errors at open instead
	 * of reporting them during first access.
	 */
	if (VOP_ISLOCKED(be_lun->vn) != LK_EXCLUSIVE) {
		vn_lock(be_lun->vn, LK_UPGRADE | LK_RETRY);
		if (be_lun->vn->v_iflag & VI_DOOMED) {
			error = EBADF;
			snprintf(req->error_str, sizeof(req->error_str),
				 "error locking file %s", be_lun->dev_path);
			return (error);
		}
	}

	file_data->cred = crhold(curthread->td_ucred);
	if (params->lun_size_bytes != 0)
		be_lun->size_bytes = params->lun_size_bytes;
	else
		be_lun->size_bytes = vattr.va_size;
	/*
	 * We set the multi thread flag for file operations because all
	 * filesystems (in theory) are capable of allowing multiple readers
	 * of a file at once.  So we want to get the maximum possible
	 * concurrency.
	 */
	be_lun->flags |= CTL_BE_BLOCK_LUN_MULTI_THREAD;

	/*
	 * XXX KDM vattr.va_blocksize may be larger than 512 bytes here.
	 * With ZFS, it is 131072 bytes.  Block sizes that large don't work
	 * with disklabel and UFS on FreeBSD at least.  Large block sizes
	 * may not work with other OSes as well.  So just export a sector
	 * size of 512 bytes, which should work with any OS or
	 * application.  Since our backing is a file, any block size will
	 * work fine for the backing store.
	 */
#if 0
	be_lun->blocksize = vattr.va_blocksize;
#endif
	if (params->blocksize_bytes != 0)
		be_lun->blocksize = params->blocksize_bytes;
	else
		be_lun->blocksize = 512;

	/*
	 * Sanity check.  The media size has to be at least one
	 * sector long.
	 */
	if (be_lun->size_bytes < be_lun->blocksize) {
		error = EINVAL;
		snprintf(req->error_str, sizeof(req->error_str),
			 "file %s size %ju < block size %u", be_lun->dev_path,
			 (uintmax_t)be_lun->size_bytes, be_lun->blocksize);
	}
	return (error);
}
static int
ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
{
	struct ctl_lun_create_params *params;
	struct vattr vattr;
	struct cdev *dev;
	struct cdevsw *devsw;
	int error;
	off_t ps, pss, po, pos;

	params = &req->reqdata.create;

	be_lun->dev_type = CTL_BE_BLOCK_DEV;
	be_lun->dispatch = ctl_be_block_dispatch_dev;
	be_lun->lun_flush = ctl_be_block_flush_dev;
	be_lun->backend.dev.cdev = be_lun->vn->v_rdev;
	be_lun->backend.dev.csw = dev_refthread(be_lun->backend.dev.cdev,
					     &be_lun->backend.dev.dev_ref);
	if (be_lun->backend.dev.csw == NULL)
		panic("Unable to retrieve device switch");

	error = VOP_GETATTR(be_lun->vn, &vattr, NOCRED);
	if (error) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error getting vnode attributes for device %s",
			 __func__, be_lun->dev_path);
		return (error);
	}

	dev = be_lun->vn->v_rdev;
	devsw = dev->si_devsw;
	if (!devsw->d_ioctl) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: no d_ioctl for device %s!", __func__,
			 be_lun->dev_path);
		return (ENODEV);
	}

	error = devsw->d_ioctl(dev, DIOCGSECTORSIZE,
			       (caddr_t)&be_lun->blocksize, FREAD,
			       curthread);
	if (error) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned for DIOCGSECTORSIZE ioctl "
			 "on %s!", __func__, error, be_lun->dev_path);
		return (error);
	}

	/*
	 * If the user has asked for a blocksize that is greater than the
	 * backing device's blocksize, we can do it only if the blocksize
	 * the user is asking for is an even multiple of the underlying
	 * device's blocksize.
	 */
	if ((params->blocksize_bytes != 0)
	 && (params->blocksize_bytes > be_lun->blocksize)) {
		uint32_t bs_multiple, tmp_blocksize;

		bs_multiple = params->blocksize_bytes / be_lun->blocksize;

		tmp_blocksize = bs_multiple * be_lun->blocksize;

		if (tmp_blocksize == params->blocksize_bytes) {
			be_lun->blocksize = params->blocksize_bytes;
		} else {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: requested blocksize %u is not an even "
				 "multiple of backing device blocksize %u",
				 __func__, params->blocksize_bytes,
				 be_lun->blocksize);
			return (EINVAL);
		}
	} else if ((params->blocksize_bytes != 0)
		&& (params->blocksize_bytes != be_lun->blocksize)) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: requested blocksize %u < backing device "
			 "blocksize %u", __func__, params->blocksize_bytes,
			 be_lun->blocksize);
		return (EINVAL);
	}

	error = devsw->d_ioctl(dev, DIOCGMEDIASIZE,
			       (caddr_t)&be_lun->size_bytes, FREAD,
			       curthread);
	if (error) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned for DIOCGMEDIASIZE ioctl "
			 "on %s!", __func__, error,
			 be_lun->dev_path);
		return (error);
	}

	if (params->lun_size_bytes != 0) {
		if (params->lun_size_bytes > be_lun->size_bytes) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: requested LUN size %ju > backing device "
				 "size %ju", __func__,
				 (uintmax_t)params->lun_size_bytes,
				 (uintmax_t)be_lun->size_bytes);
			return (EINVAL);
		}

		be_lun->size_bytes = params->lun_size_bytes;
	}
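	/*
	 * Worked example for the stripe probing below (illustrative
	 * values): a device reporting a 4096 byte stripe size and zero
	 * stripe offset with a 512 byte blocksize gives pss = 8 and
	 * pos = 0, so pblockexp = fls(8) - 1 = 3 (2^3 logical blocks
	 * per physical block) and pblockoff = 8 % 8 = 0.
	 */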
	error = devsw->d_ioctl(dev, DIOCGSTRIPESIZE,
			       (caddr_t)&ps, FREAD, curthread);
	if (error)
		ps = po = 0;
	else {
		error = devsw->d_ioctl(dev, DIOCGSTRIPEOFFSET,
				       (caddr_t)&po, FREAD, curthread);
		if (error)
			po = 0;
	}
	pss = ps / be_lun->blocksize;
	pos = po / be_lun->blocksize;
	if ((pss > 0) && (pss * be_lun->blocksize == ps) && (pss >= pos) &&
	    ((pss & (pss - 1)) == 0) && (pos * be_lun->blocksize == po)) {
		be_lun->pblockexp = fls(pss) - 1;
		be_lun->pblockoff = (pss - pos) % pss;
	}

	return (0);
}

static int
ctl_be_block_close(struct ctl_be_block_lun *be_lun)
{
	DROP_GIANT();
	if (be_lun->vn) {
		int flags = FREAD | FWRITE;
		int vfs_is_locked = 0;

		switch (be_lun->dev_type) {
		case CTL_BE_BLOCK_DEV:
			if (be_lun->backend.dev.csw) {
				dev_relthread(be_lun->backend.dev.cdev,
					      be_lun->backend.dev.dev_ref);
				be_lun->backend.dev.csw = NULL;
				be_lun->backend.dev.cdev = NULL;
			}
			break;
		case CTL_BE_BLOCK_FILE:
			vfs_is_locked = VFS_LOCK_GIANT(be_lun->vn->v_mount);
			break;
		case CTL_BE_BLOCK_NONE:
			break;
		default:
			panic("Unexpected backend type.");
			break;
		}

		(void)vn_close(be_lun->vn, flags, NOCRED, curthread);
		be_lun->vn = NULL;

		switch (be_lun->dev_type) {
		case CTL_BE_BLOCK_DEV:
			break;
		case CTL_BE_BLOCK_FILE:
			VFS_UNLOCK_GIANT(vfs_is_locked);
			if (be_lun->backend.file.cred != NULL) {
				crfree(be_lun->backend.file.cred);
				be_lun->backend.file.cred = NULL;
			}
			break;
		case CTL_BE_BLOCK_NONE:
			break;
		default:
			panic("Unexpected backend type.");
			break;
		}
	}
	PICKUP_GIANT();

	return (0);
}
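/*
 * Open the backing store.  A bare name is retried with "/dev/"
 * prepended (e.g. an illustrative dev_path of "md0" is reopened as
 * "/dev/md0"); anything else must be a fully qualified path.
 */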
static int
ctl_be_block_open(struct ctl_be_block_softc *softc,
		  struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
{
	struct nameidata nd;
	int flags;
	int error;
	int vfs_is_locked;

	/*
	 * XXX KDM allow a read-only option?
	 */
	flags = FREAD | FWRITE;
	error = 0;

	if (rootvnode == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: Root filesystem is not mounted", __func__);
		return (1);
	}

	if (!curthread->td_proc->p_fd->fd_cdir) {
		curthread->td_proc->p_fd->fd_cdir = rootvnode;
		VREF(rootvnode);
	}
	if (!curthread->td_proc->p_fd->fd_rdir) {
		curthread->td_proc->p_fd->fd_rdir = rootvnode;
		VREF(rootvnode);
	}
	if (!curthread->td_proc->p_fd->fd_jdir) {
		curthread->td_proc->p_fd->fd_jdir = rootvnode;
		VREF(rootvnode);
	}

 again:
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error) {
		/*
		 * This is the only reasonable guess we can make as far as
		 * path if the user doesn't give us a fully qualified path.
		 * If they want to specify a file, they need to specify the
		 * full path.
		 */
		if (be_lun->dev_path[0] != '/') {
			char *dev_path = "/dev/";
			char *dev_name;

			/* Try adding device path at beginning of name */
			dev_name = malloc(strlen(be_lun->dev_path)
					+ strlen(dev_path) + 1,
					  M_CTLBLK, M_WAITOK);
			if (dev_name) {
				sprintf(dev_name, "%s%s", dev_path,
					be_lun->dev_path);
				free(be_lun->dev_path, M_CTLBLK);
				be_lun->dev_path = dev_name;
				goto again;
			}
		}
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error opening %s", __func__, be_lun->dev_path);
		return (error);
	}

	vfs_is_locked = NDHASGIANT(&nd);

	NDFREE(&nd, NDF_ONLY_PNBUF);

	be_lun->vn = nd.ni_vp;

	/* We only support disks and files. */
	if (vn_isdisk(be_lun->vn, &error)) {
		error = ctl_be_block_open_dev(be_lun, req);
	} else if (be_lun->vn->v_type == VREG) {
		error = ctl_be_block_open_file(be_lun, req);
	} else {
		error = EINVAL;
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s is not a disk or plain file", be_lun->dev_path);
	}
	VOP_UNLOCK(be_lun->vn, 0);
	VFS_UNLOCK_GIANT(vfs_is_locked);

	if (error != 0) {
		ctl_be_block_close(be_lun);
		return (error);
	}

	be_lun->blocksize_shift = fls(be_lun->blocksize) - 1;
	be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift;

	return (0);
}
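/*
 * LUN creation requests arrive through the CTL_LUN_REQ ioctl, normally
 * issued by ctladm(8).  An illustrative invocation (the exact options
 * are a userland convention, not defined in this file):
 *
 *	ctladm create -b block -o file=/dev/md0 -o num_threads=4
 */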
static int
ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
{
	struct ctl_be_block_lun *be_lun;
	struct ctl_lun_create_params *params;
	struct ctl_be_arg *file_arg;
	char tmpstr[32];
	int retval, num_threads;
	int i;

	params = &req->reqdata.create;
	retval = 0;

	num_threads = cbb_num_threads;

	file_arg = NULL;

	be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK);

	be_lun->softc = softc;
	STAILQ_INIT(&be_lun->input_queue);
	STAILQ_INIT(&be_lun->config_write_queue);
	STAILQ_INIT(&be_lun->datamove_queue);
	sprintf(be_lun->lunname, "cblk%d", softc->num_luns);
	mtx_init(&be_lun->lock, be_lun->lunname, NULL, MTX_DEF);

	be_lun->lun_zone = uma_zcreate(be_lun->lunname, MAXPHYS,
	    NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0);

	if (be_lun->lun_zone == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error allocating UMA zone", __func__);
		goto bailout_error;
	}

	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
		be_lun->ctl_be_lun.lun_type = params->device_type;
	else
		be_lun->ctl_be_lun.lun_type = T_DIRECT;

	if (be_lun->ctl_be_lun.lun_type == T_DIRECT) {
		for (i = 0; i < req->num_be_args; i++) {
			if (strcmp(req->kern_be_args[i].kname, "file") == 0) {
				file_arg = &req->kern_be_args[i];
				break;
			}
		}

		if (file_arg == NULL) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: no file argument specified", __func__);
			goto bailout_error;
		}

		be_lun->dev_path = malloc(file_arg->vallen, M_CTLBLK,
					  M_WAITOK | M_ZERO);

		strlcpy(be_lun->dev_path, (char *)file_arg->kvalue,
			file_arg->vallen);

		retval = ctl_be_block_open(softc, be_lun, req);
		if (retval != 0) {
			retval = 0;
			goto bailout_error;
		}

		/*
		 * Tell the user the size of the file/device.
		 */
		params->lun_size_bytes = be_lun->size_bytes;

		/*
		 * The maximum LBA is the size - 1.
		 */
		be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
	} else {
		/*
		 * For processor devices, we don't have any size.
		 */
		be_lun->blocksize = 0;
		be_lun->pblockexp = 0;
		be_lun->pblockoff = 0;
		be_lun->size_blocks = 0;
		be_lun->size_bytes = 0;
		be_lun->ctl_be_lun.maxlba = 0;
		params->lun_size_bytes = 0;

		/*
		 * Default to just 1 thread for processor devices.
		 */
		num_threads = 1;
	}

	/*
	 * XXX This searching loop might be refactored to be combined with
	 * the loop above.
	 */
	for (i = 0; i < req->num_be_args; i++) {
		if (strcmp(req->kern_be_args[i].kname, "num_threads") == 0) {
			struct ctl_be_arg *thread_arg;
			char num_thread_str[16];
			int tmp_num_threads;

			thread_arg = &req->kern_be_args[i];

			strlcpy(num_thread_str, (char *)thread_arg->kvalue,
				min(thread_arg->vallen,
				sizeof(num_thread_str)));

			tmp_num_threads = strtol(num_thread_str, NULL, 0);

			/*
			 * We don't let the user specify less than one
			 * thread, but hope he's clueful enough not to
			 * specify 1000 threads.
			 */
			if (tmp_num_threads < 1) {
				snprintf(req->error_str, sizeof(req->error_str),
					 "%s: invalid number of threads %s",
					 __func__, num_thread_str);
				goto bailout_error;
			}

			num_threads = tmp_num_threads;
		}
	}

	be_lun->flags = CTL_BE_BLOCK_LUN_UNCONFIGURED;
	be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
	be_lun->ctl_be_lun.be_lun = be_lun;
	be_lun->ctl_be_lun.blocksize = be_lun->blocksize;
	be_lun->ctl_be_lun.pblockexp = be_lun->pblockexp;
	be_lun->ctl_be_lun.pblockoff = be_lun->pblockoff;
	/* Tell the user the blocksize we ended up using */
	params->blocksize_bytes = be_lun->blocksize;
	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
		be_lun->ctl_be_lun.req_lun_id = params->req_lun_id;
		be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ;
	} else
		be_lun->ctl_be_lun.req_lun_id = 0;

	be_lun->ctl_be_lun.lun_shutdown = ctl_be_block_lun_shutdown;
	be_lun->ctl_be_lun.lun_config_status =
		ctl_be_block_lun_config_status;
	be_lun->ctl_be_lun.be = &ctl_be_block_driver;

	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
			 softc->num_luns);
		strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr,
			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
			sizeof(tmpstr)));

		/* Tell the user what we used for a serial number */
		strncpy((char *)params->serial_num, tmpstr,
			ctl_min(sizeof(params->serial_num), sizeof(tmpstr)));
	} else {
		strncpy((char *)be_lun->ctl_be_lun.serial_num,
			params->serial_num,
			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
			sizeof(params->serial_num)));
	}
	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
		strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr,
			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
			sizeof(tmpstr)));

		/* Tell the user what we used for a device ID */
		strncpy((char *)params->device_id, tmpstr,
			ctl_min(sizeof(params->device_id), sizeof(tmpstr)));
	} else {
		strncpy((char *)be_lun->ctl_be_lun.device_id,
			params->device_id,
			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
			sizeof(params->device_id)));
	}

	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_be_block_worker, be_lun);

	be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);

	if (be_lun->io_taskqueue == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: Unable to create taskqueue", __func__);
		goto bailout_error;
	}

	/*
	 * Note that we start the same number of threads by default for
	 * both the file case and the block device case.  For the file
	 * case, we need multiple threads to allow concurrency, because the
	 * vnode interface is designed to be a blocking interface.  For the
	 * block device case, ZFS zvols at least will block the caller's
	 * context in many instances, and so we need multiple threads to
	 * overcome that problem.  Other block devices don't need as many
	 * threads, but they shouldn't cause too many problems.
	 *
	 * If the user wants to just have a single thread for a block
	 * device, he can specify that when the LUN is created, or change
	 * the tunable/sysctl to alter the default number of threads.
	 */
	retval = taskqueue_start_threads(&be_lun->io_taskqueue,
					 /*num threads*/num_threads,
					 /*priority*/PWAIT,
					 /*thread name*/
					 "%s taskq", be_lun->lunname);

	if (retval != 0)
		goto bailout_error;

	be_lun->num_threads = num_threads;

	mtx_lock(&softc->lock);
	softc->num_luns++;
	STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);

	mtx_unlock(&softc->lock);

	retval = ctl_add_lun(&be_lun->ctl_be_lun);
	if (retval != 0) {
		mtx_lock(&softc->lock);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun,
			      links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: ctl_add_lun() returned error %d, see dmesg for "
			 "details", __func__, retval);
		retval = 0;
		goto bailout_error;
	}

	mtx_lock(&softc->lock);

	/*
	 * Tell the config_status routine that we're waiting so it won't
	 * clean up the LUN in the event of an error.
	 */
	be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING;

	while (be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;

	if (be_lun->flags & CTL_BE_BLOCK_LUN_CONFIG_ERR) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN configuration error, see dmesg for details",
			 __func__);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun,
			      links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	} else {
		params->req_lun_id = be_lun->ctl_be_lun.lun_id;
	}

	mtx_unlock(&softc->lock);

	be_lun->disk_stats = devstat_new_entry("cbb", params->req_lun_id,
					       be_lun->blocksize,
					       DEVSTAT_ALL_SUPPORTED,
					       be_lun->ctl_be_lun.lun_type
					       | DEVSTAT_TYPE_IF_OTHER,
					       DEVSTAT_PRIORITY_OTHER);

	req->status = CTL_LUN_OK;

	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;

	ctl_be_block_close(be_lun);

	free(be_lun->dev_path, M_CTLBLK);
	free(be_lun, M_CTLBLK);

	return (retval);
}
static int
ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
{
	struct ctl_lun_rm_params *params;
	struct ctl_be_block_lun *be_lun;
	int retval;

	params = &req->reqdata.rm;

	mtx_lock(&softc->lock);

	be_lun = NULL;

	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->ctl_be_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);

	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN %u is not managed by the block backend",
			 __func__, params->lun_id);
		goto bailout_error;
	}

	retval = ctl_disable_lun(&be_lun->ctl_be_lun);

	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned from ctl_disable_lun() for "
			 "LUN %d", __func__, retval, params->lun_id);
		goto bailout_error;
	}

	retval = ctl_invalidate_lun(&be_lun->ctl_be_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned from ctl_invalidate_lun() for "
			 "LUN %d", __func__, retval, params->lun_id);
		goto bailout_error;
	}

	mtx_lock(&softc->lock);

	be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING;

	while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0);
		if (retval == EINTR)
			break;
	}

	be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;

	if ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: interrupted waiting for LUN to be freed",
			 __func__);
		mtx_unlock(&softc->lock);
		goto bailout_error;
	}

	STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, links);

	softc->num_luns--;
	mtx_unlock(&softc->lock);

	taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task);

	taskqueue_free(be_lun->io_taskqueue);

	ctl_be_block_close(be_lun);

	if (be_lun->disk_stats != NULL)
		devstat_remove_entry(be_lun->disk_stats);

	uma_zdestroy(be_lun->lun_zone);

	free(be_lun->dev_path, M_CTLBLK);

	free(be_lun, M_CTLBLK);

	req->status = CTL_LUN_OK;

	return (0);

bailout_error:

	req->status = CTL_LUN_ERROR;

	return (0);
}

static int
ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun,
			 struct ctl_lun_req *req)
{
	struct vattr vattr;
	int error;
	struct ctl_lun_modify_params *params;

	params = &req->reqdata.modify;

	if (params->lun_size_bytes != 0) {
		be_lun->size_bytes = params->lun_size_bytes;
	} else {
		error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred);
		if (error != 0) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "error calling VOP_GETATTR() for file %s",
				 be_lun->dev_path);
			return (error);
		}

		be_lun->size_bytes = vattr.va_size;
	}

	return (0);
}

static int
ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun,
			struct ctl_lun_req *req)
{
	struct cdev *dev;
	struct cdevsw *devsw;
	int error;
	struct ctl_lun_modify_params *params;
	uint64_t size_bytes;

	params = &req->reqdata.modify;

	dev = be_lun->vn->v_rdev;
	devsw = dev->si_devsw;
	if (!devsw->d_ioctl) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: no d_ioctl for device %s!", __func__,
			 be_lun->dev_path);
		return (ENODEV);
	}

	error = devsw->d_ioctl(dev, DIOCGMEDIASIZE,
			       (caddr_t)&size_bytes, FREAD,
			       curthread);
	if (error) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned for DIOCGMEDIASIZE ioctl "
			 "on %s!", __func__, error, be_lun->dev_path);
		return (error);
	}

	if (params->lun_size_bytes != 0) {
		if (params->lun_size_bytes > size_bytes) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: requested LUN size %ju > backing device "
				 "size %ju", __func__,
				 (uintmax_t)params->lun_size_bytes,
				 (uintmax_t)size_bytes);
			return (EINVAL);
		}

		be_lun->size_bytes = params->lun_size_bytes;
	} else {
		be_lun->size_bytes = size_bytes;
	}

	return (0);
}
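/*
 * Resize requests also arrive via ctladm(8); an illustrative invocation
 * (again a userland convention) would be
 * "ctladm modify -b block -l 0 -s 10737418240".  A lun_size_bytes of
 * zero means "use the current size of the backing store", as handled by
 * the two helpers above.
 */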
static int
ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun,
			struct ctl_lun_req *req)
{
	struct cdev *dev;
	struct cdevsw *devsw;
	int error;
	struct ctl_lun_modify_params *params;
	uint64_t size_bytes;

	params = &req->reqdata.modify;

	dev = be_lun->vn->v_rdev;
	devsw = dev->si_devsw;
	if (!devsw->d_ioctl) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: no d_ioctl for device %s!", __func__,
			 be_lun->dev_path);
		return (ENODEV);
	}

	error = devsw->d_ioctl(dev, DIOCGMEDIASIZE,
			       (caddr_t)&size_bytes, FREAD,
			       curthread);
	if (error) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned for DIOCGMEDIASIZE ioctl "
			 "on %s!", __func__, error, be_lun->dev_path);
		return (error);
	}

	if (params->lun_size_bytes != 0) {
		if (params->lun_size_bytes > size_bytes) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: requested LUN size %ju > backing device "
				 "size %ju", __func__,
				 (uintmax_t)params->lun_size_bytes,
				 (uintmax_t)size_bytes);
			return (EINVAL);
		}

		be_lun->size_bytes = params->lun_size_bytes;
	} else {
		be_lun->size_bytes = size_bytes;
	}

	return (0);
}

static int
ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
{
	struct ctl_lun_modify_params *params;
	struct ctl_be_block_lun *be_lun;
	int vfs_is_locked, error;

	params = &req->reqdata.modify;

	mtx_lock(&softc->lock);

	be_lun = NULL;

	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->ctl_be_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);

	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN %u is not managed by the block backend",
			 __func__, params->lun_id);
		goto bailout_error;
	}

	if (params->lun_size_bytes != 0) {
		if (params->lun_size_bytes < be_lun->blocksize) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: LUN size %ju < blocksize %u", __func__,
				 (uintmax_t)params->lun_size_bytes,
				 be_lun->blocksize);
			goto bailout_error;
		}
	}

	vfs_is_locked = VFS_LOCK_GIANT(be_lun->vn->v_mount);
	vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);

	if (be_lun->vn->v_type == VREG)
		error = ctl_be_block_modify_file(be_lun, req);
	else
		error = ctl_be_block_modify_dev(be_lun, req);

	VOP_UNLOCK(be_lun->vn, 0);
	VFS_UNLOCK_GIANT(vfs_is_locked);

	if (error != 0)
		goto bailout_error;

	be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift;

	/*
	 * The maximum LBA is the size - 1.
	 *
	 * XXX: Note that this field is being updated without locking,
	 * which might cause problems on 32-bit architectures.
	 */
	be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
	ctl_lun_capacity_changed(&be_lun->ctl_be_lun);

	/* Tell the user the exact size we ended up using. */
	params->lun_size_bytes = be_lun->size_bytes;

	req->status = CTL_LUN_OK;

	return (0);

bailout_error:
	req->status = CTL_LUN_ERROR;

	return (0);
}

static void
ctl_be_block_lun_shutdown(void *be_lun)
{
	struct ctl_be_block_lun *lun;
	struct ctl_be_block_softc *softc;

	lun = (struct ctl_be_block_lun *)be_lun;
	softc = lun->softc;

	mtx_lock(&softc->lock);
	lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED;
	if (lun->flags & CTL_BE_BLOCK_LUN_WAITING)
		wakeup(lun);
	mtx_unlock(&softc->lock);
}
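
/*
 * The shutdown and config_status callbacks use a simple sleep/wakeup
 * handshake with ctl_be_block_create() and ctl_be_block_rm(), sketched
 * here for reference (both sides run with softc->lock held):
 *
 *	// waiter (create/rm):
 *	be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING;
 *	while (the flag of interest has not changed) {
 *		if (msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0) ==
 *		    EINTR)
 *			break;		// interrupted by a signal, give up
 *	}
 *	be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;
 *
 *	// waker (these callbacks):
 *	update be_lun->flags;
 *	if (be_lun->flags & CTL_BE_BLOCK_LUN_WAITING)
 *		wakeup(be_lun);
 *
 * The WAITING flag avoids needless wakeup() calls and tells the
 * config_status callback below not to tear down a LUN that a waiter is
 * still examining.
 */
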
static void
ctl_be_block_lun_config_status(void *be_lun, ctl_lun_config_status status)
{
	struct ctl_be_block_lun *lun;
	struct ctl_be_block_softc *softc;

	lun = (struct ctl_be_block_lun *)be_lun;
	softc = lun->softc;

	if (status == CTL_LUN_CONFIG_OK) {
		mtx_lock(&softc->lock);
		lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED;
		if (lun->flags & CTL_BE_BLOCK_LUN_WAITING)
			wakeup(lun);
		mtx_unlock(&softc->lock);

		/*
		 * We successfully added the LUN, attempt to enable it.
		 */
		if (ctl_enable_lun(&lun->ctl_be_lun) != 0) {
			printf("%s: ctl_enable_lun() failed!\n", __func__);
			if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) {
				printf("%s: ctl_invalidate_lun() failed!\n",
				       __func__);
			}
		}

		return;
	}

	mtx_lock(&softc->lock);
	lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED;
	lun->flags |= CTL_BE_BLOCK_LUN_CONFIG_ERR;
	wakeup(lun);
	mtx_unlock(&softc->lock);
}

static int
ctl_be_block_config_write(union ctl_io *io)
{
	struct ctl_be_block_lun *be_lun;
	struct ctl_be_lun *ctl_be_lun;
	int retval;

	retval = 0;

	DPRINTF("entered\n");

	ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
		CTL_PRIV_BACKEND_LUN].ptr;
	be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun;

	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		/*
		 * The upper level CTL code will filter out any CDBs with
		 * the immediate bit set and return the proper error.
		 *
		 * We don't really need to worry about what LBA range the
		 * user asked to be synced out.  When they issue a sync
		 * cache command, we'll sync out the whole thing.
		 */
		mtx_lock(&be_lun->lock);
		STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr,
				   links);
		mtx_unlock(&be_lun->lock);
		taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
		break;
	case START_STOP_UNIT: {
		struct scsi_start_stop_unit *cdb;

		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;

		if (cdb->how & SSS_START) {
			retval = ctl_start_lun(ctl_be_lun);
		} else {
			retval = ctl_stop_lun(ctl_be_lun);
			/*
			 * XXX KDM Copan-specific offline behavior.
			 * Figure out a reasonable way to port this?
			 */
#ifdef NEEDTOPORT
			if ((retval == 0)
			 && (cdb->byte2 & SSS_ONOFFLINE))
				retval = ctl_lun_offline(ctl_be_lun);
#endif
		}

		/*
		 * In general, the above routines should not fail.  They
		 * just set state for the LUN.  So we've got something
		 * pretty wrong here if we can't start or stop the LUN.
		 */
		if (retval != 0) {
			ctl_set_internal_failure(&io->scsiio,
						 /*sks_valid*/ 1,
						 /*retry_count*/ 0xf051);
			retval = CTL_RETVAL_COMPLETE;
		} else {
			ctl_set_success(&io->scsiio);
		}
		ctl_config_write_done(io);
		break;
	}
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_write_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}

static int
ctl_be_block_config_read(union ctl_io *io)
{
	return (0);
}
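
/*
 * For reference, the START STOP UNIT CDB decoded above has the layout
 * below (see struct scsi_start_stop_unit in <cam/scsi/scsi_da.h>); this
 * is a descriptive sketch, not a new definition:
 *
 *	struct scsi_start_stop_unit {
 *		uint8_t opcode;		// 0x1b (START_STOP_UNIT)
 *		uint8_t byte2;		// immediate bit
 *		uint8_t reserved[2];
 *		uint8_t how;		// SSS_START, SSS_LOEJ, ...
 *		uint8_t control;
 *	};
 *
 * Only the SSS_START bit in 'how' matters to this backend; media
 * load/eject has no meaning for a file- or device-backed LUN.
 */
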
static int
ctl_be_block_lun_info(void *be_lun, struct sbuf *sb)
{
	struct ctl_be_block_lun *lun;
	int retval;

	lun = (struct ctl_be_block_lun *)be_lun;
	retval = 0;

	retval = sbuf_printf(sb, "<num_threads>");
	if (retval != 0)
		goto bailout;

	retval = sbuf_printf(sb, "%d", lun->num_threads);
	if (retval != 0)
		goto bailout;

	retval = sbuf_printf(sb, "</num_threads>");

	/*
	 * For processor devices, we don't have a path variable.
	 */
	if ((retval != 0)
	 || (lun->dev_path == NULL))
		goto bailout;

	retval = sbuf_printf(sb, "<file>");
	if (retval != 0)
		goto bailout;

	retval = ctl_sbuf_printf_esc(sb, lun->dev_path);
	if (retval != 0)
		goto bailout;

	retval = sbuf_printf(sb, "</file>\n");

bailout:
	return (retval);
}

int
ctl_be_block_init(void)
{
	struct ctl_be_block_softc *softc;
	int retval;

	softc = &backend_block_softc;
	retval = 0;

	mtx_init(&softc->lock, "ctlblk", NULL, MTX_DEF);
	beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io),
				NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	STAILQ_INIT(&softc->disk_list);
	STAILQ_INIT(&softc->lun_list);

	return (retval);
}
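
/*
 * For reference, ctl_be_block_lun_info() above contributes a fragment
 * like the following to the XML LUN description that CTL exports (the
 * thread count and path shown are examples only):
 *
 *	<num_threads>14</num_threads><file>/dev/zvol/tank/vol0</file>
 *
 * The path is escaped with ctl_sbuf_printf_esc(), and the <file> element
 * is omitted for LUNs that have no backing path, such as processor-type
 * LUNs.
 */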