ctl_backend_block.c revision 272616
1/*- 2 * Copyright (c) 2003 Silicon Graphics International Corp. 3 * Copyright (c) 2009-2011 Spectra Logic Corporation 4 * Copyright (c) 2012 The FreeBSD Foundation 5 * All rights reserved. 6 * 7 * Portions of this software were developed by Edward Tomasz Napierala 8 * under sponsorship from the FreeBSD Foundation. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions, and the following disclaimer, 15 * without modification. 16 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 17 * substantially similar to the "NO WARRANTY" disclaimer below 18 * ("Disclaimer") and any redistribution must be conditioned upon 19 * including a substantially similar Disclaimer requirement for further 20 * binary redistribution. 21 * 22 * NO WARRANTY 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 27 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 32 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 * POSSIBILITY OF SUCH DAMAGES. 34 * 35 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $ 36 */ 37/* 38 * CAM Target Layer driver backend for block devices. 39 * 40 * Author: Ken Merry <ken@FreeBSD.org> 41 */ 42#include <sys/cdefs.h> 43__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl_backend_block.c 272616 2014-10-06 12:35:41Z mav $"); 44 45#include <opt_kdtrace.h> 46 47#include <sys/param.h> 48#include <sys/systm.h> 49#include <sys/kernel.h> 50#include <sys/types.h> 51#include <sys/kthread.h> 52#include <sys/bio.h> 53#include <sys/fcntl.h> 54#include <sys/limits.h> 55#include <sys/lock.h> 56#include <sys/mutex.h> 57#include <sys/condvar.h> 58#include <sys/malloc.h> 59#include <sys/conf.h> 60#include <sys/ioccom.h> 61#include <sys/queue.h> 62#include <sys/sbuf.h> 63#include <sys/endian.h> 64#include <sys/uio.h> 65#include <sys/buf.h> 66#include <sys/taskqueue.h> 67#include <sys/vnode.h> 68#include <sys/namei.h> 69#include <sys/mount.h> 70#include <sys/disk.h> 71#include <sys/fcntl.h> 72#include <sys/filedesc.h> 73#include <sys/proc.h> 74#include <sys/pcpu.h> 75#include <sys/module.h> 76#include <sys/sdt.h> 77#include <sys/devicestat.h> 78#include <sys/sysctl.h> 79 80#include <geom/geom.h> 81 82#include <cam/cam.h> 83#include <cam/scsi/scsi_all.h> 84#include <cam/scsi/scsi_da.h> 85#include <cam/ctl/ctl_io.h> 86#include <cam/ctl/ctl.h> 87#include <cam/ctl/ctl_backend.h> 88#include <cam/ctl/ctl_frontend_internal.h> 89#include <cam/ctl/ctl_ioctl.h> 90#include <cam/ctl/ctl_scsi_all.h> 91#include <cam/ctl/ctl_error.h> 92 93/* 94 * The idea here is that we'll allocate enough S/G space to hold a 1MB 95 * I/O. If we get an I/O larger than that, we'll split it. 
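 *
 * As a worked example (assuming the common default MAXPHYS of 128 kB):
 * CTLBLK_MAX_SEG is then 128 kB, so CTLBLK_HALF_SEGS works out to
 * 512 kB / 128 kB = 4 and CTLBLK_MAX_SEGS to 8, i.e. up to eight 128 kB
 * segments per 1 MB of I/O.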
 */
#define	CTLBLK_HALF_IO_SIZE	(512 * 1024)
#define	CTLBLK_MAX_IO_SIZE	(CTLBLK_HALF_IO_SIZE * 2)
#define	CTLBLK_MAX_SEG		MAXPHYS
#define	CTLBLK_HALF_SEGS	MAX(CTLBLK_HALF_IO_SIZE / CTLBLK_MAX_SEG, 1)
#define	CTLBLK_MAX_SEGS		(CTLBLK_HALF_SEGS * 2)

#ifdef CTLBLK_DEBUG
#define DPRINTF(fmt, args...) \
	printf("cbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
#else
#define DPRINTF(fmt, args...) do {} while(0)
#endif

#define PRIV(io)	\
    ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND])
#define ARGS(io)	\
    ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN])

SDT_PROVIDER_DEFINE(cbb);

typedef enum {
	CTL_BE_BLOCK_LUN_UNCONFIGURED	= 0x01,
	CTL_BE_BLOCK_LUN_CONFIG_ERR	= 0x02,
	CTL_BE_BLOCK_LUN_WAITING	= 0x04,
	CTL_BE_BLOCK_LUN_MULTI_THREAD	= 0x08
} ctl_be_block_lun_flags;

typedef enum {
	CTL_BE_BLOCK_NONE,
	CTL_BE_BLOCK_DEV,
	CTL_BE_BLOCK_FILE
} ctl_be_block_type;

struct ctl_be_block_devdata {
	struct cdev *cdev;
	struct cdevsw *csw;
	int dev_ref;
};

struct ctl_be_block_filedata {
	struct ucred *cred;
};

union ctl_be_block_bedata {
	struct ctl_be_block_devdata dev;
	struct ctl_be_block_filedata file;
};

struct ctl_be_block_io;
struct ctl_be_block_lun;

typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun,
			       struct ctl_be_block_io *beio);

/*
 * Backend LUN structure.  There is a 1:1 mapping between a block device
 * and a backend block LUN, and between a backend block LUN and a CTL LUN.
 */
struct ctl_be_block_lun {
	struct ctl_block_disk *disk;
	char lunname[32];
	char *dev_path;
	ctl_be_block_type dev_type;
	struct vnode *vn;
	union ctl_be_block_bedata backend;
	cbb_dispatch_t dispatch;
	cbb_dispatch_t lun_flush;
	cbb_dispatch_t unmap;
	uma_zone_t lun_zone;
	uint64_t size_blocks;
	uint64_t size_bytes;
	uint32_t blocksize;
	int blocksize_shift;
	uint16_t pblockexp;
	uint16_t pblockoff;
	struct ctl_be_block_softc *softc;
	struct devstat *disk_stats;
	ctl_be_block_lun_flags flags;
	STAILQ_ENTRY(ctl_be_block_lun) links;
	struct ctl_be_lun ctl_be_lun;
	struct taskqueue *io_taskqueue;
	struct task io_task;
	int num_threads;
	STAILQ_HEAD(, ctl_io_hdr) input_queue;
	STAILQ_HEAD(, ctl_io_hdr) config_write_queue;
	STAILQ_HEAD(, ctl_io_hdr) datamove_queue;
	struct mtx_padalign io_lock;
	struct mtx_padalign queue_lock;
};

/*
 * Overall softc structure for the block backend module.
 */
struct ctl_be_block_softc {
	struct mtx			 lock;
	int				 num_disks;
	STAILQ_HEAD(, ctl_block_disk)	 disk_list;
	int				 num_luns;
	STAILQ_HEAD(, ctl_be_block_lun)	 lun_list;
};

static struct ctl_be_block_softc backend_block_softc;

/*
 * Per-I/O information.
202 */ 203struct ctl_be_block_io { 204 union ctl_io *io; 205 struct ctl_sg_entry sg_segs[CTLBLK_MAX_SEGS]; 206 struct iovec xiovecs[CTLBLK_MAX_SEGS]; 207 int bio_cmd; 208 int num_segs; 209 int num_bios_sent; 210 int num_bios_done; 211 int send_complete; 212 int num_errors; 213 struct bintime ds_t0; 214 devstat_tag_type ds_tag_type; 215 devstat_trans_flags ds_trans_type; 216 uint64_t io_len; 217 uint64_t io_offset; 218 struct ctl_be_block_softc *softc; 219 struct ctl_be_block_lun *lun; 220 void (*beio_cont)(struct ctl_be_block_io *beio); /* to continue processing */ 221}; 222 223static int cbb_num_threads = 14; 224TUNABLE_INT("kern.cam.ctl.block.num_threads", &cbb_num_threads); 225SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0, 226 "CAM Target Layer Block Backend"); 227SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RW, 228 &cbb_num_threads, 0, "Number of threads per backing file"); 229 230static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc); 231static void ctl_free_beio(struct ctl_be_block_io *beio); 232static void ctl_complete_beio(struct ctl_be_block_io *beio); 233static int ctl_be_block_move_done(union ctl_io *io); 234static void ctl_be_block_biodone(struct bio *bio); 235static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, 236 struct ctl_be_block_io *beio); 237static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun, 238 struct ctl_be_block_io *beio); 239static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun, 240 struct ctl_be_block_io *beio); 241static void ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, 242 struct ctl_be_block_io *beio); 243static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, 244 struct ctl_be_block_io *beio); 245static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun, 246 union ctl_io *io); 247static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, 248 union ctl_io *io); 249static void ctl_be_block_worker(void *context, int pending); 250static int ctl_be_block_submit(union ctl_io *io); 251static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, 252 int flag, struct thread *td); 253static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, 254 struct ctl_lun_req *req); 255static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, 256 struct ctl_lun_req *req); 257static int ctl_be_block_close(struct ctl_be_block_lun *be_lun); 258static int ctl_be_block_open(struct ctl_be_block_softc *softc, 259 struct ctl_be_block_lun *be_lun, 260 struct ctl_lun_req *req); 261static int ctl_be_block_create(struct ctl_be_block_softc *softc, 262 struct ctl_lun_req *req); 263static int ctl_be_block_rm(struct ctl_be_block_softc *softc, 264 struct ctl_lun_req *req); 265static int ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun, 266 struct ctl_lun_req *req); 267static int ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun, 268 struct ctl_lun_req *req); 269static int ctl_be_block_modify(struct ctl_be_block_softc *softc, 270 struct ctl_lun_req *req); 271static void ctl_be_block_lun_shutdown(void *be_lun); 272static void ctl_be_block_lun_config_status(void *be_lun, 273 ctl_lun_config_status status); 274static int ctl_be_block_config_write(union ctl_io *io); 275static int ctl_be_block_config_read(union ctl_io *io); 276static int ctl_be_block_lun_info(void *be_lun, struct sbuf *sb); 277int ctl_be_block_init(void); 278 279static struct ctl_backend_driver ctl_be_block_driver = 280{ 281 .name = "block", 282 
.flags = CTL_BE_FLAG_HAS_CONFIG, 283 .init = ctl_be_block_init, 284 .data_submit = ctl_be_block_submit, 285 .data_move_done = ctl_be_block_move_done, 286 .config_read = ctl_be_block_config_read, 287 .config_write = ctl_be_block_config_write, 288 .ioctl = ctl_be_block_ioctl, 289 .lun_info = ctl_be_block_lun_info 290}; 291 292MALLOC_DEFINE(M_CTLBLK, "ctlblk", "Memory used for CTL block backend"); 293CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver); 294 295static uma_zone_t beio_zone; 296 297static struct ctl_be_block_io * 298ctl_alloc_beio(struct ctl_be_block_softc *softc) 299{ 300 struct ctl_be_block_io *beio; 301 302 beio = uma_zalloc(beio_zone, M_WAITOK | M_ZERO); 303 beio->softc = softc; 304 return (beio); 305} 306 307static void 308ctl_free_beio(struct ctl_be_block_io *beio) 309{ 310 int duplicate_free; 311 int i; 312 313 duplicate_free = 0; 314 315 for (i = 0; i < beio->num_segs; i++) { 316 if (beio->sg_segs[i].addr == NULL) 317 duplicate_free++; 318 319 uma_zfree(beio->lun->lun_zone, beio->sg_segs[i].addr); 320 beio->sg_segs[i].addr = NULL; 321 322 /* For compare we had two equal S/G lists. */ 323 if (ARGS(beio->io)->flags & CTL_LLF_COMPARE) { 324 uma_zfree(beio->lun->lun_zone, 325 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr); 326 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr = NULL; 327 } 328 } 329 330 if (duplicate_free > 0) { 331 printf("%s: %d duplicate frees out of %d segments\n", __func__, 332 duplicate_free, beio->num_segs); 333 } 334 335 uma_zfree(beio_zone, beio); 336} 337 338static void 339ctl_complete_beio(struct ctl_be_block_io *beio) 340{ 341 union ctl_io *io = beio->io; 342 343 if (beio->beio_cont != NULL) { 344 beio->beio_cont(beio); 345 } else { 346 ctl_free_beio(beio); 347 ctl_data_submit_done(io); 348 } 349} 350 351static int 352ctl_be_block_move_done(union ctl_io *io) 353{ 354 struct ctl_be_block_io *beio; 355 struct ctl_be_block_lun *be_lun; 356 struct ctl_lba_len_flags *lbalen; 357#ifdef CTL_TIME_IO 358 struct bintime cur_bt; 359#endif 360 int i; 361 362 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 363 be_lun = beio->lun; 364 365 DPRINTF("entered\n"); 366 367#ifdef CTL_TIME_IO 368 getbintime(&cur_bt); 369 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); 370 bintime_add(&io->io_hdr.dma_bt, &cur_bt); 371 io->io_hdr.num_dmas++; 372#endif 373 io->scsiio.kern_rel_offset += io->scsiio.kern_data_len; 374 375 /* 376 * We set status at this point for read commands, and write 377 * commands with errors. 378 */ 379 if ((io->io_hdr.port_status == 0) && 380 ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) && 381 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) { 382 lbalen = ARGS(beio->io); 383 if (lbalen->flags & CTL_LLF_READ) { 384 ctl_set_success(&io->scsiio); 385 } else if (lbalen->flags & CTL_LLF_COMPARE) { 386 /* We have two data blocks ready for comparison. 
*/ 387 for (i = 0; i < beio->num_segs; i++) { 388 if (memcmp(beio->sg_segs[i].addr, 389 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr, 390 beio->sg_segs[i].len) != 0) 391 break; 392 } 393 if (i < beio->num_segs) 394 ctl_set_sense(&io->scsiio, 395 /*current_error*/ 1, 396 /*sense_key*/ SSD_KEY_MISCOMPARE, 397 /*asc*/ 0x1D, 398 /*ascq*/ 0x00, 399 SSD_ELEM_NONE); 400 else 401 ctl_set_success(&io->scsiio); 402 } 403 } 404 else if ((io->io_hdr.port_status != 0) 405 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) 406 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) { 407 /* 408 * For hardware error sense keys, the sense key 409 * specific value is defined to be a retry count, 410 * but we use it to pass back an internal FETD 411 * error code. XXX KDM Hopefully the FETD is only 412 * using 16 bits for an error code, since that's 413 * all the space we have in the sks field. 414 */ 415 ctl_set_internal_failure(&io->scsiio, 416 /*sks_valid*/ 1, 417 /*retry_count*/ 418 io->io_hdr.port_status); 419 } 420 421 /* 422 * If this is a read, or a write with errors, it is done. 423 */ 424 if ((beio->bio_cmd == BIO_READ) 425 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0) 426 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) { 427 ctl_complete_beio(beio); 428 return (0); 429 } 430 431 /* 432 * At this point, we have a write and the DMA completed 433 * successfully. We now have to queue it to the task queue to 434 * execute the backend I/O. That is because we do blocking 435 * memory allocations, and in the file backing case, blocking I/O. 436 * This move done routine is generally called in the SIM's 437 * interrupt context, and therefore we cannot block. 438 */ 439 mtx_lock(&be_lun->queue_lock); 440 /* 441 * XXX KDM make sure that links is okay to use at this point. 442 * Otherwise, we either need to add another field to ctl_io_hdr, 443 * or deal with resource allocation here. 444 */ 445 STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links); 446 mtx_unlock(&be_lun->queue_lock); 447 448 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 449 450 return (0); 451} 452 453static void 454ctl_be_block_biodone(struct bio *bio) 455{ 456 struct ctl_be_block_io *beio; 457 struct ctl_be_block_lun *be_lun; 458 union ctl_io *io; 459 int error; 460 461 beio = bio->bio_caller1; 462 be_lun = beio->lun; 463 io = beio->io; 464 465 DPRINTF("entered\n"); 466 467 error = bio->bio_error; 468 mtx_lock(&be_lun->io_lock); 469 if (error != 0) 470 beio->num_errors++; 471 472 beio->num_bios_done++; 473 474 /* 475 * XXX KDM will this cause WITNESS to complain? Holding a lock 476 * during the free might cause it to complain. 477 */ 478 g_destroy_bio(bio); 479 480 /* 481 * If the send complete bit isn't set, or we aren't the last I/O to 482 * complete, then we're done. 483 */ 484 if ((beio->send_complete == 0) 485 || (beio->num_bios_done < beio->num_bios_sent)) { 486 mtx_unlock(&be_lun->io_lock); 487 return; 488 } 489 490 /* 491 * At this point, we've verified that we are the last I/O to 492 * complete, so it's safe to drop the lock. 493 */ 494 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 495 beio->ds_tag_type, beio->ds_trans_type, 496 /*now*/ NULL, /*then*/&beio->ds_t0); 497 mtx_unlock(&be_lun->io_lock); 498 499 /* 500 * If there are any errors from the backing device, we fail the 501 * entire I/O with a medium error. 
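	 * (More precisely: EOPNOTSUPP is reported as an invalid opcode and a
	 * failed flush as an internal failure below; everything else is
	 * reported as a medium error.)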
	 */
	if (beio->num_errors > 0) {
		if (error == EOPNOTSUPP) {
			ctl_set_invalid_opcode(&io->scsiio);
		} else if (beio->bio_cmd == BIO_FLUSH) {
			/* XXX KDM is there a better error here? */
			ctl_set_internal_failure(&io->scsiio,
						 /*sks_valid*/ 1,
						 /*retry_count*/ 0xbad2);
		} else
			ctl_set_medium_error(&io->scsiio);
		ctl_complete_beio(beio);
		return;
	}

	/*
	 * If this is a write, a flush, a delete or verify, we're all done.
	 * If this is a read, we can now send the data to the user.
	 */
	if ((beio->bio_cmd == BIO_WRITE)
	 || (beio->bio_cmd == BIO_FLUSH)
	 || (beio->bio_cmd == BIO_DELETE)
	 || (ARGS(io)->flags & CTL_LLF_VERIFY)) {
		ctl_set_success(&io->scsiio);
		ctl_complete_beio(beio);
	} else {
#ifdef CTL_TIME_IO
		getbintime(&io->io_hdr.dma_start_bt);
#endif
		ctl_datamove(io);
	}
}

static void
ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
			struct ctl_be_block_io *beio)
{
	union ctl_io *io = beio->io;
	struct mount *mountpoint;
	int error, lock_flags;

	DPRINTF("entered\n");

	binuptime(&beio->ds_t0);
	mtx_lock(&be_lun->io_lock);
	devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
	mtx_unlock(&be_lun->io_lock);

	(void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT);

	if (MNT_SHARED_WRITES(mountpoint)
	 || ((mountpoint == NULL)
	  && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
		lock_flags = LK_SHARED;
	else
		lock_flags = LK_EXCLUSIVE;

	vn_lock(be_lun->vn, lock_flags | LK_RETRY);

	error = VOP_FSYNC(be_lun->vn, MNT_WAIT, curthread);
	VOP_UNLOCK(be_lun->vn, 0);

	vn_finished_write(mountpoint);

	mtx_lock(&be_lun->io_lock);
	devstat_end_transaction(beio->lun->disk_stats, beio->io_len,
	    beio->ds_tag_type, beio->ds_trans_type,
	    /*now*/ NULL, /*then*/&beio->ds_t0);
	mtx_unlock(&be_lun->io_lock);

	if (error == 0)
		ctl_set_success(&io->scsiio);
	else {
		/* XXX KDM is there a better error here?
		 */
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/ 0xbad1);
	}

	ctl_complete_beio(beio);
}

SDT_PROBE_DEFINE1(cbb, kernel, read, file_start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, file_start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, read, file_done, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, file_done, "uint64_t");

static void
ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
			   struct ctl_be_block_io *beio)
{
	struct ctl_be_block_filedata *file_data;
	union ctl_io *io;
	struct uio xuio;
	struct iovec *xiovec;
	int flags;
	int error, i;

	DPRINTF("entered\n");

	file_data = &be_lun->backend.file;
	io = beio->io;
	flags = 0;
	if (ARGS(io)->flags & CTL_LLF_DPO)
		flags |= IO_DIRECT;
	if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA)
		flags |= IO_SYNC;

	bzero(&xuio, sizeof(xuio));
	if (beio->bio_cmd == BIO_READ) {
		SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0);
		xuio.uio_rw = UIO_READ;
	} else {
		SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0);
		xuio.uio_rw = UIO_WRITE;
	}
	xuio.uio_offset = beio->io_offset;
	xuio.uio_resid = beio->io_len;
	xuio.uio_segflg = UIO_SYSSPACE;
	xuio.uio_iov = beio->xiovecs;
	xuio.uio_iovcnt = beio->num_segs;
	xuio.uio_td = curthread;

	for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) {
		xiovec->iov_base = beio->sg_segs[i].addr;
		xiovec->iov_len = beio->sg_segs[i].len;
	}

	binuptime(&beio->ds_t0);
	mtx_lock(&be_lun->io_lock);
	devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
	mtx_unlock(&be_lun->io_lock);

	if (beio->bio_cmd == BIO_READ) {
		vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);

		/*
		 * UFS pays attention to IO_DIRECT for reads.  If the
		 * DIRECTIO option is configured into the kernel, it calls
		 * ffs_rawread().  But that only works for single-segment
		 * uios with user space addresses.  In our case, with a
		 * kernel uio, it still reads into the buffer cache, but it
		 * will just try to release the buffer from the cache later
		 * on in ffs_read().
		 *
		 * ZFS does not pay attention to IO_DIRECT for reads.
		 *
		 * UFS does not pay attention to IO_SYNC for reads.
		 *
		 * ZFS pays attention to IO_SYNC (which translates into the
		 * Solaris define FRSYNC for zfs_read()) for reads.  It
		 * attempts to sync the file before reading.
		 *
		 * So, to attempt to provide some barrier semantics in the
		 * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC.
		 */
		error = VOP_READ(be_lun->vn, &xuio, flags, file_data->cred);

		VOP_UNLOCK(be_lun->vn, 0);
		SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0);
	} else {
		struct mount *mountpoint;
		int lock_flags;

		(void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT);

		if (MNT_SHARED_WRITES(mountpoint)
		 || ((mountpoint == NULL)
		  && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
			lock_flags = LK_SHARED;
		else
			lock_flags = LK_EXCLUSIVE;

		vn_lock(be_lun->vn, lock_flags | LK_RETRY);

		/*
		 * UFS pays attention to IO_DIRECT for writes.  The write
		 * is done asynchronously.  (Normally the write would just
		 * get put into cache.)
		 *
		 * UFS pays attention to IO_SYNC for writes.  It will
		 * attempt to write the buffer out synchronously if that
		 * flag is set.
685 * 686 * ZFS does not pay attention to IO_DIRECT for writes. 687 * 688 * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC) 689 * for writes. It will flush the transaction from the 690 * cache before returning. 691 * 692 * So if we've got the BIO_ORDERED flag set, we want 693 * IO_SYNC in either the UFS or ZFS case. 694 */ 695 error = VOP_WRITE(be_lun->vn, &xuio, flags, file_data->cred); 696 VOP_UNLOCK(be_lun->vn, 0); 697 698 vn_finished_write(mountpoint); 699 SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0); 700 } 701 702 mtx_lock(&be_lun->io_lock); 703 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 704 beio->ds_tag_type, beio->ds_trans_type, 705 /*now*/ NULL, /*then*/&beio->ds_t0); 706 mtx_unlock(&be_lun->io_lock); 707 708 /* 709 * If we got an error, set the sense data to "MEDIUM ERROR" and 710 * return the I/O to the user. 711 */ 712 if (error != 0) { 713 char path_str[32]; 714 715 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 716 /* 717 * XXX KDM ZFS returns ENOSPC when the underlying 718 * filesystem fills up. What kind of SCSI error should we 719 * return for that? 720 */ 721 printf("%s%s command returned errno %d\n", path_str, 722 (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", error); 723 ctl_set_medium_error(&io->scsiio); 724 ctl_complete_beio(beio); 725 return; 726 } 727 728 /* 729 * If this is a write or a verify, we're all done. 730 * If this is a read, we can now send the data to the user. 731 */ 732 if ((beio->bio_cmd == BIO_WRITE) || 733 (ARGS(io)->flags & CTL_LLF_VERIFY)) { 734 ctl_set_success(&io->scsiio); 735 ctl_complete_beio(beio); 736 } else { 737#ifdef CTL_TIME_IO 738 getbintime(&io->io_hdr.dma_start_bt); 739#endif 740 ctl_datamove(io); 741 } 742} 743 744static void 745ctl_be_block_dispatch_zvol(struct ctl_be_block_lun *be_lun, 746 struct ctl_be_block_io *beio) 747{ 748 struct ctl_be_block_devdata *dev_data; 749 union ctl_io *io; 750 struct uio xuio; 751 struct iovec *xiovec; 752 int flags; 753 int error, i; 754 755 DPRINTF("entered\n"); 756 757 dev_data = &be_lun->backend.dev; 758 io = beio->io; 759 flags = 0; 760 if (ARGS(io)->flags & CTL_LLF_DPO) 761 flags |= IO_DIRECT; 762 if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA) 763 flags |= IO_SYNC; 764 765 bzero(&xuio, sizeof(xuio)); 766 if (beio->bio_cmd == BIO_READ) { 767 SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0); 768 xuio.uio_rw = UIO_READ; 769 } else { 770 SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0); 771 xuio.uio_rw = UIO_WRITE; 772 } 773 xuio.uio_offset = beio->io_offset; 774 xuio.uio_resid = beio->io_len; 775 xuio.uio_segflg = UIO_SYSSPACE; 776 xuio.uio_iov = beio->xiovecs; 777 xuio.uio_iovcnt = beio->num_segs; 778 xuio.uio_td = curthread; 779 780 for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) { 781 xiovec->iov_base = beio->sg_segs[i].addr; 782 xiovec->iov_len = beio->sg_segs[i].len; 783 } 784 785 binuptime(&beio->ds_t0); 786 mtx_lock(&be_lun->io_lock); 787 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 788 mtx_unlock(&be_lun->io_lock); 789 790 if (beio->bio_cmd == BIO_READ) { 791 error = (*dev_data->csw->d_read)(dev_data->cdev, &xuio, flags); 792 SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0); 793 } else { 794 error = (*dev_data->csw->d_write)(dev_data->cdev, &xuio, flags); 795 SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0); 796 } 797 798 mtx_lock(&be_lun->io_lock); 799 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 800 beio->ds_tag_type, beio->ds_trans_type, 801 
/*now*/ NULL, /*then*/&beio->ds_t0); 802 mtx_unlock(&be_lun->io_lock); 803 804 /* 805 * If we got an error, set the sense data to "MEDIUM ERROR" and 806 * return the I/O to the user. 807 */ 808 if (error != 0) { 809 ctl_set_medium_error(&io->scsiio); 810 ctl_complete_beio(beio); 811 return; 812 } 813 814 /* 815 * If this is a write or a verify, we're all done. 816 * If this is a read, we can now send the data to the user. 817 */ 818 if ((beio->bio_cmd == BIO_WRITE) || 819 (ARGS(io)->flags & CTL_LLF_VERIFY)) { 820 ctl_set_success(&io->scsiio); 821 ctl_complete_beio(beio); 822 } else { 823#ifdef CTL_TIME_IO 824 getbintime(&io->io_hdr.dma_start_bt); 825#endif 826 ctl_datamove(io); 827 } 828} 829 830static void 831ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun, 832 struct ctl_be_block_io *beio) 833{ 834 struct bio *bio; 835 union ctl_io *io; 836 struct ctl_be_block_devdata *dev_data; 837 838 dev_data = &be_lun->backend.dev; 839 io = beio->io; 840 841 DPRINTF("entered\n"); 842 843 /* This can't fail, it's a blocking allocation. */ 844 bio = g_alloc_bio(); 845 846 bio->bio_cmd = BIO_FLUSH; 847 bio->bio_flags |= BIO_ORDERED; 848 bio->bio_dev = dev_data->cdev; 849 bio->bio_offset = 0; 850 bio->bio_data = 0; 851 bio->bio_done = ctl_be_block_biodone; 852 bio->bio_caller1 = beio; 853 bio->bio_pblkno = 0; 854 855 /* 856 * We don't need to acquire the LUN lock here, because we are only 857 * sending one bio, and so there is no other context to synchronize 858 * with. 859 */ 860 beio->num_bios_sent = 1; 861 beio->send_complete = 1; 862 863 binuptime(&beio->ds_t0); 864 mtx_lock(&be_lun->io_lock); 865 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 866 mtx_unlock(&be_lun->io_lock); 867 868 (*dev_data->csw->d_strategy)(bio); 869} 870 871static void 872ctl_be_block_unmap_dev_range(struct ctl_be_block_lun *be_lun, 873 struct ctl_be_block_io *beio, 874 uint64_t off, uint64_t len, int last) 875{ 876 struct bio *bio; 877 struct ctl_be_block_devdata *dev_data; 878 uint64_t maxlen; 879 880 dev_data = &be_lun->backend.dev; 881 maxlen = LONG_MAX - (LONG_MAX % be_lun->blocksize); 882 while (len > 0) { 883 bio = g_alloc_bio(); 884 bio->bio_cmd = BIO_DELETE; 885 bio->bio_dev = dev_data->cdev; 886 bio->bio_offset = off; 887 bio->bio_length = MIN(len, maxlen); 888 bio->bio_data = 0; 889 bio->bio_done = ctl_be_block_biodone; 890 bio->bio_caller1 = beio; 891 bio->bio_pblkno = off / be_lun->blocksize; 892 893 off += bio->bio_length; 894 len -= bio->bio_length; 895 896 mtx_lock(&be_lun->io_lock); 897 beio->num_bios_sent++; 898 if (last && len == 0) 899 beio->send_complete = 1; 900 mtx_unlock(&be_lun->io_lock); 901 902 (*dev_data->csw->d_strategy)(bio); 903 } 904} 905 906static void 907ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, 908 struct ctl_be_block_io *beio) 909{ 910 union ctl_io *io; 911 struct ctl_be_block_devdata *dev_data; 912 struct ctl_ptr_len_flags *ptrlen; 913 struct scsi_unmap_desc *buf, *end; 914 uint64_t len; 915 916 dev_data = &be_lun->backend.dev; 917 io = beio->io; 918 919 DPRINTF("entered\n"); 920 921 binuptime(&beio->ds_t0); 922 mtx_lock(&be_lun->io_lock); 923 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 924 mtx_unlock(&be_lun->io_lock); 925 926 if (beio->io_offset == -1) { 927 beio->io_len = 0; 928 ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 929 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 930 end = buf + ptrlen->len / sizeof(*buf); 931 for (; buf < end; buf++) { 932 len = (uint64_t)scsi_4btoul(buf->length) * 933 
be_lun->blocksize; 934 beio->io_len += len; 935 ctl_be_block_unmap_dev_range(be_lun, beio, 936 scsi_8btou64(buf->lba) * be_lun->blocksize, len, 937 (end - buf < 2) ? TRUE : FALSE); 938 } 939 } else 940 ctl_be_block_unmap_dev_range(be_lun, beio, 941 beio->io_offset, beio->io_len, TRUE); 942} 943 944static void 945ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, 946 struct ctl_be_block_io *beio) 947{ 948 TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue); 949 int i; 950 struct bio *bio; 951 struct ctl_be_block_devdata *dev_data; 952 off_t cur_offset; 953 int max_iosize; 954 955 DPRINTF("entered\n"); 956 957 dev_data = &be_lun->backend.dev; 958 959 /* 960 * We have to limit our I/O size to the maximum supported by the 961 * backend device. Hopefully it is MAXPHYS. If the driver doesn't 962 * set it properly, use DFLTPHYS. 963 */ 964 max_iosize = dev_data->cdev->si_iosize_max; 965 if (max_iosize < PAGE_SIZE) 966 max_iosize = DFLTPHYS; 967 968 cur_offset = beio->io_offset; 969 for (i = 0; i < beio->num_segs; i++) { 970 size_t cur_size; 971 uint8_t *cur_ptr; 972 973 cur_size = beio->sg_segs[i].len; 974 cur_ptr = beio->sg_segs[i].addr; 975 976 while (cur_size > 0) { 977 /* This can't fail, it's a blocking allocation. */ 978 bio = g_alloc_bio(); 979 980 KASSERT(bio != NULL, ("g_alloc_bio() failed!\n")); 981 982 bio->bio_cmd = beio->bio_cmd; 983 bio->bio_dev = dev_data->cdev; 984 bio->bio_caller1 = beio; 985 bio->bio_length = min(cur_size, max_iosize); 986 bio->bio_offset = cur_offset; 987 bio->bio_data = cur_ptr; 988 bio->bio_done = ctl_be_block_biodone; 989 bio->bio_pblkno = cur_offset / be_lun->blocksize; 990 991 cur_offset += bio->bio_length; 992 cur_ptr += bio->bio_length; 993 cur_size -= bio->bio_length; 994 995 TAILQ_INSERT_TAIL(&queue, bio, bio_queue); 996 beio->num_bios_sent++; 997 } 998 } 999 binuptime(&beio->ds_t0); 1000 mtx_lock(&be_lun->io_lock); 1001 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 1002 beio->send_complete = 1; 1003 mtx_unlock(&be_lun->io_lock); 1004 1005 /* 1006 * Fire off all allocated requests! 
1007 */ 1008 while ((bio = TAILQ_FIRST(&queue)) != NULL) { 1009 TAILQ_REMOVE(&queue, bio, bio_queue); 1010 (*dev_data->csw->d_strategy)(bio); 1011 } 1012} 1013 1014static void 1015ctl_be_block_cw_done_ws(struct ctl_be_block_io *beio) 1016{ 1017 union ctl_io *io; 1018 1019 io = beio->io; 1020 ctl_free_beio(beio); 1021 if ((io->io_hdr.flags & CTL_FLAG_ABORT) || 1022 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 1023 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 1024 ctl_config_write_done(io); 1025 return; 1026 } 1027 1028 ctl_be_block_config_write(io); 1029} 1030 1031static void 1032ctl_be_block_cw_dispatch_ws(struct ctl_be_block_lun *be_lun, 1033 union ctl_io *io) 1034{ 1035 struct ctl_be_block_io *beio; 1036 struct ctl_be_block_softc *softc; 1037 struct ctl_lba_len_flags *lbalen; 1038 uint64_t len_left, lba; 1039 int i, seglen; 1040 uint8_t *buf, *end; 1041 1042 DPRINTF("entered\n"); 1043 1044 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1045 softc = be_lun->softc; 1046 lbalen = ARGS(beio->io); 1047 1048 if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR) || 1049 (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR) && be_lun->unmap == NULL)) { 1050 ctl_free_beio(beio); 1051 ctl_set_invalid_field(&io->scsiio, 1052 /*sks_valid*/ 1, 1053 /*command*/ 1, 1054 /*field*/ 1, 1055 /*bit_valid*/ 0, 1056 /*bit*/ 0); 1057 ctl_config_write_done(io); 1058 return; 1059 } 1060 1061 switch (io->scsiio.tag_type) { 1062 case CTL_TAG_ORDERED: 1063 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1064 break; 1065 case CTL_TAG_HEAD_OF_QUEUE: 1066 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 1067 break; 1068 case CTL_TAG_UNTAGGED: 1069 case CTL_TAG_SIMPLE: 1070 case CTL_TAG_ACA: 1071 default: 1072 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 1073 break; 1074 } 1075 1076 if (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR)) { 1077 beio->io_offset = lbalen->lba * be_lun->blocksize; 1078 beio->io_len = (uint64_t)lbalen->len * be_lun->blocksize; 1079 beio->bio_cmd = BIO_DELETE; 1080 beio->ds_trans_type = DEVSTAT_FREE; 1081 1082 be_lun->unmap(be_lun, beio); 1083 return; 1084 } 1085 1086 beio->bio_cmd = BIO_WRITE; 1087 beio->ds_trans_type = DEVSTAT_WRITE; 1088 1089 DPRINTF("WRITE SAME at LBA %jx len %u\n", 1090 (uintmax_t)lbalen->lba, lbalen->len); 1091 1092 len_left = (uint64_t)lbalen->len * be_lun->blocksize; 1093 for (i = 0, lba = 0; i < CTLBLK_MAX_SEGS && len_left > 0; i++) { 1094 1095 /* 1096 * Setup the S/G entry for this chunk. 1097 */ 1098 seglen = MIN(CTLBLK_MAX_SEG, len_left); 1099 seglen -= seglen % be_lun->blocksize; 1100 beio->sg_segs[i].len = seglen; 1101 beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK); 1102 1103 DPRINTF("segment %d addr %p len %zd\n", i, 1104 beio->sg_segs[i].addr, beio->sg_segs[i].len); 1105 1106 beio->num_segs++; 1107 len_left -= seglen; 1108 1109 buf = beio->sg_segs[i].addr; 1110 end = buf + seglen; 1111 for (; buf < end; buf += be_lun->blocksize) { 1112 memcpy(buf, io->scsiio.kern_data_ptr, be_lun->blocksize); 1113 if (lbalen->flags & SWS_LBDATA) 1114 scsi_ulto4b(lbalen->lba + lba, buf); 1115 lba++; 1116 } 1117 } 1118 1119 beio->io_offset = lbalen->lba * be_lun->blocksize; 1120 beio->io_len = lba * be_lun->blocksize; 1121 1122 /* We can not do all in one run. Correct and schedule rerun. 
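	 * (The remaining LBA range is left in lbalen, and once this chunk
	 * completes ctl_be_block_cw_done_ws() re-enters
	 * ctl_be_block_config_write() to issue the next one.)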
*/ 1123 if (len_left > 0) { 1124 lbalen->lba += lba; 1125 lbalen->len -= lba; 1126 beio->beio_cont = ctl_be_block_cw_done_ws; 1127 } 1128 1129 be_lun->dispatch(be_lun, beio); 1130} 1131 1132static void 1133ctl_be_block_cw_dispatch_unmap(struct ctl_be_block_lun *be_lun, 1134 union ctl_io *io) 1135{ 1136 struct ctl_be_block_io *beio; 1137 struct ctl_be_block_softc *softc; 1138 struct ctl_ptr_len_flags *ptrlen; 1139 1140 DPRINTF("entered\n"); 1141 1142 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1143 softc = be_lun->softc; 1144 ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 1145 1146 if ((ptrlen->flags & ~SU_ANCHOR) != 0 || be_lun->unmap == NULL) { 1147 ctl_free_beio(beio); 1148 ctl_set_invalid_field(&io->scsiio, 1149 /*sks_valid*/ 0, 1150 /*command*/ 1, 1151 /*field*/ 0, 1152 /*bit_valid*/ 0, 1153 /*bit*/ 0); 1154 ctl_config_write_done(io); 1155 return; 1156 } 1157 1158 switch (io->scsiio.tag_type) { 1159 case CTL_TAG_ORDERED: 1160 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1161 break; 1162 case CTL_TAG_HEAD_OF_QUEUE: 1163 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 1164 break; 1165 case CTL_TAG_UNTAGGED: 1166 case CTL_TAG_SIMPLE: 1167 case CTL_TAG_ACA: 1168 default: 1169 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 1170 break; 1171 } 1172 1173 beio->io_len = 0; 1174 beio->io_offset = -1; 1175 1176 beio->bio_cmd = BIO_DELETE; 1177 beio->ds_trans_type = DEVSTAT_FREE; 1178 1179 DPRINTF("UNMAP\n"); 1180 1181 be_lun->unmap(be_lun, beio); 1182} 1183 1184static void 1185ctl_be_block_cw_done(struct ctl_be_block_io *beio) 1186{ 1187 union ctl_io *io; 1188 1189 io = beio->io; 1190 ctl_free_beio(beio); 1191 ctl_config_write_done(io); 1192} 1193 1194static void 1195ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun, 1196 union ctl_io *io) 1197{ 1198 struct ctl_be_block_io *beio; 1199 struct ctl_be_block_softc *softc; 1200 1201 DPRINTF("entered\n"); 1202 1203 softc = be_lun->softc; 1204 beio = ctl_alloc_beio(softc); 1205 beio->io = io; 1206 beio->lun = be_lun; 1207 beio->beio_cont = ctl_be_block_cw_done; 1208 PRIV(io)->ptr = (void *)beio; 1209 1210 switch (io->scsiio.cdb[0]) { 1211 case SYNCHRONIZE_CACHE: 1212 case SYNCHRONIZE_CACHE_16: 1213 beio->bio_cmd = BIO_FLUSH; 1214 beio->ds_trans_type = DEVSTAT_NO_DATA; 1215 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1216 beio->io_len = 0; 1217 be_lun->lun_flush(be_lun, beio); 1218 break; 1219 case WRITE_SAME_10: 1220 case WRITE_SAME_16: 1221 ctl_be_block_cw_dispatch_ws(be_lun, io); 1222 break; 1223 case UNMAP: 1224 ctl_be_block_cw_dispatch_unmap(be_lun, io); 1225 break; 1226 default: 1227 panic("Unhandled CDB type %#x", io->scsiio.cdb[0]); 1228 break; 1229 } 1230} 1231 1232SDT_PROBE_DEFINE1(cbb, kernel, read, start, "uint64_t"); 1233SDT_PROBE_DEFINE1(cbb, kernel, write, start, "uint64_t"); 1234SDT_PROBE_DEFINE1(cbb, kernel, read, alloc_done, "uint64_t"); 1235SDT_PROBE_DEFINE1(cbb, kernel, write, alloc_done, "uint64_t"); 1236 1237static void 1238ctl_be_block_next(struct ctl_be_block_io *beio) 1239{ 1240 struct ctl_be_block_lun *be_lun; 1241 union ctl_io *io; 1242 1243 io = beio->io; 1244 be_lun = beio->lun; 1245 ctl_free_beio(beio); 1246 if ((io->io_hdr.flags & CTL_FLAG_ABORT) || 1247 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 1248 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 1249 ctl_data_submit_done(io); 1250 return; 1251 } 1252 1253 io->io_hdr.status &= ~CTL_STATUS_MASK; 1254 io->io_hdr.status |= CTL_STATUS_NONE; 1255 1256 mtx_lock(&be_lun->queue_lock); 1257 /* 1258 * XXX KDM make sure that links is 
okay to use at this point. 1259 * Otherwise, we either need to add another field to ctl_io_hdr, 1260 * or deal with resource allocation here. 1261 */ 1262 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); 1263 mtx_unlock(&be_lun->queue_lock); 1264 1265 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 1266} 1267 1268static void 1269ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, 1270 union ctl_io *io) 1271{ 1272 struct ctl_be_block_io *beio; 1273 struct ctl_be_block_softc *softc; 1274 struct ctl_lba_len_flags *lbalen; 1275 struct ctl_ptr_len_flags *bptrlen; 1276 uint64_t len_left, lbas; 1277 int i; 1278 1279 softc = be_lun->softc; 1280 1281 DPRINTF("entered\n"); 1282 1283 lbalen = ARGS(io); 1284 if (lbalen->flags & CTL_LLF_WRITE) { 1285 SDT_PROBE(cbb, kernel, write, start, 0, 0, 0, 0, 0); 1286 } else { 1287 SDT_PROBE(cbb, kernel, read, start, 0, 0, 0, 0, 0); 1288 } 1289 1290 beio = ctl_alloc_beio(softc); 1291 beio->io = io; 1292 beio->lun = be_lun; 1293 bptrlen = PRIV(io); 1294 bptrlen->ptr = (void *)beio; 1295 1296 switch (io->scsiio.tag_type) { 1297 case CTL_TAG_ORDERED: 1298 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1299 break; 1300 case CTL_TAG_HEAD_OF_QUEUE: 1301 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 1302 break; 1303 case CTL_TAG_UNTAGGED: 1304 case CTL_TAG_SIMPLE: 1305 case CTL_TAG_ACA: 1306 default: 1307 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 1308 break; 1309 } 1310 1311 if (lbalen->flags & CTL_LLF_WRITE) { 1312 beio->bio_cmd = BIO_WRITE; 1313 beio->ds_trans_type = DEVSTAT_WRITE; 1314 } else { 1315 beio->bio_cmd = BIO_READ; 1316 beio->ds_trans_type = DEVSTAT_READ; 1317 } 1318 1319 DPRINTF("%s at LBA %jx len %u @%ju\n", 1320 (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", 1321 (uintmax_t)lbalen->lba, lbalen->len, bptrlen->len); 1322 if (lbalen->flags & CTL_LLF_COMPARE) 1323 lbas = CTLBLK_HALF_IO_SIZE; 1324 else 1325 lbas = CTLBLK_MAX_IO_SIZE; 1326 lbas = MIN(lbalen->len - bptrlen->len, lbas / be_lun->blocksize); 1327 beio->io_offset = (lbalen->lba + bptrlen->len) * be_lun->blocksize; 1328 beio->io_len = lbas * be_lun->blocksize; 1329 bptrlen->len += lbas; 1330 1331 for (i = 0, len_left = beio->io_len; len_left > 0; i++) { 1332 KASSERT(i < CTLBLK_MAX_SEGS, ("Too many segs (%d >= %d)", 1333 i, CTLBLK_MAX_SEGS)); 1334 1335 /* 1336 * Setup the S/G entry for this chunk. 1337 */ 1338 beio->sg_segs[i].len = min(CTLBLK_MAX_SEG, len_left); 1339 beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK); 1340 1341 DPRINTF("segment %d addr %p len %zd\n", i, 1342 beio->sg_segs[i].addr, beio->sg_segs[i].len); 1343 1344 /* Set up second segment for compare operation. */ 1345 if (lbalen->flags & CTL_LLF_COMPARE) { 1346 beio->sg_segs[i + CTLBLK_HALF_SEGS].len = 1347 beio->sg_segs[i].len; 1348 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr = 1349 uma_zalloc(be_lun->lun_zone, M_WAITOK); 1350 } 1351 1352 beio->num_segs++; 1353 len_left -= beio->sg_segs[i].len; 1354 } 1355 if (bptrlen->len < lbalen->len) 1356 beio->beio_cont = ctl_be_block_next; 1357 io->scsiio.be_move_done = ctl_be_block_move_done; 1358 /* For compare we have separate S/G lists for read and datamove. 
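	 * (The backend read fills the first CTLBLK_HALF_SEGS entries, while
	 * kern_data_ptr points at the second half so the initiator's data
	 * lands there; ctl_be_block_move_done() then memcmp()s the two
	 * halves.)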
*/ 1359 if (lbalen->flags & CTL_LLF_COMPARE) 1360 io->scsiio.kern_data_ptr = (uint8_t *)&beio->sg_segs[CTLBLK_HALF_SEGS]; 1361 else 1362 io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs; 1363 io->scsiio.kern_data_len = beio->io_len; 1364 io->scsiio.kern_data_resid = 0; 1365 io->scsiio.kern_sg_entries = beio->num_segs; 1366 io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST; 1367 1368 /* 1369 * For the read case, we need to read the data into our buffers and 1370 * then we can send it back to the user. For the write case, we 1371 * need to get the data from the user first. 1372 */ 1373 if (beio->bio_cmd == BIO_READ) { 1374 SDT_PROBE(cbb, kernel, read, alloc_done, 0, 0, 0, 0, 0); 1375 be_lun->dispatch(be_lun, beio); 1376 } else { 1377 SDT_PROBE(cbb, kernel, write, alloc_done, 0, 0, 0, 0, 0); 1378#ifdef CTL_TIME_IO 1379 getbintime(&io->io_hdr.dma_start_bt); 1380#endif 1381 ctl_datamove(io); 1382 } 1383} 1384 1385static void 1386ctl_be_block_worker(void *context, int pending) 1387{ 1388 struct ctl_be_block_lun *be_lun; 1389 struct ctl_be_block_softc *softc; 1390 union ctl_io *io; 1391 1392 be_lun = (struct ctl_be_block_lun *)context; 1393 softc = be_lun->softc; 1394 1395 DPRINTF("entered\n"); 1396 1397 mtx_lock(&be_lun->queue_lock); 1398 for (;;) { 1399 io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue); 1400 if (io != NULL) { 1401 struct ctl_be_block_io *beio; 1402 1403 DPRINTF("datamove queue\n"); 1404 1405 STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr, 1406 ctl_io_hdr, links); 1407 1408 mtx_unlock(&be_lun->queue_lock); 1409 1410 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1411 1412 be_lun->dispatch(be_lun, beio); 1413 1414 mtx_lock(&be_lun->queue_lock); 1415 continue; 1416 } 1417 io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue); 1418 if (io != NULL) { 1419 1420 DPRINTF("config write queue\n"); 1421 1422 STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr, 1423 ctl_io_hdr, links); 1424 1425 mtx_unlock(&be_lun->queue_lock); 1426 1427 ctl_be_block_cw_dispatch(be_lun, io); 1428 1429 mtx_lock(&be_lun->queue_lock); 1430 continue; 1431 } 1432 io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue); 1433 if (io != NULL) { 1434 DPRINTF("input queue\n"); 1435 1436 STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr, 1437 ctl_io_hdr, links); 1438 mtx_unlock(&be_lun->queue_lock); 1439 1440 /* 1441 * We must drop the lock, since this routine and 1442 * its children may sleep. 1443 */ 1444 ctl_be_block_dispatch(be_lun, io); 1445 1446 mtx_lock(&be_lun->queue_lock); 1447 continue; 1448 } 1449 1450 /* 1451 * If we get here, there is no work left in the queues, so 1452 * just break out and let the task queue go to sleep. 1453 */ 1454 break; 1455 } 1456 mtx_unlock(&be_lun->queue_lock); 1457} 1458 1459/* 1460 * Entry point from CTL to the backend for I/O. We queue everything to a 1461 * work thread, so this just puts the I/O on a queue and wakes up the 1462 * thread. 1463 */ 1464static int 1465ctl_be_block_submit(union ctl_io *io) 1466{ 1467 struct ctl_be_block_lun *be_lun; 1468 struct ctl_be_lun *ctl_be_lun; 1469 1470 DPRINTF("entered\n"); 1471 1472 ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[ 1473 CTL_PRIV_BACKEND_LUN].ptr; 1474 be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun; 1475 1476 /* 1477 * Make sure we only get SCSI I/O. 
1478 */ 1479 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type " 1480 "%#x) encountered", io->io_hdr.io_type)); 1481 1482 PRIV(io)->len = 0; 1483 1484 mtx_lock(&be_lun->queue_lock); 1485 /* 1486 * XXX KDM make sure that links is okay to use at this point. 1487 * Otherwise, we either need to add another field to ctl_io_hdr, 1488 * or deal with resource allocation here. 1489 */ 1490 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); 1491 mtx_unlock(&be_lun->queue_lock); 1492 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 1493 1494 return (CTL_RETVAL_COMPLETE); 1495} 1496 1497static int 1498ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, 1499 int flag, struct thread *td) 1500{ 1501 struct ctl_be_block_softc *softc; 1502 int error; 1503 1504 softc = &backend_block_softc; 1505 1506 error = 0; 1507 1508 switch (cmd) { 1509 case CTL_LUN_REQ: { 1510 struct ctl_lun_req *lun_req; 1511 1512 lun_req = (struct ctl_lun_req *)addr; 1513 1514 switch (lun_req->reqtype) { 1515 case CTL_LUNREQ_CREATE: 1516 error = ctl_be_block_create(softc, lun_req); 1517 break; 1518 case CTL_LUNREQ_RM: 1519 error = ctl_be_block_rm(softc, lun_req); 1520 break; 1521 case CTL_LUNREQ_MODIFY: 1522 error = ctl_be_block_modify(softc, lun_req); 1523 break; 1524 default: 1525 lun_req->status = CTL_LUN_ERROR; 1526 snprintf(lun_req->error_str, sizeof(lun_req->error_str), 1527 "%s: invalid LUN request type %d", __func__, 1528 lun_req->reqtype); 1529 break; 1530 } 1531 break; 1532 } 1533 default: 1534 error = ENOTTY; 1535 break; 1536 } 1537 1538 return (error); 1539} 1540 1541static int 1542ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1543{ 1544 struct ctl_be_block_filedata *file_data; 1545 struct ctl_lun_create_params *params; 1546 struct vattr vattr; 1547 int error; 1548 1549 error = 0; 1550 file_data = &be_lun->backend.file; 1551 params = &req->reqdata.create; 1552 1553 be_lun->dev_type = CTL_BE_BLOCK_FILE; 1554 be_lun->dispatch = ctl_be_block_dispatch_file; 1555 be_lun->lun_flush = ctl_be_block_flush_file; 1556 1557 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); 1558 if (error != 0) { 1559 snprintf(req->error_str, sizeof(req->error_str), 1560 "error calling VOP_GETATTR() for file %s", 1561 be_lun->dev_path); 1562 return (error); 1563 } 1564 1565 /* 1566 * Verify that we have the ability to upgrade to exclusive 1567 * access on this file so we can trap errors at open instead 1568 * of reporting them during first access. 1569 */ 1570 if (VOP_ISLOCKED(be_lun->vn) != LK_EXCLUSIVE) { 1571 vn_lock(be_lun->vn, LK_UPGRADE | LK_RETRY); 1572 if (be_lun->vn->v_iflag & VI_DOOMED) { 1573 error = EBADF; 1574 snprintf(req->error_str, sizeof(req->error_str), 1575 "error locking file %s", be_lun->dev_path); 1576 return (error); 1577 } 1578 } 1579 1580 1581 file_data->cred = crhold(curthread->td_ucred); 1582 if (params->lun_size_bytes != 0) 1583 be_lun->size_bytes = params->lun_size_bytes; 1584 else 1585 be_lun->size_bytes = vattr.va_size; 1586 /* 1587 * We set the multi thread flag for file operations because all 1588 * filesystems (in theory) are capable of allowing multiple readers 1589 * of a file at once. So we want to get the maximum possible 1590 * concurrency. 1591 */ 1592 be_lun->flags |= CTL_BE_BLOCK_LUN_MULTI_THREAD; 1593 1594 /* 1595 * XXX KDM vattr.va_blocksize may be larger than 512 bytes here. 1596 * With ZFS, it is 131072 bytes. Block sizes that large don't work 1597 * with disklabel and UFS on FreeBSD at least. 
Large block sizes 1598 * may not work with other OSes as well. So just export a sector 1599 * size of 512 bytes, which should work with any OS or 1600 * application. Since our backing is a file, any block size will 1601 * work fine for the backing store. 1602 */ 1603#if 0 1604 be_lun->blocksize= vattr.va_blocksize; 1605#endif 1606 if (params->blocksize_bytes != 0) 1607 be_lun->blocksize = params->blocksize_bytes; 1608 else 1609 be_lun->blocksize = 512; 1610 1611 /* 1612 * Sanity check. The media size has to be at least one 1613 * sector long. 1614 */ 1615 if (be_lun->size_bytes < be_lun->blocksize) { 1616 error = EINVAL; 1617 snprintf(req->error_str, sizeof(req->error_str), 1618 "file %s size %ju < block size %u", be_lun->dev_path, 1619 (uintmax_t)be_lun->size_bytes, be_lun->blocksize); 1620 } 1621 return (error); 1622} 1623 1624static int 1625ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1626{ 1627 struct ctl_lun_create_params *params; 1628 struct vattr vattr; 1629 struct cdev *dev; 1630 struct cdevsw *devsw; 1631 int error; 1632 off_t ps, pss, po, pos; 1633 1634 params = &req->reqdata.create; 1635 1636 be_lun->dev_type = CTL_BE_BLOCK_DEV; 1637 be_lun->backend.dev.cdev = be_lun->vn->v_rdev; 1638 be_lun->backend.dev.csw = dev_refthread(be_lun->backend.dev.cdev, 1639 &be_lun->backend.dev.dev_ref); 1640 if (be_lun->backend.dev.csw == NULL) 1641 panic("Unable to retrieve device switch"); 1642 if (strcmp(be_lun->backend.dev.csw->d_name, "zvol") == 0) 1643 be_lun->dispatch = ctl_be_block_dispatch_zvol; 1644 else 1645 be_lun->dispatch = ctl_be_block_dispatch_dev; 1646 be_lun->lun_flush = ctl_be_block_flush_dev; 1647 be_lun->unmap = ctl_be_block_unmap_dev; 1648 1649 error = VOP_GETATTR(be_lun->vn, &vattr, NOCRED); 1650 if (error) { 1651 snprintf(req->error_str, sizeof(req->error_str), 1652 "%s: error getting vnode attributes for device %s", 1653 __func__, be_lun->dev_path); 1654 return (error); 1655 } 1656 1657 dev = be_lun->vn->v_rdev; 1658 devsw = dev->si_devsw; 1659 if (!devsw->d_ioctl) { 1660 snprintf(req->error_str, sizeof(req->error_str), 1661 "%s: no d_ioctl for device %s!", __func__, 1662 be_lun->dev_path); 1663 return (ENODEV); 1664 } 1665 1666 error = devsw->d_ioctl(dev, DIOCGSECTORSIZE, 1667 (caddr_t)&be_lun->blocksize, FREAD, 1668 curthread); 1669 if (error) { 1670 snprintf(req->error_str, sizeof(req->error_str), 1671 "%s: error %d returned for DIOCGSECTORSIZE ioctl " 1672 "on %s!", __func__, error, be_lun->dev_path); 1673 return (error); 1674 } 1675 1676 /* 1677 * If the user has asked for a blocksize that is greater than the 1678 * backing device's blocksize, we can do it only if the blocksize 1679 * the user is asking for is an even multiple of the underlying 1680 * device's blocksize. 
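	 * For example, a 4096-byte LUN blocksize can be layered on top of a
	 * 512-byte device (an exact 8x multiple), while a 520-byte request
	 * on a 512-byte device is rejected as not being an even multiple.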
1681 */ 1682 if ((params->blocksize_bytes != 0) 1683 && (params->blocksize_bytes > be_lun->blocksize)) { 1684 uint32_t bs_multiple, tmp_blocksize; 1685 1686 bs_multiple = params->blocksize_bytes / be_lun->blocksize; 1687 1688 tmp_blocksize = bs_multiple * be_lun->blocksize; 1689 1690 if (tmp_blocksize == params->blocksize_bytes) { 1691 be_lun->blocksize = params->blocksize_bytes; 1692 } else { 1693 snprintf(req->error_str, sizeof(req->error_str), 1694 "%s: requested blocksize %u is not an even " 1695 "multiple of backing device blocksize %u", 1696 __func__, params->blocksize_bytes, 1697 be_lun->blocksize); 1698 return (EINVAL); 1699 1700 } 1701 } else if ((params->blocksize_bytes != 0) 1702 && (params->blocksize_bytes != be_lun->blocksize)) { 1703 snprintf(req->error_str, sizeof(req->error_str), 1704 "%s: requested blocksize %u < backing device " 1705 "blocksize %u", __func__, params->blocksize_bytes, 1706 be_lun->blocksize); 1707 return (EINVAL); 1708 } 1709 1710 error = devsw->d_ioctl(dev, DIOCGMEDIASIZE, 1711 (caddr_t)&be_lun->size_bytes, FREAD, 1712 curthread); 1713 if (error) { 1714 snprintf(req->error_str, sizeof(req->error_str), 1715 "%s: error %d returned for DIOCGMEDIASIZE " 1716 " ioctl on %s!", __func__, error, 1717 be_lun->dev_path); 1718 return (error); 1719 } 1720 1721 if (params->lun_size_bytes != 0) { 1722 if (params->lun_size_bytes > be_lun->size_bytes) { 1723 snprintf(req->error_str, sizeof(req->error_str), 1724 "%s: requested LUN size %ju > backing device " 1725 "size %ju", __func__, 1726 (uintmax_t)params->lun_size_bytes, 1727 (uintmax_t)be_lun->size_bytes); 1728 return (EINVAL); 1729 } 1730 1731 be_lun->size_bytes = params->lun_size_bytes; 1732 } 1733 1734 error = devsw->d_ioctl(dev, DIOCGSTRIPESIZE, 1735 (caddr_t)&ps, FREAD, curthread); 1736 if (error) 1737 ps = po = 0; 1738 else { 1739 error = devsw->d_ioctl(dev, DIOCGSTRIPEOFFSET, 1740 (caddr_t)&po, FREAD, curthread); 1741 if (error) 1742 po = 0; 1743 } 1744 pss = ps / be_lun->blocksize; 1745 pos = po / be_lun->blocksize; 1746 if ((pss > 0) && (pss * be_lun->blocksize == ps) && (pss >= pos) && 1747 ((pss & (pss - 1)) == 0) && (pos * be_lun->blocksize == po)) { 1748 be_lun->pblockexp = fls(pss) - 1; 1749 be_lun->pblockoff = (pss - pos) % pss; 1750 } 1751 1752 return (0); 1753} 1754 1755static int 1756ctl_be_block_close(struct ctl_be_block_lun *be_lun) 1757{ 1758 DROP_GIANT(); 1759 if (be_lun->vn) { 1760 int flags = FREAD | FWRITE; 1761 1762 switch (be_lun->dev_type) { 1763 case CTL_BE_BLOCK_DEV: 1764 if (be_lun->backend.dev.csw) { 1765 dev_relthread(be_lun->backend.dev.cdev, 1766 be_lun->backend.dev.dev_ref); 1767 be_lun->backend.dev.csw = NULL; 1768 be_lun->backend.dev.cdev = NULL; 1769 } 1770 break; 1771 case CTL_BE_BLOCK_FILE: 1772 break; 1773 case CTL_BE_BLOCK_NONE: 1774 break; 1775 default: 1776 panic("Unexpected backend type."); 1777 break; 1778 } 1779 1780 (void)vn_close(be_lun->vn, flags, NOCRED, curthread); 1781 be_lun->vn = NULL; 1782 1783 switch (be_lun->dev_type) { 1784 case CTL_BE_BLOCK_DEV: 1785 break; 1786 case CTL_BE_BLOCK_FILE: 1787 if (be_lun->backend.file.cred != NULL) { 1788 crfree(be_lun->backend.file.cred); 1789 be_lun->backend.file.cred = NULL; 1790 } 1791 break; 1792 case CTL_BE_BLOCK_NONE: 1793 break; 1794 default: 1795 panic("Unexpected backend type."); 1796 break; 1797 } 1798 } 1799 PICKUP_GIANT(); 1800 1801 return (0); 1802} 1803 1804static int 1805ctl_be_block_open(struct ctl_be_block_softc *softc, 1806 struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1807{ 1808 struct nameidata 
nd; 1809 int flags; 1810 int error; 1811 1812 /* 1813 * XXX KDM allow a read-only option? 1814 */ 1815 flags = FREAD | FWRITE; 1816 error = 0; 1817 1818 if (rootvnode == NULL) { 1819 snprintf(req->error_str, sizeof(req->error_str), 1820 "%s: Root filesystem is not mounted", __func__); 1821 return (1); 1822 } 1823 1824 if (!curthread->td_proc->p_fd->fd_cdir) { 1825 curthread->td_proc->p_fd->fd_cdir = rootvnode; 1826 VREF(rootvnode); 1827 } 1828 if (!curthread->td_proc->p_fd->fd_rdir) { 1829 curthread->td_proc->p_fd->fd_rdir = rootvnode; 1830 VREF(rootvnode); 1831 } 1832 if (!curthread->td_proc->p_fd->fd_jdir) { 1833 curthread->td_proc->p_fd->fd_jdir = rootvnode; 1834 VREF(rootvnode); 1835 } 1836 1837 again: 1838 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread); 1839 error = vn_open(&nd, &flags, 0, NULL); 1840 if (error) { 1841 /* 1842 * This is the only reasonable guess we can make as far as 1843 * path if the user doesn't give us a fully qualified path. 1844 * If they want to specify a file, they need to specify the 1845 * full path. 1846 */ 1847 if (be_lun->dev_path[0] != '/') { 1848 char *dev_path = "/dev/"; 1849 char *dev_name; 1850 1851 /* Try adding device path at beginning of name */ 1852 dev_name = malloc(strlen(be_lun->dev_path) 1853 + strlen(dev_path) + 1, 1854 M_CTLBLK, M_WAITOK); 1855 if (dev_name) { 1856 sprintf(dev_name, "%s%s", dev_path, 1857 be_lun->dev_path); 1858 free(be_lun->dev_path, M_CTLBLK); 1859 be_lun->dev_path = dev_name; 1860 goto again; 1861 } 1862 } 1863 snprintf(req->error_str, sizeof(req->error_str), 1864 "%s: error opening %s", __func__, be_lun->dev_path); 1865 return (error); 1866 } 1867 1868 NDFREE(&nd, NDF_ONLY_PNBUF); 1869 1870 be_lun->vn = nd.ni_vp; 1871 1872 /* We only support disks and files. 
*/ 1873 if (vn_isdisk(be_lun->vn, &error)) { 1874 error = ctl_be_block_open_dev(be_lun, req); 1875 } else if (be_lun->vn->v_type == VREG) { 1876 error = ctl_be_block_open_file(be_lun, req); 1877 } else { 1878 error = EINVAL; 1879 snprintf(req->error_str, sizeof(req->error_str), 1880 "%s is not a disk or plain file", be_lun->dev_path); 1881 } 1882 VOP_UNLOCK(be_lun->vn, 0); 1883 1884 if (error != 0) { 1885 ctl_be_block_close(be_lun); 1886 return (error); 1887 } 1888 1889 be_lun->blocksize_shift = fls(be_lun->blocksize) - 1; 1890 be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift; 1891 1892 return (0); 1893} 1894 1895static int 1896ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 1897{ 1898 struct ctl_be_block_lun *be_lun; 1899 struct ctl_lun_create_params *params; 1900 char num_thread_str[16]; 1901 char tmpstr[32]; 1902 char *value; 1903 int retval, num_threads, unmap; 1904 int tmp_num_threads; 1905 1906 params = &req->reqdata.create; 1907 retval = 0; 1908 1909 num_threads = cbb_num_threads; 1910 1911 be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK); 1912 1913 be_lun->softc = softc; 1914 STAILQ_INIT(&be_lun->input_queue); 1915 STAILQ_INIT(&be_lun->config_write_queue); 1916 STAILQ_INIT(&be_lun->datamove_queue); 1917 sprintf(be_lun->lunname, "cblk%d", softc->num_luns); 1918 mtx_init(&be_lun->io_lock, "cblk io lock", NULL, MTX_DEF); 1919 mtx_init(&be_lun->queue_lock, "cblk queue lock", NULL, MTX_DEF); 1920 ctl_init_opts(&be_lun->ctl_be_lun.options, 1921 req->num_be_args, req->kern_be_args); 1922 1923 be_lun->lun_zone = uma_zcreate(be_lun->lunname, CTLBLK_MAX_SEG, 1924 NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0); 1925 1926 if (be_lun->lun_zone == NULL) { 1927 snprintf(req->error_str, sizeof(req->error_str), 1928 "%s: error allocating UMA zone", __func__); 1929 goto bailout_error; 1930 } 1931 1932 if (params->flags & CTL_LUN_FLAG_DEV_TYPE) 1933 be_lun->ctl_be_lun.lun_type = params->device_type; 1934 else 1935 be_lun->ctl_be_lun.lun_type = T_DIRECT; 1936 1937 if (be_lun->ctl_be_lun.lun_type == T_DIRECT) { 1938 value = ctl_get_opt(&be_lun->ctl_be_lun.options, "file"); 1939 if (value == NULL) { 1940 snprintf(req->error_str, sizeof(req->error_str), 1941 "%s: no file argument specified", __func__); 1942 goto bailout_error; 1943 } 1944 be_lun->dev_path = strdup(value, M_CTLBLK); 1945 1946 retval = ctl_be_block_open(softc, be_lun, req); 1947 if (retval != 0) { 1948 retval = 0; 1949 goto bailout_error; 1950 } 1951 1952 /* 1953 * Tell the user the size of the file/device. 1954 */ 1955 params->lun_size_bytes = be_lun->size_bytes; 1956 1957 /* 1958 * The maximum LBA is the size - 1. 1959 */ 1960 be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1; 1961 } else { 1962 /* 1963 * For processor devices, we don't have any size. 1964 */ 1965 be_lun->blocksize = 0; 1966 be_lun->pblockexp = 0; 1967 be_lun->pblockoff = 0; 1968 be_lun->size_blocks = 0; 1969 be_lun->size_bytes = 0; 1970 be_lun->ctl_be_lun.maxlba = 0; 1971 params->lun_size_bytes = 0; 1972 1973 /* 1974 * Default to just 1 thread for processor devices. 1975 */ 1976 num_threads = 1; 1977 } 1978 1979 /* 1980 * XXX This searching loop might be refactored to be combined with 1981 * the loop above, 1982 */ 1983 value = ctl_get_opt(&be_lun->ctl_be_lun.options, "num_threads"); 1984 if (value != NULL) { 1985 tmp_num_threads = strtol(value, NULL, 0); 1986 1987 /* 1988 * We don't let the user specify less than one 1989 * thread, but hope he's clueful enough not to 1990 * specify 1000 threads. 
1991		 */
1992		if (tmp_num_threads < 1) {
1993			snprintf(req->error_str, sizeof(req->error_str),
1994				 "%s: invalid number of threads %s",
1995				 __func__, value);
1996			goto bailout_error;
1997		}
1998		num_threads = tmp_num_threads;
1999	}
2000	unmap = 0;
2001	value = ctl_get_opt(&be_lun->ctl_be_lun.options, "unmap");
2002	if (value != NULL && strcmp(value, "on") == 0)
2003		unmap = 1;
2004
2005	be_lun->flags = CTL_BE_BLOCK_LUN_UNCONFIGURED;
2006	be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
2007	if (unmap)
2008		be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_UNMAP;
2009	be_lun->ctl_be_lun.be_lun = be_lun;
2010	be_lun->ctl_be_lun.blocksize = be_lun->blocksize;
2011	be_lun->ctl_be_lun.pblockexp = be_lun->pblockexp;
2012	be_lun->ctl_be_lun.pblockoff = be_lun->pblockoff;
2013	/* Tell the user the blocksize we ended up using */
2014	params->blocksize_bytes = be_lun->blocksize;
2015	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
2016		be_lun->ctl_be_lun.req_lun_id = params->req_lun_id;
2017		be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ;
2018	} else
2019		be_lun->ctl_be_lun.req_lun_id = 0;
2020
2021	be_lun->ctl_be_lun.lun_shutdown = ctl_be_block_lun_shutdown;
2022	be_lun->ctl_be_lun.lun_config_status =
2023	    ctl_be_block_lun_config_status;
2024	be_lun->ctl_be_lun.be = &ctl_be_block_driver;
2025
2026	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
2027		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
2028			 softc->num_luns);
2029		strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr,
2030			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
2031			sizeof(tmpstr)));
2032
2033		/* Tell the user what we used for a serial number */
2034		strncpy((char *)params->serial_num, tmpstr,
2035			ctl_min(sizeof(params->serial_num), sizeof(tmpstr)));
2036	} else {
2037		strncpy((char *)be_lun->ctl_be_lun.serial_num,
2038			params->serial_num,
2039			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
2040			sizeof(params->serial_num)));
2041	}
2042	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
2043		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
2044		strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr,
2045			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
2046			sizeof(tmpstr)));
2047
2048		/* Tell the user what we used for a device ID */
2049		strncpy((char *)params->device_id, tmpstr,
2050			ctl_min(sizeof(params->device_id), sizeof(tmpstr)));
2051	} else {
2052		strncpy((char *)be_lun->ctl_be_lun.device_id,
2053			params->device_id,
2054			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
2055			sizeof(params->device_id)));
2056	}
2057
2058	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_be_block_worker, be_lun);
2059
2060	be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
2061	    taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
2062
2063	if (be_lun->io_taskqueue == NULL) {
2064		snprintf(req->error_str, sizeof(req->error_str),
2065			 "%s: Unable to create taskqueue", __func__);
2066		goto bailout_error;
2067	}
2068
2069	/*
2070	 * Note that we start the same number of threads by default for
2071	 * both the file case and the block device case.  For the file
2072	 * case, we need multiple threads to allow concurrency, because the
2073	 * vnode interface is designed to be a blocking interface.  For the
2074	 * block device case, ZFS zvols at least will block the caller's
2075	 * context in many instances, and so we need multiple threads to
2076	 * overcome that problem.  Other block devices don't need as many
2077	 * threads, but they shouldn't cause too many problems.
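	 * Each of those threads runs ctl_be_block_worker(), which
	 * services the per-LUN input, config write, and datamove queues.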
2078 * 2079 * If the user wants to just have a single thread for a block 2080 * device, he can specify that when the LUN is created, or change 2081 * the tunable/sysctl to alter the default number of threads. 2082 */ 2083 retval = taskqueue_start_threads(&be_lun->io_taskqueue, 2084 /*num threads*/num_threads, 2085 /*priority*/PWAIT, 2086 /*thread name*/ 2087 "%s taskq", be_lun->lunname); 2088 2089 if (retval != 0) 2090 goto bailout_error; 2091 2092 be_lun->num_threads = num_threads; 2093 2094 mtx_lock(&softc->lock); 2095 softc->num_luns++; 2096 STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links); 2097 2098 mtx_unlock(&softc->lock); 2099 2100 retval = ctl_add_lun(&be_lun->ctl_be_lun); 2101 if (retval != 0) { 2102 mtx_lock(&softc->lock); 2103 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, 2104 links); 2105 softc->num_luns--; 2106 mtx_unlock(&softc->lock); 2107 snprintf(req->error_str, sizeof(req->error_str), 2108 "%s: ctl_add_lun() returned error %d, see dmesg for " 2109 "details", __func__, retval); 2110 retval = 0; 2111 goto bailout_error; 2112 } 2113 2114 mtx_lock(&softc->lock); 2115 2116 /* 2117 * Tell the config_status routine that we're waiting so it won't 2118 * clean up the LUN in the event of an error. 2119 */ 2120 be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING; 2121 2122 while (be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) { 2123 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0); 2124 if (retval == EINTR) 2125 break; 2126 } 2127 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; 2128 2129 if (be_lun->flags & CTL_BE_BLOCK_LUN_CONFIG_ERR) { 2130 snprintf(req->error_str, sizeof(req->error_str), 2131 "%s: LUN configuration error, see dmesg for details", 2132 __func__); 2133 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, 2134 links); 2135 softc->num_luns--; 2136 mtx_unlock(&softc->lock); 2137 goto bailout_error; 2138 } else { 2139 params->req_lun_id = be_lun->ctl_be_lun.lun_id; 2140 } 2141 2142 mtx_unlock(&softc->lock); 2143 2144 be_lun->disk_stats = devstat_new_entry("cbb", params->req_lun_id, 2145 be_lun->blocksize, 2146 DEVSTAT_ALL_SUPPORTED, 2147 be_lun->ctl_be_lun.lun_type 2148 | DEVSTAT_TYPE_IF_OTHER, 2149 DEVSTAT_PRIORITY_OTHER); 2150 2151 2152 req->status = CTL_LUN_OK; 2153 2154 return (retval); 2155 2156bailout_error: 2157 req->status = CTL_LUN_ERROR; 2158 2159 if (be_lun->io_taskqueue != NULL) 2160 taskqueue_free(be_lun->io_taskqueue); 2161 ctl_be_block_close(be_lun); 2162 if (be_lun->dev_path != NULL) 2163 free(be_lun->dev_path, M_CTLBLK); 2164 if (be_lun->lun_zone != NULL) 2165 uma_zdestroy(be_lun->lun_zone); 2166 ctl_free_opts(&be_lun->ctl_be_lun.options); 2167 mtx_destroy(&be_lun->queue_lock); 2168 mtx_destroy(&be_lun->io_lock); 2169 free(be_lun, M_CTLBLK); 2170 2171 return (retval); 2172} 2173 2174static int 2175ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 2176{ 2177 struct ctl_lun_rm_params *params; 2178 struct ctl_be_block_lun *be_lun; 2179 int retval; 2180 2181 params = &req->reqdata.rm; 2182 2183 mtx_lock(&softc->lock); 2184 2185 be_lun = NULL; 2186 2187 STAILQ_FOREACH(be_lun, &softc->lun_list, links) { 2188 if (be_lun->ctl_be_lun.lun_id == params->lun_id) 2189 break; 2190 } 2191 mtx_unlock(&softc->lock); 2192 2193 if (be_lun == NULL) { 2194 snprintf(req->error_str, sizeof(req->error_str), 2195 "%s: LUN %u is not managed by the block backend", 2196 __func__, params->lun_id); 2197 goto bailout_error; 2198 } 2199 2200 retval = ctl_disable_lun(&be_lun->ctl_be_lun); 2201 2202 if (retval != 0) { 2203 
snprintf(req->error_str, sizeof(req->error_str), 2204 "%s: error %d returned from ctl_disable_lun() for " 2205 "LUN %d", __func__, retval, params->lun_id); 2206 goto bailout_error; 2207 2208 } 2209 2210 retval = ctl_invalidate_lun(&be_lun->ctl_be_lun); 2211 if (retval != 0) { 2212 snprintf(req->error_str, sizeof(req->error_str), 2213 "%s: error %d returned from ctl_invalidate_lun() for " 2214 "LUN %d", __func__, retval, params->lun_id); 2215 goto bailout_error; 2216 } 2217 2218 mtx_lock(&softc->lock); 2219 2220 be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING; 2221 2222 while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { 2223 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0); 2224 if (retval == EINTR) 2225 break; 2226 } 2227 2228 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; 2229 2230 if ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { 2231 snprintf(req->error_str, sizeof(req->error_str), 2232 "%s: interrupted waiting for LUN to be freed", 2233 __func__); 2234 mtx_unlock(&softc->lock); 2235 goto bailout_error; 2236 } 2237 2238 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, links); 2239 2240 softc->num_luns--; 2241 mtx_unlock(&softc->lock); 2242 2243 taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task); 2244 2245 taskqueue_free(be_lun->io_taskqueue); 2246 2247 ctl_be_block_close(be_lun); 2248 2249 if (be_lun->disk_stats != NULL) 2250 devstat_remove_entry(be_lun->disk_stats); 2251 2252 uma_zdestroy(be_lun->lun_zone); 2253 2254 ctl_free_opts(&be_lun->ctl_be_lun.options); 2255 free(be_lun->dev_path, M_CTLBLK); 2256 mtx_destroy(&be_lun->queue_lock); 2257 mtx_destroy(&be_lun->io_lock); 2258 free(be_lun, M_CTLBLK); 2259 2260 req->status = CTL_LUN_OK; 2261 2262 return (0); 2263 2264bailout_error: 2265 2266 req->status = CTL_LUN_ERROR; 2267 2268 return (0); 2269} 2270 2271static int 2272ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun, 2273 struct ctl_lun_req *req) 2274{ 2275 struct vattr vattr; 2276 int error; 2277 struct ctl_lun_modify_params *params; 2278 2279 params = &req->reqdata.modify; 2280 2281 if (params->lun_size_bytes != 0) { 2282 be_lun->size_bytes = params->lun_size_bytes; 2283 } else { 2284 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); 2285 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); 2286 VOP_UNLOCK(be_lun->vn, 0); 2287 if (error != 0) { 2288 snprintf(req->error_str, sizeof(req->error_str), 2289 "error calling VOP_GETATTR() for file %s", 2290 be_lun->dev_path); 2291 return (error); 2292 } 2293 2294 be_lun->size_bytes = vattr.va_size; 2295 } 2296 2297 return (0); 2298} 2299 2300static int 2301ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun, 2302 struct ctl_lun_req *req) 2303{ 2304 struct ctl_be_block_devdata *dev_data; 2305 int error; 2306 struct ctl_lun_modify_params *params; 2307 uint64_t size_bytes; 2308 2309 params = &req->reqdata.modify; 2310 2311 dev_data = &be_lun->backend.dev; 2312 if (!dev_data->csw->d_ioctl) { 2313 snprintf(req->error_str, sizeof(req->error_str), 2314 "%s: no d_ioctl for device %s!", __func__, 2315 be_lun->dev_path); 2316 return (ENODEV); 2317 } 2318 2319 error = dev_data->csw->d_ioctl(dev_data->cdev, DIOCGMEDIASIZE, 2320 (caddr_t)&size_bytes, FREAD, 2321 curthread); 2322 if (error) { 2323 snprintf(req->error_str, sizeof(req->error_str), 2324 "%s: error %d returned for DIOCGMEDIASIZE ioctl " 2325 "on %s!", __func__, error, be_lun->dev_path); 2326 return (error); 2327 } 2328 2329 if (params->lun_size_bytes != 0) { 2330 if (params->lun_size_bytes > size_bytes) { 2331 snprintf(req->error_str, 
sizeof(req->error_str), 2332 "%s: requested LUN size %ju > backing device " 2333 "size %ju", __func__, 2334 (uintmax_t)params->lun_size_bytes, 2335 (uintmax_t)size_bytes); 2336 return (EINVAL); 2337 } 2338 2339 be_lun->size_bytes = params->lun_size_bytes; 2340 } else { 2341 be_lun->size_bytes = size_bytes; 2342 } 2343 2344 return (0); 2345} 2346 2347static int 2348ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 2349{ 2350 struct ctl_lun_modify_params *params; 2351 struct ctl_be_block_lun *be_lun; 2352 uint64_t oldsize; 2353 int error; 2354 2355 params = &req->reqdata.modify; 2356 2357 mtx_lock(&softc->lock); 2358 2359 be_lun = NULL; 2360 2361 STAILQ_FOREACH(be_lun, &softc->lun_list, links) { 2362 if (be_lun->ctl_be_lun.lun_id == params->lun_id) 2363 break; 2364 } 2365 mtx_unlock(&softc->lock); 2366 2367 if (be_lun == NULL) { 2368 snprintf(req->error_str, sizeof(req->error_str), 2369 "%s: LUN %u is not managed by the block backend", 2370 __func__, params->lun_id); 2371 goto bailout_error; 2372 } 2373 2374 if (params->lun_size_bytes != 0) { 2375 if (params->lun_size_bytes < be_lun->blocksize) { 2376 snprintf(req->error_str, sizeof(req->error_str), 2377 "%s: LUN size %ju < blocksize %u", __func__, 2378 params->lun_size_bytes, be_lun->blocksize); 2379 goto bailout_error; 2380 } 2381 } 2382 2383 oldsize = be_lun->size_bytes; 2384 if (be_lun->vn->v_type == VREG) 2385 error = ctl_be_block_modify_file(be_lun, req); 2386 else 2387 error = ctl_be_block_modify_dev(be_lun, req); 2388 if (error != 0) 2389 goto bailout_error; 2390 2391 if (be_lun->size_bytes != oldsize) { 2392 be_lun->size_blocks = be_lun->size_bytes >> 2393 be_lun->blocksize_shift; 2394 2395 /* 2396 * The maximum LBA is the size - 1. 2397 * 2398 * XXX: Note that this field is being updated without locking, 2399 * which might cause problems on 32-bit architectures. 2400 */ 2401 be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1; 2402 ctl_lun_capacity_changed(&be_lun->ctl_be_lun); 2403 } 2404 2405 /* Tell the user the exact size we ended up using */ 2406 params->lun_size_bytes = be_lun->size_bytes; 2407 2408 req->status = CTL_LUN_OK; 2409 2410 return (0); 2411 2412bailout_error: 2413 req->status = CTL_LUN_ERROR; 2414 2415 return (0); 2416} 2417 2418static void 2419ctl_be_block_lun_shutdown(void *be_lun) 2420{ 2421 struct ctl_be_block_lun *lun; 2422 struct ctl_be_block_softc *softc; 2423 2424 lun = (struct ctl_be_block_lun *)be_lun; 2425 2426 softc = lun->softc; 2427 2428 mtx_lock(&softc->lock); 2429 lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED; 2430 if (lun->flags & CTL_BE_BLOCK_LUN_WAITING) 2431 wakeup(lun); 2432 mtx_unlock(&softc->lock); 2433 2434} 2435 2436static void 2437ctl_be_block_lun_config_status(void *be_lun, ctl_lun_config_status status) 2438{ 2439 struct ctl_be_block_lun *lun; 2440 struct ctl_be_block_softc *softc; 2441 2442 lun = (struct ctl_be_block_lun *)be_lun; 2443 softc = lun->softc; 2444 2445 if (status == CTL_LUN_CONFIG_OK) { 2446 mtx_lock(&softc->lock); 2447 lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED; 2448 if (lun->flags & CTL_BE_BLOCK_LUN_WAITING) 2449 wakeup(lun); 2450 mtx_unlock(&softc->lock); 2451 2452 /* 2453 * We successfully added the LUN, attempt to enable it. 
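		 * If the enable fails, all we can do is log it and
		 * invalidate the LUN again.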
2454 */ 2455 if (ctl_enable_lun(&lun->ctl_be_lun) != 0) { 2456 printf("%s: ctl_enable_lun() failed!\n", __func__); 2457 if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) { 2458 printf("%s: ctl_invalidate_lun() failed!\n", 2459 __func__); 2460 } 2461 } 2462 2463 return; 2464 } 2465 2466 2467 mtx_lock(&softc->lock); 2468 lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED; 2469 lun->flags |= CTL_BE_BLOCK_LUN_CONFIG_ERR; 2470 wakeup(lun); 2471 mtx_unlock(&softc->lock); 2472} 2473 2474 2475static int 2476ctl_be_block_config_write(union ctl_io *io) 2477{ 2478 struct ctl_be_block_lun *be_lun; 2479 struct ctl_be_lun *ctl_be_lun; 2480 int retval; 2481 2482 retval = 0; 2483 2484 DPRINTF("entered\n"); 2485 2486 ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[ 2487 CTL_PRIV_BACKEND_LUN].ptr; 2488 be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun; 2489 2490 switch (io->scsiio.cdb[0]) { 2491 case SYNCHRONIZE_CACHE: 2492 case SYNCHRONIZE_CACHE_16: 2493 case WRITE_SAME_10: 2494 case WRITE_SAME_16: 2495 case UNMAP: 2496 /* 2497 * The upper level CTL code will filter out any CDBs with 2498 * the immediate bit set and return the proper error. 2499 * 2500 * We don't really need to worry about what LBA range the 2501 * user asked to be synced out. When they issue a sync 2502 * cache command, we'll sync out the whole thing. 2503 */ 2504 mtx_lock(&be_lun->queue_lock); 2505 STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr, 2506 links); 2507 mtx_unlock(&be_lun->queue_lock); 2508 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 2509 break; 2510 case START_STOP_UNIT: { 2511 struct scsi_start_stop_unit *cdb; 2512 2513 cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb; 2514 2515 if (cdb->how & SSS_START) 2516 retval = ctl_start_lun(ctl_be_lun); 2517 else { 2518 retval = ctl_stop_lun(ctl_be_lun); 2519 /* 2520 * XXX KDM Copan-specific offline behavior. 2521 * Figure out a reasonable way to port this? 2522 */ 2523#ifdef NEEDTOPORT 2524 if ((retval == 0) 2525 && (cdb->byte2 & SSS_ONOFFLINE)) 2526 retval = ctl_lun_offline(ctl_be_lun); 2527#endif 2528 } 2529 2530 /* 2531 * In general, the above routines should not fail. They 2532 * just set state for the LUN. So we've got something 2533 * pretty wrong here if we can't start or stop the LUN. 
2534 */ 2535 if (retval != 0) { 2536 ctl_set_internal_failure(&io->scsiio, 2537 /*sks_valid*/ 1, 2538 /*retry_count*/ 0xf051); 2539 retval = CTL_RETVAL_COMPLETE; 2540 } else { 2541 ctl_set_success(&io->scsiio); 2542 } 2543 ctl_config_write_done(io); 2544 break; 2545 } 2546 default: 2547 ctl_set_invalid_opcode(&io->scsiio); 2548 ctl_config_write_done(io); 2549 retval = CTL_RETVAL_COMPLETE; 2550 break; 2551 } 2552 2553 return (retval); 2554 2555} 2556 2557static int 2558ctl_be_block_config_read(union ctl_io *io) 2559{ 2560 return (0); 2561} 2562 2563static int 2564ctl_be_block_lun_info(void *be_lun, struct sbuf *sb) 2565{ 2566 struct ctl_be_block_lun *lun; 2567 int retval; 2568 2569 lun = (struct ctl_be_block_lun *)be_lun; 2570 retval = 0; 2571 2572 retval = sbuf_printf(sb, "\t<num_threads>"); 2573 2574 if (retval != 0) 2575 goto bailout; 2576 2577 retval = sbuf_printf(sb, "%d", lun->num_threads); 2578 2579 if (retval != 0) 2580 goto bailout; 2581 2582 retval = sbuf_printf(sb, "</num_threads>\n"); 2583 2584bailout: 2585 2586 return (retval); 2587} 2588 2589int 2590ctl_be_block_init(void) 2591{ 2592 struct ctl_be_block_softc *softc; 2593 int retval; 2594 2595 softc = &backend_block_softc; 2596 retval = 0; 2597 2598 mtx_init(&softc->lock, "ctlblock", NULL, MTX_DEF); 2599 beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io), 2600 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 2601 STAILQ_INIT(&softc->disk_list); 2602 STAILQ_INIT(&softc->lun_list); 2603 2604 return (retval); 2605} 2606
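The capacity bookkeeping in ctl_be_block_open() and ctl_be_block_modify() above reduces to shifts because the backend assumes a power-of-two block size: the shift count is fls(blocksize) - 1, the block count is the byte size shifted right by that amount, and the maximum LBA is the block count minus one. The stand-alone, user-space sketch below walks through that arithmetic; local_fls() and the example sizes are illustrative stand-ins, not part of the driver.

/*
 * Sketch of the size/LBA math used by ctl_be_block_open() and
 * ctl_be_block_modify().  Assumes blocksize is a power of two,
 * as the backend does.
 */
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's fls(): 1-based index of the MSB. */
static int
local_fls(uint32_t mask)
{
	int bit;

	for (bit = 0; mask != 0; bit++)
		mask >>= 1;
	return (bit);
}

int
main(void)
{
	uint64_t size_bytes = 10ULL * 1024 * 1024 * 1024; /* example: 10 GiB backing store */
	uint32_t blocksize = 4096;                        /* example: 4 KiB sectors */
	int blocksize_shift;
	uint64_t size_blocks, maxlba;

	blocksize_shift = local_fls(blocksize) - 1;     /* 4096 -> 12 */
	size_blocks = size_bytes >> blocksize_shift;    /* bytes / blocksize */
	maxlba = size_blocks - 1;                       /* highest addressable LBA */

	printf("shift=%d blocks=%ju maxlba=%ju\n",
	    blocksize_shift, (uintmax_t)size_blocks, (uintmax_t)maxlba);
	return (0);
}

If the block size were not a power of two, the shift would no longer match a true division, so the computation only holds for power-of-two sector sizes. For reference, the "file", "num_threads", and "unmap" strings looked up in ctl_be_block_create() are the option names a management tool hands down with the create request (for example as ctladm's -o name=value arguments).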