old ( 312582 ) new ( 312834 )
1/*-
2 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
3 * Copyright (c) 2012 The FreeBSD Foundation
4 * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org>
5 * All rights reserved.
6 *
7 * Portions of this software were developed by Edward Tomasz Napierala
8 * under sponsorship from the FreeBSD Foundation.

--- 29 unchanged lines hidden (view full) ---

38 * CAM Target Layer, a SCSI device emulation subsystem.
39 *
40 * Author: Ken Merry <ken@FreeBSD.org>
41 */
42
43#define _CTL_C
44
45#include <sys/cdefs.h>
46__FBSDID("$FreeBSD: stable/11/sys/cam/ctl/ctl.c 312834 2017-01-26 20:49:19Z mav $");
47
48#include <sys/param.h>
49#include <sys/systm.h>
50#include <sys/ctype.h>
51#include <sys/kernel.h>
52#include <sys/types.h>
53#include <sys/kthread.h>
54#include <sys/bio.h>

--- 377 unchanged lines hidden (view full) ---

432 struct ctl_ooa *ooa_hdr,
433 struct ctl_ooa_entry *kern_entries);
434static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
435 struct thread *td);
436static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
437 struct ctl_be_lun *be_lun);
438static int ctl_free_lun(struct ctl_lun *lun);
439static void ctl_create_lun(struct ctl_be_lun *be_lun);
440
441static int ctl_do_mode_select(union ctl_io *io);
442static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
443 uint64_t res_key, uint64_t sa_res_key,
444 uint8_t type, uint32_t residx,
445 struct ctl_scsiio *ctsio,
446 struct scsi_per_res_out *cdb,
447 struct scsi_per_res_out_parms* param);
448static void ctl_pro_preempt_other(struct ctl_lun *lun,
449 union ctl_ha_msg *msg);
450static void ctl_hndl_per_res_out_on_other_sc(union ctl_io *io);
451static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
452static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
453static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
454static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len);
455static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len);
456static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
457 int alloc_len);
458static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,

--- 102 unchanged lines hidden (view full) ---

561static struct ctl_frontend ha_frontend =
562{
563 .name = "ha",
564};
565
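/*
 * Forward a datamove request to the peer HA node: the I/O state and its
 * scatter/gather list are packed into a CTL_MSG_DATAMOVE message.
 */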
566static void
567ctl_ha_datamove(union ctl_io *io)
568{
569 struct ctl_lun *lun = CTL_LUN(io);
570 struct ctl_sg_entry *sgl;
571 union ctl_ha_msg msg;
572 uint32_t sg_entries_sent;
573 int do_sg_copy, i, j;
574
575 memset(&msg.dt, 0, sizeof(msg.dt));
576 msg.hdr.msg_type = CTL_MSG_DATAMOVE;
577 msg.hdr.original_sc = io->io_hdr.original_sc;
578 msg.hdr.serializing_sc = io;
579 msg.hdr.nexus = io->io_hdr.nexus;
580 msg.hdr.status = io->io_hdr.status;
581 msg.dt.flags = io->io_hdr.flags;
582

--- 1213 unchanged lines hidden (view full) ---

1796 make_dev_args_init(&args);
1797 args.mda_devsw = &ctl_cdevsw;
1798 args.mda_uid = UID_ROOT;
1799 args.mda_gid = GID_OPERATOR;
1800 args.mda_mode = 0600;
1801 args.mda_si_drv1 = softc;
1802 error = make_dev_s(&args, &softc->dev, "cam/ctl");
1803 if (error != 0) {
1804 free(softc, M_DEVBUF);
1805 control_softc = NULL;
1806 return (error);
1807 }
1808
1809 sysctl_ctx_init(&softc->sysctl_ctx);
1810 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
1811 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl",
1812 CTLFLAG_RD, 0, "CAM Target Layer");
1813
1814 if (softc->sysctl_tree == NULL) {
1815 printf("%s: unable to allocate sysctl tree\n", __func__);
1816 destroy_dev(softc->dev);
1817 free(softc, M_DEVBUF);
1818 control_softc = NULL;
1819 return (ENOMEM);
1820 }
1821
1822 mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
1823 softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io),
1824 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
1825 softc->flags = 0;

--- 131 unchanged lines hidden (view full) ---

1957 ctl_tpc_shutdown(softc);
1958 uma_zdestroy(softc->io_zone);
1959 mtx_destroy(&softc->ctl_lock);
1960
1961 destroy_dev(softc->dev);
1962
1963 sysctl_ctx_free(&softc->sysctl_ctx);
1964
1965 free(softc, M_DEVBUF);
1966 control_softc = NULL;
1967}
1968
1969static int
1970ctl_module_event_handler(module_t mod, int what, void *arg)
1971{
1972
1973 switch (what) {

--- 225 unchanged lines hidden (view full) ---

2199 * standard case in ctl_scsiio_precheck(). Errors in this case need to get
2200 * sent back to the other side, but in the success case, we execute the
2201 * command on this side (XFER mode) or tell the other side to execute it
2202 * (SER_ONLY mode).
2203 */
2204static void
2205ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
2206{
2207 struct ctl_softc *softc = CTL_SOFTC(ctsio);
2208 struct ctl_port *port = CTL_PORT(ctsio);
2209 union ctl_ha_msg msg_info;
2210 struct ctl_lun *lun;
2211 const struct ctl_cmd_entry *entry;
2212 uint32_t targ_lun;
2213
2214 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
2215
2216 /* Make sure that we know about this port. */
2217 if (port == NULL || (port->status & CTL_PORT_STATUS_ONLINE) == 0) {
2218 ctl_set_internal_failure(ctsio, /*sks_valid*/ 0,
2219 /*retry_count*/ 1);
2220 goto badjuju;
2221 }
2222
2223 /* Make sure that we know about this LUN. */
2224 mtx_lock(&softc->ctl_lock);
2225 if (targ_lun >= CTL_MAX_LUNS ||
2226 (lun = softc->ctl_luns[targ_lun]) == NULL) {
2227 mtx_unlock(&softc->ctl_lock);
2228
2229 /*
2230 * The other node would not send this request to us unless
2231 * it had received an announcement that we are the primary node for this LUN.
2232 * If this LUN does not exist now, it is probably the result of

--- 16 unchanged lines hidden (view full) ---

2249 }
2250
2251 entry = ctl_get_cmd_entry(ctsio, NULL);
2252 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
2253 mtx_unlock(&lun->lun_lock);
2254 goto badjuju;
2255 }
2256
2257 CTL_LUN(ctsio) = lun;
2258 CTL_BACKEND_LUN(ctsio) = lun->be_lun;
2259
2260 /*
2261 * Every I/O goes into the OOA queue for a
2262 * particular LUN, and stays there until completion.
2263 */
2264#ifdef CTL_TIME_IO
2265 if (TAILQ_EMPTY(&lun->ooa_queue))
2266 lun->idle_time += getsbinuptime() - lun->last_busy;

--- 1351 unchanged lines hidden (view full) ---
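	/* LUNs up to 0x3fff use the flat addressing format; larger values use extended LUN formats. */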

3618 if (l <= 0x3fff)
3619 return (((uint64_t)RPL_LUNDATA_ATYP_FLAT << 56) | (l << 48));
3620 if (l <= 0xffffff)
3621 return (((uint64_t)(RPL_LUNDATA_ATYP_EXTLUN | 0x12) << 56) |
3622 (l << 32));
3623 return ((((uint64_t)RPL_LUNDATA_ATYP_EXTLUN | 0x22) << 56) | (l << 16));
3624}
3625
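/* Find the first clear bit in the bitmask within the range [first, last). */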
3626int
3627ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last)
3628{
3629 int i;
3630
3631 for (i = first; i < last; i++) {
3632 if ((mask[i / 32] & (1 << (i % 32))) == 0)
3633 return (i);

--- 101 unchanged lines hidden (view full) ---

3735/*
3736 * ctl_softc, pool_name, total_ctl_io are passed in.
3737 * npool is passed out.
3738 */
3739int
3740ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name,
3741 uint32_t total_ctl_io, void **npool)
3742{
3743 struct ctl_io_pool *pool;
3744
3745 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL,
3746 M_NOWAIT | M_ZERO);
3747 if (pool == NULL)
3748 return (ENOMEM);
3749
3750 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name);
3751 pool->ctl_softc = ctl_softc;
3752#ifdef IO_POOLS
3753 pool->zone = uma_zsecond_create(pool->name, NULL,
3754 NULL, NULL, NULL, ctl_softc->io_zone);
3755 /* uma_prealloc(pool->zone, total_ctl_io); */
3756#else
3757 pool->zone = ctl_softc->io_zone;
3758#endif
3759
3760 *npool = pool;
3761 return (0);
3762}
3763
3764void
3765ctl_pool_free(struct ctl_io_pool *pool)
3766{
3767
3768 if (pool == NULL)
3769 return;
3770
3771#ifdef IO_POOLS
3772 uma_zdestroy(pool->zone);
3773#endif
3774 free(pool, M_CTL);
3775}
3776
3777union ctl_io *
3778ctl_alloc_io(void *pool_ref)
3779{
3780 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
3781 union ctl_io *io;
3782
3783 io = uma_zalloc(pool->zone, M_WAITOK);
3784 if (io != NULL) {
3785 io->io_hdr.pool = pool_ref;
3786 CTL_SOFTC(io) = pool->ctl_softc;
3787 }
3788 return (io);
3789}
3790
3791union ctl_io *
3792ctl_alloc_io_nowait(void *pool_ref)
3793{
3794 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
3795 union ctl_io *io;
3796
3797 io = uma_zalloc(pool->zone, M_NOWAIT);
3798 if (io != NULL) {
3799 io->io_hdr.pool = pool_ref;
3800 CTL_SOFTC(io) = pool->ctl_softc;
3801 }
3802 return (io);
3803}
3804
3805void
3806ctl_free_io(union ctl_io *io)
3807{
3808 struct ctl_io_pool *pool;
3809
3810 if (io == NULL)
3811 return;
3812
3813 pool = (struct ctl_io_pool *)io->io_hdr.pool;
3814 uma_zfree(pool->zone, io);
3815}
3816
3817void
3818ctl_zero_io(union ctl_io *io)
3819{
3820 struct ctl_io_pool *pool;
3821
3822 if (io == NULL)
3823 return;
3824
3825 /*
3826 * May need to preserve linked list pointers at some point too.
3827 */
3828 pool = io->io_hdr.pool;
3829 memset(io, 0, sizeof(*io));
3830 io->io_hdr.pool = pool;
3831 CTL_SOFTC(io) = pool->ctl_softc;
3832}
3833
3834int
3835ctl_expand_number(const char *buf, uint64_t *num)
3836{
3837 char *endptr;
3838 uint64_t number;
3839 unsigned shift;

--- 790 unchanged lines hidden (view full) ---

4630 * Delete a LUN.
4631 * Assumptions:
4632 * - LUN has already been marked invalid and any pending I/O has been taken
4633 * care of.
4634 */
4635static int
4636ctl_free_lun(struct ctl_lun *lun)
4637{
4638 struct ctl_softc *softc = lun->ctl_softc;
4639 struct ctl_lun *nlun;
4640 int i;
4641
4642 mtx_assert(&softc->ctl_lock, MA_OWNED);
4643
4644 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links);
4645
4646 ctl_clear_mask(softc->ctl_lun_mask, lun->lun);
4647
4648 softc->ctl_luns[lun->lun] = NULL;
4649

--- 484 unchanged lines hidden (view full) ---

5134}
5135
5136/*
5137 * SCSI release command.
5138 */
5139int
5140ctl_scsi_release(struct ctl_scsiio *ctsio)
5141{
5142 struct ctl_lun *lun = CTL_LUN(ctsio);
5143 uint32_t residx;
5144
5145 CTL_DEBUG_PRINT(("ctl_scsi_release\n"));
5146
5147 residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
5148
5149 /*
5150 * XXX KDM right now, we only support LUN reservation. We don't
5151 * support 3rd party reservations, or extent reservations, which
5152 * might actually need the parameter list. If we've gotten this
5153 * far, we've got a LUN reservation. Anything else got kicked out
5154 * above. So, according to SPC, ignore the length.
5155 */

--- 15 unchanged lines hidden (view full) ---

5171 ctl_set_success(ctsio);
5172 ctl_done((union ctl_io *)ctsio);
5173 return (CTL_RETVAL_COMPLETE);
5174}
5175
5176int
5177ctl_scsi_reserve(struct ctl_scsiio *ctsio)
5178{
5179 struct ctl_lun *lun = CTL_LUN(ctsio);
5180 uint32_t residx;
5181
5182 CTL_DEBUG_PRINT(("ctl_reserve\n"));
5183
5184 residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
5185
5186 /*
5187 * XXX KDM right now, we only support LUN reservation. We don't
5188 * support 3rd party reservations, or extent reservations, which
5189 * might actually need the parameter list. If we've gotten this
5190 * far, we've got a LUN reservation. Anything else got kicked out
5191 * above. So, according to SPC, ignore the length.
5192 */

--- 18 unchanged lines hidden (view full) ---

5211 mtx_unlock(&lun->lun_lock);
5212 ctl_done((union ctl_io *)ctsio);
5213 return (CTL_RETVAL_COMPLETE);
5214}
5215
5216int
5217ctl_start_stop(struct ctl_scsiio *ctsio)
5218{
5219 struct ctl_lun *lun = CTL_LUN(ctsio);
5220 struct scsi_start_stop_unit *cdb;
5221 int retval;
5222
5223 CTL_DEBUG_PRINT(("ctl_start_stop\n"));
5224
5225 cdb = (struct scsi_start_stop_unit *)ctsio->cdb;
5226
5227 if ((cdb->how & SSS_PC_MASK) == 0) {
5228 if ((lun->flags & CTL_LUN_PR_RESERVED) &&
5229 (cdb->how & SSS_START) == 0) {
5230 uint32_t residx;
5231
5232 residx = ctl_get_initindex(&ctsio->io_hdr.nexus);

--- 32 unchanged lines hidden (view full) ---

5265
5266 retval = lun->backend->config_write((union ctl_io *)ctsio);
5267 return (retval);
5268}
5269
5270int
5271ctl_prevent_allow(struct ctl_scsiio *ctsio)
5272{
5273 struct ctl_lun *lun = CTL_LUN(ctsio);
5274 struct scsi_prevent *cdb;
5275 int retval;
5276 uint32_t initidx;
5277
5278 CTL_DEBUG_PRINT(("ctl_prevent_allow\n"));
5279
5280 cdb = (struct scsi_prevent *)ctsio->cdb;
5281
5282 if ((lun->flags & CTL_LUN_REMOVABLE) == 0) {
5283 ctl_set_invalid_opcode(ctsio);
5284 ctl_done((union ctl_io *)ctsio);
5285 return (CTL_RETVAL_COMPLETE);
5286 }
5287

--- 17 unchanged lines hidden (view full) ---

5305 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but
5306 * we don't really do anything with the LBA and length fields if the user
5307 * passes them in. Instead we'll just flush out the cache for the entire
5308 * LUN.
5309 */
5310int
5311ctl_sync_cache(struct ctl_scsiio *ctsio)
5312{
5313 struct ctl_lun *lun = CTL_LUN(ctsio);
5314 struct ctl_lba_len_flags *lbalen;
5315 uint64_t starting_lba;
5316 uint32_t block_count;
5317 int retval;
5318 uint8_t byte2;
5319
5320 CTL_DEBUG_PRINT(("ctl_sync_cache\n"));
5321
5322 retval = 0;
5323
5324 switch (ctsio->cdb[0]) {
5325 case SYNCHRONIZE_CACHE: {
5326 struct scsi_sync_cache *cdb;
5327 cdb = (struct scsi_sync_cache *)ctsio->cdb;
5328
5329 starting_lba = scsi_4btoul(cdb->begin_lba);

--- 39 unchanged lines hidden (view full) ---

5369bailout:
5370 return (retval);
5371}
5372
5373int
5374ctl_format(struct ctl_scsiio *ctsio)
5375{
5376 struct scsi_format *cdb;
5377 int length, defect_list_len;
5378
5379 CTL_DEBUG_PRINT(("ctl_format\n"));
5380
5381 cdb = (struct scsi_format *)ctsio->cdb;
5382
5383 length = 0;
5384 if (cdb->byte2 & SF_FMTDATA) {
5385 if (cdb->byte2 & SF_LONGLIST)
5386 length = sizeof(struct scsi_format_header_long);
5387 else
5388 length = sizeof(struct scsi_format_header_short);

--- 62 unchanged lines hidden (view full) ---

5451
5452 ctl_done((union ctl_io *)ctsio);
5453 return (CTL_RETVAL_COMPLETE);
5454}
5455
5456int
5457ctl_read_buffer(struct ctl_scsiio *ctsio)
5458{
5459 struct ctl_lun *lun = CTL_LUN(ctsio);
5460 uint64_t buffer_offset;
5461 uint32_t len;
5462 uint8_t byte2;
5463 static uint8_t descr[4];
5464 static uint8_t echo_descr[4] = { 0 };
5465
5466 CTL_DEBUG_PRINT(("ctl_read_buffer\n"));
5467
5468 switch (ctsio->cdb[0]) {
5469 case READ_BUFFER: {
5470 struct scsi_read_buffer *cdb;
5471
5472 cdb = (struct scsi_read_buffer *)ctsio->cdb;
5473 buffer_offset = scsi_3btoul(cdb->offset);
5474 len = scsi_3btoul(cdb->length);
5475 byte2 = cdb->byte2;

--- 50 unchanged lines hidden (view full) ---

5526 ctsio->be_move_done = ctl_config_move_done;
5527 ctl_datamove((union ctl_io *)ctsio);
5528 return (CTL_RETVAL_COMPLETE);
5529}
5530
5531int
5532ctl_write_buffer(struct ctl_scsiio *ctsio)
5533{
5534 struct ctl_lun *lun = CTL_LUN(ctsio);
5535 struct scsi_write_buffer *cdb;
5536 int buffer_offset, len;
5537
5538 CTL_DEBUG_PRINT(("ctl_write_buffer\n"));
5539
5540 cdb = (struct scsi_write_buffer *)ctsio->cdb;
5541
5542 len = scsi_3btoul(cdb->length);
5543 buffer_offset = scsi_3btoul(cdb->offset);
5544
5545 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) {
5546 ctl_set_invalid_field(ctsio,
5547 /*sks_valid*/ 1,

--- 30 unchanged lines hidden (view full) ---

5578 ctl_set_success(ctsio);
5579 ctl_done((union ctl_io *)ctsio);
5580 return (CTL_RETVAL_COMPLETE);
5581}
5582
5583int
5584ctl_write_same(struct ctl_scsiio *ctsio)
5585{
5586 struct ctl_lun *lun = CTL_LUN(ctsio);
5587 struct ctl_lba_len_flags *lbalen;
5588 uint64_t lba;
5589 uint32_t num_blocks;
5590 int len, retval;
5591 uint8_t byte2;
5592
5593 CTL_DEBUG_PRINT(("ctl_write_same\n"));
5594
5595 switch (ctsio->cdb[0]) {
5596 case WRITE_SAME_10: {
5597 struct scsi_write_same_10 *cdb;
5598
5599 cdb = (struct scsi_write_same_10 *)ctsio->cdb;
5600
5601 lba = scsi_4btoul(cdb->addr);
5602 num_blocks = scsi_2btoul(cdb->length);

--- 87 unchanged lines hidden (view full) ---

5690 retval = lun->backend->config_write((union ctl_io *)ctsio);
5691
5692 return (retval);
5693}
5694
5695int
5696ctl_unmap(struct ctl_scsiio *ctsio)
5697{
5698 struct ctl_lun *lun = CTL_LUN(ctsio);
5699 struct scsi_unmap *cdb;
5700 struct ctl_ptr_len_flags *ptrlen;
5701 struct scsi_unmap_header *hdr;
5702 struct scsi_unmap_desc *buf, *end, *endnz, *range;
5703 uint64_t lba;
5704 uint32_t num_blocks;
5705 int len, retval;
5706 uint8_t byte2;
5707
5708 CTL_DEBUG_PRINT(("ctl_unmap\n"));
5709
5710 cdb = (struct scsi_unmap *)ctsio->cdb;
5711 len = scsi_2btoul(cdb->length);
5712 byte2 = cdb->byte2;
5713
5714 /*
5715 * If we've got a kernel request that hasn't been malloced yet,
5716 * malloc it and tell the caller the data buffer is here.
5717 */
5718 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {

--- 73 unchanged lines hidden (view full) ---

5792 ctl_done((union ctl_io *)ctsio);
5793 return (CTL_RETVAL_COMPLETE);
5794}
5795
5796int
5797ctl_default_page_handler(struct ctl_scsiio *ctsio,
5798 struct ctl_page_index *page_index, uint8_t *page_ptr)
5799{
5800 struct ctl_lun *lun = CTL_LUN(ctsio);
5801 uint8_t *current_cp;
5802 int set_ua;
5803 uint32_t initidx;
5804
5805 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
5806 set_ua = 0;
5807
5808 current_cp = (page_index->page_data + (page_index->page_len *
5809 CTL_PAGE_CURRENT));
5810
5811 mtx_lock(&lun->lun_lock);
5812 if (memcmp(current_cp, page_ptr, page_index->page_len)) {

--- 33 unchanged lines hidden (view full) ---

5846 callout_schedule(&lun->ie_callout, t * hz / 10);
5847 }
5848}
5849
5850int
5851ctl_ie_page_handler(struct ctl_scsiio *ctsio,
5852 struct ctl_page_index *page_index, uint8_t *page_ptr)
5853{
5854 struct ctl_lun *lun = CTL_LUN(ctsio);
5855 struct scsi_info_exceptions_page *pg;
5856 uint64_t t;
5857
5858 (void)ctl_default_page_handler(ctsio, page_index, page_ptr);
5859
5860 pg = (struct scsi_info_exceptions_page *)page_ptr;
5861 mtx_lock(&lun->lun_lock);
5862 if (pg->info_flags & SIEP_FLAGS_TEST) {
5863 lun->ie_asc = 0x5d;
5864 lun->ie_ascq = 0xff;
5865 if (pg->mrie == SIEP_MRIE_UA) {
5866 ctl_est_ua_all(lun, -1, CTL_UA_IE);
5867 lun->ie_reported = 1;

--- 20 unchanged lines hidden (view full) ---

5888 }
5889 mtx_unlock(&lun->lun_lock);
5890 return (CTL_RETVAL_COMPLETE);
5891}
5892
5893static int
5894ctl_do_mode_select(union ctl_io *io)
5895{
5896 struct ctl_lun *lun = CTL_LUN(io);
5897 struct scsi_mode_page_header *page_header;
5898 struct ctl_page_index *page_index;
5899 struct ctl_scsiio *ctsio;
5900 int page_len, page_len_offset, page_len_size;
5901 union ctl_modepage_info *modepage_info;
5902 uint16_t *len_left, *len_used;
5903 int retval, i;
5904
5905 ctsio = &io->scsiio;
5906 page_index = NULL;
5907 page_len = 0;
5908
5909 modepage_info = (union ctl_modepage_info *)
5910 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
5911 len_left = &modepage_info->header.len_left;
5912 len_used = &modepage_info->header.len_used;
5913
5914do_next_page:
5915

--- 196 unchanged lines hidden (view full) ---

6112
6113 return (CTL_RETVAL_COMPLETE);
6114
6115}
6116
6117int
6118ctl_mode_select(struct ctl_scsiio *ctsio)
6119{
6120 struct ctl_lun *lun = CTL_LUN(ctsio);
6121 union ctl_modepage_info *modepage_info;
6122 int bd_len, i, header_size, param_len, pf, rtd, sp;
6123 uint32_t initidx;
6124
6125 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
6126 switch (ctsio->cdb[0]) {
6127 case MODE_SELECT_6: {
6128 struct scsi_mode_select_6 *cdb;
6129
6130 cdb = (struct scsi_mode_select_6 *)ctsio->cdb;
6131
6132 pf = (cdb->byte2 & SMS_PF) ? 1 : 0;

--- 128 unchanged lines hidden (view full) ---

6261 modepage_info->header.len_used = header_size + bd_len;
6262
6263 return (ctl_do_mode_select((union ctl_io *)ctsio));
6264}
6265
6266int
6267ctl_mode_sense(struct ctl_scsiio *ctsio)
6268{
6269 struct ctl_lun *lun = CTL_LUN(ctsio);
6270 int pc, page_code, dbd, llba, subpage;
6271 int alloc_len, page_len, header_len, total_len;
6272 struct scsi_mode_block_descr *block_desc;
6273 struct ctl_page_index *page_index;
6274
6275 dbd = 0;
6276 llba = 0;
6277 block_desc = NULL;
6278
6279 CTL_DEBUG_PRINT(("ctl_mode_sense\n"));
6280
6281 switch (ctsio->cdb[0]) {
6282 case MODE_SENSE_6: {
6283 struct scsi_mode_sense_6 *cdb;
6284
6285 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb;
6286
6287 header_len = sizeof(struct scsi_mode_hdr_6);
6288 if (cdb->byte2 & SMS_DBD)

--- 319 unchanged lines hidden (view full) ---

6608 return (CTL_RETVAL_COMPLETE);
6609}
6610
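/*
 * Logical Block Provisioning log page: fill in the resource count parameters
 * using values queried from the backend's lun_attr() method.
 */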
6611int
6612ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio,
6613 struct ctl_page_index *page_index,
6614 int pc)
6615{
6616 struct ctl_lun *lun = CTL_LUN(ctsio);
6617 struct scsi_log_param_header *phdr;
6618 uint8_t *data;
6619 uint64_t val;
6620
6621 data = page_index->page_data;
6622
6623 if (lun->backend->lun_attr != NULL &&
6624 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail"))
6625 != UINT64_MAX) {
6626 phdr = (struct scsi_log_param_header *)data;
6627 scsi_ulto2b(0x0001, phdr->param_code);
6628 phdr->param_control = SLP_LBIN | SLP_LP;

--- 47 unchanged lines hidden (view full) ---

6676 return (0);
6677}
6678
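/*
 * Statistics and Performance log page: accumulate read/write operation counts,
 * byte counts and I/O times for this LUN.
 */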
6679int
6680ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio,
6681 struct ctl_page_index *page_index,
6682 int pc)
6683{
6684 struct ctl_lun *lun = CTL_LUN(ctsio);
6685 struct stat_page *data;
6686 uint64_t rn, wn, rb, wb;
6687 struct bintime rt, wt;
6688 int i;
6689
6690 data = (struct stat_page *)page_index->page_data;
6691
6692 scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code);
6693 data->sap.hdr.param_control = SLP_LBIN;
6694 data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) -
6695 sizeof(struct scsi_log_param_header);
6696 rn = wn = rb = wb = 0;
6697 bintime_clear(&rt);

--- 36 unchanged lines hidden (view full) ---

6734 return (0);
6735}
6736
6737int
6738ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio,
6739 struct ctl_page_index *page_index,
6740 int pc)
6741{
6742 struct ctl_lun *lun = CTL_LUN(ctsio);
6743 struct scsi_log_informational_exceptions *data;
6744
6745 data = (struct scsi_log_informational_exceptions *)page_index->page_data;
6746
6747 scsi_ulto2b(SLP_IE_GEN, data->hdr.param_code);
6748 data->hdr.param_control = SLP_LBIN;
6749 data->hdr.param_len = sizeof(struct scsi_log_informational_exceptions) -
6750 sizeof(struct scsi_log_param_header);
6751 data->ie_asc = lun->ie_asc;
6752 data->ie_ascq = lun->ie_ascq;
6753 data->temperature = 0xff;
6754 return (0);
6755}
6756
6757int
6758ctl_log_sense(struct ctl_scsiio *ctsio)
6759{
6760 struct ctl_lun *lun = CTL_LUN(ctsio);
6761 int i, pc, page_code, subpage;
6762 int alloc_len, total_len;
6763 struct ctl_page_index *page_index;
6764 struct scsi_log_sense *cdb;
6765 struct scsi_log_header *header;
6766
6767 CTL_DEBUG_PRINT(("ctl_log_sense\n"));
6768
6769 cdb = (struct scsi_log_sense *)ctsio->cdb;
6770 pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6;
6771 page_code = cdb->page & SLS_PAGE_CODE;
6772 subpage = cdb->subpage;
6773 alloc_len = scsi_2btoul(cdb->length);
6774
6775 page_index = NULL;
6776 for (i = 0; i < CTL_NUM_LOG_PAGES; i++) {

--- 60 unchanged lines hidden (view full) ---

6837 ctsio->be_move_done = ctl_config_move_done;
6838 ctl_datamove((union ctl_io *)ctsio);
6839 return (CTL_RETVAL_COMPLETE);
6840}
6841
6842int
6843ctl_read_capacity(struct ctl_scsiio *ctsio)
6844{
6845 struct ctl_lun *lun = CTL_LUN(ctsio);
6846 struct scsi_read_capacity *cdb;
6847 struct scsi_read_capacity_data *data;
6848 uint32_t lba;
6849
6850 CTL_DEBUG_PRINT(("ctl_read_capacity\n"));
6851
6852 cdb = (struct scsi_read_capacity *)ctsio->cdb;
6853
6854 lba = scsi_4btoul(cdb->addr);
6855 if (((cdb->pmi & SRC_PMI) == 0)
6856 && (lba != 0)) {
6857 ctl_set_invalid_field(/*ctsio*/ ctsio,
6858 /*sks_valid*/ 1,
6859 /*command*/ 1,
6860 /*field*/ 2,
6861 /*bit_valid*/ 0,
6862 /*bit*/ 0);
6863 ctl_done((union ctl_io *)ctsio);
6864 return (CTL_RETVAL_COMPLETE);
6865 }
6866
6867 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
6868 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr;
6869 ctsio->residual = 0;
6870 ctsio->kern_data_len = sizeof(*data);
6871 ctsio->kern_total_len = sizeof(*data);
6872 ctsio->kern_data_resid = 0;
6873 ctsio->kern_rel_offset = 0;
6874 ctsio->kern_sg_entries = 0;

--- 18 unchanged lines hidden (view full) ---

6893 ctsio->be_move_done = ctl_config_move_done;
6894 ctl_datamove((union ctl_io *)ctsio);
6895 return (CTL_RETVAL_COMPLETE);
6896}
6897
6898int
6899ctl_read_capacity_16(struct ctl_scsiio *ctsio)
6900{
6901 struct ctl_lun *lun = CTL_LUN(ctsio);
6902 struct scsi_read_capacity_16 *cdb;
6903 struct scsi_read_capacity_data_long *data;
6904 uint64_t lba;
6905 uint32_t alloc_len;
6906
6907 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n"));
6908
6909 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb;
6910
6911 alloc_len = scsi_4btoul(cdb->alloc_len);

--- 6 unchanged lines hidden (view full) ---

6918 /*command*/ 1,
6919 /*field*/ 2,
6920 /*bit_valid*/ 0,
6921 /*bit*/ 0);
6922 ctl_done((union ctl_io *)ctsio);
6923 return (CTL_RETVAL_COMPLETE);
6924 }
6925
6926 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
6927 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr;
6928
6929 if (sizeof(*data) < alloc_len) {
6930 ctsio->residual = alloc_len - sizeof(*data);
6931 ctsio->kern_data_len = sizeof(*data);
6932 ctsio->kern_total_len = sizeof(*data);
6933 } else {

--- 18 unchanged lines hidden (view full) ---

6952 ctsio->be_move_done = ctl_config_move_done;
6953 ctl_datamove((union ctl_io *)ctsio);
6954 return (CTL_RETVAL_COMPLETE);
6955}
6956
6957int
6958ctl_get_lba_status(struct ctl_scsiio *ctsio)
6959{
6960 struct ctl_lun *lun = CTL_LUN(ctsio);
6961 struct scsi_get_lba_status *cdb;
6962 struct scsi_get_lba_status_data *data;
6963 struct ctl_lba_len_flags *lbalen;
6964 uint64_t lba;
6965 uint32_t alloc_len, total_len;
6966 int retval;
6967
6968 CTL_DEBUG_PRINT(("ctl_get_lba_status\n"));
6969
6970 cdb = (struct scsi_get_lba_status *)ctsio->cdb;
6971 lba = scsi_8btou64(cdb->addr);
6972 alloc_len = scsi_4btoul(cdb->alloc_len);
6973
6974 if (lba > lun->be_lun->maxlba) {
6975 ctl_set_lba_out_of_range(ctsio, lba);
6976 ctl_done((union ctl_io *)ctsio);
6977 return (CTL_RETVAL_COMPLETE);

--- 96 unchanged lines hidden (view full) ---

7074 ctsio->be_move_done = ctl_config_move_done;
7075 ctl_datamove((union ctl_io *)ctsio);
7076 return (CTL_RETVAL_COMPLETE);
7077}
7078
7079int
7080ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
7081{
7082 struct ctl_softc *softc = CTL_SOFTC(ctsio);
7083 struct ctl_lun *lun = CTL_LUN(ctsio);
7084 struct scsi_maintenance_in *cdb;
7085 int retval;
7086 int alloc_len, ext, total_len = 0, g, pc, pg, ts, os;
7087 int num_ha_groups, num_target_ports, shared_group;
7088 struct ctl_port *port;
7089 struct scsi_target_group_data *rtg_ptr;
7090 struct scsi_target_group_data_extended *rtg_ext_ptr;
7091 struct scsi_target_port_group_descriptor *tpg_desc;
7092
7093 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n"));
7094
7095 cdb = (struct scsi_maintenance_in *)ctsio->cdb;
7096 retval = CTL_RETVAL_COMPLETE;
7097
7098 switch (cdb->byte2 & STG_PDF_MASK) {
7099 case STG_PDF_LENGTH:
7100 ext = 0;
7101 break;
7102 case STG_PDF_EXTENDED:
7103 ext = 1;

--- 149 unchanged lines hidden (view full) ---

7253 ctsio->be_move_done = ctl_config_move_done;
7254 ctl_datamove((union ctl_io *)ctsio);
7255 return(retval);
7256}
7257
7258int
7259ctl_report_supported_opcodes(struct ctl_scsiio *ctsio)
7260{
7261 struct ctl_lun *lun = CTL_LUN(ctsio);
7262 struct scsi_report_supported_opcodes *cdb;
7263 const struct ctl_cmd_entry *entry, *sentry;
7264 struct scsi_report_supported_opcodes_all *all;
7265 struct scsi_report_supported_opcodes_descr *descr;
7266 struct scsi_report_supported_opcodes_one *one;
7267 int retval;
7268 int alloc_len, total_len;
7269 int opcode, service_action, i, j, num;
7270
7271 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n"));
7272
7273 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb;
7274 retval = CTL_RETVAL_COMPLETE;
7275
7276 opcode = cdb->requested_opcode;
7277 service_action = scsi_2btoul(cdb->requested_service_action);
7278 switch (cdb->options & RSO_OPTIONS_MASK) {
7279 case RSO_OPTIONS_ALL:
7280 num = 0;
7281 for (i = 0; i < 256; i++) {

--- 248 unchanged lines hidden (view full) ---

7530 ctsio->be_move_done = ctl_config_move_done;
7531 ctl_datamove((union ctl_io *)ctsio);
7532 return (retval);
7533}
7534
7535int
7536ctl_persistent_reserve_in(struct ctl_scsiio *ctsio)
7537{
7538 struct ctl_softc *softc = CTL_SOFTC(ctsio);
7539 struct ctl_lun *lun = CTL_LUN(ctsio);
7540 struct scsi_per_res_in *cdb;
7541 int alloc_len, total_len = 0;
7542 /* struct scsi_per_res_in_rsrv in_data; */
7543 uint64_t key;
7544
7545 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n"));
7546
7547 cdb = (struct scsi_per_res_in *)ctsio->cdb;
7548
7549 alloc_len = scsi_2btoul(cdb->length);
7550
7551retry:
7552 mtx_lock(&lun->lun_lock);
7553 switch (cdb->action) {
7554 case SPRI_RK: /* read keys */
7555 total_len = sizeof(struct scsi_per_res_in_keys) +
7556 lun->pr_key_count *
7557 sizeof(struct scsi_per_res_key);
7558 break;

--- 546 unchanged lines hidden (view full) ---

8105 lun->pr_generation++;
8106
8107}
8108
8109
8110int
8111ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
8112{
8113 struct ctl_softc *softc = CTL_SOFTC(ctsio);
8114 struct ctl_lun *lun = CTL_LUN(ctsio);
8115 int retval;
8116 u_int32_t param_len;
8117 struct scsi_per_res_out *cdb;
8118 struct scsi_per_res_out_parms* param;
8119 uint32_t residx;
8120 uint64_t res_key, sa_res_key, key;
8121 uint8_t type;
8122 union ctl_ha_msg persis_io;
8123 int i;
8124
8125 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n"));
8126
8127 cdb = (struct scsi_per_res_out *)ctsio->cdb;
8128 retval = CTL_RETVAL_COMPLETE;
8129
8130 /*
8131 * We only support whole-LUN scope. The scope & type are ignored for
8132 * register, register and ignore existing key and clear.
8133 * We sometimes ignore scope and type on preempts too!!
8134 * Verify reservation type here as well.
8135 */
8136 type = cdb->scope_type & SPR_TYPE_MASK;
8137 if ((cdb->action == SPRO_RESERVE)

--- 356 unchanged lines hidden (view full) ---

8494
8495/*
8496 * This routine is for handling a message from the other SC pertaining to
8497 * persistent reserve out. All the error checking will have been done
8498 * so only the action itself needs to be performed here to keep the two
8499 * in sync.
8500 */
8501static void
8502ctl_hndl_per_res_out_on_other_sc(union ctl_io *io)
8503{
8504 struct ctl_softc *softc = CTL_SOFTC(io);
8505 union ctl_ha_msg *msg = (union ctl_ha_msg *)&io->presio.pr_msg;
8506 struct ctl_lun *lun;
8507 int i;
8508 uint32_t residx, targ_lun;
8509
8510 targ_lun = msg->hdr.nexus.targ_mapped_lun;
8511 mtx_lock(&softc->ctl_lock);
8512 if (targ_lun >= CTL_MAX_LUNS ||
8513 (lun = softc->ctl_luns[targ_lun]) == NULL) {

--- 102 unchanged lines hidden (view full) ---

8616 }
8617
8618 mtx_unlock(&lun->lun_lock);
8619}
8620
8621int
8622ctl_read_write(struct ctl_scsiio *ctsio)
8623{
8624 struct ctl_lun *lun = CTL_LUN(ctsio);
8625 struct ctl_lba_len_flags *lbalen;
8626 uint64_t lba;
8627 uint32_t num_blocks;
8628 int flags, retval;
8629 int isread;
8630
8631 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0]));
8632
8633 flags = 0;
8634 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10
8635 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16;
8636 switch (ctsio->cdb[0]) {
8637 case READ_6:
8638 case WRITE_6: {

--- 168 unchanged lines hidden (view full) ---

8807
8808 retval = lun->backend->data_submit((union ctl_io *)ctsio);
8809 return (retval);
8810}
8811
8812static int
8813ctl_cnw_cont(union ctl_io *io)
8814{
8815 struct ctl_lun *lun = CTL_LUN(io);
8816 struct ctl_scsiio *ctsio;
8817 struct ctl_lba_len_flags *lbalen;
8818 int retval;
8819
8820 ctsio = &io->scsiio;
8821 ctsio->io_hdr.status = CTL_STATUS_NONE;
8822 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT;
8823 lbalen = (struct ctl_lba_len_flags *)
8824 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
8825 lbalen->flags &= ~CTL_LLF_COMPARE;
8826 lbalen->flags |= CTL_LLF_WRITE;
8827
8828 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n"));
8829 retval = lun->backend->data_submit((union ctl_io *)ctsio);
8830 return (retval);
8831}
8832
8833int
8834ctl_cnw(struct ctl_scsiio *ctsio)
8835{
8836 struct ctl_lun *lun = CTL_LUN(ctsio);
8837 struct ctl_lba_len_flags *lbalen;
8838 uint64_t lba;
8839 uint32_t num_blocks;
8840 int flags, retval;
8841
8842 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0]));
8843
8844 flags = 0;
8845 switch (ctsio->cdb[0]) {
8846 case COMPARE_AND_WRITE: {
8847 struct scsi_compare_and_write *cdb;
8848
8849 cdb = (struct scsi_compare_and_write *)ctsio->cdb;

--- 64 unchanged lines hidden (view full) ---

8914 CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n"));
8915 retval = lun->backend->data_submit((union ctl_io *)ctsio);
8916 return (retval);
8917}
8918
8919int
8920ctl_verify(struct ctl_scsiio *ctsio)
8921{
8922 struct ctl_lun *lun = CTL_LUN(ctsio);
8923 struct ctl_lba_len_flags *lbalen;
8924 uint64_t lba;
8925 uint32_t num_blocks;
8926 int bytchk, flags;
8927 int retval;
8928
8929 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0]));
8930
8931 bytchk = 0;
8932 flags = CTL_LLF_FUA;
8933 switch (ctsio->cdb[0]) {
8934 case VERIFY_10: {
8935 struct scsi_verify_10 *cdb;
8936

--- 79 unchanged lines hidden (view full) ---

9016 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n"));
9017 retval = lun->backend->data_submit((union ctl_io *)ctsio);
9018 return (retval);
9019}
9020
9021int
9022ctl_report_luns(struct ctl_scsiio *ctsio)
9023{
9024 struct ctl_softc *softc = CTL_SOFTC(ctsio);
9025 struct ctl_port *port = CTL_PORT(ctsio);
9026 struct ctl_lun *lun, *request_lun = CTL_LUN(ctsio);
9027 struct scsi_report_luns *cdb;
9028 struct scsi_report_luns_data *lun_data;
9029 int num_filled, num_luns, num_port_luns, retval;
9030 uint32_t alloc_len, lun_datalen;
9031 uint32_t initidx, targ_lun_id, lun_id;
9032
9033 retval = CTL_RETVAL_COMPLETE;
9034 cdb = (struct scsi_report_luns *)ctsio->cdb;
9035
9036 CTL_DEBUG_PRINT(("ctl_report_luns\n"));
9037
9038 num_luns = 0;
9039 num_port_luns = port->lun_map ? port->lun_map_size : CTL_MAX_LUNS;
9040 mtx_lock(&softc->ctl_lock);
9041 for (targ_lun_id = 0; targ_lun_id < num_port_luns; targ_lun_id++) {
9042 if (ctl_lun_map_from_port(port, targ_lun_id) != UINT32_MAX)

--- 36 unchanged lines hidden (view full) ---

9079 /*command*/ 1,
9080 /*field*/ 6,
9081 /*bit_valid*/ 0,
9082 /*bit*/ 0);
9083 ctl_done((union ctl_io *)ctsio);
9084 return (retval);
9085 }
9086
9087 lun_datalen = sizeof(*lun_data) +
9088 (num_luns * sizeof(struct scsi_report_luns_lundata));
9089
9090 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO);
9091 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr;
9092 ctsio->kern_sg_entries = 0;
9093
9094 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);

--- 74 unchanged lines hidden (view full) ---

9169 ctsio->be_move_done = ctl_config_move_done;
9170 ctl_datamove((union ctl_io *)ctsio);
9171 return (retval);
9172}
9173
9174int
9175ctl_request_sense(struct ctl_scsiio *ctsio)
9176{
9177 struct ctl_softc *softc = CTL_SOFTC(ctsio);
9178 struct ctl_lun *lun = CTL_LUN(ctsio);
9179 struct scsi_request_sense *cdb;
9180 struct scsi_sense_data *sense_ptr;
9181 uint32_t initidx;
9182 int have_error;
9183 u_int sense_len = SSD_FULL_SIZE;
9184 scsi_sense_data_type sense_format;
9185 ctl_ua_type ua_type;
9186 uint8_t asc = 0, ascq = 0;
9187
9188 cdb = (struct scsi_request_sense *)ctsio->cdb;
9189
9190 CTL_DEBUG_PRINT(("ctl_request_sense\n"));
9191
9192 /*
9193 * Determine which sense format the user wants.
9194 */
9195 if (cdb->byte2 & SRS_DESC)
9196 sense_format = SSD_TYPE_DESC;
9197 else

--- 126 unchanged lines hidden (view full) ---

9324}
9325
9326/*
9327 * SCSI VPD page 0x00, the Supported VPD Pages page.
9328 */
9329static int
9330ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len)
9331{
9332 struct ctl_lun *lun = CTL_LUN(ctsio);
9333 struct scsi_vpd_supported_pages *pages;
9334 int sup_page_size;
9335 int p;
9336
9337 sup_page_size = sizeof(struct scsi_vpd_supported_pages) *
9338 SCSI_EVPD_NUM_SUPPORTED_PAGES;
9339 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO);
9340 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr;
9341 ctsio->kern_sg_entries = 0;
9342
9343 if (sup_page_size < alloc_len) {
9344 ctsio->residual = alloc_len - sup_page_size;

--- 52 unchanged lines hidden (view full) ---

9397}
9398
9399/*
9400 * SCSI VPD page 0x80, the Unit Serial Number page.
9401 */
9402static int
9403ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len)
9404{
9405 struct ctl_lun *lun = CTL_LUN(ctsio);
9406 struct scsi_vpd_unit_serial_number *sn_ptr;
9407 int data_len;
9408
9409 data_len = 4 + CTL_SN_LEN;
9410 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
9411 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr;
9412 if (data_len < alloc_len) {
9413 ctsio->residual = alloc_len - data_len;
9414 ctsio->kern_data_len = data_len;
9415 ctsio->kern_total_len = data_len;
9416 } else {

--- 37 unchanged lines hidden (view full) ---

9454
9455
9456/*
9457 * SCSI VPD page 0x86, the Extended INQUIRY Data page.
9458 */
9459static int
9460ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len)
9461{
9462 struct ctl_lun *lun = CTL_LUN(ctsio);
9463 struct scsi_vpd_extended_inquiry_data *eid_ptr;
9464 int data_len;
9465
9466 data_len = sizeof(struct scsi_vpd_extended_inquiry_data);
9467 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
9468 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr;
9469 ctsio->kern_sg_entries = 0;
9470
9471 if (data_len < alloc_len) {
9472 ctsio->residual = alloc_len - data_len;
9473 ctsio->kern_data_len = data_len;

--- 54 unchanged lines hidden (view full) ---

9528 ctsio->be_move_done = ctl_config_move_done;
9529 ctl_datamove((union ctl_io *)ctsio);
9530 return (CTL_RETVAL_COMPLETE);
9531}
9532
9533static int
9534ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len)
9535{
9536 struct ctl_lun *lun = CTL_LUN(ctsio);
9537 struct scsi_vpd_mode_page_policy *mpp_ptr;
9538 int data_len;
9539
9540 data_len = sizeof(struct scsi_vpd_mode_page_policy) +
9541 sizeof(struct scsi_vpd_mode_page_policy_descr);
9542
9543 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
9544 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr;
9545 ctsio->kern_sg_entries = 0;
9546
9547 if (data_len < alloc_len) {

--- 32 unchanged lines hidden (view full) ---

9580}
9581
9582/*
9583 * SCSI VPD page 0x83, the Device Identification page.
9584 */
9585static int
9586ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
9587{
9588 struct ctl_softc *softc = CTL_SOFTC(ctsio);
9589 struct ctl_port *port = CTL_PORT(ctsio);
9590 struct ctl_lun *lun = CTL_LUN(ctsio);
9591 struct scsi_vpd_device_id *devid_ptr;
9592 struct scsi_vpd_id_descriptor *desc;
9593 int data_len, g;
9594 uint8_t proto;
9595
9596 data_len = sizeof(struct scsi_vpd_device_id) +
9597 sizeof(struct scsi_vpd_id_descriptor) +
9598 sizeof(struct scsi_vpd_id_rel_trgt_port_id) +
9599 sizeof(struct scsi_vpd_id_descriptor) +
9600 sizeof(struct scsi_vpd_id_trgt_port_grp_id);
9601 if (lun && lun->lun_devid)
9602 data_len += lun->lun_devid->len;
9603 if (port && port->port_devid)

--- 96 unchanged lines hidden (view full) ---

9700 ctsio->be_move_done = ctl_config_move_done;
9701 ctl_datamove((union ctl_io *)ctsio);
9702 return (CTL_RETVAL_COMPLETE);
9703}
9704
9705static int
9706ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
9707{
9708 struct ctl_softc *softc = CTL_SOFTC(ctsio);
9709 struct ctl_lun *lun = CTL_LUN(ctsio);
9710 struct scsi_vpd_scsi_ports *sp;
9711 struct scsi_vpd_port_designation *pd;
9712 struct scsi_vpd_port_designation_cont *pdc;
9713 struct ctl_port *port;
9714 int data_len, num_target_ports, iid_len, id_len;
9715
9716 num_target_ports = 0;
9717 iid_len = 0;
9718 id_len = 0;
9719 mtx_lock(&softc->ctl_lock);
9720 STAILQ_FOREACH(port, &softc->port_list, links) {
9721 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
9722 continue;
9723 if (lun != NULL &&

--- 77 unchanged lines hidden (view full) ---

9801 ctsio->be_move_done = ctl_config_move_done;
9802 ctl_datamove((union ctl_io *)ctsio);
9803 return (CTL_RETVAL_COMPLETE);
9804}
9805
9806static int
9807ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len)
9808{
9809 struct ctl_lun *lun = CTL_LUN(ctsio);
9810 struct scsi_vpd_block_limits *bl_ptr;
9811 uint64_t ival;
9812
9813 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO);
9814 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr;
9815 ctsio->kern_sg_entries = 0;
9816
9817 if (sizeof(*bl_ptr) < alloc_len) {
9818 ctsio->residual = alloc_len - sizeof(*bl_ptr);
9819 ctsio->kern_data_len = sizeof(*bl_ptr);
9820 ctsio->kern_total_len = sizeof(*bl_ptr);

--- 55 unchanged lines hidden (view full) ---

9876 ctsio->be_move_done = ctl_config_move_done;
9877 ctl_datamove((union ctl_io *)ctsio);
9878 return (CTL_RETVAL_COMPLETE);
9879}
9880
9881static int
9882ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len)
9883{
9884 struct ctl_lun *lun = CTL_LUN(ctsio);
9885 struct scsi_vpd_block_device_characteristics *bdc_ptr;
9886 const char *value;
9887 u_int i;
9888
9889 ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO);
9890 bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr;
9891 ctsio->kern_sg_entries = 0;
9892
9893 if (sizeof(*bdc_ptr) < alloc_len) {
9894 ctsio->residual = alloc_len - sizeof(*bdc_ptr);
9895 ctsio->kern_data_len = sizeof(*bdc_ptr);
9896 ctsio->kern_total_len = sizeof(*bdc_ptr);

--- 37 unchanged lines hidden (view full) ---

9934 ctsio->be_move_done = ctl_config_move_done;
9935 ctl_datamove((union ctl_io *)ctsio);
9936 return (CTL_RETVAL_COMPLETE);
9937}
9938
9939static int
9940ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len)
9941{
9942 struct ctl_lun *lun = CTL_LUN(ctsio);
9943 struct scsi_vpd_logical_block_prov *lbp_ptr;
9944 const char *value;
9945
9946 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO);
9947 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr;
9948 ctsio->kern_sg_entries = 0;
9949
9950 if (sizeof(*lbp_ptr) < alloc_len) {
9951 ctsio->residual = alloc_len - sizeof(*lbp_ptr);
9952 ctsio->kern_data_len = sizeof(*lbp_ptr);
9953 ctsio->kern_total_len = sizeof(*lbp_ptr);

--- 41 unchanged lines hidden (view full) ---

9995}
9996
9997/*
9998 * INQUIRY with the EVPD bit set.
9999 */
10000static int
10001ctl_inquiry_evpd(struct ctl_scsiio *ctsio)
10002{
10003 struct ctl_lun *lun = CTL_LUN(ctsio);
10004 struct scsi_inquiry *cdb;
10005 int alloc_len, retval;
10006
10007 cdb = (struct scsi_inquiry *)ctsio->cdb;
10008 alloc_len = scsi_2btoul(cdb->length);
10009
10010 switch (cdb->page_code) {
10011 case SVPD_SUPPORTED_PAGES:
10012 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len);
10013 break;
10014 case SVPD_UNIT_SERIAL_NUMBER:

--- 46 unchanged lines hidden (view full) ---

10061}
10062
10063/*
10064 * Standard INQUIRY data.
10065 */
10066static int
10067ctl_inquiry_std(struct ctl_scsiio *ctsio)
10068{
10069 struct ctl_softc *softc = CTL_SOFTC(ctsio);
10070 struct ctl_port *port = CTL_PORT(ctsio);
10071 struct ctl_lun *lun = CTL_LUN(ctsio);
10072 struct scsi_inquiry_data *inq_ptr;
10073 struct scsi_inquiry *cdb;
10074 char *val;
10075 uint32_t alloc_len, data_len;
10076 ctl_port_type port_type;
10077
10078 port_type = port->port_type;
10079 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL)
10080 port_type = CTL_PORT_SCSI;
10081
10082 cdb = (struct scsi_inquiry *)ctsio->cdb;
10083 alloc_len = scsi_2btoul(cdb->length);
10084
10085 /*
10086 * We malloc the full inquiry data size here and fill it
10087 * in. If the user only asks for less, we'll give him
10088 * that much.
10089 */

--- 197 unchanged lines hidden (view full) ---

10287 }
10288
10289 return (retval);
10290}
10291
10292int
10293ctl_get_config(struct ctl_scsiio *ctsio)
10294{
10295 struct ctl_lun *lun = CTL_LUN(ctsio);
10296 struct scsi_get_config_header *hdr;
10297 struct scsi_get_config_feature *feature;
10298 struct scsi_get_config *cdb;
10299 uint32_t alloc_len, data_len;
10300 int rt, starting;
10301
10302 cdb = (struct scsi_get_config *)ctsio->cdb;
10303 rt = (cdb->rt & SGC_RT_MASK);
10304 starting = scsi_2btoul(cdb->starting_feature);
10305 alloc_len = scsi_2btoul(cdb->length);
10306
10307 data_len = sizeof(struct scsi_get_config_header) +
10308 sizeof(struct scsi_get_config_feature) + 8 +
10309 sizeof(struct scsi_get_config_feature) + 8 +

--- 194 unchanged lines hidden (view full) ---

10504 return (CTL_RETVAL_COMPLETE);
10505}
10506
10507int
10508ctl_get_event_status(struct ctl_scsiio *ctsio)
10509{
10510 struct scsi_get_event_status_header *hdr;
10511 struct scsi_get_event_status *cdb;
10512 uint32_t alloc_len, data_len;
10513 int notif_class;
10514
10515 cdb = (struct scsi_get_event_status *)ctsio->cdb;
10516 if ((cdb->byte2 & SGESN_POLLED) == 0) {
10517 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1,
10518 /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0);
10519 ctl_done((union ctl_io *)ctsio);
10520 return (CTL_RETVAL_COMPLETE);
10521 }
10522 notif_class = cdb->notif_class;

--- 27 unchanged lines hidden (view full) ---

10550 return (CTL_RETVAL_COMPLETE);
10551}
10552
10553int
10554ctl_mechanism_status(struct ctl_scsiio *ctsio)
10555{
10556 struct scsi_mechanism_status_header *hdr;
10557 struct scsi_mechanism_status *cdb;
10558 uint32_t alloc_len, data_len;
10559
10560 cdb = (struct scsi_mechanism_status *)ctsio->cdb;
10561 alloc_len = scsi_2btoul(cdb->length);
10562
10563 data_len = sizeof(struct scsi_mechanism_status_header);
10564 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
10565 ctsio->kern_sg_entries = 0;
10566 ctsio->kern_data_resid = 0;
10567 ctsio->kern_rel_offset = 0;

--- 31 unchanged lines hidden (view full) ---
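	/* Convert the LBA to BCD minutes/seconds/frames; CD audio uses 75 frames per second. */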

10599 buf[1] = bin2bcd((lba / 75) / 60);
10600 buf[2] = bin2bcd((lba / 75) % 60);
10601 buf[3] = bin2bcd(lba % 75);
10602}
10603
10604int
10605ctl_read_toc(struct ctl_scsiio *ctsio)
10606{
10607 struct ctl_lun *lun = CTL_LUN(ctsio);
10608 struct scsi_read_toc_hdr *hdr;
10609 struct scsi_read_toc_type01_descr *descr;
10610 struct scsi_read_toc *cdb;
10611 uint32_t alloc_len, data_len;
10612 int format, msf;
10613
10614 cdb = (struct scsi_read_toc *)ctsio->cdb;
10615 msf = (cdb->byte2 & CD_MSF) != 0;
10616 format = cdb->format;
10617 alloc_len = scsi_2btoul(cdb->data_len);
10618
10619 data_len = sizeof(struct scsi_read_toc_hdr);
10620 if (format == 0)
10621 data_len += 2 * sizeof(struct scsi_read_toc_type01_descr);

--- 730 unchanged lines hidden (view full) ---

11352{
11353 ctl_set_busy(&io->scsiio);
11354 ctl_done(io);
11355}
11356
11357static void
11358ctl_failover_lun(union ctl_io *rio)
11359{
11360 struct ctl_softc *softc = CTL_SOFTC(rio);
11361 struct ctl_lun *lun;
11362 struct ctl_io_hdr *io, *next_io;
11363 uint32_t targ_lun;
11364
11365 targ_lun = rio->io_hdr.nexus.targ_mapped_lun;
11366 CTL_DEBUG_PRINT(("FAILOVER for lun %ju\n", targ_lun));
11367
11368 /* Find and lock the LUN. */

--- 89 unchanged lines hidden (view full) ---

11458 * completed.
11459 */
11460 mtx_lock(&lun->lun_lock);
11461 if (lun->flags & CTL_LUN_DISABLED) {
11462 mtx_unlock(&lun->lun_lock);
11463 lun = NULL;
11464 }
11465 }
11466 CTL_LUN(ctsio) = lun;
11467 if (lun) {
11468 CTL_BACKEND_LUN(ctsio) = lun->be_lun;
11469
11470 /*
11471 * Every I/O goes into the OOA queue for a particular LUN,
11472 * and stays there until completion.
11473 */
11474#ifdef CTL_TIME_IO
11475 if (TAILQ_EMPTY(&lun->ooa_queue))
11476 lun->idle_time += getsbinuptime() - lun->last_busy;

--- 297 unchanged lines hidden (view full) ---

11774{
11775 return(ctl_target_reset(softc, io, CTL_UA_BUS_RESET));
11776}
11777
11778static int
11779ctl_target_reset(struct ctl_softc *softc, union ctl_io *io,
11780 ctl_ua_type ua_type)
11781{
11782 struct ctl_port *port = CTL_PORT(io);
11783 struct ctl_lun *lun;
11784 int retval;
11785
11786 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
11787 union ctl_ha_msg msg_info;
11788
11789 msg_info.hdr.nexus = io->io_hdr.nexus;
11790 if (ua_type==CTL_UA_TARG_RESET)

--- 4 unchanged lines hidden (view full) ---

11795 msg_info.hdr.original_sc = NULL;
11796 msg_info.hdr.serializing_sc = NULL;
11797 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
11798 sizeof(msg_info.task), M_WAITOK);
11799 }
11800 retval = 0;
11801
11802 mtx_lock(&softc->ctl_lock);
11803 STAILQ_FOREACH(lun, &softc->lun_list, links) {
11804 if (port != NULL &&
11805 ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX)
11806 continue;
11807 retval += ctl_do_lun_reset(lun, io, ua_type);
11808 }
11809 mtx_unlock(&softc->ctl_lock);
11810 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;

--- 145 unchanged lines hidden (view full) ---

11956 }
11957 }
11958 }
11959}
11960
11961static int
11962ctl_abort_task_set(union ctl_io *io)
11963{
11964 struct ctl_softc *softc = CTL_SOFTC(io);
11965 struct ctl_lun *lun;
11966 uint32_t targ_lun;
11967
11968 /*
11969 * Look up the LUN.
11970 */
11971 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
11972 mtx_lock(&softc->ctl_lock);

--- 17 unchanged lines hidden (view full) ---

11990 mtx_unlock(&lun->lun_lock);
11991 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
11992 return (0);
11993}
11994
11995static int
11996ctl_i_t_nexus_reset(union ctl_io *io)
11997{
11998 struct ctl_softc *softc = CTL_SOFTC(io);
11999 struct ctl_lun *lun;
12000 uint32_t initidx;
12001
12002 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
12003 union ctl_ha_msg msg_info;
12004
12005 msg_info.hdr.nexus = io->io_hdr.nexus;
12006 msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET;

--- 25 unchanged lines hidden (view full) ---

12032 mtx_unlock(&softc->ctl_lock);
12033 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
12034 return (0);
12035}
12036
12037static int
12038ctl_abort_task(union ctl_io *io)
12039{
12040 struct ctl_softc *softc = CTL_SOFTC(io);
12041 union ctl_io *xio;
12042 struct ctl_lun *lun;
12043#if 0
12044 struct sbuf sb;
12045 char printbuf[128];
12046#endif
12047 int found;
12048 uint32_t targ_lun;
12049
12050 found = 0;
12051
12052 /*
12053 * Look up the LUN.
12054 */
12055 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12056 mtx_lock(&softc->ctl_lock);
12057 if (targ_lun >= CTL_MAX_LUNS ||

--- 107 unchanged lines hidden (view full) ---

12165 }
12166 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
12167 return (0);
12168}
12169
12170static int
12171ctl_query_task(union ctl_io *io, int task_set)
12172{
12173 struct ctl_softc *softc = CTL_SOFTC(io);
12174 union ctl_io *xio;
12175 struct ctl_lun *lun;
12176 int found = 0;
12177 uint32_t targ_lun;
12178
12179 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12180 mtx_lock(&softc->ctl_lock);
12181 if (targ_lun >= CTL_MAX_LUNS ||
12182 (lun = softc->ctl_luns[targ_lun]) == NULL) {
12183 mtx_unlock(&softc->ctl_lock);
12184 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
12185 return (1);
12186 }

--- 18 unchanged lines hidden (view full) ---

12205 else
12206 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
12207 return (0);
12208}
12209
12210static int
12211ctl_query_async_event(union ctl_io *io)
12212{
12213 struct ctl_softc *softc = CTL_SOFTC(io);
12214 struct ctl_lun *lun;
12215 ctl_ua_type ua;
12216 uint32_t targ_lun, initidx;
12217
12218 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12219 mtx_lock(&softc->ctl_lock);
12220 if (targ_lun >= CTL_MAX_LUNS ||
12221 (lun = softc->ctl_luns[targ_lun]) == NULL) {
12222 mtx_unlock(&softc->ctl_lock);
12223 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
12224 return (1);
12225 }

--- 7 unchanged lines hidden (view full) ---

12233 else
12234 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
12235 return (0);
12236}
12237
12238static void
12239ctl_run_task(union ctl_io *io)
12240{
12241 struct ctl_softc *softc = CTL_SOFTC(io);
12242 int retval = 1;
12243
12244 CTL_DEBUG_PRINT(("ctl_run_task\n"));
12245 KASSERT(io->io_hdr.io_type == CTL_IO_TASK,
12246 ("ctl_run_task: Unextected io_type %d\n", io->io_hdr.io_type));
12247 io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED;
12248 bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp));
12249 switch (io->taskio.task_action) {

--- 45 unchanged lines hidden (view full) ---

12295
12296/*
12297 * For HA operation. Handle commands that come in from the other
12298 * controller.
12299 */
12300static void
12301ctl_handle_isc(union ctl_io *io)
12302{
12303 struct ctl_softc *softc = CTL_SOFTC(io);
12304 struct ctl_lun *lun;
12305 const struct ctl_cmd_entry *entry;
12306 uint32_t targ_lun;
12307
12308 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
12309 switch (io->io_hdr.msg_type) {
12310 case CTL_MSG_SERIALIZE:
12311 ctl_serialize_other_sc_cmd(&io->scsiio);

--- 27 unchanged lines hidden (view full) ---

12339 }
12340 mtx_lock(&lun->lun_lock);
12341 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
12342 ctl_check_blocked(lun);
12343 mtx_unlock(&lun->lun_lock);
12344 ctl_free_io(io);
12345 break;
12346 case CTL_MSG_PERS_ACTION:
12347 ctl_hndl_per_res_out_on_other_sc(io);
12348 ctl_free_io(io);
12349 break;
12350 case CTL_MSG_BAD_JUJU:
12351 ctl_done(io);
12352 break;
12353 case CTL_MSG_DATAMOVE: /* Only used in XFER mode */
12354 ctl_datamove_remote(io);
12355 break;

--- 156 unchanged lines hidden (view full) ---

12512
12513 ctl_datamove(io);
12514}
12515#endif /* CTL_IO_DELAY */
12516
12517void
12518ctl_datamove(union ctl_io *io)
12519{
12520 void (*fe_datamove)(union ctl_io *io);
12521
12522 mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED);
12523
12524 CTL_DEBUG_PRINT(("ctl_datamove\n"));
12525
12526#ifdef CTL_TIME_IO
12527 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
12528 char str[256];
12529 char path_str[64];
12530 struct sbuf sb;
12531
12532 ctl_scsi_path_string(io, path_str, sizeof(path_str));
12533 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);

--- 64 unchanged lines hidden (view full) ---

12598 }
12599
12600 /* Don't confuse frontend with zero length data move. */
12601 if (io->scsiio.kern_data_len == 0) {
12602 io->scsiio.be_move_done(io);
12603 return;
12604 }
12605
12606 fe_datamove = CTL_PORT(io)->fe_datamove;
12607 fe_datamove(io);
12608}
12609
12610static void
12611ctl_send_datamove_done(union ctl_io *io, int have_lock)
12612{
12613 union ctl_ha_msg msg;
12614#ifdef CTL_TIME_IO

--- 101 unchanged lines hidden (view full) ---

12716 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist;
12717
12718 /*
12719 * Use a custom move done callback, since we need to send completion
12720 * back to the other controller, not to the backend on this side.
12721 */
12722 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb;
12723
12724 fe_datamove = CTL_PORT(io)->fe_datamove;
12725 fe_datamove(io);
12726}
12727
12728static int
12729ctl_datamove_remote_dm_read_cb(union ctl_io *io)
12730{
12731#if 0
12732 char str[256];

--- 58 unchanged lines hidden (view full) ---

12791 /*
12792 * Use a custom move done callback, since we need to send completion
12793 * back to the other controller, not to the backend on this side.
12794 */
12795 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb;
12796
12797 /* XXX KDM add checks like the ones in ctl_datamove? */
12798
12799 fe_datamove = CTL_PORT(io)->fe_datamove;
12800 fe_datamove(io);
12801}
12802
12803static int
12804ctl_datamove_remote_sgl_setup(union ctl_io *io)
12805{
12806 struct ctl_sg_entry *local_sglist;
12807 uint32_t len_to_go;

--- 199 unchanged lines hidden (view full) ---

13007 * first. Once that is complete, the data gets DMAed into the remote
13008 * controller's memory. For reads, we DMA from the remote controller's
13009 * memory into our memory first, and then move it out to the FETD.
13010 */
13011static void
13012ctl_datamove_remote(union ctl_io *io)
13013{
13014
13015 mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED);
13016
13017 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
13018 ctl_failover_io(io, /*have_lock*/ 0);
13019 return;
13020 }
13021
13022 /*
13023 * Note that we look for an aborted I/O here, but don't do some of

--- 19 unchanged lines hidden (view full) ---

13043 io->io_hdr.port_status = 31339;
13044 ctl_send_datamove_done(io, /*have_lock*/ 0);
13045 }
13046}
13047
13048static void
13049ctl_process_done(union ctl_io *io)
13050{
13051 struct ctl_softc *softc = CTL_SOFTC(io);
13052 struct ctl_lun *lun = CTL_LUN(io);
13053 void (*fe_done)(union ctl_io *io);
13054 union ctl_ha_msg msg;
13055 uint32_t targ_port = io->io_hdr.nexus.targ_port;
13056
13057 CTL_DEBUG_PRINT(("ctl_process_done\n"));
13058 fe_done = softc->ctl_ports[targ_port]->fe_done;
13059
13060#ifdef CTL_TIME_IO

--- 39 unchanged lines hidden (view full) ---

13100 ctl_io_error_print(io, NULL);
13101 fe_done(io);
13102 return;
13103 default:
13104 panic("%s: Invalid CTL I/O type %d\n",
13105 __func__, io->io_hdr.io_type);
13106 }
13107
13108 if (lun == NULL) {
13109 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n",
13110 io->io_hdr.nexus.targ_mapped_lun));
13111 goto bailout;
13112 }
13113
13114 mtx_lock(&lun->lun_lock);
13115

--- 137 unchanged lines hidden (view full) ---

13253#ifdef CTL_WITH_CA
13254/*
13255 * Front end should call this if it doesn't do autosense. When the request
13256 * sense comes back in from the initiator, we'll dequeue this and send it.
13257 */
13258int
13259ctl_queue_sense(union ctl_io *io)
13260{
13261 struct ctl_softc *softc = CTL_SOFTC(io);
13262 struct ctl_port *port = CTL_PORT(io);
13263 struct ctl_lun *lun;
13264 uint32_t initidx, targ_lun;
13265
13266 CTL_DEBUG_PRINT(("ctl_queue_sense\n"));
13267
13268 targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);
13269
13270 /*
13271 * LUN lookup will likely move to the ctl_work_thread() once we
13272 * have our new queueing infrastructure (that doesn't put things on
13273 * a per-LUN queue initially). That is so that we can handle
13274 * things like an INQUIRY to a LUN that we don't have enabled. We
13275 * can't deal with that right now.

--- 31 unchanged lines hidden (view full) ---

13307
13308/*
13309 * Primary command inlet from frontend ports. All SCSI and task I/O
13310 * requests must go through this function.
13311 */
13312int
13313ctl_queue(union ctl_io *io)
13314{
13315 struct ctl_port *port = CTL_PORT(io);
13316
13317 CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0]));
13318
13319#ifdef CTL_TIME_IO
13320 io->io_hdr.start_time = time_uptime;
13321 getbinuptime(&io->io_hdr.start_bt);
13322#endif /* CTL_TIME_IO */
13323
13324 /* Map FE-specific LUN ID into global one. */
13325 io->io_hdr.nexus.targ_mapped_lun =
13326 ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);
13327
13328 switch (io->io_hdr.io_type) {
13329 case CTL_IO_SCSI:
13330 case CTL_IO_TASK:
13331 if (ctl_debug & CTL_DEBUG_CDB)
13332 ctl_io_print(io);

--- 16 unchanged lines hidden (view full) ---

13349 io = (union ctl_io *)arg;
13350 ctl_done(io);
13351}
13352#endif /* CTL_IO_DELAY */
13353
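/*
 * Note that the sequential-serialization point for this I/O has been reached,
 * and let any I/O queued behind it be re-checked for unblocking.
 */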
13354void
13355ctl_serseq_done(union ctl_io *io)
13356{
13357 struct ctl_lun *lun = CTL_LUN(io);
13358
13359 if (lun->be_lun == NULL ||
13360 lun->be_lun->serseq == CTL_LUN_SERSEQ_OFF)
13361 return;
13362 mtx_lock(&lun->lun_lock);
13363 io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE;
13364 ctl_check_blocked(lun);
13365 mtx_unlock(&lun->lun_lock);
13366}

--- 33 unchanged lines hidden (view full) ---

13400 */
13401 if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
13402 return;
13403
13404#ifdef CTL_IO_DELAY
13405 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
13406 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
13407 } else {
13408 struct ctl_lun *lun = CTL_LUN(io);
13409
13410 if ((lun != NULL)
13411 && (lun->delay_info.done_delay > 0)) {
13412
13413 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1);
13414 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
13415 callout_reset(&io->io_hdr.delay_callout,
13416 lun->delay_info.done_delay * hz,
13417 ctl_done_timer_wakeup, io);

--- 194 unchanged lines hidden (view full) ---

13612 mtx_unlock(&softc->ctl_lock);
13613 pause("-", CTL_LBP_PERIOD * hz);
13614 }
13615}
13616
13617static void
13618ctl_enqueue_incoming(union ctl_io *io)
13619{
13620 struct ctl_softc *softc = CTL_SOFTC(io);
13621 struct ctl_thread *thr;
13622 u_int idx;
13623
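	/*
	 * Hash the nexus (target port, initiator ID) to pick a worker thread,
	 * so all I/O from a given initiator-port pair stays on one thread.
	 */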
13624 idx = (io->io_hdr.nexus.targ_port * 127 +
13625 io->io_hdr.nexus.initid) % worker_threads;
13626 thr = &softc->threads[idx];
13627 mtx_lock(&thr->queue_lock);
13628 STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
13629 mtx_unlock(&thr->queue_lock);
13630 wakeup(thr);
13631}
13632
13633static void
13634ctl_enqueue_rtr(union ctl_io *io)
13635{
13636 struct ctl_softc *softc = CTL_SOFTC(io);
13637 struct ctl_thread *thr;
13638
13639 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
13640 mtx_lock(&thr->queue_lock);
13641 STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
13642 mtx_unlock(&thr->queue_lock);
13643 wakeup(thr);
13644}
13645
13646static void
13647ctl_enqueue_done(union ctl_io *io)
13648{
13649 struct ctl_softc *softc = CTL_SOFTC(io);
13650 struct ctl_thread *thr;
13651
13652 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
13653 mtx_lock(&thr->queue_lock);
13654 STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
13655 mtx_unlock(&thr->queue_lock);
13656 wakeup(thr);
13657}
13658
13659static void
13660ctl_enqueue_isc(union ctl_io *io)
13661{
13662 struct ctl_softc *softc = CTL_SOFTC(io);
13663 struct ctl_thread *thr;
13664
13665 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
13666 mtx_lock(&thr->queue_lock);
13667 STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
13668 mtx_unlock(&thr->queue_lock);
13669 wakeup(thr);
13670}
13671
13672/*
13673 * vim: ts=8
13674 */