/*-
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl.c#8 $
 */
/*
 * CAM Target Layer, a SCSI device emulation subsystem.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#define _CTL_C

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/cam/ctl/ctl.c 267992 2014-06-28 03:56:17Z hselasky $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/endian.h>
#include <sys/sysctl.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

struct ctl_softc *control_softc = NULL;

/*
 * Size and alignment macros needed for Copan-specific HA hardware.  These
 * can go away when the HA code is re-written, and uses busdma for any
 * hardware.
 */
#define CTL_ALIGN_8B(target, source, type)				\
	if (((uint32_t)source & 0x7) != 0)				\
		target = (type)(source + (0x8 - ((uint32_t)source & 0x7)));\
	else								\
		target = (type)source;

#define CTL_SIZE_8B(target, size)					\
	if ((size & 0x7) != 0)						\
		target = size + (0x8 - (size & 0x7));			\
	else								\
		target = size;

#define CTL_ALIGN_8B_MARGIN	16
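/*
 * Illustrative sketch only (not part of CTL): how the two macros above
 * behave.  The names raw_buf, aligned and padded_len are hypothetical.
 * CTL_ALIGN_8B() rounds a pointer up to the next 8-byte boundary (e.g.
 * 0x1003 -> 0x1008; an already-aligned 0x1008 is unchanged), and
 * CTL_SIZE_8B() rounds a length up to a multiple of 8 (e.g. 13 -> 16).
 */
#if 0
static void
ctl_align_8b_example(void)
{
	uint8_t raw_buf[32];		/* possibly misaligned scratch space */
	uint8_t *aligned;
	uint32_t padded_len;

	/* Round the buffer address up to an 8-byte boundary. */
	CTL_ALIGN_8B(aligned, raw_buf, uint8_t *);

	/* Round a 13-byte request up to 16 bytes. */
	CTL_SIZE_8B(padded_len, 13);
}
#endif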
/*
 * Template mode pages.
 */

/*
 * Note that these are default values only.  The actual values will be
 * filled in when the user does a mode sense.
 */
static struct copan_power_subpage power_page_default = {
	/*page_code*/ PWR_PAGE_CODE | SMPH_SPF,
	/*subpage*/ PWR_SUBPAGE_CODE,
	/*page_length*/ {(sizeof(struct copan_power_subpage) - 4) & 0xff00,
			 (sizeof(struct copan_power_subpage) - 4) & 0x00ff},
	/*page_version*/ PWR_VERSION,
	/* total_luns */ 26,
	/* max_active_luns*/ PWR_DFLT_MAX_LUNS,
	/*reserved*/ {0, 0, 0, 0, 0, 0, 0, 0, 0,
		      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		      0, 0, 0, 0, 0, 0}
};

static struct copan_power_subpage power_page_changeable = {
	/*page_code*/ PWR_PAGE_CODE | SMPH_SPF,
	/*subpage*/ PWR_SUBPAGE_CODE,
	/*page_length*/ {(sizeof(struct copan_power_subpage) - 4) & 0xff00,
			 (sizeof(struct copan_power_subpage) - 4) & 0x00ff},
	/*page_version*/ 0,
	/* total_luns */ 0,
	/* max_active_luns*/ 0,
	/*reserved*/ {0, 0, 0, 0, 0, 0, 0, 0, 0,
		      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		      0, 0, 0, 0, 0, 0}
};

static struct copan_aps_subpage aps_page_default = {
	APS_PAGE_CODE | SMPH_SPF, //page_code
	APS_SUBPAGE_CODE, //subpage
	{(sizeof(struct copan_aps_subpage) - 4) & 0xff00,
	 (sizeof(struct copan_aps_subpage) - 4) & 0x00ff}, //page_length
	APS_VERSION, //page_version
	0, //lock_active
	{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0} //reserved
};

static struct copan_aps_subpage aps_page_changeable = {
	APS_PAGE_CODE | SMPH_SPF, //page_code
	APS_SUBPAGE_CODE, //subpage
	{(sizeof(struct copan_aps_subpage) - 4) & 0xff00,
	 (sizeof(struct copan_aps_subpage) - 4) & 0x00ff}, //page_length
	0, //page_version
	0, //lock_active
	{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0} //reserved
};

static struct copan_debugconf_subpage debugconf_page_default = {
	DBGCNF_PAGE_CODE | SMPH_SPF,	/* page_code */
	DBGCNF_SUBPAGE_CODE,		/* subpage */
	{(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
	 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */
	DBGCNF_VERSION,			/* page_version */
	{CTL_TIME_IO_DEFAULT_SECS>>8,
	 CTL_TIME_IO_DEFAULT_SECS>>0},	/* ctl_time_io_secs */
};

static struct copan_debugconf_subpage debugconf_page_changeable = {
	DBGCNF_PAGE_CODE | SMPH_SPF,	/* page_code */
	DBGCNF_SUBPAGE_CODE,		/* subpage */
	{(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
	 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */
	0,				/* page_version */
	{0xff,0xff},			/* ctl_time_io_secs */
};

static struct scsi_format_page format_page_default = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff,
			        CTL_DEFAULT_SECTORS_PER_TRACK & 0xff},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ SFP_HSEC,
	/*reserved*/ {0, 0, 0}
};

static struct scsi_format_page format_page_changeable = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {0, 0},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ 0,
	/*reserved*/ {0, 0, 0}
};

static struct scsi_rigid_disk_page rigid_disk_page_default = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ CTL_DEFAULT_HEADS,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ SRDP_RPL_DISABLED,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff,
			    CTL_DEFAULT_ROTATION_RATE & 0xff},
	/*reserved2*/ {0, 0}
};

static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ 0,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ 0,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {0, 0},
	/*reserved2*/ {0, 0}
};

static struct scsi_caching_page caching_page_default = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_DISC | SCP_WCE,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0xff, 0xff},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0xff, 0xff},
	/*max_pf_ceiling*/ {0xff, 0xff},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

static struct scsi_caching_page caching_page_changeable = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ 0,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0, 0},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0, 0},
	/*max_pf_ceiling*/ {0, 0},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

static struct scsi_control_page control_page_default = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/0,
	/*queue_flags*/0,
	/*eca_and_aen*/0,
	/*reserved*/0,
	/*aen_holdoff_period*/{0, 0}
};

static struct scsi_control_page control_page_changeable = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/SCP_DSENSE,
	/*queue_flags*/0,
	/*eca_and_aen*/0,
	/*reserved*/0,
	/*aen_holdoff_period*/{0, 0}
};
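/*
 * Illustrative sketch only: CTL keeps several copies of each mode page
 * (current, changeable, default, saved) packed behind a struct
 * ctl_page_index at page_len offsets; the templates above seed the
 * default and changeable copies.  The hypothetical helper below shows
 * the addressing, mirroring how ctl_isc_event_handler() later locates
 * the CURRENT copy of the APS subpage.
 */
#if 0
static void *
ctl_page_copy_example(struct ctl_page_index *page_index, int which)
{
	/* which is e.g. CTL_PAGE_CURRENT or CTL_PAGE_CHANGEABLE */
	return (page_index->page_data + (page_index->page_len * which));
}
#endif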
/*
 * XXX KDM move these into the softc.
 */
static int rcv_sync_msg;
static int persis_offset;
static uint8_t ctl_pause_rtr;
static int ctl_is_single = 1;
static int index_to_aps_page;

SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
static int worker_threads = -1;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
    &worker_threads, 1, "Number of worker threads");
static int verbose = 0;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, verbose, CTLFLAG_RWTUN,
    &verbose, 0, "Show SCSI errors returned to initiator");

/*
 * Serial number (0x80), device id (0x83), supported pages (0x00),
 * Block limits (0xB0) and Logical Block Provisioning (0xB2)
 */
#define SCSI_EVPD_NUM_SUPPORTED_PAGES	5
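/*
 * Illustrative sketch only: per SPC, the supported-pages VPD page (0x00)
 * lists page codes in ascending order, so the five pages counted above
 * would be reported as below.  This array is a hypothetical example, not
 * the table that ctl_inquiry_evpd_supported() actually builds.
 */
#if 0
static const uint8_t ctl_vpd_pages_example[SCSI_EVPD_NUM_SUPPORTED_PAGES] = {
	0x00,	/* Supported VPD pages */
	0x80,	/* Unit serial number */
	0x83,	/* Device identification */
	0xB0,	/* Block limits */
	0xB2	/* Logical block provisioning */
};
#endif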
static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event,
				  int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
static int ctl_init(void);
void ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static void ctl_ioctl_online(void *arg);
static void ctl_ioctl_offline(void *arg);
static int ctl_ioctl_targ_enable(void *arg, struct ctl_id targ_id);
static int ctl_ioctl_targ_disable(void *arg, struct ctl_id targ_id);
static int ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id);
static int ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id);
static int ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio);
static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static int ctl_ioctl_submit_wait(union ctl_io *io);
static void ctl_ioctl_datamove(union ctl_io *io);
static void ctl_ioctl_done(union ctl_io *io);
static void ctl_ioctl_hard_startstop_callback(void *arg,
					      struct cfi_metatask *metatask);
static void ctl_ioctl_bbrread_callback(void *arg,
				       struct cfi_metatask *metatask);
static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
			      struct ctl_ooa *ooa_hdr,
			      struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
		     struct thread *td);
uint32_t ctl_get_resindex(struct ctl_nexus *nexus);
uint32_t ctl_port_idx(int port_num);
#ifdef unused
static union ctl_io *ctl_malloc_io(ctl_io_type io_type, uint32_t targ_port,
				   uint32_t targ_target, uint32_t targ_lun,
				   int can_wait);
static void ctl_kfree_io(union ctl_io *io);
#endif /* unused */
static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
			 struct ctl_be_lun *be_lun, struct ctl_id target_id);
static int ctl_free_lun(struct ctl_lun *lun);
static void ctl_create_lun(struct ctl_be_lun *be_lun);
/**
static void ctl_failover_change_pages(struct ctl_softc *softc,
				      struct ctl_scsiio *ctsio, int master);
**/

static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
			   uint64_t res_key, uint64_t sa_res_key,
			   uint8_t type, uint32_t residx,
			   struct ctl_scsiio *ctsio,
			   struct scsi_per_res_out *cdb,
			   struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
				  union ctl_ha_msg *msg);
static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
					 int alloc_len);
static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint32_t *len);
static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(union ctl_io *pending_io,
					 union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
				union ctl_io *starting_io);
static int ctl_check_blocked(struct ctl_lun *lun);
static int ctl_scsiio_lun_check(struct ctl_softc *ctl_softc,
				struct ctl_lun *lun,
				struct ctl_cmd_entry *entry,
				struct ctl_scsiio *ctsio);
//static int ctl_check_rtr(union ctl_io *pending_io, struct ctl_softc *softc);
static void ctl_failover(void);
static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
			       struct ctl_scsiio *ctsio);
static int ctl_scsiio(struct ctl_scsiio *ctsio);

static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
			    ctl_ua_type ua_type);
static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io,
			 ctl_ua_type ua_type);
static int ctl_abort_task(union ctl_io *io);
static void ctl_run_task(union ctl_io *io);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
static void ctl_done_timer_wakeup(void *arg);
#endif /* CTL_IO_DELAY */

static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_dm_write_cb(union ctl_io *io);
static void ctl_datamove_remote_write(union ctl_io *io);
static int ctl_datamove_remote_dm_read_cb(union ctl_io *io);
static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
				    ctl_ha_dt_cb callback);
static void ctl_datamove_remote_read(union ctl_io *io);
static void ctl_datamove_remote(union ctl_io *io);
static int ctl_process_done(union ctl_io *io);
static void ctl_lun_thread(void *arg);
static void ctl_work_thread(void *arg);
static void ctl_enqueue_incoming(union ctl_io *io);
static void ctl_enqueue_rtr(union ctl_io *io);
static void ctl_enqueue_done(union ctl_io *io);
static void ctl_enqueue_isc(union ctl_io *io);

/*
 * Load the serialization table.  This isn't very pretty, but is probably
 * the easiest way to do it.
 */
#include "ctl_ser_table.c"

/*
 * We only need to define open, close and ioctl routines for this driver.
 */
static struct cdevsw ctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	ctl_open,
	.d_close =	ctl_close,
	.d_ioctl =	ctl_ioctl,
	.d_name =	"ctl",
};


MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");

static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t ctl_moduledata = {
	"ctl",
	ctl_module_event_handler,
	NULL
};

DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);

static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
			    union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.original_sc == NULL) {
		printf("%s: original_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.original_sc->scsiio;
	ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctsio->io_hdr.status = msg_info->hdr.status;
	ctsio->scsi_status = msg_info->scsi.scsi_status;
	ctsio->sense_len = msg_info->scsi.sense_len;
	ctsio->sense_residual = msg_info->scsi.sense_residual;
	ctsio->residual = msg_info->scsi.residual;
	memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
	       sizeof(ctsio->sense_data));
	memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
	       &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen));
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

static void
ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
				union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.serializing_sc == NULL) {
		printf("%s: serializing_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.serializing_sc->scsiio;
#if 0
	/*
	 * Attempt to catch the situation where an I/O has
	 * been freed, and we're using it again.
	 */
	if (ctsio->io_hdr.io_type == 0xff) {
		union ctl_io *tmp_io;
		tmp_io = (union ctl_io *)ctsio;
		printf("%s: %p use after free!\n", __func__,
		       ctsio);
		printf("%s: type %d msg %d cdb %x iptl: "
		       "%d:%d:%d:%d tag 0x%04x "
		       "flag %#x status %x\n",
		       __func__,
		       tmp_io->io_hdr.io_type,
		       tmp_io->io_hdr.msg_type,
		       tmp_io->scsiio.cdb[0],
		       tmp_io->io_hdr.nexus.initid.id,
		       tmp_io->io_hdr.nexus.targ_port,
		       tmp_io->io_hdr.nexus.targ_target.id,
		       tmp_io->io_hdr.nexus.targ_lun,
		       (tmp_io->io_hdr.io_type ==
		        CTL_IO_TASK) ?
		       tmp_io->taskio.tag_num :
		       tmp_io->scsiio.tag_num,
		       tmp_io->io_hdr.flags,
		       tmp_io->io_hdr.status);
	}
#endif
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

/*
 * ISC (Inter Shelf Communication) event handler.  Events from the HA
 * subsystem come in here.
 */
static void
ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
{
	struct ctl_softc *ctl_softc;
	union ctl_io *io;
	struct ctl_prio *presio;
	ctl_ha_status isc_status;

	ctl_softc = control_softc;
	io = NULL;


#if 0
	printf("CTL: Isc Msg event %d\n", event);
#endif
	if (event == CTL_HA_EVT_MSG_RECV) {
		union ctl_ha_msg msg_info;

		isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
					     sizeof(msg_info), /*wait*/ 0);
#if 0
		printf("CTL: msg_type %d\n", msg_info.msg_type);
#endif
		if (isc_status != 0) {
			printf("Error receiving message, status = %d\n",
			       isc_status);
			return;
		}

		switch (msg_info.hdr.msg_type) {
		case CTL_MSG_SERIALIZE:
#if 0
			printf("Serialize\n");
#endif
			io = ctl_alloc_io((void *)ctl_softc->othersc_pool);
			if (io == NULL) {
				printf("ctl_isc_event_handler: can't allocate "
				       "ctl_io!\n");
				/* Bad Juju */
				/* Need to set busy and send msg back */
				msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
				msg_info.hdr.status = CTL_SCSI_ERROR;
				msg_info.scsi.scsi_status = SCSI_STATUS_BUSY;
				msg_info.scsi.sense_len = 0;
				if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
				    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
				}
				goto bailout;
			}
			ctl_zero_io(io);
			// populate ctsio from msg_info
			io->io_hdr.io_type = CTL_IO_SCSI;
			io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
			io->io_hdr.original_sc = msg_info.hdr.original_sc;
#if 0
			printf("pOrig %x\n", (int)msg_info.original_sc);
#endif
			io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
					    CTL_FLAG_IO_ACTIVE;
			/*
			 * If we're in serialization-only mode, we don't
			 * want to go through full done processing.  Thus
			 * the COPY flag.
			 *
			 * XXX KDM add another flag that is more specific.
			 */
			if (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)
				io->io_hdr.flags |= CTL_FLAG_INT_COPY;
			io->io_hdr.nexus = msg_info.hdr.nexus;
#if 0
			printf("targ %d, port %d, iid %d, lun %d\n",
			       io->io_hdr.nexus.targ_target.id,
			       io->io_hdr.nexus.targ_port,
			       io->io_hdr.nexus.initid.id,
			       io->io_hdr.nexus.targ_lun);
#endif
			io->scsiio.tag_num = msg_info.scsi.tag_num;
			io->scsiio.tag_type = msg_info.scsi.tag_type;
			memcpy(io->scsiio.cdb, msg_info.scsi.cdb,
			       CTL_MAX_CDBLEN);
			if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
				struct ctl_cmd_entry *entry;
				uint8_t opcode;

				opcode = io->scsiio.cdb[0];
				entry = &ctl_cmd_table[opcode];
				io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
				io->io_hdr.flags |=
					entry->flags & CTL_FLAG_DATA_MASK;
			}
			ctl_enqueue_isc(io);
			break;

		/* Performed on the Originating SC, XFER mode only */
		case CTL_MSG_DATAMOVE: {
			struct ctl_sg_entry *sgl;
			int i, j;

			io = msg_info.hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n", __func__);
				/* XXX KDM do something here */
				break;
			}
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			/*
			 * Keep track of this, we need to send it back over
			 * when the datamove is complete.
			 */
			io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;

			if (msg_info.dt.sg_sequence == 0) {
				/*
				 * XXX KDM we use the preallocated S/G list
				 * here, but we'll need to change this to
				 * dynamic allocation if we need larger S/G
				 * lists.
				 */
				if (msg_info.dt.kern_sg_entries >
				    sizeof(io->io_hdr.remote_sglist) /
				    sizeof(io->io_hdr.remote_sglist[0])) {
					printf("%s: number of S/G entries "
					    "needed %u > allocated num %zd\n",
					    __func__,
					    msg_info.dt.kern_sg_entries,
					    sizeof(io->io_hdr.remote_sglist)/
					    sizeof(io->io_hdr.remote_sglist[0]));

					/*
					 * XXX KDM send a message back to
					 * the other side to shut down the
					 * DMA.  The error will come back
					 * through via the normal channel.
					 */
					break;
				}
				sgl = io->io_hdr.remote_sglist;
				memset(sgl, 0,
				       sizeof(io->io_hdr.remote_sglist));

				io->scsiio.kern_data_ptr = (uint8_t *)sgl;

				io->scsiio.kern_sg_entries =
					msg_info.dt.kern_sg_entries;
				io->scsiio.rem_sg_entries =
					msg_info.dt.kern_sg_entries;
				io->scsiio.kern_data_len =
					msg_info.dt.kern_data_len;
				io->scsiio.kern_total_len =
					msg_info.dt.kern_total_len;
				io->scsiio.kern_data_resid =
					msg_info.dt.kern_data_resid;
				io->scsiio.kern_rel_offset =
					msg_info.dt.kern_rel_offset;
				/*
				 * Clear out per-DMA flags.
				 */
				io->io_hdr.flags &= ~CTL_FLAG_RDMA_MASK;
				/*
				 * Add per-DMA flags that are set for this
				 * particular DMA request.
				 */
				io->io_hdr.flags |= msg_info.dt.flags &
						    CTL_FLAG_RDMA_MASK;
			} else
				sgl = (struct ctl_sg_entry *)
					io->scsiio.kern_data_ptr;

			for (i = msg_info.dt.sent_sg_entries, j = 0;
			     i < (msg_info.dt.sent_sg_entries +
			     msg_info.dt.cur_sg_entries); i++, j++) {
				sgl[i].addr = msg_info.dt.sg_list[j].addr;
				sgl[i].len = msg_info.dt.sg_list[j].len;

#if 0
				printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n",
				       __func__,
				       msg_info.dt.sg_list[j].addr,
				       msg_info.dt.sg_list[j].len,
				       sgl[i].addr, sgl[i].len, j, i);
#endif
			}
#if 0
			memcpy(&sgl[msg_info.dt.sent_sg_entries],
			       msg_info.dt.sg_list,
			       sizeof(*sgl) * msg_info.dt.cur_sg_entries);
#endif

			/*
			 * If this is the last piece of the I/O, we've got
			 * the full S/G list.  Queue processing in the thread.
			 * Otherwise wait for the next piece.
			 */
			if (msg_info.dt.sg_last != 0)
				ctl_enqueue_isc(io);
			break;
		}
		/* Performed on the Serializing (primary) SC, XFER mode only */
		case CTL_MSG_DATAMOVE_DONE: {
			if (msg_info.hdr.serializing_sc == NULL) {
				printf("%s: serializing_sc == NULL!\n",
				       __func__);
				/* XXX KDM now what? */
				break;
			}
			/*
			 * We grab the sense information here in case
			 * there was a failure, so we can return status
			 * back to the initiator.
			 */
			io = msg_info.hdr.serializing_sc;
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
			io->io_hdr.status = msg_info.hdr.status;
			io->scsiio.scsi_status = msg_info.scsi.scsi_status;
			io->scsiio.sense_len = msg_info.scsi.sense_len;
			io->scsiio.sense_residual = msg_info.scsi.sense_residual;
			io->io_hdr.port_status = msg_info.scsi.fetd_status;
			io->scsiio.residual = msg_info.scsi.residual;
			memcpy(&io->scsiio.sense_data, &msg_info.scsi.sense_data,
			       sizeof(io->scsiio.sense_data));
			ctl_enqueue_isc(io);
			break;
		}

		/* Performed on the Originating SC, SER_ONLY mode */
		case CTL_MSG_R2R:
			io = msg_info.hdr.original_sc;
			if (io == NULL) {
				printf("%s: Major Bummer\n", __func__);
				return;
			} else {
#if 0
				printf("pOrig %x\n",(int) ctsio);
#endif
			}
			io->io_hdr.msg_type = CTL_MSG_R2R;
			io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;
			ctl_enqueue_isc(io);
			break;

		/*
		 * Performed on the Serializing (i.e. primary) SC in SER_ONLY
		 * mode.
		 * Performed on the Originating (i.e. secondary) SC in XFER
		 * mode
		 */
		case CTL_MSG_FINISH_IO:
			if (ctl_softc->ha_mode == CTL_HA_MODE_XFER)
				ctl_isc_handler_finish_xfer(ctl_softc,
							    &msg_info);
			else
				ctl_isc_handler_finish_ser_only(ctl_softc,
								&msg_info);
			break;

		/* Performed on the Originating SC */
		case CTL_MSG_BAD_JUJU:
			io = msg_info.hdr.original_sc;
			if (io == NULL) {
				printf("%s: Bad JUJU!, original_sc is NULL!\n",
				       __func__);
				break;
			}
			ctl_copy_sense_data(&msg_info, io);
			/*
			 * IO should have already been cleaned up on other
			 * SC so clear this flag so we won't send a message
			 * back to finish the IO there.
			 */
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;

			/* io = msg_info.hdr.serializing_sc; */
			io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
			ctl_enqueue_isc(io);
			break;

		/* Handle resets sent from the other side */
		case CTL_MSG_MANAGE_TASKS: {
			struct ctl_taskio *taskio;
			taskio = (struct ctl_taskio *)ctl_alloc_io(
				(void *)ctl_softc->othersc_pool);
			if (taskio == NULL) {
				printf("ctl_isc_event_handler: can't allocate "
				       "ctl_io!\n");
				/* Bad Juju */
				/* should I just call the proper reset func
				   here???
				 */
				goto bailout;
			}
			ctl_zero_io((union ctl_io *)taskio);
			taskio->io_hdr.io_type = CTL_IO_TASK;
			taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
			taskio->io_hdr.nexus = msg_info.hdr.nexus;
			taskio->task_action = msg_info.task.task_action;
			taskio->tag_num = msg_info.task.tag_num;
			taskio->tag_type = msg_info.task.tag_type;
#ifdef CTL_TIME_IO
			taskio->io_hdr.start_time = time_uptime;
			getbintime(&taskio->io_hdr.start_bt);
#if 0
			cs_prof_gettime(&taskio->io_hdr.start_ticks);
#endif
#endif /* CTL_TIME_IO */
			ctl_run_task((union ctl_io *)taskio);
			break;
		}
		/* Persistent Reserve action which needs attention */
		case CTL_MSG_PERS_ACTION:
			presio = (struct ctl_prio *)ctl_alloc_io(
				(void *)ctl_softc->othersc_pool);
			if (presio == NULL) {
				printf("ctl_isc_event_handler: can't allocate "
				       "ctl_io!\n");
				/* Bad Juju */
				/* Need to set busy and send msg back */
				goto bailout;
			}
			ctl_zero_io((union ctl_io *)presio);
			presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
			presio->pr_msg = msg_info.pr;
			ctl_enqueue_isc((union ctl_io *)presio);
			break;
		case CTL_MSG_SYNC_FE:
			rcv_sync_msg = 1;
			break;
		case CTL_MSG_APS_LOCK: {
			// It's quicker to execute this than to
			// queue it.
			struct ctl_lun *lun;
			struct ctl_page_index *page_index;
			struct copan_aps_subpage *current_sp;
			uint32_t targ_lun;

			targ_lun = msg_info.hdr.nexus.targ_mapped_lun;
			lun = ctl_softc->ctl_luns[targ_lun];
			mtx_lock(&lun->lun_lock);
			page_index = &lun->mode_pages.index[index_to_aps_page];
			current_sp = (struct copan_aps_subpage *)
				(page_index->page_data +
				(page_index->page_len * CTL_PAGE_CURRENT));

			current_sp->lock_active = msg_info.aps.lock_flag;
			mtx_unlock(&lun->lun_lock);
			break;
		}
		default:
			printf("How did I get here?\n");
		}
	} else if (event == CTL_HA_EVT_MSG_SENT) {
		if (param != CTL_HA_STATUS_SUCCESS) {
			printf("Bad status from ctl_ha_msg_send status %d\n",
			       param);
		}
		return;
	} else if (event == CTL_HA_EVT_DISCONNECT) {
		printf("CTL: Got a disconnect from Isc\n");
		return;
	} else {
		printf("ctl_isc_event_handler: Unknown event %d\n", event);
		return;
	}

bailout:
	return;
}

static void
ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
{
	struct scsi_sense_data *sense;

	sense = &dest->scsiio.sense_data;
	bcopy(&src->scsi.sense_data, sense, sizeof(*sense));
	dest->scsiio.scsi_status = src->scsi.scsi_status;
	dest->scsiio.sense_len = src->scsi.sense_len;
	dest->io_hdr.status = src->hdr.status;
}

static int
ctl_init(void)
{
	struct ctl_softc *softc;
	struct ctl_io_pool *internal_pool, *emergency_pool, *other_pool;
	struct ctl_frontend *fe;
	uint8_t sc_id = 0;
	int i, error, retval;
	//int isc_retval;

	retval = 0;
	ctl_pause_rtr = 0;
	rcv_sync_msg = 0;

	control_softc = malloc(sizeof(*control_softc), M_DEVBUF,
			       M_WAITOK | M_ZERO);
	softc = control_softc;

	softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600,
			      "cam/ctl");

	softc->dev->si_drv1 = softc;

	/*
	 * By default, return a "bad LUN" peripheral qualifier for unknown
	 * LUNs.  The user can override this default using the tunable or
	 * sysctl.  See the comment in ctl_inquiry_std() for more details.
	 */
	softc->inquiry_pq_no_lun = 1;
	TUNABLE_INT_FETCH("kern.cam.ctl.inquiry_pq_no_lun",
			  &softc->inquiry_pq_no_lun);
	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl",
		CTLFLAG_RD, 0, "CAM Target Layer");

	if (softc->sysctl_tree == NULL) {
		printf("%s: unable to allocate sysctl tree\n", __func__);
		destroy_dev(softc->dev);
		free(control_softc, M_DEVBUF);
		control_softc = NULL;
		return (ENOMEM);
	}

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		       "inquiry_pq_no_lun", CTLFLAG_RW,
		       &softc->inquiry_pq_no_lun, 0,
		       "Report no lun possible for invalid LUNs");

	mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
	mtx_init(&softc->pool_lock, "CTL pool mutex", NULL, MTX_DEF);
	softc->open_count = 0;

	/*
	 * Default to actually sending a SYNCHRONIZE CACHE command down to
	 * the drive.
	 */
	softc->flags = CTL_FLAG_REAL_SYNC;

	/*
	 * In Copan's HA scheme, the "master" and "slave" roles are
	 * figured out through the slot the controller is in.  Although it
	 * is an active/active system, someone has to be in charge.
	 */
#ifdef NEEDTOPORT
	scmicro_rw(SCMICRO_GET_SHELF_ID, &sc_id);
#endif

	if (sc_id == 0) {
		softc->flags |= CTL_FLAG_MASTER_SHELF;
		persis_offset = 0;
	} else
		persis_offset = CTL_MAX_INITIATORS;

	/*
	 * XXX KDM need to figure out where we want to get our target ID
	 * and WWID.  Is it different on each port?
	 */
	softc->target.id = 0;
	softc->target.wwid[0] = 0x12345678;
	softc->target.wwid[1] = 0x87654321;
	STAILQ_INIT(&softc->lun_list);
	STAILQ_INIT(&softc->pending_lun_queue);
	STAILQ_INIT(&softc->fe_list);
	STAILQ_INIT(&softc->be_list);
	STAILQ_INIT(&softc->io_pools);

	if (ctl_pool_create(softc, CTL_POOL_INTERNAL, CTL_POOL_ENTRIES_INTERNAL,
			    &internal_pool) != 0) {
		printf("ctl: can't allocate %d entry internal pool, "
		       "exiting\n", CTL_POOL_ENTRIES_INTERNAL);
		return (ENOMEM);
	}

	if (ctl_pool_create(softc, CTL_POOL_EMERGENCY,
			    CTL_POOL_ENTRIES_EMERGENCY, &emergency_pool) != 0) {
		printf("ctl: can't allocate %d entry emergency pool, "
		       "exiting\n", CTL_POOL_ENTRIES_EMERGENCY);
		ctl_pool_free(internal_pool);
		return (ENOMEM);
	}

	if (ctl_pool_create(softc, CTL_POOL_4OTHERSC, CTL_POOL_ENTRIES_OTHER_SC,
			    &other_pool) != 0) {
		printf("ctl: can't allocate %d entry other SC pool, "
		       "exiting\n", CTL_POOL_ENTRIES_OTHER_SC);
		ctl_pool_free(internal_pool);
		ctl_pool_free(emergency_pool);
		return (ENOMEM);
	}

	softc->internal_pool = internal_pool;
	softc->emergency_pool = emergency_pool;
	softc->othersc_pool = other_pool;

	if (worker_threads <= 0)
		worker_threads = max(1, mp_ncpus / 4);
	if (worker_threads > CTL_MAX_THREADS)
		worker_threads = CTL_MAX_THREADS;

	for (i = 0; i < worker_threads; i++) {
		struct ctl_thread *thr = &softc->threads[i];

		mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF);
		thr->ctl_softc = softc;
		STAILQ_INIT(&thr->incoming_queue);
		STAILQ_INIT(&thr->rtr_queue);
		STAILQ_INIT(&thr->done_queue);
		STAILQ_INIT(&thr->isc_queue);

		error = kproc_kthread_add(ctl_work_thread, thr,
		    &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i);
		if (error != 0) {
			printf("error creating CTL work thread!\n");
			ctl_pool_free(internal_pool);
			ctl_pool_free(emergency_pool);
			ctl_pool_free(other_pool);
			return (error);
		}
	}
	error = kproc_kthread_add(ctl_lun_thread, softc,
	    &softc->ctl_proc, NULL, 0, 0, "ctl", "lun");
	if (error != 0) {
		printf("error creating CTL lun thread!\n");
		ctl_pool_free(internal_pool);
		ctl_pool_free(emergency_pool);
		ctl_pool_free(other_pool);
		return (error);
	}
	if (bootverbose)
		printf("ctl: CAM Target Layer loaded\n");

	/*
	 * Initialize the initiator and portname mappings
	 */
	memset(softc->wwpn_iid, 0, sizeof(softc->wwpn_iid));

	/*
	 * Initialize the ioctl front end.
	 */
	fe = &softc->ioctl_info.fe;
	sprintf(softc->ioctl_info.port_name, "CTL ioctl");
	fe->port_type = CTL_PORT_IOCTL;
	fe->num_requested_ctl_io = 100;
	fe->port_name = softc->ioctl_info.port_name;
	fe->port_online = ctl_ioctl_online;
	fe->port_offline = ctl_ioctl_offline;
	fe->onoff_arg = &softc->ioctl_info;
	fe->targ_enable = ctl_ioctl_targ_enable;
	fe->targ_disable = ctl_ioctl_targ_disable;
	fe->lun_enable = ctl_ioctl_lun_enable;
	fe->lun_disable = ctl_ioctl_lun_disable;
	fe->targ_lun_arg = &softc->ioctl_info;
	fe->fe_datamove = ctl_ioctl_datamove;
	fe->fe_done = ctl_ioctl_done;
	fe->max_targets = 15;
	fe->max_target_id = 15;

	if (ctl_frontend_register(&softc->ioctl_info.fe,
	    (softc->flags & CTL_FLAG_MASTER_SHELF)) != 0) {
		printf("ctl: ioctl front end registration failed, will "
		       "continue anyway\n");
	}

#ifdef CTL_IO_DELAY
	if (sizeof(struct callout) > CTL_TIMER_BYTES) {
		printf("sizeof(struct callout) %zd > CTL_TIMER_BYTES %zd\n",
		       sizeof(struct callout), CTL_TIMER_BYTES);
		return (EINVAL);
	}
#endif /* CTL_IO_DELAY */

	return (0);
}
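/*
 * Illustrative sketch only: a minimal frontend registration, mirroring
 * the ioctl frontend setup in ctl_init() above.  Everything prefixed
 * my_ is hypothetical; only the struct ctl_frontend fields and the
 * ctl_frontend_register() call shown above are assumed from this file.
 */
#if 0
static int
my_fe_register(struct ctl_frontend *fe)
{
	fe->port_type = CTL_PORT_IOCTL;		/* or another ctl_port_type */
	fe->num_requested_ctl_io = 100;
	fe->port_name = "my_fe";
	fe->port_online = my_fe_online;		/* hypothetical callbacks */
	fe->port_offline = my_fe_offline;
	fe->fe_datamove = my_fe_datamove;
	fe->fe_done = my_fe_done;
	fe->max_targets = 15;
	fe->max_target_id = 15;

	return (ctl_frontend_register(fe, /*master_SC*/ 1));
}
#endif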
void
ctl_shutdown(void)
{
	struct ctl_softc *softc;
	struct ctl_lun *lun, *next_lun;
	struct ctl_io_pool *pool;

	softc = (struct ctl_softc *)control_softc;

	if (ctl_frontend_deregister(&softc->ioctl_info.fe) != 0)
		printf("ctl: ioctl front end deregistration failed\n");

	mtx_lock(&softc->ctl_lock);

	/*
	 * Free up each LUN.
	 */
	for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun) {
		next_lun = STAILQ_NEXT(lun, links);
		ctl_free_lun(lun);
	}

	mtx_unlock(&softc->ctl_lock);

	/*
	 * This will rip the rug out from under any FETDs or anyone else
	 * that has a pool allocated.  Since we increment our module
	 * refcount any time someone outside the main CTL module allocates
	 * a pool, we shouldn't have any problems here.  The user won't be
	 * able to unload the CTL module until client modules have
	 * successfully unloaded.
	 */
	while ((pool = STAILQ_FIRST(&softc->io_pools)) != NULL)
		ctl_pool_free(pool);

#if 0
	ctl_shutdown_thread(softc->work_thread);
	mtx_destroy(&softc->queue_lock);
#endif

	mtx_destroy(&softc->pool_lock);
	mtx_destroy(&softc->ctl_lock);

	destroy_dev(softc->dev);

	sysctl_ctx_free(&softc->sysctl_ctx);

	free(control_softc, M_DEVBUF);
	control_softc = NULL;

	if (bootverbose)
		printf("ctl: CAM Target Layer unloaded\n");
}

static int
ctl_module_event_handler(module_t mod, int what, void *arg)
{

	switch (what) {
	case MOD_LOAD:
		return (ctl_init());
	case MOD_UNLOAD:
		return (EBUSY);
	default:
		return (EOPNOTSUPP);
	}
}

/*
 * XXX KDM should we do some access checks here?  Bump a reference count to
 * prevent a CTL module from being unloaded while someone has it open?
 */
static int
ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}

static int
ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}

int
ctl_port_enable(ctl_port_type port_type)
{
	struct ctl_softc *softc;
	struct ctl_frontend *fe;

	if (ctl_is_single == 0) {
		union ctl_ha_msg msg_info;
		int isc_retval;

#if 0
		printf("%s: HA mode, synchronizing frontend enable\n",
		       __func__);
#endif
		msg_info.hdr.msg_type = CTL_MSG_SYNC_FE;
		if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 1)) > CTL_HA_STATUS_SUCCESS) {
			printf("Sync msg send error retval %d\n", isc_retval);
		}
		if (!rcv_sync_msg) {
			isc_retval = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
			    sizeof(msg_info), 1);
		}
#if 0
		printf("CTL:Frontend Enable\n");
	} else {
		printf("%s: single mode, skipping frontend synchronization\n",
		       __func__);
#endif
	}

	softc = control_softc;

	STAILQ_FOREACH(fe, &softc->fe_list, links) {
		if (port_type & fe->port_type)
		{
#if 0
			printf("port %d\n", fe->targ_port);
#endif
			ctl_frontend_online(fe);
		}
	}

	return (0);
}

int
ctl_port_disable(ctl_port_type port_type)
{
	struct ctl_softc *softc;
	struct ctl_frontend *fe;

	softc = control_softc;

	STAILQ_FOREACH(fe, &softc->fe_list, links) {
		if (port_type & fe->port_type)
			ctl_frontend_offline(fe);
	}

	return (0);
}

/*
 * Returns 0 for success, 1 for failure.
 * Currently the only failure mode is if there aren't enough entries
 * allocated.  So, in case of a failure, look at num_entries_dropped,
 * reallocate and try again.
 */
int
ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced,
	      int *num_entries_filled, int *num_entries_dropped,
	      ctl_port_type port_type, int no_virtual)
{
	struct ctl_softc *softc;
	struct ctl_frontend *fe;
	int entries_dropped, entries_filled;
	int retval;
	int i;

	softc = control_softc;

	retval = 0;
	entries_filled = 0;
	entries_dropped = 0;

	i = 0;
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(fe, &softc->fe_list, links) {
		struct ctl_port_entry *entry;

		if ((fe->port_type & port_type) == 0)
			continue;

		if ((no_virtual != 0)
		 && (fe->virtual_port != 0))
			continue;

		if (entries_filled >= num_entries_alloced) {
			entries_dropped++;
			continue;
		}
		entry = &entries[i];

		entry->port_type = fe->port_type;
		strlcpy(entry->port_name, fe->port_name,
			sizeof(entry->port_name));
		entry->physical_port = fe->physical_port;
		entry->virtual_port = fe->virtual_port;
		entry->wwnn = fe->wwnn;
		entry->wwpn = fe->wwpn;

		i++;
		entries_filled++;
	}

	mtx_unlock(&softc->ctl_lock);

	if (entries_dropped > 0)
		retval = 1;

	*num_entries_dropped = entries_dropped;
	*num_entries_filled = entries_filled;

	return (retval);
}
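/*
 * Illustrative sketch only: the allocate/retry protocol described above,
 * from a hypothetical in-kernel caller.  The initial size, the use of
 * CTL_PORT_ALL and the growth policy are illustrative assumptions; on
 * failure, num_entries_dropped says how much more room the next attempt
 * needs.
 */
#if 0
static void
ctl_port_list_example(void)
{
	struct ctl_port_entry *entries;
	int alloced, filled, dropped;

	alloced = 8;
	for (;;) {
		entries = malloc(alloced * sizeof(*entries), M_CTL, M_WAITOK);
		if (ctl_port_list(entries, alloced, &filled, &dropped,
		    CTL_PORT_ALL, /*no_virtual*/ 0) == 0)
			break;
		/* Not enough room; grow by the number dropped and retry. */
		alloced += dropped;
		free(entries, M_CTL);
	}
	/* ... use entries[0..filled-1] ... */
	free(entries, M_CTL);
}
#endif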
static void
ctl_ioctl_online(void *arg)
{
	struct ctl_ioctl_info *ioctl_info;

	ioctl_info = (struct ctl_ioctl_info *)arg;

	ioctl_info->flags |= CTL_IOCTL_FLAG_ENABLED;
}

static void
ctl_ioctl_offline(void *arg)
{
	struct ctl_ioctl_info *ioctl_info;

	ioctl_info = (struct ctl_ioctl_info *)arg;

	ioctl_info->flags &= ~CTL_IOCTL_FLAG_ENABLED;
}

/*
 * Remove an initiator by port number and initiator ID.
 * Returns 0 for success, 1 for failure.
 */
int
ctl_remove_initiator(int32_t targ_port, uint32_t iid)
{
	struct ctl_softc *softc;

	softc = control_softc;

	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);

	if ((targ_port < 0)
	 || (targ_port > CTL_MAX_PORTS)) {
		printf("%s: invalid port number %d\n", __func__, targ_port);
		return (1);
	}
	if (iid > CTL_MAX_INIT_PER_PORT) {
		printf("%s: initiator ID %u > maximum %u!\n",
		       __func__, iid, CTL_MAX_INIT_PER_PORT);
		return (1);
	}

	mtx_lock(&softc->ctl_lock);

	softc->wwpn_iid[targ_port][iid].in_use = 0;

	mtx_unlock(&softc->ctl_lock);

	return (0);
}

/*
 * Add an initiator to the initiator map.
 * Returns 0 for success, 1 for failure.
 */
int
ctl_add_initiator(uint64_t wwpn, int32_t targ_port, uint32_t iid)
{
	struct ctl_softc *softc;
	int retval;

	softc = control_softc;

	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);

	retval = 0;

	if ((targ_port < 0)
	 || (targ_port > CTL_MAX_PORTS)) {
		printf("%s: invalid port number %d\n", __func__, targ_port);
		return (1);
	}
	if (iid > CTL_MAX_INIT_PER_PORT) {
		printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n",
		       __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT);
		return (1);
	}

	mtx_lock(&softc->ctl_lock);

	if (softc->wwpn_iid[targ_port][iid].in_use != 0) {
		/*
		 * We don't treat this as an error.
		 */
		if (softc->wwpn_iid[targ_port][iid].wwpn == wwpn) {
			printf("%s: port %d iid %u WWPN %#jx arrived again?\n",
			       __func__, targ_port, iid, (uintmax_t)wwpn);
			goto bailout;
		}

		/*
		 * This is an error, but what do we do about it?  The
		 * driver is telling us we have a new WWPN for this
		 * initiator ID, so we pretty much need to use it.
		 */
		printf("%s: port %d iid %u WWPN %#jx arrived, WWPN %#jx is "
		       "still at that address\n", __func__, targ_port, iid,
		       (uintmax_t)wwpn,
		       (uintmax_t)softc->wwpn_iid[targ_port][iid].wwpn);

		/*
		 * XXX KDM clear have_ca and ua_pending on each LUN for
		 * this initiator.
		 */
	}
	softc->wwpn_iid[targ_port][iid].in_use = 1;
	softc->wwpn_iid[targ_port][iid].iid = iid;
	softc->wwpn_iid[targ_port][iid].wwpn = wwpn;
	softc->wwpn_iid[targ_port][iid].port = targ_port;

bailout:

	mtx_unlock(&softc->ctl_lock);

	return (retval);
}

/*
 * XXX KDM should we pretend to do something in the target/lun
 * enable/disable functions?
 */
static int
ctl_ioctl_targ_enable(void *arg, struct ctl_id targ_id)
{
	return (0);
}

static int
ctl_ioctl_targ_disable(void *arg, struct ctl_id targ_id)
{
	return (0);
}

static int
ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id)
{
	return (0);
}

static int
ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id)
{
	return (0);
}

/*
 * Data movement routine for the CTL ioctl frontend port.
 */
static int
ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
{
	struct ctl_sg_entry *ext_sglist, *kern_sglist;
	struct ctl_sg_entry ext_entry, kern_entry;
	int ext_sglen, ext_sg_entries, kern_sg_entries;
	int ext_sg_start, ext_offset;
	int len_to_copy, len_copied;
	int kern_watermark, ext_watermark;
	int ext_sglist_malloced;
	int i, j;

	ext_sglist_malloced = 0;
	ext_sg_start = 0;
	ext_offset = 0;

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n"));

	/*
	 * If this flag is set, fake the data transfer.
	 */
	if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) {
		ctsio->ext_data_filled = ctsio->ext_data_len;
		goto bailout;
	}

	/*
	 * To simplify things here, if we have a single buffer, stick it in
	 * a S/G entry and just make it a single entry S/G list.
	 */
	if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) {
		int len_seen;

		ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);

		ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL,
							   M_WAITOK);
		ext_sglist_malloced = 1;
		if (copyin(ctsio->ext_data_ptr, ext_sglist,
			   ext_sglen) != 0) {
			ctl_set_internal_failure(ctsio,
						 /*sks_valid*/ 0,
						 /*retry_count*/ 0);
			goto bailout;
		}
		ext_sg_entries = ctsio->ext_sg_entries;
		len_seen = 0;
		for (i = 0; i < ext_sg_entries; i++) {
			if ((len_seen + ext_sglist[i].len) >=
			     ctsio->ext_data_filled) {
				ext_sg_start = i;
				ext_offset = ctsio->ext_data_filled - len_seen;
				break;
			}
			len_seen += ext_sglist[i].len;
		}
	} else {
		ext_sglist = &ext_entry;
		ext_sglist->addr = ctsio->ext_data_ptr;
		ext_sglist->len = ctsio->ext_data_len;
		ext_sg_entries = 1;
		ext_sg_start = 0;
		ext_offset = ctsio->ext_data_filled;
	}

	if (ctsio->kern_sg_entries > 0) {
		kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
		kern_sg_entries = ctsio->kern_sg_entries;
	} else {
		kern_sglist = &kern_entry;
		kern_sglist->addr = ctsio->kern_data_ptr;
		kern_sglist->len = ctsio->kern_data_len;
		kern_sg_entries = 1;
	}


	kern_watermark = 0;
	ext_watermark = ext_offset;
	len_copied = 0;
	for (i = ext_sg_start, j = 0;
	     i < ext_sg_entries && j < kern_sg_entries;) {
		uint8_t *ext_ptr, *kern_ptr;

		len_to_copy = ctl_min(ext_sglist[i].len - ext_watermark,
				      kern_sglist[j].len - kern_watermark);

		ext_ptr = (uint8_t *)ext_sglist[i].addr;
		ext_ptr = ext_ptr + ext_watermark;
		if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
			/*
			 * XXX KDM fix this!
			 */
			panic("need to implement bus address support");
#if 0
			kern_ptr = bus_to_virt(kern_sglist[j].addr);
#endif
		} else
			kern_ptr = (uint8_t *)kern_sglist[j].addr;
		kern_ptr = kern_ptr + kern_watermark;

		kern_watermark += len_to_copy;
		ext_watermark += len_to_copy;

		if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		     CTL_FLAG_DATA_IN) {
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
					 "bytes to user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
					 "to %p\n", kern_ptr, ext_ptr));
			if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) {
				ctl_set_internal_failure(ctsio,
							 /*sks_valid*/ 0,
							 /*retry_count*/ 0);
				goto bailout;
			}
		} else {
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
					 "bytes from user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
					 "to %p\n", ext_ptr, kern_ptr));
			if (copyin(ext_ptr, kern_ptr, len_to_copy) != 0) {
				ctl_set_internal_failure(ctsio,
							 /*sks_valid*/ 0,
							 /*retry_count*/ 0);
				goto bailout;
			}
		}

		len_copied += len_to_copy;

		if (ext_sglist[i].len == ext_watermark) {
			i++;
			ext_watermark = 0;
		}

		if (kern_sglist[j].len == kern_watermark) {
			j++;
			kern_watermark = 0;
		}
	}

	ctsio->ext_data_filled += len_copied;

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, "
			 "kern_sg_entries: %d\n", ext_sg_entries,
			 kern_sg_entries));
	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, "
			 "kern_data_len = %d\n", ctsio->ext_data_len,
			 ctsio->kern_data_len));


	/* XXX KDM set residual?? */
bailout:

	if (ext_sglist_malloced != 0)
		free(ext_sglist, M_CTL);

	return (CTL_RETVAL_COMPLETE);
}
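/*
 * Worked example (illustrative only) of the watermark walk above: with
 * external segments of {10, 20} bytes and kernel segments of {15, 15}
 * bytes, successive passes copy min(ext remaining, kern remaining) =
 * 10, 5 and 15 bytes, advancing whichever list's segment just filled;
 * all 30 bytes move even though the segment boundaries never line up.
 */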
/*
 * Serialize a command that went down the "wrong" side, and so was sent to
 * this controller for execution.  The logic is a little different than the
 * standard case in ctl_scsiio_precheck().  Errors in this case need to get
 * sent back to the other side, but in the success case, we execute the
 * command on this side (XFER mode) or tell the other side to execute it
 * (SER_ONLY mode).
 */
static int
ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *ctl_softc;
	union ctl_ha_msg msg_info;
	struct ctl_lun *lun;
	int retval = 0;
	uint32_t targ_lun;

	ctl_softc = control_softc;

	targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
	lun = ctl_softc->ctl_luns[targ_lun];
	if (lun == NULL) {
		/*
		 * Why isn't LUN defined? The other side wouldn't
		 * send a cmd if the LUN is undefined.
		 */
		printf("%s: Bad JUJU!, LUN is NULL!\n", __func__);

		/* "Logical unit not supported" */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
				   /*asc*/ 0x25,
				   /*ascq*/ 0x00,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
		}
		return (1);

	}

	mtx_lock(&lun->lun_lock);
	TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);

	switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
		(union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq,
		 ooa_links))) {
	case CTL_ACTION_BLOCK:
		ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
		TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
				  blocked_links);
		break;
	case CTL_ACTION_PASS:
	case CTL_ACTION_SKIP:
		if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
			ctl_enqueue_rtr((union ctl_io *)ctsio);
		} else {

			/* send msg back to other side */
			msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
			msg_info.hdr.serializing_sc = (union ctl_io *)ctsio;
			msg_info.hdr.msg_type = CTL_MSG_R2R;
#if 0
			printf("2. pOrig %x\n", (int)msg_info.hdr.original_sc);
#endif
			if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
			    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
			}
		}
		break;
	case CTL_ACTION_OVERLAP:
		/* OVERLAPPED COMMANDS ATTEMPTED */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
				   /*asc*/ 0x4E,
				   /*ascq*/ 0x00,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
#if 0
		printf("BAD JUJU:Major Bummer Overlap\n");
#endif
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		retval = 1;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
		}
		break;
	case CTL_ACTION_OVERLAP_TAG:
		/* TAGGED OVERLAPPED COMMANDS (NN = QUEUE TAG) */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
				   /*asc*/ 0x4D,
				   /*ascq*/ ctsio->tag_num & 0xff,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
#if 0
		printf("BAD JUJU:Major Bummer Overlap Tag\n");
#endif
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		retval = 1;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
		}
		break;
	case CTL_ACTION_ERROR:
	default:
		/* "Internal target failure" */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
				   /*asc*/ 0x44,
				   /*ascq*/ 0x00,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
#if 0
		printf("BAD JUJU:Major Bummer HW Error\n");
#endif
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		retval = 1;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
		}
		break;
	}
	mtx_unlock(&lun->lun_lock);
	return (retval);
}

static int
ctl_ioctl_submit_wait(union ctl_io *io)
{
	struct ctl_fe_ioctl_params params;
	ctl_fe_ioctl_state last_state;
	int done, retval;

	retval = 0;

	bzero(&params, sizeof(params));

	mtx_init(&params.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF);
	cv_init(&params.sem, "ctlioccv");
	params.state = CTL_IOCTL_INPROG;
	last_state = params.state;

	io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = &params;

	CTL_DEBUG_PRINT(("ctl_ioctl_submit_wait\n"));

	/* This shouldn't happen */
	if ((retval = ctl_queue(io)) != CTL_RETVAL_COMPLETE)
		return (retval);

	done = 0;

	do {
		mtx_lock(&params.ioctl_mtx);
		/*
		 * Check the state here, and don't sleep if the state has
		 * already changed (i.e. wakeup has already occurred, but we
		 * weren't waiting yet).
		 */
		if (params.state == last_state) {
			/* XXX KDM cv_wait_sig instead? */
			cv_wait(&params.sem, &params.ioctl_mtx);
		}
		last_state = params.state;

		switch (params.state) {
		case CTL_IOCTL_INPROG:
			/* Why did we wake up? */
			/* XXX KDM error here? */
			mtx_unlock(&params.ioctl_mtx);
			break;
		case CTL_IOCTL_DATAMOVE:
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n"));

			/*
			 * change last_state back to INPROG to avoid
			 * deadlock on subsequent data moves.
			 */
			params.state = last_state = CTL_IOCTL_INPROG;

			mtx_unlock(&params.ioctl_mtx);
			ctl_ioctl_do_datamove(&io->scsiio);
			/*
			 * Note that in some cases, most notably writes,
			 * this will queue the I/O and call us back later.
			 * In other cases, generally reads, this routine
			 * will immediately call back and wake us up,
			 * probably using our own context.
			 */
			io->scsiio.be_move_done(io);
			break;
		case CTL_IOCTL_DONE:
			mtx_unlock(&params.ioctl_mtx);
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n"));
			done = 1;
			break;
		default:
			mtx_unlock(&params.ioctl_mtx);
			/* XXX KDM error here? */
			break;
		}
	} while (done == 0);

	mtx_destroy(&params.ioctl_mtx);
	cv_destroy(&params.sem);

	return (CTL_RETVAL_COMPLETE);
}
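/*
 * Note (illustrative, not original commentary): the state/last_state dance
 * above is the usual guard against a lost wakeup.  ctl_ioctl_datamove() or
 * ctl_ioctl_done() below may cv_broadcast() between ctl_queue() returning
 * and this thread taking the mutex; comparing against last_state before
 * cv_wait() turns that early broadcast into an immediate pass through the
 * switch instead of a sleep that nothing will ever end.
 */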
*/
1877 break;
1878 }
1879 } while (done == 0);
1880
1881 mtx_destroy(&params.ioctl_mtx);
1882 cv_destroy(&params.sem);
1883
1884 return (CTL_RETVAL_COMPLETE);
1885}
1886
1887static void
1888ctl_ioctl_datamove(union ctl_io *io)
1889{
1890 struct ctl_fe_ioctl_params *params;
1891
1892 params = (struct ctl_fe_ioctl_params *)
1893 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
1894
1895 mtx_lock(&params->ioctl_mtx);
1896 params->state = CTL_IOCTL_DATAMOVE;
1897 cv_broadcast(&params->sem);
1898 mtx_unlock(&params->ioctl_mtx);
1899}
1900
1901static void
1902ctl_ioctl_done(union ctl_io *io)
1903{
1904 struct ctl_fe_ioctl_params *params;
1905
1906 params = (struct ctl_fe_ioctl_params *)
1907 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
1908
1909 mtx_lock(&params->ioctl_mtx);
1910 params->state = CTL_IOCTL_DONE;
1911 cv_broadcast(&params->sem);
1912 mtx_unlock(&params->ioctl_mtx);
1913}
1914
1915static void
1916ctl_ioctl_hard_startstop_callback(void *arg, struct cfi_metatask *metatask)
1917{
1918 struct ctl_fe_ioctl_startstop_info *sd_info;
1919
1920 sd_info = (struct ctl_fe_ioctl_startstop_info *)arg;
1921
1922 sd_info->hs_info.status = metatask->status;
1923 sd_info->hs_info.total_luns = metatask->taskinfo.startstop.total_luns;
1924 sd_info->hs_info.luns_complete =
1925 metatask->taskinfo.startstop.luns_complete;
1926 sd_info->hs_info.luns_failed = metatask->taskinfo.startstop.luns_failed;
1927
1928 cv_broadcast(&sd_info->sem);
1929}
1930
1931static void
1932ctl_ioctl_bbrread_callback(void *arg, struct cfi_metatask *metatask)
1933{
1934 struct ctl_fe_ioctl_bbrread_info *fe_bbr_info;
1935
1936 fe_bbr_info = (struct ctl_fe_ioctl_bbrread_info *)arg;
1937
1938 mtx_lock(fe_bbr_info->lock);
1939 fe_bbr_info->bbr_info->status = metatask->status;
1940 fe_bbr_info->bbr_info->bbr_status = metatask->taskinfo.bbrread.status;
1941 fe_bbr_info->wakeup_done = 1;
1942 mtx_unlock(fe_bbr_info->lock);
1943
1944 cv_broadcast(&fe_bbr_info->sem);
1945}
1946
1947/*
1948 * Returns 0 for success, errno for failure.
1949 */
1950static int
1951ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
1952 struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries)
1953{
1954 union ctl_io *io;
1955 int retval;
1956
1957 retval = 0;
1958
1959 mtx_lock(&lun->lun_lock);
1960 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL);
1961 (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
1962 ooa_links)) {
1963 struct ctl_ooa_entry *entry;
1964
1965 /*
1966 * If we've got more than we can fit, just count the
1967 * remaining entries.
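 * The walk is deliberately not cut short, so *cur_fill_num ends up
 * as the total queue depth; the CTL_GET_OOA handler uses the excess
 * over alloc_num to fill in dropped_num and return
 * CTL_OOA_NEED_MORE_SPACE.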
1968 */ 1969 if (*cur_fill_num >= ooa_hdr->alloc_num) 1970 continue; 1971 1972 entry = &kern_entries[*cur_fill_num]; 1973 1974 entry->tag_num = io->scsiio.tag_num; 1975 entry->lun_num = lun->lun; 1976#ifdef CTL_TIME_IO 1977 entry->start_bt = io->io_hdr.start_bt; 1978#endif 1979 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); 1980 entry->cdb_len = io->scsiio.cdb_len; 1981 if (io->io_hdr.flags & CTL_FLAG_BLOCKED) 1982 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; 1983 1984 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) 1985 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; 1986 1987 if (io->io_hdr.flags & CTL_FLAG_ABORT) 1988 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; 1989 1990 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) 1991 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; 1992 1993 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 1994 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; 1995 } 1996 mtx_unlock(&lun->lun_lock); 1997 1998 return (retval); 1999} 2000 2001static void * 2002ctl_copyin_alloc(void *user_addr, int len, char *error_str, 2003 size_t error_str_len) 2004{ 2005 void *kptr; 2006 2007 kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO); 2008 2009 if (copyin(user_addr, kptr, len) != 0) { 2010 snprintf(error_str, error_str_len, "Error copying %d bytes " 2011 "from user address %p to kernel address %p", len, 2012 user_addr, kptr); 2013 free(kptr, M_CTL); 2014 return (NULL); 2015 } 2016 2017 return (kptr); 2018} 2019 2020static void 2021ctl_free_args(int num_be_args, struct ctl_be_arg *be_args) 2022{ 2023 int i; 2024 2025 if (be_args == NULL) 2026 return; 2027 2028 for (i = 0; i < num_be_args; i++) { 2029 free(be_args[i].kname, M_CTL); 2030 free(be_args[i].kvalue, M_CTL); 2031 } 2032 2033 free(be_args, M_CTL); 2034} 2035 2036static struct ctl_be_arg * 2037ctl_copyin_args(int num_be_args, struct ctl_be_arg *be_args, 2038 char *error_str, size_t error_str_len) 2039{ 2040 struct ctl_be_arg *args; 2041 int i; 2042 2043 args = ctl_copyin_alloc(be_args, num_be_args * sizeof(*be_args), 2044 error_str, error_str_len); 2045 2046 if (args == NULL) 2047 goto bailout; 2048 2049 for (i = 0; i < num_be_args; i++) { 2050 args[i].kname = NULL; 2051 args[i].kvalue = NULL; 2052 } 2053 2054 for (i = 0; i < num_be_args; i++) { 2055 uint8_t *tmpptr; 2056 2057 args[i].kname = ctl_copyin_alloc(args[i].name, 2058 args[i].namelen, error_str, error_str_len); 2059 if (args[i].kname == NULL) 2060 goto bailout; 2061 2062 if (args[i].kname[args[i].namelen - 1] != '\0') { 2063 snprintf(error_str, error_str_len, "Argument %d " 2064 "name is not NUL-terminated", i); 2065 goto bailout; 2066 } 2067 2068 args[i].kvalue = NULL; 2069 2070 tmpptr = ctl_copyin_alloc(args[i].value, 2071 args[i].vallen, error_str, error_str_len); 2072 if (tmpptr == NULL) 2073 goto bailout; 2074 2075 args[i].kvalue = tmpptr; 2076 2077 if ((args[i].flags & CTL_BEARG_ASCII) 2078 && (tmpptr[args[i].vallen - 1] != '\0')) { 2079 snprintf(error_str, error_str_len, "Argument %d " 2080 "value is not NUL-terminated", i); 2081 goto bailout; 2082 } 2083 } 2084 2085 return (args); 2086bailout: 2087 2088 ctl_free_args(num_be_args, args); 2089 2090 return (NULL); 2091} 2092 2093/* 2094 * Escape characters that are illegal or not recommended in XML. 
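 * For example, a device ID of "abc<&>" is emitted as
 * "abc&lt;&amp;&gt;", which keeps the generated <ctllunlist> output
 * parseable by a standard XML parser. Only '&', '<' and '>' are
 * rewritten; every other character passes through sbuf_putc()
 * unchanged.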
2095 */
2096 int
2097 ctl_sbuf_printf_esc(struct sbuf *sb, char *str)
2098 {
2099 int retval;
2100
2101 retval = 0;
2102
2103 for (; *str; str++) {
2104 switch (*str) {
2105 case '&':
2106 retval = sbuf_printf(sb, "&amp;");
2107 break;
2108 case '>':
2109 retval = sbuf_printf(sb, "&gt;");
2110 break;
2111 case '<':
2112 retval = sbuf_printf(sb, "&lt;");
2113 break;
2114 default:
2115 retval = sbuf_putc(sb, *str);
2116 break;
2117 }
2118
2119 if (retval != 0)
2120 break;
2121
2122 }
2123
2124 return (retval);
2125 }
2126
2127 static int
2128 ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
2129 struct thread *td)
2130 {
2131 struct ctl_softc *softc;
2132 int retval;
2133
2134 softc = control_softc;
2135
2136 retval = 0;
2137
2138 switch (cmd) {
2139 case CTL_IO: {
2140 union ctl_io *io;
2141 void *pool_tmp;
2142
2143 /*
2144 * If we haven't been "enabled", don't allow any SCSI I/O
2145 * to this FETD.
2146 */
2147 if ((softc->ioctl_info.flags & CTL_IOCTL_FLAG_ENABLED) == 0) {
2148 retval = EPERM;
2149 break;
2150 }
2151
2152 io = ctl_alloc_io(softc->ioctl_info.fe.ctl_pool_ref);
2153 if (io == NULL) {
2154 printf("ctl_ioctl: can't allocate ctl_io!\n");
2155 retval = ENOSPC;
2156 break;
2157 }
2158
2159 /*
2160 * Need to save the pool reference so it doesn't get
2161 * spammed by the user's ctl_io.
2162 */
2163 pool_tmp = io->io_hdr.pool;
2164
2165 memcpy(io, (void *)addr, sizeof(*io));
2166
2167 io->io_hdr.pool = pool_tmp;
2168 /*
2169 * No status yet, so make sure the status is set properly.
2170 */
2171 io->io_hdr.status = CTL_STATUS_NONE;
2172
2173 /*
2174 * The user sets the initiator ID, target and LUN IDs.
2175 */
2176 io->io_hdr.nexus.targ_port = softc->ioctl_info.fe.targ_port;
2177 io->io_hdr.flags |= CTL_FLAG_USER_REQ;
2178 if ((io->io_hdr.io_type == CTL_IO_SCSI)
2179 && (io->scsiio.tag_type != CTL_TAG_UNTAGGED))
2180 io->scsiio.tag_num = softc->ioctl_info.cur_tag_num++;
2181
2182 retval = ctl_ioctl_submit_wait(io);
2183
2184 if (retval != 0) {
2185 ctl_free_io(io);
2186 break;
2187 }
2188
2189 memcpy((void *)addr, io, sizeof(*io));
2190
2191 /* return this to our pool */
2192 ctl_free_io(io);
2193
2194 break;
2195 }
2196 case CTL_ENABLE_PORT:
2197 case CTL_DISABLE_PORT:
2198 case CTL_SET_PORT_WWNS: {
2199 struct ctl_frontend *fe;
2200 struct ctl_port_entry *entry;
2201
2202 entry = (struct ctl_port_entry *)addr;
2203
2204 mtx_lock(&softc->ctl_lock);
2205 STAILQ_FOREACH(fe, &softc->fe_list, links) {
2206 int action, done;
2207
2208 action = 0;
2209 done = 0;
2210
2211 if ((entry->port_type == CTL_PORT_NONE)
2212 && (entry->targ_port == fe->targ_port)) {
2213 /*
2214 * If the user only wants to enable or
2215 * disable or set WWNs on a specific port,
2216 * do the operation and we're done.
2217 */
2218 action = 1;
2219 done = 1;
2220 } else if (entry->port_type & fe->port_type) {
2221 /*
2222 * Compare the user's type mask with the
2223 * particular frontend type to see if we
2224 * have a match.
2225 */
2226 action = 1;
2227 done = 0;
2228
2229 /*
2230 * Make sure the user isn't trying to set
2231 * WWNs on multiple ports at the same time.
2232 */
2233 if (cmd == CTL_SET_PORT_WWNS) {
2234 printf("%s: Can't set WWNs on "
2235 "multiple ports\n", __func__);
2236 retval = EINVAL;
2237 break;
2238 }
2239 }
2240 if (action != 0) {
2241 /*
2242 * XXX KDM we have to drop the lock here,
2243 * because the online/offline operations
2244 * can potentially block.
We need to 2245 * reference count the frontends so they 2246 * can't go away, 2247 */ 2248 mtx_unlock(&softc->ctl_lock); 2249 2250 if (cmd == CTL_ENABLE_PORT) { 2251 struct ctl_lun *lun; 2252 2253 STAILQ_FOREACH(lun, &softc->lun_list, 2254 links) { 2255 fe->lun_enable(fe->targ_lun_arg, 2256 lun->target, 2257 lun->lun); 2258 } 2259 2260 ctl_frontend_online(fe); 2261 } else if (cmd == CTL_DISABLE_PORT) { 2262 struct ctl_lun *lun; 2263 2264 ctl_frontend_offline(fe); 2265 2266 STAILQ_FOREACH(lun, &softc->lun_list, 2267 links) { 2268 fe->lun_disable( 2269 fe->targ_lun_arg, 2270 lun->target, 2271 lun->lun); 2272 } 2273 } 2274 2275 mtx_lock(&softc->ctl_lock); 2276 2277 if (cmd == CTL_SET_PORT_WWNS) 2278 ctl_frontend_set_wwns(fe, 2279 (entry->flags & CTL_PORT_WWNN_VALID) ? 2280 1 : 0, entry->wwnn, 2281 (entry->flags & CTL_PORT_WWPN_VALID) ? 2282 1 : 0, entry->wwpn); 2283 } 2284 if (done != 0) 2285 break; 2286 } 2287 mtx_unlock(&softc->ctl_lock); 2288 break; 2289 } 2290 case CTL_GET_PORT_LIST: { 2291 struct ctl_frontend *fe; 2292 struct ctl_port_list *list; 2293 int i; 2294 2295 list = (struct ctl_port_list *)addr; 2296 2297 if (list->alloc_len != (list->alloc_num * 2298 sizeof(struct ctl_port_entry))) { 2299 printf("%s: CTL_GET_PORT_LIST: alloc_len %u != " 2300 "alloc_num %u * sizeof(struct ctl_port_entry) " 2301 "%zu\n", __func__, list->alloc_len, 2302 list->alloc_num, sizeof(struct ctl_port_entry)); 2303 retval = EINVAL; 2304 break; 2305 } 2306 list->fill_len = 0; 2307 list->fill_num = 0; 2308 list->dropped_num = 0; 2309 i = 0; 2310 mtx_lock(&softc->ctl_lock); 2311 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2312 struct ctl_port_entry entry, *list_entry; 2313 2314 if (list->fill_num >= list->alloc_num) { 2315 list->dropped_num++; 2316 continue; 2317 } 2318 2319 entry.port_type = fe->port_type; 2320 strlcpy(entry.port_name, fe->port_name, 2321 sizeof(entry.port_name)); 2322 entry.targ_port = fe->targ_port; 2323 entry.physical_port = fe->physical_port; 2324 entry.virtual_port = fe->virtual_port; 2325 entry.wwnn = fe->wwnn; 2326 entry.wwpn = fe->wwpn; 2327 if (fe->status & CTL_PORT_STATUS_ONLINE) 2328 entry.online = 1; 2329 else 2330 entry.online = 0; 2331 2332 list_entry = &list->entries[i]; 2333 2334 retval = copyout(&entry, list_entry, sizeof(entry)); 2335 if (retval != 0) { 2336 printf("%s: CTL_GET_PORT_LIST: copyout " 2337 "returned %d\n", __func__, retval); 2338 break; 2339 } 2340 i++; 2341 list->fill_num++; 2342 list->fill_len += sizeof(entry); 2343 } 2344 mtx_unlock(&softc->ctl_lock); 2345 2346 /* 2347 * If this is non-zero, we had a copyout fault, so there's 2348 * probably no point in attempting to set the status inside 2349 * the structure. 2350 */ 2351 if (retval != 0) 2352 break; 2353 2354 if (list->dropped_num > 0) 2355 list->status = CTL_PORT_LIST_NEED_MORE_SPACE; 2356 else 2357 list->status = CTL_PORT_LIST_OK; 2358 break; 2359 } 2360 case CTL_DUMP_OOA: { 2361 struct ctl_lun *lun; 2362 union ctl_io *io; 2363 char printbuf[128]; 2364 struct sbuf sb; 2365 2366 mtx_lock(&softc->ctl_lock); 2367 printf("Dumping OOA queues:\n"); 2368 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2369 mtx_lock(&lun->lun_lock); 2370 for (io = (union ctl_io *)TAILQ_FIRST( 2371 &lun->ooa_queue); io != NULL; 2372 io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2373 ooa_links)) { 2374 sbuf_new(&sb, printbuf, sizeof(printbuf), 2375 SBUF_FIXEDLEN); 2376 sbuf_printf(&sb, "LUN %jd tag 0x%04x%s%s%s%s: ", 2377 (intmax_t)lun->lun, 2378 io->scsiio.tag_num, 2379 (io->io_hdr.flags & 2380 CTL_FLAG_BLOCKED) ? 
"" : " BLOCKED", 2381 (io->io_hdr.flags & 2382 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 2383 (io->io_hdr.flags & 2384 CTL_FLAG_ABORT) ? " ABORT" : "", 2385 (io->io_hdr.flags & 2386 CTL_FLAG_IS_WAS_ON_RTR) ? " RTR" : ""); 2387 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 2388 sbuf_finish(&sb); 2389 printf("%s\n", sbuf_data(&sb)); 2390 } 2391 mtx_unlock(&lun->lun_lock); 2392 } 2393 printf("OOA queues dump done\n"); 2394 mtx_unlock(&softc->ctl_lock); 2395 break; 2396 } 2397 case CTL_GET_OOA: { 2398 struct ctl_lun *lun; 2399 struct ctl_ooa *ooa_hdr; 2400 struct ctl_ooa_entry *entries; 2401 uint32_t cur_fill_num; 2402 2403 ooa_hdr = (struct ctl_ooa *)addr; 2404 2405 if ((ooa_hdr->alloc_len == 0) 2406 || (ooa_hdr->alloc_num == 0)) { 2407 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2408 "must be non-zero\n", __func__, 2409 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2410 retval = EINVAL; 2411 break; 2412 } 2413 2414 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2415 sizeof(struct ctl_ooa_entry))) { 2416 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2417 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2418 __func__, ooa_hdr->alloc_len, 2419 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2420 retval = EINVAL; 2421 break; 2422 } 2423 2424 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2425 if (entries == NULL) { 2426 printf("%s: could not allocate %d bytes for OOA " 2427 "dump\n", __func__, ooa_hdr->alloc_len); 2428 retval = ENOMEM; 2429 break; 2430 } 2431 2432 mtx_lock(&softc->ctl_lock); 2433 if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0) 2434 && ((ooa_hdr->lun_num > CTL_MAX_LUNS) 2435 || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) { 2436 mtx_unlock(&softc->ctl_lock); 2437 free(entries, M_CTL); 2438 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2439 __func__, (uintmax_t)ooa_hdr->lun_num); 2440 retval = EINVAL; 2441 break; 2442 } 2443 2444 cur_fill_num = 0; 2445 2446 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2447 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2448 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2449 ooa_hdr, entries); 2450 if (retval != 0) 2451 break; 2452 } 2453 if (retval != 0) { 2454 mtx_unlock(&softc->ctl_lock); 2455 free(entries, M_CTL); 2456 break; 2457 } 2458 } else { 2459 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2460 2461 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,ooa_hdr, 2462 entries); 2463 } 2464 mtx_unlock(&softc->ctl_lock); 2465 2466 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2467 ooa_hdr->fill_len = ooa_hdr->fill_num * 2468 sizeof(struct ctl_ooa_entry); 2469 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2470 if (retval != 0) { 2471 printf("%s: error copying out %d bytes for OOA dump\n", 2472 __func__, ooa_hdr->fill_len); 2473 } 2474 2475 getbintime(&ooa_hdr->cur_bt); 2476 2477 if (cur_fill_num > ooa_hdr->alloc_num) { 2478 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2479 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2480 } else { 2481 ooa_hdr->dropped_num = 0; 2482 ooa_hdr->status = CTL_OOA_OK; 2483 } 2484 2485 free(entries, M_CTL); 2486 break; 2487 } 2488 case CTL_CHECK_OOA: { 2489 union ctl_io *io; 2490 struct ctl_lun *lun; 2491 struct ctl_ooa_info *ooa_info; 2492 2493 2494 ooa_info = (struct ctl_ooa_info *)addr; 2495 2496 if (ooa_info->lun_id >= CTL_MAX_LUNS) { 2497 ooa_info->status = CTL_OOA_INVALID_LUN; 2498 break; 2499 } 2500 mtx_lock(&softc->ctl_lock); 2501 lun = softc->ctl_luns[ooa_info->lun_id]; 2502 if (lun == NULL) { 2503 mtx_unlock(&softc->ctl_lock); 2504 
ooa_info->status = CTL_OOA_INVALID_LUN; 2505 break; 2506 } 2507 mtx_lock(&lun->lun_lock); 2508 mtx_unlock(&softc->ctl_lock); 2509 ooa_info->num_entries = 0; 2510 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 2511 io != NULL; io = (union ctl_io *)TAILQ_NEXT( 2512 &io->io_hdr, ooa_links)) { 2513 ooa_info->num_entries++; 2514 } 2515 mtx_unlock(&lun->lun_lock); 2516 2517 ooa_info->status = CTL_OOA_SUCCESS; 2518 2519 break; 2520 } 2521 case CTL_HARD_START: 2522 case CTL_HARD_STOP: { 2523 struct ctl_fe_ioctl_startstop_info ss_info; 2524 struct cfi_metatask *metatask; 2525 struct mtx hs_mtx; 2526 2527 mtx_init(&hs_mtx, "HS Mutex", NULL, MTX_DEF); 2528 2529 cv_init(&ss_info.sem, "hard start/stop cv" ); 2530 2531 metatask = cfi_alloc_metatask(/*can_wait*/ 1); 2532 if (metatask == NULL) { 2533 retval = ENOMEM; 2534 mtx_destroy(&hs_mtx); 2535 break; 2536 } 2537 2538 if (cmd == CTL_HARD_START) 2539 metatask->tasktype = CFI_TASK_STARTUP; 2540 else 2541 metatask->tasktype = CFI_TASK_SHUTDOWN; 2542 2543 metatask->callback = ctl_ioctl_hard_startstop_callback; 2544 metatask->callback_arg = &ss_info; 2545 2546 cfi_action(metatask); 2547 2548 /* Wait for the callback */ 2549 mtx_lock(&hs_mtx); 2550 cv_wait_sig(&ss_info.sem, &hs_mtx); 2551 mtx_unlock(&hs_mtx); 2552 2553 /* 2554 * All information has been copied from the metatask by the 2555 * time cv_broadcast() is called, so we free the metatask here. 2556 */ 2557 cfi_free_metatask(metatask); 2558 2559 memcpy((void *)addr, &ss_info.hs_info, sizeof(ss_info.hs_info)); 2560 2561 mtx_destroy(&hs_mtx); 2562 break; 2563 } 2564 case CTL_BBRREAD: { 2565 struct ctl_bbrread_info *bbr_info; 2566 struct ctl_fe_ioctl_bbrread_info fe_bbr_info; 2567 struct mtx bbr_mtx; 2568 struct cfi_metatask *metatask; 2569 2570 bbr_info = (struct ctl_bbrread_info *)addr; 2571 2572 bzero(&fe_bbr_info, sizeof(fe_bbr_info)); 2573 2574 bzero(&bbr_mtx, sizeof(bbr_mtx)); 2575 mtx_init(&bbr_mtx, "BBR Mutex", NULL, MTX_DEF); 2576 2577 fe_bbr_info.bbr_info = bbr_info; 2578 fe_bbr_info.lock = &bbr_mtx; 2579 2580 cv_init(&fe_bbr_info.sem, "BBR read cv"); 2581 metatask = cfi_alloc_metatask(/*can_wait*/ 1); 2582 2583 if (metatask == NULL) { 2584 mtx_destroy(&bbr_mtx); 2585 cv_destroy(&fe_bbr_info.sem); 2586 retval = ENOMEM; 2587 break; 2588 } 2589 metatask->tasktype = CFI_TASK_BBRREAD; 2590 metatask->callback = ctl_ioctl_bbrread_callback; 2591 metatask->callback_arg = &fe_bbr_info; 2592 metatask->taskinfo.bbrread.lun_num = bbr_info->lun_num; 2593 metatask->taskinfo.bbrread.lba = bbr_info->lba; 2594 metatask->taskinfo.bbrread.len = bbr_info->len; 2595 2596 cfi_action(metatask); 2597 2598 mtx_lock(&bbr_mtx); 2599 while (fe_bbr_info.wakeup_done == 0) 2600 cv_wait_sig(&fe_bbr_info.sem, &bbr_mtx); 2601 mtx_unlock(&bbr_mtx); 2602 2603 bbr_info->status = metatask->status; 2604 bbr_info->bbr_status = metatask->taskinfo.bbrread.status; 2605 bbr_info->scsi_status = metatask->taskinfo.bbrread.scsi_status; 2606 memcpy(&bbr_info->sense_data, 2607 &metatask->taskinfo.bbrread.sense_data, 2608 ctl_min(sizeof(bbr_info->sense_data), 2609 sizeof(metatask->taskinfo.bbrread.sense_data))); 2610 2611 cfi_free_metatask(metatask); 2612 2613 mtx_destroy(&bbr_mtx); 2614 cv_destroy(&fe_bbr_info.sem); 2615 2616 break; 2617 } 2618 case CTL_DELAY_IO: { 2619 struct ctl_io_delay_info *delay_info; 2620#ifdef CTL_IO_DELAY 2621 struct ctl_lun *lun; 2622#endif /* CTL_IO_DELAY */ 2623 2624 delay_info = (struct ctl_io_delay_info *)addr; 2625 2626#ifdef CTL_IO_DELAY 2627 mtx_lock(&softc->ctl_lock); 2628 2629 if 
((delay_info->lun_id >= CTL_MAX_LUNS)
2630 || (softc->ctl_luns[delay_info->lun_id] == NULL)) {
2631 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN;
2632 } else {
2633 lun = softc->ctl_luns[delay_info->lun_id];
2634 mtx_lock(&lun->lun_lock);
2635
2636 delay_info->status = CTL_DELAY_STATUS_OK;
2637
2638 switch (delay_info->delay_type) {
2639 case CTL_DELAY_TYPE_CONT:
2640 break;
2641 case CTL_DELAY_TYPE_ONESHOT:
2642 break;
2643 default:
2644 delay_info->status =
2645 CTL_DELAY_STATUS_INVALID_TYPE;
2646 break;
2647 }
2648
2649 switch (delay_info->delay_loc) {
2650 case CTL_DELAY_LOC_DATAMOVE:
2651 lun->delay_info.datamove_type =
2652 delay_info->delay_type;
2653 lun->delay_info.datamove_delay =
2654 delay_info->delay_secs;
2655 break;
2656 case CTL_DELAY_LOC_DONE:
2657 lun->delay_info.done_type =
2658 delay_info->delay_type;
2659 lun->delay_info.done_delay =
2660 delay_info->delay_secs;
2661 break;
2662 default:
2663 delay_info->status =
2664 CTL_DELAY_STATUS_INVALID_LOC;
2665 break;
2666 }
2667 mtx_unlock(&lun->lun_lock);
2668 }
2669
2670 mtx_unlock(&softc->ctl_lock);
2671#else
2672 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED;
2673#endif /* CTL_IO_DELAY */
2674 break;
2675 }
2676 case CTL_REALSYNC_SET: {
2677 int *syncstate;
2678
2679 syncstate = (int *)addr;
2680
2681 mtx_lock(&softc->ctl_lock);
2682 switch (*syncstate) {
2683 case 0:
2684 softc->flags &= ~CTL_FLAG_REAL_SYNC;
2685 break;
2686 case 1:
2687 softc->flags |= CTL_FLAG_REAL_SYNC;
2688 break;
2689 default:
2690 retval = EINVAL;
2691 break;
2692 }
2693 mtx_unlock(&softc->ctl_lock);
2694 break;
2695 }
2696 case CTL_REALSYNC_GET: {
2697 int *syncstate;
2698
2699 syncstate = (int*)addr;
2700
2701 mtx_lock(&softc->ctl_lock);
2702 if (softc->flags & CTL_FLAG_REAL_SYNC)
2703 *syncstate = 1;
2704 else
2705 *syncstate = 0;
2706 mtx_unlock(&softc->ctl_lock);
2707
2708 break;
2709 }
2710 case CTL_SETSYNC:
2711 case CTL_GETSYNC: {
2712 struct ctl_sync_info *sync_info;
2713 struct ctl_lun *lun;
2714
2715 sync_info = (struct ctl_sync_info *)addr;
2716
2717 mtx_lock(&softc->ctl_lock);
2718 lun = softc->ctl_luns[sync_info->lun_id];
2719 if (lun == NULL) {
2720 mtx_unlock(&softc->ctl_lock);
2721 sync_info->status = CTL_GS_SYNC_NO_LUN; break;
2722 }
2723 /*
2724 * Get or set the sync interval. We're not bounds checking
2725 * in the set case, hopefully the user won't do something
2726 * silly.
2727 */
2728 mtx_lock(&lun->lun_lock);
2729 mtx_unlock(&softc->ctl_lock);
2730 if (cmd == CTL_GETSYNC)
2731 sync_info->sync_interval = lun->sync_interval;
2732 else
2733 lun->sync_interval = sync_info->sync_interval;
2734 mtx_unlock(&lun->lun_lock);
2735
2736 sync_info->status = CTL_GS_SYNC_OK;
2737
2738 break;
2739 }
2740 case CTL_GETSTATS: {
2741 struct ctl_stats *stats;
2742 struct ctl_lun *lun;
2743 int i;
2744
2745 stats = (struct ctl_stats *)addr;
2746
2747 if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) >
2748 stats->alloc_len) {
2749 stats->status = CTL_SS_NEED_MORE_SPACE;
2750 stats->num_luns = softc->num_luns;
2751 break;
2752 }
2753 /*
2754 * XXX KDM no locking here. If the LUN list changes,
2755 * things can blow up.
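 * Holding ctl_lock across this loop is not an option as the code is
 * written, because copyout() can fault and sleep; that window is
 * what lets a racing LUN addition or removal change the list under
 * us.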
2756 */ 2757 for (i = 0, lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; 2758 i++, lun = STAILQ_NEXT(lun, links)) { 2759 retval = copyout(&lun->stats, &stats->lun_stats[i], 2760 sizeof(lun->stats)); 2761 if (retval != 0) 2762 break; 2763 } 2764 stats->num_luns = softc->num_luns; 2765 stats->fill_len = sizeof(struct ctl_lun_io_stats) * 2766 softc->num_luns; 2767 stats->status = CTL_SS_OK; 2768#ifdef CTL_TIME_IO 2769 stats->flags = CTL_STATS_FLAG_TIME_VALID; 2770#else 2771 stats->flags = CTL_STATS_FLAG_NONE; 2772#endif 2773 getnanouptime(&stats->timestamp); 2774 break; 2775 } 2776 case CTL_ERROR_INJECT: { 2777 struct ctl_error_desc *err_desc, *new_err_desc; 2778 struct ctl_lun *lun; 2779 2780 err_desc = (struct ctl_error_desc *)addr; 2781 2782 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2783 M_WAITOK | M_ZERO); 2784 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 2785 2786 mtx_lock(&softc->ctl_lock); 2787 lun = softc->ctl_luns[err_desc->lun_id]; 2788 if (lun == NULL) { 2789 mtx_unlock(&softc->ctl_lock); 2790 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 2791 __func__, (uintmax_t)err_desc->lun_id); 2792 retval = EINVAL; 2793 break; 2794 } 2795 mtx_lock(&lun->lun_lock); 2796 mtx_unlock(&softc->ctl_lock); 2797 2798 /* 2799 * We could do some checking here to verify the validity 2800 * of the request, but given the complexity of error 2801 * injection requests, the checking logic would be fairly 2802 * complex. 2803 * 2804 * For now, if the request is invalid, it just won't get 2805 * executed and might get deleted. 2806 */ 2807 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 2808 2809 /* 2810 * XXX KDM check to make sure the serial number is unique, 2811 * in case we somehow manage to wrap. That shouldn't 2812 * happen for a very long time, but it's the right thing to 2813 * do. 
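 * (With a serial number that only ever increments, a duplicate would
 * require a full wrap of the counter, and any descriptor carrying
 * the colliding value would normally have been deleted long before
 * then.)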
2814 */
2815 new_err_desc->serial = lun->error_serial;
2816 err_desc->serial = lun->error_serial;
2817 lun->error_serial++;
2818
2819 mtx_unlock(&lun->lun_lock);
2820 break;
2821 }
2822 case CTL_ERROR_INJECT_DELETE: {
2823 struct ctl_error_desc *delete_desc, *desc, *desc2;
2824 struct ctl_lun *lun;
2825 int delete_done;
2826
2827 delete_desc = (struct ctl_error_desc *)addr;
2828 delete_done = 0;
2829
2830 mtx_lock(&softc->ctl_lock);
2831 lun = softc->ctl_luns[delete_desc->lun_id];
2832 if (lun == NULL) {
2833 mtx_unlock(&softc->ctl_lock);
2834 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n",
2835 __func__, (uintmax_t)delete_desc->lun_id);
2836 retval = EINVAL;
2837 break;
2838 }
2839 mtx_lock(&lun->lun_lock);
2840 mtx_unlock(&softc->ctl_lock);
2841 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
2842 if (desc->serial != delete_desc->serial)
2843 continue;
2844
2845 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc,
2846 links);
2847 free(desc, M_CTL);
2848 delete_done = 1;
2849 }
2850 mtx_unlock(&lun->lun_lock);
2851 if (delete_done == 0) {
2852 printf("%s: CTL_ERROR_INJECT_DELETE: can't find "
2853 "error serial %ju on LUN %u\n", __func__,
2854 delete_desc->serial, delete_desc->lun_id);
2855 retval = EINVAL;
2856 break;
2857 }
2858 break;
2859 }
2860 case CTL_DUMP_STRUCTS: {
2861 int i, j, k;
2862 struct ctl_frontend *fe;
2863
2864 printf("CTL IID to WWPN map start:\n");
2865 for (i = 0; i < CTL_MAX_PORTS; i++) {
2866 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
2867 if (softc->wwpn_iid[i][j].in_use == 0)
2868 continue;
2869
2870 printf("port %d iid %u WWPN %#jx\n",
2871 softc->wwpn_iid[i][j].port,
2872 softc->wwpn_iid[i][j].iid,
2873 (uintmax_t)softc->wwpn_iid[i][j].wwpn);
2874 }
2875 }
2876 printf("CTL IID to WWPN map end\n");
2877 printf("CTL Persistent Reservation information start:\n");
2878 for (i = 0; i < CTL_MAX_LUNS; i++) {
2879 struct ctl_lun *lun;
2880
2881 lun = softc->ctl_luns[i];
2882
2883 if ((lun == NULL)
2884 || ((lun->flags & CTL_LUN_DISABLED) != 0))
2885 continue;
2886
2887 for (j = 0; j < (CTL_MAX_PORTS * 2); j++) {
2888 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){
2889 if (lun->per_res[j * CTL_MAX_INIT_PER_PORT + k].registered == 0)
2890 continue;
2891 printf("LUN %d port %d iid %d key "
2892 "%#jx\n", i, j, k,
2893 (uintmax_t)scsi_8btou64(
2894 lun->per_res[j * CTL_MAX_INIT_PER_PORT + k].res_key.key));
2895 }
2896 }
2897 }
2898 printf("CTL Persistent Reservation information end\n");
2899 printf("CTL Frontends:\n");
2900 /*
2901 * XXX KDM calling this without a lock. We'd likely want
2902 * to drop the lock before calling the frontend's dump
2903 * routine anyway.
2904 */
2905 STAILQ_FOREACH(fe, &softc->fe_list, links) {
2906 printf("Frontend %s Type %u pport %d vport %d WWNN "
2907 "%#jx WWPN %#jx\n", fe->port_name, fe->port_type,
2908 fe->physical_port, fe->virtual_port,
2909 (uintmax_t)fe->wwnn, (uintmax_t)fe->wwpn);
2910
2911 /*
2912 * Frontends are not required to support the dump
2913 * routine.
2914 */ 2915 if (fe->fe_dump == NULL) 2916 continue; 2917 2918 fe->fe_dump(); 2919 } 2920 printf("CTL Frontend information end\n"); 2921 break; 2922 } 2923 case CTL_LUN_REQ: { 2924 struct ctl_lun_req *lun_req; 2925 struct ctl_backend_driver *backend; 2926 2927 lun_req = (struct ctl_lun_req *)addr; 2928 2929 backend = ctl_backend_find(lun_req->backend); 2930 if (backend == NULL) { 2931 lun_req->status = CTL_LUN_ERROR; 2932 snprintf(lun_req->error_str, 2933 sizeof(lun_req->error_str), 2934 "Backend \"%s\" not found.", 2935 lun_req->backend); 2936 break; 2937 } 2938 if (lun_req->num_be_args > 0) { 2939 lun_req->kern_be_args = ctl_copyin_args( 2940 lun_req->num_be_args, 2941 lun_req->be_args, 2942 lun_req->error_str, 2943 sizeof(lun_req->error_str)); 2944 if (lun_req->kern_be_args == NULL) { 2945 lun_req->status = CTL_LUN_ERROR; 2946 break; 2947 } 2948 } 2949 2950 retval = backend->ioctl(dev, cmd, addr, flag, td); 2951 2952 if (lun_req->num_be_args > 0) { 2953 ctl_free_args(lun_req->num_be_args, 2954 lun_req->kern_be_args); 2955 } 2956 break; 2957 } 2958 case CTL_LUN_LIST: { 2959 struct sbuf *sb; 2960 struct ctl_lun *lun; 2961 struct ctl_lun_list *list; 2962 struct ctl_be_lun_option *opt; 2963 2964 list = (struct ctl_lun_list *)addr; 2965 2966 /* 2967 * Allocate a fixed length sbuf here, based on the length 2968 * of the user's buffer. We could allocate an auto-extending 2969 * buffer, and then tell the user how much larger our 2970 * amount of data is than his buffer, but that presents 2971 * some problems: 2972 * 2973 * 1. The sbuf(9) routines use a blocking malloc, and so 2974 * we can't hold a lock while calling them with an 2975 * auto-extending buffer. 2976 * 2977 * 2. There is not currently a LUN reference counting 2978 * mechanism, outside of outstanding transactions on 2979 * the LUN's OOA queue. So a LUN could go away on us 2980 * while we're getting the LUN number, backend-specific 2981 * information, etc. Thus, given the way things 2982 * currently work, we need to hold the CTL lock while 2983 * grabbing LUN information. 2984 * 2985 * So, from the user's standpoint, the best thing to do is 2986 * allocate what he thinks is a reasonable buffer length, 2987 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 2988 * double the buffer length and try again. (And repeat 2989 * that until he succeeds.) 2990 */ 2991 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 2992 if (sb == NULL) { 2993 list->status = CTL_LUN_LIST_ERROR; 2994 snprintf(list->error_str, sizeof(list->error_str), 2995 "Unable to allocate %d bytes for LUN list", 2996 list->alloc_len); 2997 break; 2998 } 2999 3000 sbuf_printf(sb, "<ctllunlist>\n"); 3001 3002 mtx_lock(&softc->ctl_lock); 3003 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3004 mtx_lock(&lun->lun_lock); 3005 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 3006 (uintmax_t)lun->lun); 3007 3008 /* 3009 * Bail out as soon as we see that we've overfilled 3010 * the buffer. 3011 */ 3012 if (retval != 0) 3013 break; 3014 3015 retval = sbuf_printf(sb, "<backend_type>%s" 3016 "</backend_type>\n", 3017 (lun->backend == NULL) ? "none" : 3018 lun->backend->name); 3019 3020 if (retval != 0) 3021 break; 3022 3023 retval = sbuf_printf(sb, "<lun_type>%d</lun_type>\n", 3024 lun->be_lun->lun_type); 3025 3026 if (retval != 0) 3027 break; 3028 3029 if (lun->backend == NULL) { 3030 retval = sbuf_printf(sb, "</lun>\n"); 3031 if (retval != 0) 3032 break; 3033 continue; 3034 } 3035 3036 retval = sbuf_printf(sb, "<size>%ju</size>\n", 3037 (lun->be_lun->maxlba > 0) ? 
3038 lun->be_lun->maxlba + 1 : 0); 3039 3040 if (retval != 0) 3041 break; 3042 3043 retval = sbuf_printf(sb, "<blocksize>%u</blocksize>\n", 3044 lun->be_lun->blocksize); 3045 3046 if (retval != 0) 3047 break; 3048 3049 retval = sbuf_printf(sb, "<serial_number>"); 3050 3051 if (retval != 0) 3052 break; 3053 3054 retval = ctl_sbuf_printf_esc(sb, 3055 lun->be_lun->serial_num); 3056 3057 if (retval != 0) 3058 break; 3059 3060 retval = sbuf_printf(sb, "</serial_number>\n"); 3061 3062 if (retval != 0) 3063 break; 3064 3065 retval = sbuf_printf(sb, "<device_id>"); 3066 3067 if (retval != 0) 3068 break; 3069 3070 retval = ctl_sbuf_printf_esc(sb,lun->be_lun->device_id); 3071 3072 if (retval != 0) 3073 break; 3074 3075 retval = sbuf_printf(sb, "</device_id>\n"); 3076 3077 if (retval != 0) 3078 break; 3079 3080 if (lun->backend->lun_info != NULL) { 3081 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb); 3082 if (retval != 0) 3083 break; 3084 } 3085 STAILQ_FOREACH(opt, &lun->be_lun->options, links) { 3086 retval = sbuf_printf(sb, "<%s>%s</%s>", opt->name, opt->value, opt->name); 3087 if (retval != 0) 3088 break; 3089 } 3090 3091 retval = sbuf_printf(sb, "</lun>\n"); 3092 3093 if (retval != 0) 3094 break; 3095 mtx_unlock(&lun->lun_lock); 3096 } 3097 if (lun != NULL) 3098 mtx_unlock(&lun->lun_lock); 3099 mtx_unlock(&softc->ctl_lock); 3100 3101 if ((retval != 0) 3102 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 3103 retval = 0; 3104 sbuf_delete(sb); 3105 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3106 snprintf(list->error_str, sizeof(list->error_str), 3107 "Out of space, %d bytes is too small", 3108 list->alloc_len); 3109 break; 3110 } 3111 3112 sbuf_finish(sb); 3113 3114 retval = copyout(sbuf_data(sb), list->lun_xml, 3115 sbuf_len(sb) + 1); 3116 3117 list->fill_len = sbuf_len(sb) + 1; 3118 list->status = CTL_LUN_LIST_OK; 3119 sbuf_delete(sb); 3120 break; 3121 } 3122 case CTL_ISCSI: { 3123 struct ctl_iscsi *ci; 3124 struct ctl_frontend *fe; 3125 3126 ci = (struct ctl_iscsi *)addr; 3127 3128 mtx_lock(&softc->ctl_lock); 3129 STAILQ_FOREACH(fe, &softc->fe_list, links) { 3130 if (strcmp(fe->port_name, "iscsi") == 0) 3131 break; 3132 } 3133 mtx_unlock(&softc->ctl_lock); 3134 3135 if (fe == NULL) { 3136 ci->status = CTL_ISCSI_ERROR; 3137 snprintf(ci->error_str, sizeof(ci->error_str), "Backend \"iscsi\" not found."); 3138 break; 3139 } 3140 3141 retval = fe->ioctl(dev, cmd, addr, flag, td); 3142 break; 3143 } 3144 default: { 3145 /* XXX KDM should we fix this? */ 3146#if 0 3147 struct ctl_backend_driver *backend; 3148 unsigned int type; 3149 int found; 3150 3151 found = 0; 3152 3153 /* 3154 * We encode the backend type as the ioctl type for backend 3155 * ioctls. So parse it out here, and then search for a 3156 * backend of this type. 
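 * (_IOC_TYPE() recovers the 8-bit group field from the ioctl
 * command number, so a backend that registered with, say, group 'X'
 * would claim any _IO('X', ...) request that CTL itself does not
 * recognize.)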
3157 */ 3158 type = _IOC_TYPE(cmd); 3159 3160 STAILQ_FOREACH(backend, &softc->be_list, links) { 3161 if (backend->type == type) { 3162 found = 1; 3163 break; 3164 } 3165 } 3166 if (found == 0) { 3167 printf("ctl: unknown ioctl command %#lx or backend " 3168 "%d\n", cmd, type); 3169 retval = EINVAL; 3170 break; 3171 } 3172 retval = backend->ioctl(dev, cmd, addr, flag, td); 3173#endif 3174 retval = ENOTTY; 3175 break; 3176 } 3177 } 3178 return (retval); 3179} 3180 3181uint32_t 3182ctl_get_initindex(struct ctl_nexus *nexus) 3183{ 3184 if (nexus->targ_port < CTL_MAX_PORTS) 3185 return (nexus->initid.id + 3186 (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3187 else 3188 return (nexus->initid.id + 3189 ((nexus->targ_port - CTL_MAX_PORTS) * 3190 CTL_MAX_INIT_PER_PORT)); 3191} 3192 3193uint32_t 3194ctl_get_resindex(struct ctl_nexus *nexus) 3195{ 3196 return (nexus->initid.id + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3197} 3198 3199uint32_t 3200ctl_port_idx(int port_num) 3201{ 3202 if (port_num < CTL_MAX_PORTS) 3203 return(port_num); 3204 else 3205 return(port_num - CTL_MAX_PORTS); 3206} 3207 3208/* 3209 * Note: This only works for bitmask sizes that are at least 32 bits, and 3210 * that are a power of 2. 3211 */ 3212int 3213ctl_ffz(uint32_t *mask, uint32_t size) 3214{ 3215 uint32_t num_chunks, num_pieces; 3216 int i, j; 3217 3218 num_chunks = (size >> 5); 3219 if (num_chunks == 0) 3220 num_chunks++; 3221 num_pieces = ctl_min((sizeof(uint32_t) * 8), size); 3222 3223 for (i = 0; i < num_chunks; i++) { 3224 for (j = 0; j < num_pieces; j++) { 3225 if ((mask[i] & (1 << j)) == 0) 3226 return ((i << 5) + j); 3227 } 3228 } 3229 3230 return (-1); 3231} 3232 3233int 3234ctl_set_mask(uint32_t *mask, uint32_t bit) 3235{ 3236 uint32_t chunk, piece; 3237 3238 chunk = bit >> 5; 3239 piece = bit % (sizeof(uint32_t) * 8); 3240 3241 if ((mask[chunk] & (1 << piece)) != 0) 3242 return (-1); 3243 else 3244 mask[chunk] |= (1 << piece); 3245 3246 return (0); 3247} 3248 3249int 3250ctl_clear_mask(uint32_t *mask, uint32_t bit) 3251{ 3252 uint32_t chunk, piece; 3253 3254 chunk = bit >> 5; 3255 piece = bit % (sizeof(uint32_t) * 8); 3256 3257 if ((mask[chunk] & (1 << piece)) == 0) 3258 return (-1); 3259 else 3260 mask[chunk] &= ~(1 << piece); 3261 3262 return (0); 3263} 3264 3265int 3266ctl_is_set(uint32_t *mask, uint32_t bit) 3267{ 3268 uint32_t chunk, piece; 3269 3270 chunk = bit >> 5; 3271 piece = bit % (sizeof(uint32_t) * 8); 3272 3273 if ((mask[chunk] & (1 << piece)) == 0) 3274 return (0); 3275 else 3276 return (1); 3277} 3278 3279#ifdef unused 3280/* 3281 * The bus, target and lun are optional, they can be filled in later. 3282 * can_wait is used to determine whether we can wait on the malloc or not. 3283 */ 3284union ctl_io* 3285ctl_malloc_io(ctl_io_type io_type, uint32_t targ_port, uint32_t targ_target, 3286 uint32_t targ_lun, int can_wait) 3287{ 3288 union ctl_io *io; 3289 3290 if (can_wait) 3291 io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_WAITOK); 3292 else 3293 io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_NOWAIT); 3294 3295 if (io != NULL) { 3296 io->io_hdr.io_type = io_type; 3297 io->io_hdr.targ_port = targ_port; 3298 /* 3299 * XXX KDM this needs to change/go away. We need to move 3300 * to a preallocated pool of ctl_scsiio structures. 
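 * (ctl_pool_create() below is that preallocated-pool mechanism; the
 * compiled code already uses it, which is why this malloc-based
 * variant only survives under #ifdef unused.)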
3301 */ 3302 io->io_hdr.nexus.targ_target.id = targ_target; 3303 io->io_hdr.nexus.targ_lun = targ_lun; 3304 } 3305 3306 return (io); 3307} 3308 3309void 3310ctl_kfree_io(union ctl_io *io) 3311{ 3312 free(io, M_CTL); 3313} 3314#endif /* unused */ 3315 3316/* 3317 * ctl_softc, pool_type, total_ctl_io are passed in. 3318 * npool is passed out. 3319 */ 3320int 3321ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type, 3322 uint32_t total_ctl_io, struct ctl_io_pool **npool) 3323{ 3324 uint32_t i; 3325 union ctl_io *cur_io, *next_io; 3326 struct ctl_io_pool *pool; 3327 int retval; 3328 3329 retval = 0; 3330 3331 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3332 M_NOWAIT | M_ZERO); 3333 if (pool == NULL) { 3334 retval = ENOMEM; 3335 goto bailout; 3336 } 3337 3338 pool->type = pool_type; 3339 pool->ctl_softc = ctl_softc; 3340 3341 mtx_lock(&ctl_softc->pool_lock); 3342 pool->id = ctl_softc->cur_pool_id++; 3343 mtx_unlock(&ctl_softc->pool_lock); 3344 3345 pool->flags = CTL_POOL_FLAG_NONE; 3346 pool->refcount = 1; /* Reference for validity. */ 3347 STAILQ_INIT(&pool->free_queue); 3348 3349 /* 3350 * XXX KDM other options here: 3351 * - allocate a page at a time 3352 * - allocate one big chunk of memory. 3353 * Page allocation might work well, but would take a little more 3354 * tracking. 3355 */ 3356 for (i = 0; i < total_ctl_io; i++) { 3357 cur_io = (union ctl_io *)malloc(sizeof(*cur_io), M_CTL, 3358 M_NOWAIT); 3359 if (cur_io == NULL) { 3360 retval = ENOMEM; 3361 break; 3362 } 3363 cur_io->io_hdr.pool = pool; 3364 STAILQ_INSERT_TAIL(&pool->free_queue, &cur_io->io_hdr, links); 3365 pool->total_ctl_io++; 3366 pool->free_ctl_io++; 3367 } 3368 3369 if (retval != 0) { 3370 for (cur_io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue); 3371 cur_io != NULL; cur_io = next_io) { 3372 next_io = (union ctl_io *)STAILQ_NEXT(&cur_io->io_hdr, 3373 links); 3374 STAILQ_REMOVE(&pool->free_queue, &cur_io->io_hdr, 3375 ctl_io_hdr, links); 3376 free(cur_io, M_CTL); 3377 } 3378 3379 free(pool, M_CTL); 3380 goto bailout; 3381 } 3382 mtx_lock(&ctl_softc->pool_lock); 3383 ctl_softc->num_pools++; 3384 STAILQ_INSERT_TAIL(&ctl_softc->io_pools, pool, links); 3385 /* 3386 * Increment our usage count if this is an external consumer, so we 3387 * can't get unloaded until the external consumer (most likely a 3388 * FETD) unloads and frees his pool. 3389 * 3390 * XXX KDM will this increment the caller's module use count, or 3391 * mine? 
3392 */ 3393#if 0 3394 if ((pool_type != CTL_POOL_EMERGENCY) 3395 && (pool_type != CTL_POOL_INTERNAL) 3396 && (pool_type != CTL_POOL_IOCTL) 3397 && (pool_type != CTL_POOL_4OTHERSC)) 3398 MOD_INC_USE_COUNT; 3399#endif 3400 3401 mtx_unlock(&ctl_softc->pool_lock); 3402 3403 *npool = pool; 3404 3405bailout: 3406 3407 return (retval); 3408} 3409 3410static int 3411ctl_pool_acquire(struct ctl_io_pool *pool) 3412{ 3413 3414 mtx_assert(&pool->ctl_softc->pool_lock, MA_OWNED); 3415 3416 if (pool->flags & CTL_POOL_FLAG_INVALID) 3417 return (EINVAL); 3418 3419 pool->refcount++; 3420 3421 return (0); 3422} 3423 3424static void 3425ctl_pool_release(struct ctl_io_pool *pool) 3426{ 3427 struct ctl_softc *ctl_softc = pool->ctl_softc; 3428 union ctl_io *io; 3429 3430 mtx_assert(&ctl_softc->pool_lock, MA_OWNED); 3431 3432 if (--pool->refcount != 0) 3433 return; 3434 3435 while ((io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue)) != NULL) { 3436 STAILQ_REMOVE(&pool->free_queue, &io->io_hdr, ctl_io_hdr, 3437 links); 3438 free(io, M_CTL); 3439 } 3440 3441 STAILQ_REMOVE(&ctl_softc->io_pools, pool, ctl_io_pool, links); 3442 ctl_softc->num_pools--; 3443 3444 /* 3445 * XXX KDM will this decrement the caller's usage count or mine? 3446 */ 3447#if 0 3448 if ((pool->type != CTL_POOL_EMERGENCY) 3449 && (pool->type != CTL_POOL_INTERNAL) 3450 && (pool->type != CTL_POOL_IOCTL)) 3451 MOD_DEC_USE_COUNT; 3452#endif 3453 3454 free(pool, M_CTL); 3455} 3456 3457void 3458ctl_pool_free(struct ctl_io_pool *pool) 3459{ 3460 struct ctl_softc *ctl_softc; 3461 3462 if (pool == NULL) 3463 return; 3464 3465 ctl_softc = pool->ctl_softc; 3466 mtx_lock(&ctl_softc->pool_lock); 3467 pool->flags |= CTL_POOL_FLAG_INVALID; 3468 ctl_pool_release(pool); 3469 mtx_unlock(&ctl_softc->pool_lock); 3470} 3471 3472/* 3473 * This routine does not block (except for spinlocks of course). 3474 * It tries to allocate a ctl_io union from the caller's pool as quickly as 3475 * possible. 3476 */ 3477union ctl_io * 3478ctl_alloc_io(void *pool_ref) 3479{ 3480 union ctl_io *io; 3481 struct ctl_softc *ctl_softc; 3482 struct ctl_io_pool *pool, *npool; 3483 struct ctl_io_pool *emergency_pool; 3484 3485 pool = (struct ctl_io_pool *)pool_ref; 3486 3487 if (pool == NULL) { 3488 printf("%s: pool is NULL\n", __func__); 3489 return (NULL); 3490 } 3491 3492 emergency_pool = NULL; 3493 3494 ctl_softc = pool->ctl_softc; 3495 3496 mtx_lock(&ctl_softc->pool_lock); 3497 /* 3498 * First, try to get the io structure from the user's pool. 3499 */ 3500 if (ctl_pool_acquire(pool) == 0) { 3501 io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue); 3502 if (io != NULL) { 3503 STAILQ_REMOVE_HEAD(&pool->free_queue, links); 3504 pool->total_allocated++; 3505 pool->free_ctl_io--; 3506 mtx_unlock(&ctl_softc->pool_lock); 3507 return (io); 3508 } else 3509 ctl_pool_release(pool); 3510 } 3511 /* 3512 * If he doesn't have any io structures left, search for an 3513 * emergency pool and grab one from there. 
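 * The full fallback order is: the caller's own pool, then the first
 * emergency pool with a free entry, then a plain M_NOWAIT malloc.
 * If the malloc succeeds and an emergency pool exists, the new
 * ctl_io is adopted into that pool's accounting so that it is
 * recycled on free instead of going back to malloc.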
3514 */ 3515 STAILQ_FOREACH(npool, &ctl_softc->io_pools, links) { 3516 if (npool->type != CTL_POOL_EMERGENCY) 3517 continue; 3518 3519 if (ctl_pool_acquire(npool) != 0) 3520 continue; 3521 3522 emergency_pool = npool; 3523 3524 io = (union ctl_io *)STAILQ_FIRST(&npool->free_queue); 3525 if (io != NULL) { 3526 STAILQ_REMOVE_HEAD(&npool->free_queue, links); 3527 npool->total_allocated++; 3528 npool->free_ctl_io--; 3529 mtx_unlock(&ctl_softc->pool_lock); 3530 return (io); 3531 } else 3532 ctl_pool_release(npool); 3533 } 3534 3535 /* Drop the spinlock before we malloc */ 3536 mtx_unlock(&ctl_softc->pool_lock); 3537 3538 /* 3539 * The emergency pool (if it exists) didn't have one, so try an 3540 * atomic (i.e. nonblocking) malloc and see if we get lucky. 3541 */ 3542 io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_NOWAIT); 3543 if (io != NULL) { 3544 /* 3545 * If the emergency pool exists but is empty, add this 3546 * ctl_io to its list when it gets freed. 3547 */ 3548 if (emergency_pool != NULL) { 3549 mtx_lock(&ctl_softc->pool_lock); 3550 if (ctl_pool_acquire(emergency_pool) == 0) { 3551 io->io_hdr.pool = emergency_pool; 3552 emergency_pool->total_ctl_io++; 3553 /* 3554 * Need to bump this, otherwise 3555 * total_allocated and total_freed won't 3556 * match when we no longer have anything 3557 * outstanding. 3558 */ 3559 emergency_pool->total_allocated++; 3560 } 3561 mtx_unlock(&ctl_softc->pool_lock); 3562 } else 3563 io->io_hdr.pool = NULL; 3564 } 3565 3566 return (io); 3567} 3568 3569void 3570ctl_free_io(union ctl_io *io) 3571{ 3572 if (io == NULL) 3573 return; 3574 3575 /* 3576 * If this ctl_io has a pool, return it to that pool. 3577 */ 3578 if (io->io_hdr.pool != NULL) { 3579 struct ctl_io_pool *pool; 3580 3581 pool = (struct ctl_io_pool *)io->io_hdr.pool; 3582 mtx_lock(&pool->ctl_softc->pool_lock); 3583 io->io_hdr.io_type = 0xff; 3584 STAILQ_INSERT_TAIL(&pool->free_queue, &io->io_hdr, links); 3585 pool->total_freed++; 3586 pool->free_ctl_io++; 3587 ctl_pool_release(pool); 3588 mtx_unlock(&pool->ctl_softc->pool_lock); 3589 } else { 3590 /* 3591 * Otherwise, just free it. We probably malloced it and 3592 * the emergency pool wasn't available. 3593 */ 3594 free(io, M_CTL); 3595 } 3596 3597} 3598 3599void 3600ctl_zero_io(union ctl_io *io) 3601{ 3602 void *pool_ref; 3603 3604 if (io == NULL) 3605 return; 3606 3607 /* 3608 * May need to preserve linked list pointers at some point too. 3609 */ 3610 pool_ref = io->io_hdr.pool; 3611 3612 memset(io, 0, sizeof(*io)); 3613 3614 io->io_hdr.pool = pool_ref; 3615} 3616 3617/* 3618 * This routine is currently used for internal copies of ctl_ios that need 3619 * to persist for some reason after we've already returned status to the 3620 * FETD. (Thus the flag set.) 3621 * 3622 * XXX XXX 3623 * Note that this makes a blind copy of all fields in the ctl_io, except 3624 * for the pool reference. This includes any memory that has been 3625 * allocated! That memory will no longer be valid after done has been 3626 * called, so this would be VERY DANGEROUS for command that actually does 3627 * any reads or writes. Right now (11/7/2005), this is only used for immediate 3628 * start and stop commands, which don't transfer any data, so this is not a 3629 * problem. If it is used for anything else, the caller would also need to 3630 * allocate data buffer space and this routine would need to be modified to 3631 * copy the data buffer(s) as well. 
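 * In other words the copy is shallow: any kern_data_ptr style
 * buffer pointers in the destination still reference the source's
 * memory, which disappears once the original I/O completes.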
3632 */ 3633void 3634ctl_copy_io(union ctl_io *src, union ctl_io *dest) 3635{ 3636 void *pool_ref; 3637 3638 if ((src == NULL) 3639 || (dest == NULL)) 3640 return; 3641 3642 /* 3643 * May need to preserve linked list pointers at some point too. 3644 */ 3645 pool_ref = dest->io_hdr.pool; 3646 3647 memcpy(dest, src, ctl_min(sizeof(*src), sizeof(*dest))); 3648 3649 dest->io_hdr.pool = pool_ref; 3650 /* 3651 * We need to know that this is an internal copy, and doesn't need 3652 * to get passed back to the FETD that allocated it. 3653 */ 3654 dest->io_hdr.flags |= CTL_FLAG_INT_COPY; 3655} 3656 3657#ifdef NEEDTOPORT 3658static void 3659ctl_update_power_subpage(struct copan_power_subpage *page) 3660{ 3661 int num_luns, num_partitions, config_type; 3662 struct ctl_softc *softc; 3663 cs_BOOL_t aor_present, shelf_50pct_power; 3664 cs_raidset_personality_t rs_type; 3665 int max_active_luns; 3666 3667 softc = control_softc; 3668 3669 /* subtract out the processor LUN */ 3670 num_luns = softc->num_luns - 1; 3671 /* 3672 * Default to 7 LUNs active, which was the only number we allowed 3673 * in the past. 3674 */ 3675 max_active_luns = 7; 3676 3677 num_partitions = config_GetRsPartitionInfo(); 3678 config_type = config_GetConfigType(); 3679 shelf_50pct_power = config_GetShelfPowerMode(); 3680 aor_present = config_IsAorRsPresent(); 3681 3682 rs_type = ddb_GetRsRaidType(1); 3683 if ((rs_type != CS_RAIDSET_PERSONALITY_RAID5) 3684 && (rs_type != CS_RAIDSET_PERSONALITY_RAID1)) { 3685 EPRINT(0, "Unsupported RS type %d!", rs_type); 3686 } 3687 3688 3689 page->total_luns = num_luns; 3690 3691 switch (config_type) { 3692 case 40: 3693 /* 3694 * In a 40 drive configuration, it doesn't matter what DC 3695 * cards we have, whether we have AOR enabled or not, 3696 * partitioning or not, or what type of RAIDset we have. 3697 * In that scenario, we can power up every LUN we present 3698 * to the user. 3699 */ 3700 max_active_luns = num_luns; 3701 3702 break; 3703 case 64: 3704 if (shelf_50pct_power == CS_FALSE) { 3705 /* 25% power */ 3706 if (aor_present == CS_TRUE) { 3707 if (rs_type == 3708 CS_RAIDSET_PERSONALITY_RAID5) { 3709 max_active_luns = 7; 3710 } else if (rs_type == 3711 CS_RAIDSET_PERSONALITY_RAID1){ 3712 max_active_luns = 14; 3713 } else { 3714 /* XXX KDM now what?? */ 3715 } 3716 } else { 3717 if (rs_type == 3718 CS_RAIDSET_PERSONALITY_RAID5) { 3719 max_active_luns = 8; 3720 } else if (rs_type == 3721 CS_RAIDSET_PERSONALITY_RAID1){ 3722 max_active_luns = 16; 3723 } else { 3724 /* XXX KDM now what?? */ 3725 } 3726 } 3727 } else { 3728 /* 50% power */ 3729 /* 3730 * With 50% power in a 64 drive configuration, we 3731 * can power all LUNs we present. 3732 */ 3733 max_active_luns = num_luns; 3734 } 3735 break; 3736 case 112: 3737 if (shelf_50pct_power == CS_FALSE) { 3738 /* 25% power */ 3739 if (aor_present == CS_TRUE) { 3740 if (rs_type == 3741 CS_RAIDSET_PERSONALITY_RAID5) { 3742 max_active_luns = 7; 3743 } else if (rs_type == 3744 CS_RAIDSET_PERSONALITY_RAID1){ 3745 max_active_luns = 14; 3746 } else { 3747 /* XXX KDM now what?? */ 3748 } 3749 } else { 3750 if (rs_type == 3751 CS_RAIDSET_PERSONALITY_RAID5) { 3752 max_active_luns = 8; 3753 } else if (rs_type == 3754 CS_RAIDSET_PERSONALITY_RAID1){ 3755 max_active_luns = 16; 3756 } else { 3757 /* XXX KDM now what?? 
*/
3758 }
3759 }
3760 } else {
3761 /* 50% power */
3762 if (aor_present == CS_TRUE) {
3763 if (rs_type ==
3764 CS_RAIDSET_PERSONALITY_RAID5) {
3765 max_active_luns = 14;
3766 } else if (rs_type ==
3767 CS_RAIDSET_PERSONALITY_RAID1){
3768 /*
3769 * We're assuming here that disk
3770 * caching is enabled, and so we're
3771 * able to power up half of each
3772 * LUN, and cache all writes.
3773 */
3774 max_active_luns = num_luns;
3775 } else {
3776 /* XXX KDM now what?? */
3777 }
3778 } else {
3779 if (rs_type ==
3780 CS_RAIDSET_PERSONALITY_RAID5) {
3781 max_active_luns = 15;
3782 } else if (rs_type ==
3783 CS_RAIDSET_PERSONALITY_RAID1){
3784 max_active_luns = 30;
3785 } else {
3786 /* XXX KDM now what?? */
3787 }
3788 }
3789 }
3790 break;
3791 default:
3792 /*
3793 * In this case, we have an unknown configuration, so we
3794 * just use the default from above.
3795 */
3796 break;
3797 }
3798
3799 page->max_active_luns = max_active_luns;
3800#if 0
3801 printk("%s: total_luns = %d, max_active_luns = %d\n", __func__,
3802 page->total_luns, page->max_active_luns);
3803#endif
3804 }
3805#endif /* NEEDTOPORT */
3806
3807 /*
3808 * This routine could be used in the future to load default and/or saved
3809 * mode page parameters for a particular lun.
3810 */
3811 static int
3812 ctl_init_page_index(struct ctl_lun *lun)
3813 {
3814 int i;
3815 struct ctl_page_index *page_index;
3816 struct ctl_softc *softc;
3817
3818 memcpy(&lun->mode_pages.index, page_index_template,
3819 sizeof(page_index_template));
3820
3821 softc = lun->ctl_softc;
3822
3823 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
3824
3825 page_index = &lun->mode_pages.index[i];
3826 /*
3827 * If this is a disk-only mode page, there's no point in
3828 * setting it up. For some pages, we have to have some
3829 * basic information about the disk in order to calculate the
3830 * mode page data.
3831 */
3832 if ((lun->be_lun->lun_type != T_DIRECT)
3833 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY))
3834 continue;
3835
3836 switch (page_index->page_code & SMPH_PC_MASK) {
3837 case SMS_FORMAT_DEVICE_PAGE: {
3838 struct scsi_format_page *format_page;
3839
3840 if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
3841 panic("subpage is incorrect!");
3842
3843 /*
3844 * Sectors per track are set above. Bytes per
3845 * sector need to be set here on a per-LUN basis.
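 * (scsi_ulto2b() stores the value big-endian, as SCSI expects, so a
 * 512-byte sector ends up as the two bytes 0x02 0x00 in the page.)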
3846 */ 3847 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], 3848 &format_page_default, 3849 sizeof(format_page_default)); 3850 memcpy(&lun->mode_pages.format_page[ 3851 CTL_PAGE_CHANGEABLE], &format_page_changeable, 3852 sizeof(format_page_changeable)); 3853 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], 3854 &format_page_default, 3855 sizeof(format_page_default)); 3856 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], 3857 &format_page_default, 3858 sizeof(format_page_default)); 3859 3860 format_page = &lun->mode_pages.format_page[ 3861 CTL_PAGE_CURRENT]; 3862 scsi_ulto2b(lun->be_lun->blocksize, 3863 format_page->bytes_per_sector); 3864 3865 format_page = &lun->mode_pages.format_page[ 3866 CTL_PAGE_DEFAULT]; 3867 scsi_ulto2b(lun->be_lun->blocksize, 3868 format_page->bytes_per_sector); 3869 3870 format_page = &lun->mode_pages.format_page[ 3871 CTL_PAGE_SAVED]; 3872 scsi_ulto2b(lun->be_lun->blocksize, 3873 format_page->bytes_per_sector); 3874 3875 page_index->page_data = 3876 (uint8_t *)lun->mode_pages.format_page; 3877 break; 3878 } 3879 case SMS_RIGID_DISK_PAGE: { 3880 struct scsi_rigid_disk_page *rigid_disk_page; 3881 uint32_t sectors_per_cylinder; 3882 uint64_t cylinders; 3883#ifndef __XSCALE__ 3884 int shift; 3885#endif /* !__XSCALE__ */ 3886 3887 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3888 panic("invalid subpage value %d", 3889 page_index->subpage); 3890 3891 /* 3892 * Rotation rate and sectors per track are set 3893 * above. We calculate the cylinders here based on 3894 * capacity. Due to the number of heads and 3895 * sectors per track we're using, smaller arrays 3896 * may turn out to have 0 cylinders. Linux and 3897 * FreeBSD don't pay attention to these mode pages 3898 * to figure out capacity, but Solaris does. It 3899 * seems to deal with 0 cylinders just fine, and 3900 * works out a fake geometry based on the capacity. 3901 */ 3902 memcpy(&lun->mode_pages.rigid_disk_page[ 3903 CTL_PAGE_CURRENT], &rigid_disk_page_default, 3904 sizeof(rigid_disk_page_default)); 3905 memcpy(&lun->mode_pages.rigid_disk_page[ 3906 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, 3907 sizeof(rigid_disk_page_changeable)); 3908 memcpy(&lun->mode_pages.rigid_disk_page[ 3909 CTL_PAGE_DEFAULT], &rigid_disk_page_default, 3910 sizeof(rigid_disk_page_default)); 3911 memcpy(&lun->mode_pages.rigid_disk_page[ 3912 CTL_PAGE_SAVED], &rigid_disk_page_default, 3913 sizeof(rigid_disk_page_default)); 3914 3915 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * 3916 CTL_DEFAULT_HEADS; 3917 3918 /* 3919 * The divide method here will be more accurate, 3920 * probably, but results in floating point being 3921 * used in the kernel on i386 (__udivdi3()). On the 3922 * XScale, though, __udivdi3() is implemented in 3923 * software. 3924 * 3925 * The shift method for cylinder calculation is 3926 * accurate if sectors_per_cylinder is a power of 3927 * 2. Otherwise it might be slightly off -- you 3928 * might have a bit of a truncation problem. 3929 */ 3930#ifdef __XSCALE__ 3931 cylinders = (lun->be_lun->maxlba + 1) / 3932 sectors_per_cylinder; 3933#else 3934 for (shift = 31; shift > 0; shift--) { 3935 if (sectors_per_cylinder & (1 << shift)) 3936 break; 3937 } 3938 cylinders = (lun->be_lun->maxlba + 1) >> shift; 3939#endif 3940 3941 /* 3942 * We've basically got 3 bytes, or 24 bits for the 3943 * cylinder size in the mode page. If we're over, 3944 * just round down to 2^24. 
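 * Worked example, using hypothetical defaults: with 256 sectors per
 * track and 128 heads, sectors_per_cylinder is 32768 (shift = 15),
 * so a LUN of 2^31 blocks yields 2^31 >> 15 = 65536 cylinders, well
 * under the 0xffffff (16777215) cap.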
3945 */ 3946 if (cylinders > 0xffffff) 3947 cylinders = 0xffffff; 3948 3949 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 3950 CTL_PAGE_CURRENT]; 3951 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 3952 3953 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 3954 CTL_PAGE_DEFAULT]; 3955 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 3956 3957 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 3958 CTL_PAGE_SAVED]; 3959 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 3960 3961 page_index->page_data = 3962 (uint8_t *)lun->mode_pages.rigid_disk_page; 3963 break; 3964 } 3965 case SMS_CACHING_PAGE: { 3966 3967 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3968 panic("invalid subpage value %d", 3969 page_index->subpage); 3970 /* 3971 * Defaults should be okay here, no calculations 3972 * needed. 3973 */ 3974 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], 3975 &caching_page_default, 3976 sizeof(caching_page_default)); 3977 memcpy(&lun->mode_pages.caching_page[ 3978 CTL_PAGE_CHANGEABLE], &caching_page_changeable, 3979 sizeof(caching_page_changeable)); 3980 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], 3981 &caching_page_default, 3982 sizeof(caching_page_default)); 3983 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], 3984 &caching_page_default, 3985 sizeof(caching_page_default)); 3986 page_index->page_data = 3987 (uint8_t *)lun->mode_pages.caching_page; 3988 break; 3989 } 3990 case SMS_CONTROL_MODE_PAGE: { 3991 3992 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3993 panic("invalid subpage value %d", 3994 page_index->subpage); 3995 3996 /* 3997 * Defaults should be okay here, no calculations 3998 * needed. 3999 */ 4000 memcpy(&lun->mode_pages.control_page[CTL_PAGE_CURRENT], 4001 &control_page_default, 4002 sizeof(control_page_default)); 4003 memcpy(&lun->mode_pages.control_page[ 4004 CTL_PAGE_CHANGEABLE], &control_page_changeable, 4005 sizeof(control_page_changeable)); 4006 memcpy(&lun->mode_pages.control_page[CTL_PAGE_DEFAULT], 4007 &control_page_default, 4008 sizeof(control_page_default)); 4009 memcpy(&lun->mode_pages.control_page[CTL_PAGE_SAVED], 4010 &control_page_default, 4011 sizeof(control_page_default)); 4012 page_index->page_data = 4013 (uint8_t *)lun->mode_pages.control_page; 4014 break; 4015 4016 } 4017 case SMS_VENDOR_SPECIFIC_PAGE:{ 4018 switch (page_index->subpage) { 4019 case PWR_SUBPAGE_CODE: { 4020 struct copan_power_subpage *current_page, 4021 *saved_page; 4022 4023 memcpy(&lun->mode_pages.power_subpage[ 4024 CTL_PAGE_CURRENT], 4025 &power_page_default, 4026 sizeof(power_page_default)); 4027 memcpy(&lun->mode_pages.power_subpage[ 4028 CTL_PAGE_CHANGEABLE], 4029 &power_page_changeable, 4030 sizeof(power_page_changeable)); 4031 memcpy(&lun->mode_pages.power_subpage[ 4032 CTL_PAGE_DEFAULT], 4033 &power_page_default, 4034 sizeof(power_page_default)); 4035 memcpy(&lun->mode_pages.power_subpage[ 4036 CTL_PAGE_SAVED], 4037 &power_page_default, 4038 sizeof(power_page_default)); 4039 page_index->page_data = 4040 (uint8_t *)lun->mode_pages.power_subpage; 4041 4042 current_page = (struct copan_power_subpage *) 4043 (page_index->page_data + 4044 (page_index->page_len * 4045 CTL_PAGE_CURRENT)); 4046 saved_page = (struct copan_power_subpage *) 4047 (page_index->page_data + 4048 (page_index->page_len * 4049 CTL_PAGE_SAVED)); 4050 break; 4051 } 4052 case APS_SUBPAGE_CODE: { 4053 struct copan_aps_subpage *current_page, 4054 *saved_page; 4055 4056 // This gets set multiple times but 4057 // it should always be the same. 
It's 4058 // only done during init so who cares. 4059 index_to_aps_page = i; 4060 4061 memcpy(&lun->mode_pages.aps_subpage[ 4062 CTL_PAGE_CURRENT], 4063 &aps_page_default, 4064 sizeof(aps_page_default)); 4065 memcpy(&lun->mode_pages.aps_subpage[ 4066 CTL_PAGE_CHANGEABLE], 4067 &aps_page_changeable, 4068 sizeof(aps_page_changeable)); 4069 memcpy(&lun->mode_pages.aps_subpage[ 4070 CTL_PAGE_DEFAULT], 4071 &aps_page_default, 4072 sizeof(aps_page_default)); 4073 memcpy(&lun->mode_pages.aps_subpage[ 4074 CTL_PAGE_SAVED], 4075 &aps_page_default, 4076 sizeof(aps_page_default)); 4077 page_index->page_data = 4078 (uint8_t *)lun->mode_pages.aps_subpage; 4079 4080 current_page = (struct copan_aps_subpage *) 4081 (page_index->page_data + 4082 (page_index->page_len * 4083 CTL_PAGE_CURRENT)); 4084 saved_page = (struct copan_aps_subpage *) 4085 (page_index->page_data + 4086 (page_index->page_len * 4087 CTL_PAGE_SAVED)); 4088 break; 4089 } 4090 case DBGCNF_SUBPAGE_CODE: { 4091 struct copan_debugconf_subpage *current_page, 4092 *saved_page; 4093 4094 memcpy(&lun->mode_pages.debugconf_subpage[ 4095 CTL_PAGE_CURRENT], 4096 &debugconf_page_default, 4097 sizeof(debugconf_page_default)); 4098 memcpy(&lun->mode_pages.debugconf_subpage[ 4099 CTL_PAGE_CHANGEABLE], 4100 &debugconf_page_changeable, 4101 sizeof(debugconf_page_changeable)); 4102 memcpy(&lun->mode_pages.debugconf_subpage[ 4103 CTL_PAGE_DEFAULT], 4104 &debugconf_page_default, 4105 sizeof(debugconf_page_default)); 4106 memcpy(&lun->mode_pages.debugconf_subpage[ 4107 CTL_PAGE_SAVED], 4108 &debugconf_page_default, 4109 sizeof(debugconf_page_default)); 4110 page_index->page_data = 4111 (uint8_t *)lun->mode_pages.debugconf_subpage; 4112 4113 current_page = (struct copan_debugconf_subpage *) 4114 (page_index->page_data + 4115 (page_index->page_len * 4116 CTL_PAGE_CURRENT)); 4117 saved_page = (struct copan_debugconf_subpage *) 4118 (page_index->page_data + 4119 (page_index->page_len * 4120 CTL_PAGE_SAVED)); 4121 break; 4122 } 4123 default: 4124 panic("invalid subpage value %d", 4125 page_index->subpage); 4126 break; 4127 } 4128 break; 4129 } 4130 default: 4131 panic("invalid page value %d", 4132 page_index->page_code & SMPH_PC_MASK); 4133 break; 4134 } 4135 } 4136 4137 return (CTL_RETVAL_COMPLETE); 4138} 4139 4140/* 4141 * LUN allocation. 4142 * 4143 * Requirements: 4144 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he 4145 * wants us to allocate the LUN and he can block. 4146 * - ctl_softc is always set 4147 * - be_lun is set if the LUN has a backend (needed for disk LUNs) 4148 * 4149 * Returns 0 for success, non-zero (errno) for failure. 4150 */ 4151static int 4152ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun, 4153 struct ctl_be_lun *const be_lun, struct ctl_id target_id) 4154{ 4155 struct ctl_lun *nlun, *lun; 4156 struct ctl_frontend *fe; 4157 int lun_number, i, lun_malloced; 4158 4159 if (be_lun == NULL) 4160 return (EINVAL); 4161 4162 /* 4163 * We currently only support Direct Access or Processor LUN types. 
4164 */ 4165 switch (be_lun->lun_type) { 4166 case T_DIRECT: 4167 break; 4168 case T_PROCESSOR: 4169 break; 4170 case T_SEQUENTIAL: 4171 case T_CHANGER: 4172 default: 4173 be_lun->lun_config_status(be_lun->be_lun, 4174 CTL_LUN_CONFIG_FAILURE); 4175 return (EINVAL); 4176 } 4177 if (ctl_lun == NULL) { 4178 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK); 4179 lun_malloced = 1; 4180 } else { 4181 lun_malloced = 0; 4182 lun = ctl_lun; 4183 } 4184 4185 memset(lun, 0, sizeof(*lun)); 4186 if (lun_malloced) 4187 lun->flags = CTL_LUN_MALLOCED; 4188 4189 mtx_lock(&ctl_softc->ctl_lock); 4190 /* 4191 * See if the caller requested a particular LUN number. If so, see 4192 * if it is available. Otherwise, allocate the first available LUN. 4193 */ 4194 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4195 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) 4196 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4197 mtx_unlock(&ctl_softc->ctl_lock); 4198 if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) { 4199 printf("ctl: requested LUN ID %d is higher " 4200 "than CTL_MAX_LUNS - 1 (%d)\n", 4201 be_lun->req_lun_id, CTL_MAX_LUNS - 1); 4202 } else { 4203 /* 4204 * XXX KDM return an error, or just assign 4205 * another LUN ID in this case?? 4206 */ 4207 printf("ctl: requested LUN ID %d is already " 4208 "in use\n", be_lun->req_lun_id); 4209 } 4210 if (lun->flags & CTL_LUN_MALLOCED) 4211 free(lun, M_CTL); 4212 be_lun->lun_config_status(be_lun->be_lun, 4213 CTL_LUN_CONFIG_FAILURE); 4214 return (ENOSPC); 4215 } 4216 lun_number = be_lun->req_lun_id; 4217 } else { 4218 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, CTL_MAX_LUNS); 4219 if (lun_number == -1) { 4220 mtx_unlock(&ctl_softc->ctl_lock); 4221 printf("ctl: can't allocate LUN on target %ju, out of " 4222 "LUNs\n", (uintmax_t)target_id.id); 4223 if (lun->flags & CTL_LUN_MALLOCED) 4224 free(lun, M_CTL); 4225 be_lun->lun_config_status(be_lun->be_lun, 4226 CTL_LUN_CONFIG_FAILURE); 4227 return (ENOSPC); 4228 } 4229 } 4230 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4231 4232 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4233 lun->target = target_id; 4234 lun->lun = lun_number; 4235 lun->be_lun = be_lun; 4236 /* 4237 * The processor LUN is always enabled. Disk LUNs come on line 4238 * disabled, and must be enabled by the backend. 4239 */ 4240 lun->flags |= CTL_LUN_DISABLED; 4241 lun->backend = be_lun->be; 4242 be_lun->ctl_lun = lun; 4243 be_lun->lun_id = lun_number; 4244 atomic_add_int(&be_lun->be->num_luns, 1); 4245 if (be_lun->flags & CTL_LUN_FLAG_POWERED_OFF) 4246 lun->flags |= CTL_LUN_STOPPED; 4247 4248 if (be_lun->flags & CTL_LUN_FLAG_INOPERABLE) 4249 lun->flags |= CTL_LUN_INOPERABLE; 4250 4251 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4252 lun->flags |= CTL_LUN_PRIMARY_SC; 4253 4254 lun->ctl_softc = ctl_softc; 4255 TAILQ_INIT(&lun->ooa_queue); 4256 TAILQ_INIT(&lun->blocked_queue); 4257 STAILQ_INIT(&lun->error_list); 4258 4259 /* 4260 * Initialize the mode page index. 4261 */ 4262 ctl_init_page_index(lun); 4263 4264 /* 4265 * Set the poweron UA for all initiators on this LUN only. 4266 */ 4267 for (i = 0; i < CTL_MAX_INITIATORS; i++) 4268 lun->pending_sense[i].ua_pending = CTL_UA_POWERON; 4269 4270 /* 4271 * Now, before we insert this lun on the lun list, set the lun 4272 * inventory changed UA for all other luns.
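 * (Each of those initiators then sees a unit attention on its next command to one of the existing LUNs, which is its cue to re-issue REPORT LUNS and pick up the new inventory.)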
4273 */ 4274 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4275 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 4276 nlun->pending_sense[i].ua_pending |= CTL_UA_LUN_CHANGE; 4277 } 4278 } 4279 4280 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4281 4282 ctl_softc->ctl_luns[lun_number] = lun; 4283 4284 ctl_softc->num_luns++; 4285 4286 /* Setup statistics gathering */ 4287 lun->stats.device_type = be_lun->lun_type; 4288 lun->stats.lun_number = lun_number; 4289 if (lun->stats.device_type == T_DIRECT) 4290 lun->stats.blocksize = be_lun->blocksize; 4291 else 4292 lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE; 4293 for (i = 0;i < CTL_MAX_PORTS;i++) 4294 lun->stats.ports[i].targ_port = i; 4295 4296 mtx_unlock(&ctl_softc->ctl_lock); 4297 4298 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); 4299 4300 /* 4301 * Run through each registered FETD and bring it online if it isn't 4302 * already. Enable the target ID if it hasn't been enabled, and 4303 * enable this particular LUN. 4304 */ 4305 STAILQ_FOREACH(fe, &ctl_softc->fe_list, links) { 4306 int retval; 4307 4308 /* 4309 * XXX KDM this only works for ONE TARGET ID. We'll need 4310 * to do things differently if we go to a multiple target 4311 * ID scheme. 4312 */ 4313 if ((fe->status & CTL_PORT_STATUS_TARG_ONLINE) == 0) { 4314 4315 retval = fe->targ_enable(fe->targ_lun_arg, target_id); 4316 if (retval != 0) { 4317 printf("ctl_alloc_lun: FETD %s port %d " 4318 "returned error %d for targ_enable on " 4319 "target %ju\n", fe->port_name, 4320 fe->targ_port, retval, 4321 (uintmax_t)target_id.id); 4322 } else 4323 fe->status |= CTL_PORT_STATUS_TARG_ONLINE; 4324 } 4325 4326 retval = fe->lun_enable(fe->targ_lun_arg, target_id,lun_number); 4327 if (retval != 0) { 4328 printf("ctl_alloc_lun: FETD %s port %d returned error " 4329 "%d for lun_enable on target %ju lun %d\n", 4330 fe->port_name, fe->targ_port, retval, 4331 (uintmax_t)target_id.id, lun_number); 4332 } else 4333 fe->status |= CTL_PORT_STATUS_LUN_ONLINE; 4334 } 4335 return (0); 4336} 4337 4338/* 4339 * Delete a LUN. 4340 * Assumptions: 4341 * - LUN has already been marked invalid and any pending I/O has been taken 4342 * care of. 4343 */ 4344static int 4345ctl_free_lun(struct ctl_lun *lun) 4346{ 4347 struct ctl_softc *softc; 4348#if 0 4349 struct ctl_frontend *fe; 4350#endif 4351 struct ctl_lun *nlun; 4352 int i; 4353 4354 softc = lun->ctl_softc; 4355 4356 mtx_assert(&softc->ctl_lock, MA_OWNED); 4357 4358 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4359 4360 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4361 4362 softc->ctl_luns[lun->lun] = NULL; 4363 4364 if (!TAILQ_EMPTY(&lun->ooa_queue)) 4365 panic("Freeing a LUN %p with outstanding I/O!!\n", lun); 4366 4367 softc->num_luns--; 4368 4369 /* 4370 * XXX KDM this scheme only works for a single target/multiple LUN 4371 * setup. It needs to be revamped for a multiple target scheme. 4372 * 4373 * XXX KDM this results in fe->lun_disable() getting called twice, 4374 * once when ctl_disable_lun() is called, and a second time here. 4375 * We really need to re-think the LUN disable semantics. There 4376 * should probably be several steps/levels to LUN removal: 4377 * - disable 4378 * - invalidate 4379 * - free 4380 * 4381 * Right now we only have a disable method when communicating to 4382 * the front end ports, at least for individual LUNs. 
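 * (Sketching the intent: a frontend could quiesce new I/O at the disable step, fail outstanding work at invalidate, and only tear down its per-LUN state at free; today disable is the sole knob, so it gets hit twice.)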
4383 */ 4384#if 0 4385 STAILQ_FOREACH(fe, &softc->fe_list, links) { 4386 int retval; 4387 4388 retval = fe->lun_disable(fe->targ_lun_arg, lun->target, 4389 lun->lun); 4390 if (retval != 0) { 4391 printf("ctl_free_lun: FETD %s port %d returned error " 4392 "%d for lun_disable on target %ju lun %jd\n", 4393 fe->port_name, fe->targ_port, retval, 4394 (uintmax_t)lun->target.id, (intmax_t)lun->lun); 4395 } 4396 4397 if (STAILQ_FIRST(&softc->lun_list) == NULL) { 4398 fe->status &= ~CTL_PORT_STATUS_LUN_ONLINE; 4399 4400 retval = fe->targ_disable(fe->targ_lun_arg, lun->target); 4401 if (retval != 0) { 4402 printf("ctl_free_lun: FETD %s port %d " 4403 "returned error %d for targ_disable on " 4404 "target %ju\n", fe->port_name, 4405 fe->targ_port, retval, 4406 (uintmax_t)lun->target.id); 4407 } else 4408 fe->status &= ~CTL_PORT_STATUS_TARG_ONLINE; 4409 4410 if ((fe->status & CTL_PORT_STATUS_TARG_ONLINE) != 0) 4411 continue; 4412 4413#if 0 4414 fe->port_offline(fe->onoff_arg); 4415 fe->status &= ~CTL_PORT_STATUS_ONLINE; 4416#endif 4417 } 4418 } 4419#endif 4420 4421 /* 4422 * Tell the backend to free resources, if this LUN has a backend. 4423 */ 4424 atomic_subtract_int(&lun->be_lun->be->num_luns, 1); 4425 lun->be_lun->lun_shutdown(lun->be_lun->be_lun); 4426 4427 mtx_destroy(&lun->lun_lock); 4428 if (lun->flags & CTL_LUN_MALLOCED) 4429 free(lun, M_CTL); 4430 4431 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4432 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 4433 nlun->pending_sense[i].ua_pending |= CTL_UA_LUN_CHANGE; 4434 } 4435 } 4436 4437 return (0); 4438} 4439 4440static void 4441ctl_create_lun(struct ctl_be_lun *be_lun) 4442{ 4443 struct ctl_softc *ctl_softc; 4444 4445 ctl_softc = control_softc; 4446 4447 /* 4448 * ctl_alloc_lun() should handle all potential failure cases. 4449 */ 4450 ctl_alloc_lun(ctl_softc, NULL, be_lun, ctl_softc->target); 4451} 4452 4453int 4454ctl_add_lun(struct ctl_be_lun *be_lun) 4455{ 4456 struct ctl_softc *ctl_softc = control_softc; 4457 4458 mtx_lock(&ctl_softc->ctl_lock); 4459 STAILQ_INSERT_TAIL(&ctl_softc->pending_lun_queue, be_lun, links); 4460 mtx_unlock(&ctl_softc->ctl_lock); 4461 wakeup(&ctl_softc->pending_lun_queue); 4462 4463 return (0); 4464} 4465 4466int 4467ctl_enable_lun(struct ctl_be_lun *be_lun) 4468{ 4469 struct ctl_softc *ctl_softc; 4470 struct ctl_frontend *fe, *nfe; 4471 struct ctl_lun *lun; 4472 int retval; 4473 4474 ctl_softc = control_softc; 4475 4476 lun = (struct ctl_lun *)be_lun->ctl_lun; 4477 4478 mtx_lock(&ctl_softc->ctl_lock); 4479 mtx_lock(&lun->lun_lock); 4480 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4481 /* 4482 * eh? Why did we get called if the LUN is already 4483 * enabled? 4484 */ 4485 mtx_unlock(&lun->lun_lock); 4486 mtx_unlock(&ctl_softc->ctl_lock); 4487 return (0); 4488 } 4489 lun->flags &= ~CTL_LUN_DISABLED; 4490 mtx_unlock(&lun->lun_lock); 4491 4492 for (fe = STAILQ_FIRST(&ctl_softc->fe_list); fe != NULL; fe = nfe) { 4493 nfe = STAILQ_NEXT(fe, links); 4494 4495 /* 4496 * Drop the lock while we call the FETD's enable routine. 4497 * This can lead to a callback into CTL (at least in the 4498 * case of the internal initiator frontend).
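 * Holding ctl_lock across that callback could recurse back into CTL and deadlock, which is why we drop and re-take it around lun_enable() below.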
4499 */ 4500 mtx_unlock(&ctl_softc->ctl_lock); 4501 retval = fe->lun_enable(fe->targ_lun_arg, lun->target, lun->lun); 4502 mtx_lock(&ctl_softc->ctl_lock); 4503 if (retval != 0) { 4504 printf("%s: FETD %s port %d returned error " 4505 "%d for lun_enable on target %ju lun %jd\n", 4506 __func__, fe->port_name, fe->targ_port, retval, 4507 (uintmax_t)lun->target.id, (intmax_t)lun->lun); 4508 } 4509#if 0 4510 else { 4511 /* NOTE: TODO: why does lun enable affect port status? */ 4512 fe->status |= CTL_PORT_STATUS_LUN_ONLINE; 4513 } 4514#endif 4515 } 4516 4517 mtx_unlock(&ctl_softc->ctl_lock); 4518 4519 return (0); 4520} 4521 4522int 4523ctl_disable_lun(struct ctl_be_lun *be_lun) 4524{ 4525 struct ctl_softc *ctl_softc; 4526 struct ctl_frontend *fe; 4527 struct ctl_lun *lun; 4528 int retval; 4529 4530 ctl_softc = control_softc; 4531 4532 lun = (struct ctl_lun *)be_lun->ctl_lun; 4533 4534 mtx_lock(&ctl_softc->ctl_lock); 4535 mtx_lock(&lun->lun_lock); 4536 if (lun->flags & CTL_LUN_DISABLED) { 4537 mtx_unlock(&lun->lun_lock); 4538 mtx_unlock(&ctl_softc->ctl_lock); 4539 return (0); 4540 } 4541 lun->flags |= CTL_LUN_DISABLED; 4542 mtx_unlock(&lun->lun_lock); 4543 4544 STAILQ_FOREACH(fe, &ctl_softc->fe_list, links) { 4545 mtx_unlock(&ctl_softc->ctl_lock); 4546 /* 4547 * Drop the lock before we call the frontend's disable 4548 * routine, to avoid lock order reversals. 4549 * 4550 * XXX KDM what happens if the frontend list changes while 4551 * we're traversing it? It's unlikely, but should be handled. 4552 */ 4553 retval = fe->lun_disable(fe->targ_lun_arg, lun->target, 4554 lun->lun); 4555 mtx_lock(&ctl_softc->ctl_lock); 4556 if (retval != 0) { 4557 printf("%s: FETD %s port %d returned error " 4558 "%d for lun_disable on target %ju lun %jd\n", 4559 __func__, fe->port_name, fe->targ_port, retval, 4560 (uintmax_t)lun->target.id, (intmax_t)lun->lun); 4561 } 4562 } 4563 4564 mtx_unlock(&ctl_softc->ctl_lock); 4565 4566 return (0); 4567} 4568 4569int 4570ctl_start_lun(struct ctl_be_lun *be_lun) 4571{ 4572 struct ctl_softc *ctl_softc; 4573 struct ctl_lun *lun; 4574 4575 ctl_softc = control_softc; 4576 4577 lun = (struct ctl_lun *)be_lun->ctl_lun; 4578 4579 mtx_lock(&lun->lun_lock); 4580 lun->flags &= ~CTL_LUN_STOPPED; 4581 mtx_unlock(&lun->lun_lock); 4582 4583 return (0); 4584} 4585 4586int 4587ctl_stop_lun(struct ctl_be_lun *be_lun) 4588{ 4589 struct ctl_softc *ctl_softc; 4590 struct ctl_lun *lun; 4591 4592 ctl_softc = control_softc; 4593 4594 lun = (struct ctl_lun *)be_lun->ctl_lun; 4595 4596 mtx_lock(&lun->lun_lock); 4597 lun->flags |= CTL_LUN_STOPPED; 4598 mtx_unlock(&lun->lun_lock); 4599 4600 return (0); 4601} 4602 4603int 4604ctl_lun_offline(struct ctl_be_lun *be_lun) 4605{ 4606 struct ctl_softc *ctl_softc; 4607 struct ctl_lun *lun; 4608 4609 ctl_softc = control_softc; 4610 4611 lun = (struct ctl_lun *)be_lun->ctl_lun; 4612 4613 mtx_lock(&lun->lun_lock); 4614 lun->flags |= CTL_LUN_OFFLINE; 4615 mtx_unlock(&lun->lun_lock); 4616 4617 return (0); 4618} 4619 4620int 4621ctl_lun_online(struct ctl_be_lun *be_lun) 4622{ 4623 struct ctl_softc *ctl_softc; 4624 struct ctl_lun *lun; 4625 4626 ctl_softc = control_softc; 4627 4628 lun = (struct ctl_lun *)be_lun->ctl_lun; 4629 4630 mtx_lock(&lun->lun_lock); 4631 lun->flags &= ~CTL_LUN_OFFLINE; 4632 mtx_unlock(&lun->lun_lock); 4633 4634 return (0); 4635} 4636 4637int 4638ctl_invalidate_lun(struct ctl_be_lun *be_lun) 4639{ 4640 struct ctl_softc *ctl_softc; 4641 struct ctl_lun *lun; 4642 4643 ctl_softc = control_softc; 4644 4645 lun = (struct ctl_lun *)be_lun->ctl_lun;
4646 4647 mtx_lock(&lun->lun_lock); 4648 4649 /* 4650 * The LUN needs to be disabled before it can be marked invalid. 4651 */ 4652 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4653 mtx_unlock(&lun->lun_lock); 4654 return (-1); 4655 } 4656 /* 4657 * Mark the LUN invalid. 4658 */ 4659 lun->flags |= CTL_LUN_INVALID; 4660 4661 /* 4662 * If there is nothing in the OOA queue, go ahead and free the LUN. 4663 * If we have something in the OOA queue, we'll free it when the 4664 * last I/O completes. 4665 */ 4666 if (TAILQ_EMPTY(&lun->ooa_queue)) { 4667 mtx_unlock(&lun->lun_lock); 4668 mtx_lock(&ctl_softc->ctl_lock); 4669 ctl_free_lun(lun); 4670 mtx_unlock(&ctl_softc->ctl_lock); 4671 } else 4672 mtx_unlock(&lun->lun_lock); 4673 4674 return (0); 4675} 4676 4677int 4678ctl_lun_inoperable(struct ctl_be_lun *be_lun) 4679{ 4680 struct ctl_softc *ctl_softc; 4681 struct ctl_lun *lun; 4682 4683 ctl_softc = control_softc; 4684 lun = (struct ctl_lun *)be_lun->ctl_lun; 4685 4686 mtx_lock(&lun->lun_lock); 4687 lun->flags |= CTL_LUN_INOPERABLE; 4688 mtx_unlock(&lun->lun_lock); 4689 4690 return (0); 4691} 4692 4693int 4694ctl_lun_operable(struct ctl_be_lun *be_lun) 4695{ 4696 struct ctl_softc *ctl_softc; 4697 struct ctl_lun *lun; 4698 4699 ctl_softc = control_softc; 4700 lun = (struct ctl_lun *)be_lun->ctl_lun; 4701 4702 mtx_lock(&lun->lun_lock); 4703 lun->flags &= ~CTL_LUN_INOPERABLE; 4704 mtx_unlock(&lun->lun_lock); 4705 4706 return (0); 4707} 4708 4709int 4710ctl_lun_power_lock(struct ctl_be_lun *be_lun, struct ctl_nexus *nexus, 4711 int lock) 4712{ 4713 struct ctl_softc *softc; 4714 struct ctl_lun *lun; 4715 struct copan_aps_subpage *current_sp; 4716 struct ctl_page_index *page_index; 4717 int i; 4718 4719 softc = control_softc; 4720 4721 mtx_lock(&softc->ctl_lock); 4722 4723 lun = (struct ctl_lun *)be_lun->ctl_lun; 4724 mtx_lock(&lun->lun_lock); 4725 4726 page_index = NULL; 4727 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 4728 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) != 4729 APS_PAGE_CODE) 4730 continue; 4731 4732 if (lun->mode_pages.index[i].subpage != APS_SUBPAGE_CODE) 4733 continue; 4734 page_index = &lun->mode_pages.index[i]; 4735 } 4736 4737 if (page_index == NULL) { 4738 mtx_unlock(&lun->lun_lock); 4739 mtx_unlock(&softc->ctl_lock); 4740 printf("%s: APS subpage not found for lun %ju!\n", __func__, 4741 (uintmax_t)lun->lun); 4742 return (1); 4743 } 4744#if 0 4745 if ((softc->aps_locked_lun != 0) 4746 && (softc->aps_locked_lun != lun->lun)) { 4747 printf("%s: attempt to lock LUN %llu when %llu is already " 4748 "locked\n", __func__, (unsigned long long)lun->lun, (unsigned long long)softc->aps_locked_lun); 4749 mtx_unlock(&lun->lun_lock); 4750 mtx_unlock(&softc->ctl_lock); 4751 return (1); 4752 } 4753#endif 4754 4755 current_sp = (struct copan_aps_subpage *)(page_index->page_data + 4756 (page_index->page_len * CTL_PAGE_CURRENT)); 4757 4758 if (lock != 0) { 4759 current_sp->lock_active = APS_LOCK_ACTIVE; 4760 softc->aps_locked_lun = lun->lun; 4761 } else { 4762 current_sp->lock_active = 0; 4763 softc->aps_locked_lun = 0; 4764 } 4765 4766 4767 /* 4768 * If we're in HA mode, try to send the lock message to the other 4769 * side.
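 * (ctl_is_single is clear when a peer controller is present; sending CTL_MSG_APS_LOCK keeps the peer's idea of which LUN holds the APS lock consistent with ours.)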
4770 */ 4771 if (ctl_is_single == 0) { 4772 int isc_retval; 4773 union ctl_ha_msg lock_msg; 4774 4775 lock_msg.hdr.nexus = *nexus; 4776 lock_msg.hdr.msg_type = CTL_MSG_APS_LOCK; 4777 if (lock != 0) 4778 lock_msg.aps.lock_flag = 1; 4779 else 4780 lock_msg.aps.lock_flag = 0; 4781 isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &lock_msg, 4782 sizeof(lock_msg), 0); 4783 if (isc_retval > CTL_HA_STATUS_SUCCESS) { 4784 printf("%s: APS (lock=%d) error returned from " 4785 "ctl_ha_msg_send: %d\n", __func__, lock, isc_retval); 4786 mtx_unlock(&lun->lun_lock); 4787 mtx_unlock(&softc->ctl_lock); 4788 return (1); 4789 } 4790 } 4791 4792 mtx_unlock(&lun->lun_lock); 4793 mtx_unlock(&softc->ctl_lock); 4794 4795 return (0); 4796} 4797 4798void 4799ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 4800{ 4801 struct ctl_lun *lun; 4802 struct ctl_softc *softc; 4803 int i; 4804 4805 softc = control_softc; 4806 4807 lun = (struct ctl_lun *)be_lun->ctl_lun; 4808 4809 mtx_lock(&lun->lun_lock); 4810 4811 for (i = 0; i < CTL_MAX_INITIATORS; i++) 4812 lun->pending_sense[i].ua_pending |= CTL_UA_CAPACITY_CHANGED; 4813 4814 mtx_unlock(&lun->lun_lock); 4815} 4816 4817/* 4818 * Backend "memory move is complete" callback for requests that never 4819 * make it down to say RAIDCore's configuration code. 4820 */ 4821int 4822ctl_config_move_done(union ctl_io *io) 4823{ 4824 int retval; 4825 4826 retval = CTL_RETVAL_COMPLETE; 4827 4828 4829 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 4830 /* 4831 * XXX KDM this shouldn't happen, but what if it does? 4832 */ 4833 if (io->io_hdr.io_type != CTL_IO_SCSI) 4834 panic("I/O type isn't CTL_IO_SCSI!"); 4835 4836 if ((io->io_hdr.port_status == 0) 4837 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) 4838 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) 4839 io->io_hdr.status = CTL_SUCCESS; 4840 else if ((io->io_hdr.port_status != 0) 4841 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) 4842 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)){ 4843 /* 4844 * For hardware error sense keys, the sense key 4845 * specific value is defined to be a retry count, 4846 * but we use it to pass back an internal FETD 4847 * error code. XXX KDM Hopefully the FETD is only 4848 * using 16 bits for an error code, since that's 4849 * all the space we have in the sks field. 4850 */ 4851 ctl_set_internal_failure(&io->scsiio, 4852 /*sks_valid*/ 1, 4853 /*retry_count*/ 4854 io->io_hdr.port_status); 4855 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 4856 free(io->scsiio.kern_data_ptr, M_CTL); 4857 ctl_done(io); 4858 goto bailout; 4859 } 4860 4861 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) 4862 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) 4863 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 4864 /* 4865 * XXX KDM just assuming a single pointer here, and not a 4866 * S/G list. If we start using S/G lists for config data, 4867 * we'll need to know how to clean them up here as well. 4868 */ 4869 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 4870 free(io->scsiio.kern_data_ptr, M_CTL); 4871 /* Hopefully the user has already set the status... */ 4872 ctl_done(io); 4873 } else { 4874 /* 4875 * XXX KDM now we need to continue data movement. Some 4876 * options: 4877 * - call ctl_scsiio() again? We don't do this for data 4878 * writes, because for those at least we know ahead of 4879 * time where the write will go and how long it is. 
For 4880 * config writes, though, that information is largely 4881 * contained within the write itself, thus we need to 4882 * parse out the data again. 4883 * 4884 * - Call some other function once the data is in? 4885 */ 4886 4887 /* 4888 * XXX KDM call ctl_scsiio() again for now, and check flag 4889 * bits to see whether we're allocated or not. 4890 */ 4891 retval = ctl_scsiio(&io->scsiio); 4892 } 4893bailout: 4894 return (retval); 4895} 4896 4897/* 4898 * This gets called by a backend driver when it is done with a 4899 * data_submit method. 4900 */ 4901void 4902ctl_data_submit_done(union ctl_io *io) 4903{ 4904 /* 4905 * If the IO_CONT flag is set, we need to call the supplied 4906 * function to continue processing the I/O, instead of completing 4907 * the I/O just yet. 4908 * 4909 * If there is an error, though, we don't want to keep processing. 4910 * Instead, just send status back to the initiator. 4911 */ 4912 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 4913 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 4914 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 4915 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 4916 io->scsiio.io_cont(io); 4917 return; 4918 } 4919 ctl_done(io); 4920} 4921 4922/* 4923 * This gets called by a backend driver when it is done with a 4924 * configuration write. 4925 */ 4926void 4927ctl_config_write_done(union ctl_io *io) 4928{ 4929 /* 4930 * If the IO_CONT flag is set, we need to call the supplied 4931 * function to continue processing the I/O, instead of completing 4932 * the I/O just yet. 4933 * 4934 * If there is an error, though, we don't want to keep processing. 4935 * Instead, just send status back to the initiator. 4936 */ 4937 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) 4938 && (((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE) 4939 || ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS))) { 4940 io->scsiio.io_cont(io); 4941 return; 4942 } 4943 /* 4944 * Since a configuration write can be done for commands that actually 4945 * have data allocated, like write buffer, and commands that have 4946 * no data, like start/stop unit, we need to check here. 4947 */ 4948 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) 4949 free(io->scsiio.kern_data_ptr, M_CTL); 4950 ctl_done(io); 4951} 4952 4953/* 4954 * SCSI release command. 
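 * Both RELEASE(6) and RELEASE(10) funnel through here; only plain LUN reservations are supported, so the extent and third-party variants get rejected in the CDB validation below.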
4955 */ 4956int 4957ctl_scsi_release(struct ctl_scsiio *ctsio) 4958{ 4959 int length, longid, thirdparty_id, resv_id; 4960 struct ctl_softc *ctl_softc; 4961 struct ctl_lun *lun; 4962 4963 length = 0; 4964 resv_id = 0; 4965 4966 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 4967 4968 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 4969 ctl_softc = control_softc; 4970 4971 switch (ctsio->cdb[0]) { 4972 case RELEASE: { 4973 struct scsi_release *cdb; 4974 4975 cdb = (struct scsi_release *)ctsio->cdb; 4976 if ((cdb->byte2 & 0x1f) != 0) { 4977 ctl_set_invalid_field(ctsio, 4978 /*sks_valid*/ 1, 4979 /*command*/ 1, 4980 /*field*/ 1, 4981 /*bit_valid*/ 0, 4982 /*bit*/ 0); 4983 ctl_done((union ctl_io *)ctsio); 4984 return (CTL_RETVAL_COMPLETE); 4985 } 4986 break; 4987 } 4988 case RELEASE_10: { 4989 struct scsi_release_10 *cdb; 4990 4991 cdb = (struct scsi_release_10 *)ctsio->cdb; 4992 4993 if ((cdb->byte2 & SR10_EXTENT) != 0) { 4994 ctl_set_invalid_field(ctsio, 4995 /*sks_valid*/ 1, 4996 /*command*/ 1, 4997 /*field*/ 1, 4998 /*bit_valid*/ 1, 4999 /*bit*/ 0); 5000 ctl_done((union ctl_io *)ctsio); 5001 return (CTL_RETVAL_COMPLETE); 5002 5003 } 5004 5005 if ((cdb->byte2 & SR10_3RDPTY) != 0) { 5006 ctl_set_invalid_field(ctsio, 5007 /*sks_valid*/ 1, 5008 /*command*/ 1, 5009 /*field*/ 1, 5010 /*bit_valid*/ 1, 5011 /*bit*/ 4); 5012 ctl_done((union ctl_io *)ctsio); 5013 return (CTL_RETVAL_COMPLETE); 5014 } 5015 5016 if (cdb->byte2 & SR10_LONGID) 5017 longid = 1; 5018 else 5019 thirdparty_id = cdb->thirdparty_id; 5020 5021 resv_id = cdb->resv_id; 5022 length = scsi_2btoul(cdb->length); 5023 break; 5024 } 5025 } 5026 5027 5028 /* 5029 * XXX KDM right now, we only support LUN reservation. We don't 5030 * support 3rd party reservations, or extent reservations, which 5031 * might actually need the parameter list. If we've gotten this 5032 * far, we've got a LUN reservation. Anything else got kicked out 5033 * above. So, according to SPC, ignore the length. 5034 */ 5035 length = 0; 5036 5037 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5038 && (length > 0)) { 5039 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5040 ctsio->kern_data_len = length; 5041 ctsio->kern_total_len = length; 5042 ctsio->kern_data_resid = 0; 5043 ctsio->kern_rel_offset = 0; 5044 ctsio->kern_sg_entries = 0; 5045 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5046 ctsio->be_move_done = ctl_config_move_done; 5047 ctl_datamove((union ctl_io *)ctsio); 5048 5049 return (CTL_RETVAL_COMPLETE); 5050 } 5051 5052 if (length > 0) 5053 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 5054 5055 mtx_lock(&lun->lun_lock); 5056 5057 /* 5058 * According to SPC, it is not an error for an initiator to attempt 5059 * to release a reservation on a LUN that isn't reserved, or that 5060 * is reserved by another initiator. The reservation can only be 5061 * released, though, by the initiator who made it or by one of 5062 * several reset type events.
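 * (Hence the nexus comparison below: a RELEASE from any other initiator simply completes with GOOD status and leaves the reservation in place.)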
5063 */ 5064 if (lun->flags & CTL_LUN_RESERVED) { 5065 if ((ctsio->io_hdr.nexus.initid.id == lun->rsv_nexus.initid.id) 5066 && (ctsio->io_hdr.nexus.targ_port == lun->rsv_nexus.targ_port) 5067 && (ctsio->io_hdr.nexus.targ_target.id == 5068 lun->rsv_nexus.targ_target.id)) { 5069 lun->flags &= ~CTL_LUN_RESERVED; 5070 } 5071 } 5072 5073 mtx_unlock(&lun->lun_lock); 5074 5075 ctsio->scsi_status = SCSI_STATUS_OK; 5076 ctsio->io_hdr.status = CTL_SUCCESS; 5077 5078 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5079 free(ctsio->kern_data_ptr, M_CTL); 5080 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5081 } 5082 5083 ctl_done((union ctl_io *)ctsio); 5084 return (CTL_RETVAL_COMPLETE); 5085} 5086 5087int 5088ctl_scsi_reserve(struct ctl_scsiio *ctsio) 5089{ 5090 int extent, thirdparty, longid; 5091 int resv_id, length; 5092 uint64_t thirdparty_id; 5093 struct ctl_softc *ctl_softc; 5094 struct ctl_lun *lun; 5095 5096 extent = 0; 5097 thirdparty = 0; 5098 longid = 0; 5099 resv_id = 0; 5100 length = 0; 5101 thirdparty_id = 0; 5102 5103 CTL_DEBUG_PRINT(("ctl_reserve\n")); 5104 5105 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5106 ctl_softc = control_softc; 5107 5108 switch (ctsio->cdb[0]) { 5109 case RESERVE: { 5110 struct scsi_reserve *cdb; 5111 5112 cdb = (struct scsi_reserve *)ctsio->cdb; 5113 if ((cdb->byte2 & 0x1f) != 0) { 5114 ctl_set_invalid_field(ctsio, 5115 /*sks_valid*/ 1, 5116 /*command*/ 1, 5117 /*field*/ 1, 5118 /*bit_valid*/ 0, 5119 /*bit*/ 0); 5120 ctl_done((union ctl_io *)ctsio); 5121 return (CTL_RETVAL_COMPLETE); 5122 } 5123 resv_id = cdb->resv_id; 5124 length = scsi_2btoul(cdb->length); 5125 break; 5126 } 5127 case RESERVE_10: { 5128 struct scsi_reserve_10 *cdb; 5129 5130 cdb = (struct scsi_reserve_10 *)ctsio->cdb; 5131 5132 if ((cdb->byte2 & SR10_EXTENT) != 0) { 5133 ctl_set_invalid_field(ctsio, 5134 /*sks_valid*/ 1, 5135 /*command*/ 1, 5136 /*field*/ 1, 5137 /*bit_valid*/ 1, 5138 /*bit*/ 0); 5139 ctl_done((union ctl_io *)ctsio); 5140 return (CTL_RETVAL_COMPLETE); 5141 } 5142 if ((cdb->byte2 & SR10_3RDPTY) != 0) { 5143 ctl_set_invalid_field(ctsio, 5144 /*sks_valid*/ 1, 5145 /*command*/ 1, 5146 /*field*/ 1, 5147 /*bit_valid*/ 1, 5148 /*bit*/ 4); 5149 ctl_done((union ctl_io *)ctsio); 5150 return (CTL_RETVAL_COMPLETE); 5151 } 5152 if (cdb->byte2 & SR10_LONGID) 5153 longid = 1; 5154 else 5155 thirdparty_id = cdb->thirdparty_id; 5156 5157 resv_id = cdb->resv_id; 5158 length = scsi_2btoul(cdb->length); 5159 break; 5160 } 5161 } 5162 5163 /* 5164 * XXX KDM right now, we only support LUN reservation. We don't 5165 * support 3rd party reservations, or extent reservations, which 5166 * might actually need the parameter list. If we've gotten this 5167 * far, we've got a LUN reservation. Anything else got kicked out 5168 * above. So, according to SPC, ignore the length. 
5169 */ 5170 length = 0; 5171 5172 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5173 && (length > 0)) { 5174 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5175 ctsio->kern_data_len = length; 5176 ctsio->kern_total_len = length; 5177 ctsio->kern_data_resid = 0; 5178 ctsio->kern_rel_offset = 0; 5179 ctsio->kern_sg_entries = 0; 5180 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5181 ctsio->be_move_done = ctl_config_move_done; 5182 ctl_datamove((union ctl_io *)ctsio); 5183 5184 return (CTL_RETVAL_COMPLETE); 5185 } 5186 5187 if (length > 0) 5188 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 5189 5190 mtx_lock(&lun->lun_lock); 5191 if (lun->flags & CTL_LUN_RESERVED) { 5192 if ((ctsio->io_hdr.nexus.initid.id != lun->rsv_nexus.initid.id) 5193 || (ctsio->io_hdr.nexus.targ_port != lun->rsv_nexus.targ_port) 5194 || (ctsio->io_hdr.nexus.targ_target.id != 5195 lun->rsv_nexus.targ_target.id)) { 5196 ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT; 5197 ctsio->io_hdr.status = CTL_SCSI_ERROR; 5198 goto bailout; 5199 } 5200 } 5201 5202 lun->flags |= CTL_LUN_RESERVED; 5203 lun->rsv_nexus = ctsio->io_hdr.nexus; 5204 5205 ctsio->scsi_status = SCSI_STATUS_OK; 5206 ctsio->io_hdr.status = CTL_SUCCESS; 5207 5208bailout: 5209 mtx_unlock(&lun->lun_lock); 5210 5211 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5212 free(ctsio->kern_data_ptr, M_CTL); 5213 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5214 } 5215 5216 ctl_done((union ctl_io *)ctsio); 5217 return (CTL_RETVAL_COMPLETE); 5218} 5219 5220int 5221ctl_start_stop(struct ctl_scsiio *ctsio) 5222{ 5223 struct scsi_start_stop_unit *cdb; 5224 struct ctl_lun *lun; 5225 struct ctl_softc *ctl_softc; 5226 int retval; 5227 5228 CTL_DEBUG_PRINT(("ctl_start_stop\n")); 5229 5230 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5231 ctl_softc = control_softc; 5232 retval = 0; 5233 5234 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; 5235 5236 /* 5237 * XXX KDM 5238 * We don't support the immediate bit on a stop unit. In order to 5239 * do that, we would need to code up a way to know that a stop is 5240 * pending, and hold off any new commands until it completes, one 5241 * way or another. Then we could accept or reject those commands 5242 * depending on its status. We would almost need to do the reverse 5243 * of what we do below for an immediate start -- return the copy of 5244 * the ctl_io to the FETD with status to send to the host (and to 5245 * free the copy!) and then free the original I/O once the stop 5246 * actually completes. That way, the OOA queue mechanism can work 5247 * to block commands that shouldn't proceed. Another alternative 5248 * would be to put the copy in the queue in place of the original, 5249 * and return the original back to the caller. That could be 5250 * slightly safer. 5251 */ 5252 if ((cdb->byte2 & SSS_IMMED) 5253 && ((cdb->how & SSS_START) == 0)) { 5254 ctl_set_invalid_field(ctsio, 5255 /*sks_valid*/ 1, 5256 /*command*/ 1, 5257 /*field*/ 1, 5258 /*bit_valid*/ 1, 5259 /*bit*/ 0); 5260 ctl_done((union ctl_io *)ctsio); 5261 return (CTL_RETVAL_COMPLETE); 5262 } 5263 5264 /* 5265 * We don't support the power conditions field. We need to check 5266 * this prior to checking the load/eject and start/stop bits.
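 * (Only the START_VALID power condition code is accepted; a request to change power state through this field is bounced with INVALID FIELD IN CDB.)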
5267 */ 5268 if ((cdb->how & SSS_PC_MASK) != SSS_PC_START_VALID) { 5269 ctl_set_invalid_field(ctsio, 5270 /*sks_valid*/ 1, 5271 /*command*/ 1, 5272 /*field*/ 4, 5273 /*bit_valid*/ 1, 5274 /*bit*/ 4); 5275 ctl_done((union ctl_io *)ctsio); 5276 return (CTL_RETVAL_COMPLETE); 5277 } 5278 5279 /* 5280 * Media isn't removable, so we can't load or eject it. 5281 */ 5282 if ((cdb->how & SSS_LOEJ) != 0) { 5283 ctl_set_invalid_field(ctsio, 5284 /*sks_valid*/ 1, 5285 /*command*/ 1, 5286 /*field*/ 4, 5287 /*bit_valid*/ 1, 5288 /*bit*/ 1); 5289 ctl_done((union ctl_io *)ctsio); 5290 return (CTL_RETVAL_COMPLETE); 5291 } 5292 5293 if ((lun->flags & CTL_LUN_PR_RESERVED) 5294 && ((cdb->how & SSS_START)==0)) { 5295 uint32_t residx; 5296 5297 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 5298 if (!lun->per_res[residx].registered 5299 || (lun->pr_res_idx!=residx && lun->res_type < 4)) { 5300 5301 ctl_set_reservation_conflict(ctsio); 5302 ctl_done((union ctl_io *)ctsio); 5303 return (CTL_RETVAL_COMPLETE); 5304 } 5305 } 5306 5307 /* 5308 * If there is no backend on this device, we can't start or stop 5309 * it. In theory we shouldn't get any start/stop commands in the 5310 * first place at this level if the LUN doesn't have a backend. 5311 * That should get stopped by the command decode code. 5312 */ 5313 if (lun->backend == NULL) { 5314 ctl_set_invalid_opcode(ctsio); 5315 ctl_done((union ctl_io *)ctsio); 5316 return (CTL_RETVAL_COMPLETE); 5317 } 5318 5319 /* 5320 * XXX KDM Copan-specific offline behavior. 5321 * Figure out a reasonable way to port this? 5322 */ 5323#ifdef NEEDTOPORT 5324 mtx_lock(&lun->lun_lock); 5325 5326 if (((cdb->byte2 & SSS_ONOFFLINE) == 0) 5327 && (lun->flags & CTL_LUN_OFFLINE)) { 5328 /* 5329 * If the LUN is offline, and the on/offline bit isn't set, 5330 * reject the start or stop. Otherwise, let it through. 5331 */ 5332 mtx_unlock(&lun->lun_lock); 5333 ctl_set_lun_not_ready(ctsio); 5334 ctl_done((union ctl_io *)ctsio); 5335 } else { 5336 mtx_unlock(&lun->lun_lock); 5337#endif /* NEEDTOPORT */ 5338 /* 5339 * This could be a start or a stop when we're online, 5340 * or a stop/offline or start/online. A start or stop when 5341 * we're offline is covered in the case above. 5342 */ 5343 /* 5344 * In the non-immediate case, we send the request to 5345 * the backend and return status to the user when 5346 * it is done. 5347 * 5348 * In the immediate case, we allocate a new ctl_io 5349 * to hold a copy of the request, and send that to 5350 * the backend. We then set good status on the 5351 * user's request and return it immediately. 5352 */ 5353 if (cdb->byte2 & SSS_IMMED) { 5354 union ctl_io *new_io; 5355 5356 new_io = ctl_alloc_io(ctsio->io_hdr.pool); 5357 if (new_io == NULL) { 5358 ctl_set_busy(ctsio); 5359 ctl_done((union ctl_io *)ctsio); 5360 } else { 5361 ctl_copy_io((union ctl_io *)ctsio, 5362 new_io); 5363 retval = lun->backend->config_write(new_io); 5364 ctl_set_success(ctsio); 5365 ctl_done((union ctl_io *)ctsio); 5366 } 5367 } else { 5368 retval = lun->backend->config_write( 5369 (union ctl_io *)ctsio); 5370 } 5371#ifdef NEEDTOPORT 5372 } 5373#endif 5374 return (retval); 5375} 5376 5377/* 5378 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 5379 * we don't really do anything with the LBA and length fields if the user 5380 * passes them in. Instead we'll just flush out the cache for the entire 5381 * LUN. 
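 * (For reference, the layouts parsed below: the 10-byte CDB carries a 4-byte begin_lba and a 2-byte lb_count, while the 16-byte CDB widens those to 8 and 4 bytes; byte2 holds the IMMED and RELADR bits in both forms.)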
5382 */ 5383int 5384ctl_sync_cache(struct ctl_scsiio *ctsio) 5385{ 5386 struct ctl_lun *lun; 5387 struct ctl_softc *ctl_softc; 5388 uint64_t starting_lba; 5389 uint32_t block_count; 5390 int reladr, immed; 5391 int retval; 5392 5393 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 5394 5395 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5396 ctl_softc = control_softc; 5397 retval = 0; 5398 reladr = 0; 5399 immed = 0; 5400 5401 switch (ctsio->cdb[0]) { 5402 case SYNCHRONIZE_CACHE: { 5403 struct scsi_sync_cache *cdb; 5404 cdb = (struct scsi_sync_cache *)ctsio->cdb; 5405 5406 if (cdb->byte2 & SSC_RELADR) 5407 reladr = 1; 5408 5409 if (cdb->byte2 & SSC_IMMED) 5410 immed = 1; 5411 5412 starting_lba = scsi_4btoul(cdb->begin_lba); 5413 block_count = scsi_2btoul(cdb->lb_count); 5414 break; 5415 } 5416 case SYNCHRONIZE_CACHE_16: { 5417 struct scsi_sync_cache_16 *cdb; 5418 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5419 5420 if (cdb->byte2 & SSC_RELADR) 5421 reladr = 1; 5422 5423 if (cdb->byte2 & SSC_IMMED) 5424 immed = 1; 5425 5426 starting_lba = scsi_8btou64(cdb->begin_lba); 5427 block_count = scsi_4btoul(cdb->lb_count); 5428 break; 5429 } 5430 default: 5431 ctl_set_invalid_opcode(ctsio); 5432 ctl_done((union ctl_io *)ctsio); 5433 goto bailout; 5434 break; /* NOTREACHED */ 5435 } 5436 5437 if (immed) { 5438 /* 5439 * We don't support the immediate bit. Since it's in the 5440 * same place for the 10 and 16 byte SYNCHRONIZE CACHE 5441 * commands, we can just return the same error in either 5442 * case. 5443 */ 5444 ctl_set_invalid_field(ctsio, 5445 /*sks_valid*/ 1, 5446 /*command*/ 1, 5447 /*field*/ 1, 5448 /*bit_valid*/ 1, 5449 /*bit*/ 1); 5450 ctl_done((union ctl_io *)ctsio); 5451 goto bailout; 5452 } 5453 5454 if (reladr) { 5455 /* 5456 * We don't support the reladr bit either. It can only be 5457 * used with linked commands, and we don't support linked 5458 * commands. Since the bit is in the same place for the 5459 * 10 and 16 byte SYNCHRONIZE CACHE commands, we can 5460 * just return the same error in either case. 5461 */ 5462 ctl_set_invalid_field(ctsio, 5463 /*sks_valid*/ 1, 5464 /*command*/ 1, 5465 /*field*/ 1, 5466 /*bit_valid*/ 1, 5467 /*bit*/ 0); 5468 ctl_done((union ctl_io *)ctsio); 5469 goto bailout; 5470 } 5471 5472 /* 5473 * We check the LBA and length, but don't do anything with them. 5474 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5475 * get flushed. This check will just help satisfy anyone who wants 5476 * to see an error for an out of range LBA. 5477 */ 5478 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5479 ctl_set_lba_out_of_range(ctsio); 5480 ctl_done((union ctl_io *)ctsio); 5481 goto bailout; 5482 } 5483 5484 /* 5485 * If this LUN has no backend, we can't flush the cache anyway. 5486 */ 5487 if (lun->backend == NULL) { 5488 ctl_set_invalid_opcode(ctsio); 5489 ctl_done((union ctl_io *)ctsio); 5490 goto bailout; 5491 } 5492 5493 /* 5494 * Check to see whether we're configured to send the SYNCHRONIZE 5495 * CACHE command directly to the back end.
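 * (CTL_FLAG_REAL_SYNC means real flushes are enabled; even then only every sync_interval-th SYNCHRONIZE CACHE is handed to the backend, per the counter below, and the rest complete immediately with good status.)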
5496 */ 5497 mtx_lock(&lun->lun_lock); 5498 if ((ctl_softc->flags & CTL_FLAG_REAL_SYNC) 5499 && (++(lun->sync_count) >= lun->sync_interval)) { 5500 lun->sync_count = 0; 5501 mtx_unlock(&lun->lun_lock); 5502 retval = lun->backend->config_write((union ctl_io *)ctsio); 5503 } else { 5504 mtx_unlock(&lun->lun_lock); 5505 ctl_set_success(ctsio); 5506 ctl_done((union ctl_io *)ctsio); 5507 } 5508 5509bailout: 5510 5511 return (retval); 5512} 5513 5514int 5515ctl_format(struct ctl_scsiio *ctsio) 5516{ 5517 struct scsi_format *cdb; 5518 struct ctl_lun *lun; 5519 struct ctl_softc *ctl_softc; 5520 int length, defect_list_len; 5521 5522 CTL_DEBUG_PRINT(("ctl_format\n")); 5523 5524 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5525 ctl_softc = control_softc; 5526 5527 cdb = (struct scsi_format *)ctsio->cdb; 5528 5529 length = 0; 5530 if (cdb->byte2 & SF_FMTDATA) { 5531 if (cdb->byte2 & SF_LONGLIST) 5532 length = sizeof(struct scsi_format_header_long); 5533 else 5534 length = sizeof(struct scsi_format_header_short); 5535 } 5536 5537 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5538 && (length > 0)) { 5539 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5540 ctsio->kern_data_len = length; 5541 ctsio->kern_total_len = length; 5542 ctsio->kern_data_resid = 0; 5543 ctsio->kern_rel_offset = 0; 5544 ctsio->kern_sg_entries = 0; 5545 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5546 ctsio->be_move_done = ctl_config_move_done; 5547 ctl_datamove((union ctl_io *)ctsio); 5548 5549 return (CTL_RETVAL_COMPLETE); 5550 } 5551 5552 defect_list_len = 0; 5553 5554 if (cdb->byte2 & SF_FMTDATA) { 5555 if (cdb->byte2 & SF_LONGLIST) { 5556 struct scsi_format_header_long *header; 5557 5558 header = (struct scsi_format_header_long *) 5559 ctsio->kern_data_ptr; 5560 5561 defect_list_len = scsi_4btoul(header->defect_list_len); 5562 if (defect_list_len != 0) { 5563 ctl_set_invalid_field(ctsio, 5564 /*sks_valid*/ 1, 5565 /*command*/ 0, 5566 /*field*/ 2, 5567 /*bit_valid*/ 0, 5568 /*bit*/ 0); 5569 goto bailout; 5570 } 5571 } else { 5572 struct scsi_format_header_short *header; 5573 5574 header = (struct scsi_format_header_short *) 5575 ctsio->kern_data_ptr; 5576 5577 defect_list_len = scsi_2btoul(header->defect_list_len); 5578 if (defect_list_len != 0) { 5579 ctl_set_invalid_field(ctsio, 5580 /*sks_valid*/ 1, 5581 /*command*/ 0, 5582 /*field*/ 2, 5583 /*bit_valid*/ 0, 5584 /*bit*/ 0); 5585 goto bailout; 5586 } 5587 } 5588 } 5589 5590 /* 5591 * The format command will clear out the "Medium format corrupted" 5592 * status if set by the configuration code. That status is really 5593 * just a way to notify the host that we have lost the media, and 5594 * get them to issue a command that will basically make them think 5595 * they're blowing away the media. 
5596 */ 5597 mtx_lock(&lun->lun_lock); 5598 lun->flags &= ~CTL_LUN_INOPERABLE; 5599 mtx_unlock(&lun->lun_lock); 5600 5601 ctsio->scsi_status = SCSI_STATUS_OK; 5602 ctsio->io_hdr.status = CTL_SUCCESS; 5603bailout: 5604 5605 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5606 free(ctsio->kern_data_ptr, M_CTL); 5607 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5608 } 5609 5610 ctl_done((union ctl_io *)ctsio); 5611 return (CTL_RETVAL_COMPLETE); 5612} 5613 5614int 5615ctl_read_buffer(struct ctl_scsiio *ctsio) 5616{ 5617 struct scsi_read_buffer *cdb; 5618 struct ctl_lun *lun; 5619 int buffer_offset, len; 5620 static uint8_t descr[4]; 5621 static uint8_t echo_descr[4] = { 0 }; 5622 5623 CTL_DEBUG_PRINT(("ctl_read_buffer\n")); 5624 5625 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5626 cdb = (struct scsi_read_buffer *)ctsio->cdb; 5627 5628 if (lun->flags & CTL_LUN_PR_RESERVED) { 5629 uint32_t residx; 5630 5631 /* 5632 * XXX KDM need a lock here. 5633 */ 5634 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 5635 if ((lun->res_type == SPR_TYPE_EX_AC 5636 && residx != lun->pr_res_idx) 5637 || ((lun->res_type == SPR_TYPE_EX_AC_RO 5638 || lun->res_type == SPR_TYPE_EX_AC_AR) 5639 && !lun->per_res[residx].registered)) { 5640 ctl_set_reservation_conflict(ctsio); 5641 ctl_done((union ctl_io *)ctsio); 5642 return (CTL_RETVAL_COMPLETE); 5643 } 5644 } 5645 5646 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA && 5647 (cdb->byte2 & RWB_MODE) != RWB_MODE_ECHO_DESCR && 5648 (cdb->byte2 & RWB_MODE) != RWB_MODE_DESCR) { 5649 ctl_set_invalid_field(ctsio, 5650 /*sks_valid*/ 1, 5651 /*command*/ 1, 5652 /*field*/ 1, 5653 /*bit_valid*/ 1, 5654 /*bit*/ 4); 5655 ctl_done((union ctl_io *)ctsio); 5656 return (CTL_RETVAL_COMPLETE); 5657 } 5658 if (cdb->buffer_id != 0) { 5659 ctl_set_invalid_field(ctsio, 5660 /*sks_valid*/ 1, 5661 /*command*/ 1, 5662 /*field*/ 2, 5663 /*bit_valid*/ 0, 5664 /*bit*/ 0); 5665 ctl_done((union ctl_io *)ctsio); 5666 return (CTL_RETVAL_COMPLETE); 5667 } 5668 5669 len = scsi_3btoul(cdb->length); 5670 buffer_offset = scsi_3btoul(cdb->offset); 5671 5672 if (buffer_offset + len > sizeof(lun->write_buffer)) { 5673 ctl_set_invalid_field(ctsio, 5674 /*sks_valid*/ 1, 5675 /*command*/ 1, 5676 /*field*/ 6, 5677 /*bit_valid*/ 0, 5678 /*bit*/ 0); 5679 ctl_done((union ctl_io *)ctsio); 5680 return (CTL_RETVAL_COMPLETE); 5681 } 5682 5683 if ((cdb->byte2 & RWB_MODE) == RWB_MODE_DESCR) { 5684 descr[0] = 0; 5685 scsi_ulto3b(sizeof(lun->write_buffer), &descr[1]); 5686 ctsio->kern_data_ptr = descr; 5687 len = min(len, sizeof(descr)); 5688 } else if ((cdb->byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { 5689 ctsio->kern_data_ptr = echo_descr; 5690 len = min(len, sizeof(echo_descr)); 5691 } else 5692 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5693 ctsio->kern_data_len = len; 5694 ctsio->kern_total_len = len; 5695 ctsio->kern_data_resid = 0; 5696 ctsio->kern_rel_offset = 0; 5697 ctsio->kern_sg_entries = 0; 5698 ctsio->be_move_done = ctl_config_move_done; 5699 ctl_datamove((union ctl_io *)ctsio); 5700 5701 return (CTL_RETVAL_COMPLETE); 5702} 5703 5704int 5705ctl_write_buffer(struct ctl_scsiio *ctsio) 5706{ 5707 struct scsi_write_buffer *cdb; 5708 struct ctl_lun *lun; 5709 int buffer_offset, len; 5710 5711 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5712 5713 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5714 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5715 5716 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) { 5717 ctl_set_invalid_field(ctsio, 5718 /*sks_valid*/ 1, 5719 
/*command*/ 1, 5720 /*field*/ 1, 5721 /*bit_valid*/ 1, 5722 /*bit*/ 4); 5723 ctl_done((union ctl_io *)ctsio); 5724 return (CTL_RETVAL_COMPLETE); 5725 } 5726 if (cdb->buffer_id != 0) { 5727 ctl_set_invalid_field(ctsio, 5728 /*sks_valid*/ 1, 5729 /*command*/ 1, 5730 /*field*/ 2, 5731 /*bit_valid*/ 0, 5732 /*bit*/ 0); 5733 ctl_done((union ctl_io *)ctsio); 5734 return (CTL_RETVAL_COMPLETE); 5735 } 5736 5737 len = scsi_3btoul(cdb->length); 5738 buffer_offset = scsi_3btoul(cdb->offset); 5739 5740 if (buffer_offset + len > sizeof(lun->write_buffer)) { 5741 ctl_set_invalid_field(ctsio, 5742 /*sks_valid*/ 1, 5743 /*command*/ 1, 5744 /*field*/ 6, 5745 /*bit_valid*/ 0, 5746 /*bit*/ 0); 5747 ctl_done((union ctl_io *)ctsio); 5748 return (CTL_RETVAL_COMPLETE); 5749 } 5750 5751 /* 5752 * If we've got a kernel request that hasn't been malloced yet, 5753 * malloc it and tell the caller the data buffer is here. 5754 */ 5755 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5756 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5757 ctsio->kern_data_len = len; 5758 ctsio->kern_total_len = len; 5759 ctsio->kern_data_resid = 0; 5760 ctsio->kern_rel_offset = 0; 5761 ctsio->kern_sg_entries = 0; 5762 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5763 ctsio->be_move_done = ctl_config_move_done; 5764 ctl_datamove((union ctl_io *)ctsio); 5765 5766 return (CTL_RETVAL_COMPLETE); 5767 } 5768 5769 ctl_done((union ctl_io *)ctsio); 5770 5771 return (CTL_RETVAL_COMPLETE); 5772} 5773 5774int 5775ctl_write_same(struct ctl_scsiio *ctsio) 5776{ 5777 struct ctl_lun *lun; 5778 struct ctl_lba_len_flags *lbalen; 5779 uint64_t lba; 5780 uint32_t num_blocks; 5781 int len, retval; 5782 uint8_t byte2; 5783 5784 retval = CTL_RETVAL_COMPLETE; 5785 5786 CTL_DEBUG_PRINT(("ctl_write_same\n")); 5787 5788 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5789 5790 switch (ctsio->cdb[0]) { 5791 case WRITE_SAME_10: { 5792 struct scsi_write_same_10 *cdb; 5793 5794 cdb = (struct scsi_write_same_10 *)ctsio->cdb; 5795 5796 lba = scsi_4btoul(cdb->addr); 5797 num_blocks = scsi_2btoul(cdb->length); 5798 byte2 = cdb->byte2; 5799 break; 5800 } 5801 case WRITE_SAME_16: { 5802 struct scsi_write_same_16 *cdb; 5803 5804 cdb = (struct scsi_write_same_16 *)ctsio->cdb; 5805 5806 lba = scsi_8btou64(cdb->addr); 5807 num_blocks = scsi_4btoul(cdb->length); 5808 byte2 = cdb->byte2; 5809 break; 5810 } 5811 default: 5812 /* 5813 * We got a command we don't support. This shouldn't 5814 * happen, commands should be filtered out above us. 5815 */ 5816 ctl_set_invalid_opcode(ctsio); 5817 ctl_done((union ctl_io *)ctsio); 5818 5819 return (CTL_RETVAL_COMPLETE); 5820 break; /* NOTREACHED */ 5821 } 5822 5823 /* 5824 * The first check is to make sure we're in bounds, the second 5825 * check is to catch wrap-around problems. If the lba + num blocks 5826 * is less than the lba, then we've wrapped around and the block 5827 * range is invalid anyway. 
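 * (For example, lba 0xfffffffffffffff0 with num_blocks 0x20 wraps to 0x10; the sum is then smaller than lba, so the second test catches a range the first comparison, having wrapped too, would have let through.)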
5828 */ 5829 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5830 || ((lba + num_blocks) < lba)) { 5831 ctl_set_lba_out_of_range(ctsio); 5832 ctl_done((union ctl_io *)ctsio); 5833 return (CTL_RETVAL_COMPLETE); 5834 } 5835 5836 /* Zero number of blocks means "to the last logical block" */ 5837 if (num_blocks == 0) { 5838 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { 5839 ctl_set_invalid_field(ctsio, 5840 /*sks_valid*/ 0, 5841 /*command*/ 1, 5842 /*field*/ 0, 5843 /*bit_valid*/ 0, 5844 /*bit*/ 0); 5845 ctl_done((union ctl_io *)ctsio); 5846 return (CTL_RETVAL_COMPLETE); 5847 } 5848 num_blocks = (lun->be_lun->maxlba + 1) - lba; 5849 } 5850 5851 len = lun->be_lun->blocksize; 5852 5853 /* 5854 * If we've got a kernel request that hasn't been malloced yet, 5855 * malloc it and tell the caller the data buffer is here. 5856 */ 5857 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5858 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5859 ctsio->kern_data_len = len; 5860 ctsio->kern_total_len = len; 5861 ctsio->kern_data_resid = 0; 5862 ctsio->kern_rel_offset = 0; 5863 ctsio->kern_sg_entries = 0; 5864 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5865 ctsio->be_move_done = ctl_config_move_done; 5866 ctl_datamove((union ctl_io *)ctsio); 5867 5868 return (CTL_RETVAL_COMPLETE); 5869 } 5870 5871 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5872 lbalen->lba = lba; 5873 lbalen->len = num_blocks; 5874 lbalen->flags = byte2; 5875 retval = lun->backend->config_write((union ctl_io *)ctsio); 5876 5877 return (retval); 5878} 5879 5880int 5881ctl_unmap(struct ctl_scsiio *ctsio) 5882{ 5883 struct ctl_lun *lun; 5884 struct scsi_unmap *cdb; 5885 struct ctl_ptr_len_flags *ptrlen; 5886 struct scsi_unmap_header *hdr; 5887 struct scsi_unmap_desc *buf, *end; 5888 uint64_t lba; 5889 uint32_t num_blocks; 5890 int len, retval; 5891 uint8_t byte2; 5892 5893 retval = CTL_RETVAL_COMPLETE; 5894 5895 CTL_DEBUG_PRINT(("ctl_unmap\n")); 5896 5897 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5898 cdb = (struct scsi_unmap *)ctsio->cdb; 5899 5900 len = scsi_2btoul(cdb->length); 5901 byte2 = cdb->byte2; 5902 5903 /* 5904 * If we've got a kernel request that hasn't been malloced yet, 5905 * malloc it and tell the caller the data buffer is here.
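 * (First pass through ctl_unmap: allocate the buffer and start the data-out transfer; ctl_config_move_done then re-dispatches the command, and the second pass below parses the descriptor list.)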
5906 */ 5907 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5908 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5909 ctsio->kern_data_len = len; 5910 ctsio->kern_total_len = len; 5911 ctsio->kern_data_resid = 0; 5912 ctsio->kern_rel_offset = 0; 5913 ctsio->kern_sg_entries = 0; 5914 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5915 ctsio->be_move_done = ctl_config_move_done; 5916 ctl_datamove((union ctl_io *)ctsio); 5917 5918 return (CTL_RETVAL_COMPLETE); 5919 } 5920 5921 len = ctsio->kern_total_len - ctsio->kern_data_resid; 5922 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; 5923 if (len < sizeof (*hdr) || 5924 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || 5925 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || 5926 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { 5927 ctl_set_invalid_field(ctsio, 5928 /*sks_valid*/ 0, 5929 /*command*/ 0, 5930 /*field*/ 0, 5931 /*bit_valid*/ 0, 5932 /*bit*/ 0); 5933 ctl_done((union ctl_io *)ctsio); 5934 return (CTL_RETVAL_COMPLETE); 5935 } 5936 len = scsi_2btoul(hdr->desc_length); 5937 buf = (struct scsi_unmap_desc *)(hdr + 1); 5938 end = buf + len / sizeof(*buf); 5939 5940 ptrlen = (struct ctl_ptr_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5941 ptrlen->ptr = (void *)buf; 5942 ptrlen->len = len; 5943 ptrlen->flags = byte2; 5944 5945 for (; buf < end; buf++) { 5946 lba = scsi_8btou64(buf->lba); 5947 num_blocks = scsi_4btoul(buf->length); 5948 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5949 || ((lba + num_blocks) < lba)) { 5950 ctl_set_lba_out_of_range(ctsio); 5951 ctl_done((union ctl_io *)ctsio); 5952 return (CTL_RETVAL_COMPLETE); 5953 } 5954 } 5955 5956 retval = lun->backend->config_write((union ctl_io *)ctsio); 5957 5958 return (retval); 5959} 5960 5961/* 5962 * Note that this function currently doesn't actually do anything inside 5963 * CTL to enforce things if the DQue bit is turned on. 5964 * 5965 * Also note that this function can't be used in the default case, because 5966 * the DQue bit isn't set in the changeable mask for the control mode page 5967 * anyway. This is just here as an example for how to implement a page 5968 * handler, and a placeholder in case we want to allow the user to turn 5969 * tagged queueing on and off. 5970 * 5971 * The D_SENSE bit handling is functional, however, and will turn 5972 * descriptor sense on and off for a given LUN. 5973 */ 5974int 5975ctl_control_page_handler(struct ctl_scsiio *ctsio, 5976 struct ctl_page_index *page_index, uint8_t *page_ptr) 5977{ 5978 struct scsi_control_page *current_cp, *saved_cp, *user_cp; 5979 struct ctl_lun *lun; 5980 struct ctl_softc *softc; 5981 int set_ua; 5982 uint32_t initidx; 5983 5984 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5985 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5986 set_ua = 0; 5987 5988 user_cp = (struct scsi_control_page *)page_ptr; 5989 current_cp = (struct scsi_control_page *) 5990 (page_index->page_data + (page_index->page_len * 5991 CTL_PAGE_CURRENT)); 5992 saved_cp = (struct scsi_control_page *) 5993 (page_index->page_data + (page_index->page_len * 5994 CTL_PAGE_SAVED)); 5995 5996 softc = control_softc; 5997 5998 mtx_lock(&lun->lun_lock); 5999 if (((current_cp->rlec & SCP_DSENSE) == 0) 6000 && ((user_cp->rlec & SCP_DSENSE) != 0)) { 6001 /* 6002 * Descriptor sense is currently turned off and the user 6003 * wants to turn it on.
6004 */ 6005 current_cp->rlec |= SCP_DSENSE; 6006 saved_cp->rlec |= SCP_DSENSE; 6007 lun->flags |= CTL_LUN_SENSE_DESC; 6008 set_ua = 1; 6009 } else if (((current_cp->rlec & SCP_DSENSE) != 0) 6010 && ((user_cp->rlec & SCP_DSENSE) == 0)) { 6011 /* 6012 * Descriptor sense is currently turned on, and the user 6013 * wants to turn it off. 6014 */ 6015 current_cp->rlec &= ~SCP_DSENSE; 6016 saved_cp->rlec &= ~SCP_DSENSE; 6017 lun->flags &= ~CTL_LUN_SENSE_DESC; 6018 set_ua = 1; 6019 } 6020 if (current_cp->queue_flags & SCP_QUEUE_DQUE) { 6021 if (user_cp->queue_flags & SCP_QUEUE_DQUE) { 6022#ifdef NEEDTOPORT 6023 csevent_log(CSC_CTL | CSC_SHELF_SW | 6024 CTL_UNTAG_TO_UNTAG, 6025 csevent_LogType_Trace, 6026 csevent_Severity_Information, 6027 csevent_AlertLevel_Green, 6028 csevent_FRU_Firmware, 6029 csevent_FRU_Unknown, 6030 "Received untagged to untagged transition"); 6031#endif /* NEEDTOPORT */ 6032 } else { 6033#ifdef NEEDTOPORT 6034 csevent_log(CSC_CTL | CSC_SHELF_SW | 6035 CTL_UNTAG_TO_TAG, 6036 csevent_LogType_ConfigChange, 6037 csevent_Severity_Information, 6038 csevent_AlertLevel_Green, 6039 csevent_FRU_Firmware, 6040 csevent_FRU_Unknown, 6041 "Received untagged to tagged " 6042 "queueing transition"); 6043#endif /* NEEDTOPORT */ 6044 6045 current_cp->queue_flags &= ~SCP_QUEUE_DQUE; 6046 saved_cp->queue_flags &= ~SCP_QUEUE_DQUE; 6047 set_ua = 1; 6048 } 6049 } else { 6050 if (user_cp->queue_flags & SCP_QUEUE_DQUE) { 6051#ifdef NEEDTOPORT 6052 csevent_log(CSC_CTL | CSC_SHELF_SW | 6053 CTL_TAG_TO_UNTAG, 6054 csevent_LogType_ConfigChange, 6055 csevent_Severity_Warning, 6056 csevent_AlertLevel_Yellow, 6057 csevent_FRU_Firmware, 6058 csevent_FRU_Unknown, 6059 "Received tagged queueing to untagged " 6060 "transition"); 6061#endif /* NEEDTOPORT */ 6062 6063 current_cp->queue_flags |= SCP_QUEUE_DQUE; 6064 saved_cp->queue_flags |= SCP_QUEUE_DQUE; 6065 set_ua = 1; 6066 } else { 6067#ifdef NEEDTOPORT 6068 csevent_log(CSC_CTL | CSC_SHELF_SW | 6069 CTL_TAG_TO_TAG, 6070 csevent_LogType_Trace, 6071 csevent_Severity_Information, 6072 csevent_AlertLevel_Green, 6073 csevent_FRU_Firmware, 6074 csevent_FRU_Unknown, 6075 "Received tagged queueing to tagged " 6076 "queueing transition"); 6077#endif /* NEEDTOPORT */ 6078 } 6079 } 6080 if (set_ua != 0) { 6081 int i; 6082 /* 6083 * Let other initiators know that the mode 6084 * parameters for this LUN have changed. 6085 */ 6086 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 6087 if (i == initidx) 6088 continue; 6089 6090 lun->pending_sense[i].ua_pending |= 6091 CTL_UA_MODE_CHANGE; 6092 } 6093 } 6094 mtx_unlock(&lun->lun_lock); 6095 6096 return (0); 6097} 6098 6099int 6100ctl_power_sp_handler(struct ctl_scsiio *ctsio, 6101 struct ctl_page_index *page_index, uint8_t *page_ptr) 6102{ 6103 return (0); 6104} 6105 6106int 6107ctl_power_sp_sense_handler(struct ctl_scsiio *ctsio, 6108 struct ctl_page_index *page_index, int pc) 6109{ 6110 struct copan_power_subpage *page; 6111 6112 page = (struct copan_power_subpage *)page_index->page_data + 6113 (page_index->page_len * pc); 6114 6115 switch (pc) { 6116 case SMS_PAGE_CTRL_CHANGEABLE >> 6: 6117 /* 6118 * We don't update the changeable bits for this page.
6119 */ 6120 break; 6121 case SMS_PAGE_CTRL_CURRENT >> 6: 6122 case SMS_PAGE_CTRL_DEFAULT >> 6: 6123 case SMS_PAGE_CTRL_SAVED >> 6: 6124#ifdef NEEDTOPORT 6125 ctl_update_power_subpage(page); 6126#endif 6127 break; 6128 default: 6129#ifdef NEEDTOPORT 6130 EPRINT(0, "Invalid PC %d!!", pc); 6131#endif 6132 break; 6133 } 6134 return (0); 6135} 6136 6137 6138int 6139ctl_aps_sp_handler(struct ctl_scsiio *ctsio, 6140 struct ctl_page_index *page_index, uint8_t *page_ptr) 6141{ 6142 struct copan_aps_subpage *user_sp; 6143 struct copan_aps_subpage *current_sp; 6144 union ctl_modepage_info *modepage_info; 6145 struct ctl_softc *softc; 6146 struct ctl_lun *lun; 6147 int retval; 6148 6149 retval = CTL_RETVAL_COMPLETE; 6150 current_sp = (struct copan_aps_subpage *)(page_index->page_data + 6151 (page_index->page_len * CTL_PAGE_CURRENT)); 6152 softc = control_softc; 6153 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6154 6155 user_sp = (struct copan_aps_subpage *)page_ptr; 6156 6157 modepage_info = (union ctl_modepage_info *) 6158 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6159 6160 modepage_info->header.page_code = page_index->page_code & SMPH_PC_MASK; 6161 modepage_info->header.subpage = page_index->subpage; 6162 modepage_info->aps.lock_active = user_sp->lock_active; 6163 6164 mtx_lock(&softc->ctl_lock); 6165 6166 /* 6167 * If there is a request to lock the LUN and another LUN is locked, 6168 * this is an error. If the requested LUN is already locked, ignore 6169 * the request. If no LUN is locked, attempt to lock it. 6170 * If there is a request to unlock the LUN and the LUN is currently 6171 * locked, attempt to unlock it. Otherwise (i.e. if another LUN is 6172 * locked or no LUN is locked), ignore the request. 6173 */ 6174 if (user_sp->lock_active & APS_LOCK_ACTIVE) { 6175 if (softc->aps_locked_lun == lun->lun) { 6176 /* 6177 * This LUN is already locked, so we're done. 6178 */ 6179 retval = CTL_RETVAL_COMPLETE; 6180 } else if (softc->aps_locked_lun == 0) { 6181 /* 6182 * No one has the lock, pass the request to the 6183 * backend. 6184 */ 6185 retval = lun->backend->config_write( 6186 (union ctl_io *)ctsio); 6187 } else { 6188 /* 6189 * Someone else has the lock, throw out the request. 6190 */ 6191 ctl_set_already_locked(ctsio); 6192 free(ctsio->kern_data_ptr, M_CTL); 6193 ctl_done((union ctl_io *)ctsio); 6194 6195 /* 6196 * Set the return value so that ctl_do_mode_select() 6197 * won't try to complete the command. We already 6198 * completed it here. 6199 */ 6200 retval = CTL_RETVAL_ERROR; 6201 } 6202 } else if (softc->aps_locked_lun == lun->lun) { 6203 /* 6204 * This LUN is locked, so pass the unlock request to the 6205 * backend.
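 *
 * (To summarize the lock arbitration implemented above and below -- a
 * hypothetical, stand-alone sketch, which assumes aps_locked_lun == 0
 * means "no lock held", as this code does:)
 */
#if 0
enum aps_action { APS_IGNORE, APS_FORWARD, APS_CONFLICT };

static enum aps_action
aps_arbitrate(int lock_request, uint64_t locked_lun, uint64_t this_lun)
{
	if (lock_request) {
		if (locked_lun == this_lun)
			return (APS_IGNORE);	/* we already hold it */
		if (locked_lun == 0)
			return (APS_FORWARD);	/* free; ask the backend */
		return (APS_CONFLICT);		/* another LUN holds it */
	}
	/* Unlock requests only matter when this LUN holds the lock. */
	return (locked_lun == this_lun ? APS_FORWARD : APS_IGNORE);
}
#endif
/*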
6206 */ 6207 retval = lun->backend->config_write((union ctl_io *)ctsio); 6208 } 6209 mtx_unlock(&softc->ctl_lock); 6210 6211 return (retval); 6212} 6213 6214int 6215ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio, 6216 struct ctl_page_index *page_index, 6217 uint8_t *page_ptr) 6218{ 6219 uint8_t *c; 6220 int i; 6221 6222 c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs; 6223 ctl_time_io_secs = 6224 (c[0] << 8) | 6225 (c[1] << 0) | 6226 0; 6227 CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs)); 6228 printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs); 6229 printf("page data:"); 6230 for (i=0; i<8; i++) 6231 printf(" %.2x",page_ptr[i]); 6232 printf("\n"); 6233 return (0); 6234} 6235 6236int 6237ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio, 6238 struct ctl_page_index *page_index, 6239 int pc) 6240{ 6241 struct copan_debugconf_subpage *page; 6242 6243 page = (struct copan_debugconf_subpage *)(page_index->page_data + 6244 (page_index->page_len * pc)); 6245 6246 switch (pc) { 6247 case SMS_PAGE_CTRL_CHANGEABLE >> 6: 6248 case SMS_PAGE_CTRL_DEFAULT >> 6: 6249 case SMS_PAGE_CTRL_SAVED >> 6: 6250 /* 6251 * We don't update the changeable or default bits for this page. 6252 */ 6253 break; 6254 case SMS_PAGE_CTRL_CURRENT >> 6: 6255 page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8; 6256 page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0; 6257 break; 6258 default: 6259#ifdef NEEDTOPORT 6260 EPRINT(0, "Invalid PC %d!!", pc); 6261#endif /* NEEDTOPORT */ 6262 break; 6263 } 6264 return (0); 6265} 6266 6267 6268static int 6269ctl_do_mode_select(union ctl_io *io) 6270{ 6271 struct scsi_mode_page_header *page_header; 6272 struct ctl_page_index *page_index; 6273 struct ctl_scsiio *ctsio; 6274 int control_dev, page_len; 6275 int page_len_offset, page_len_size; 6276 union ctl_modepage_info *modepage_info; 6277 struct ctl_lun *lun; 6278 int *len_left, *len_used; 6279 int retval, i; 6280 6281 ctsio = &io->scsiio; 6282 page_index = NULL; 6283 page_len = 0; 6284 retval = CTL_RETVAL_COMPLETE; 6285 6286 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6287 6288 if (lun->be_lun->lun_type != T_DIRECT) 6289 control_dev = 1; 6290 else 6291 control_dev = 0; 6292 6293 modepage_info = (union ctl_modepage_info *) 6294 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6295 len_left = &modepage_info->header.len_left; 6296 len_used = &modepage_info->header.len_used; 6297 6298do_next_page: 6299 6300 page_header = (struct scsi_mode_page_header *) 6301 (ctsio->kern_data_ptr + *len_used); 6302 6303 if (*len_left == 0) { 6304 free(ctsio->kern_data_ptr, M_CTL); 6305 ctl_set_success(ctsio); 6306 ctl_done((union ctl_io *)ctsio); 6307 return (CTL_RETVAL_COMPLETE); 6308 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 6309 6310 free(ctsio->kern_data_ptr, M_CTL); 6311 ctl_set_param_len_error(ctsio); 6312 ctl_done((union ctl_io *)ctsio); 6313 return (CTL_RETVAL_COMPLETE); 6314 6315 } else if ((page_header->page_code & SMPH_SPF) 6316 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 6317 6318 free(ctsio->kern_data_ptr, M_CTL); 6319 ctl_set_param_len_error(ctsio); 6320 ctl_done((union ctl_io *)ctsio); 6321 return (CTL_RETVAL_COMPLETE); 6322 } 6323 6324 6325 /* 6326 * XXX KDM should we do something with the block descriptor?
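 *
 * (The loop below matches the incoming page header against the LUN's
 * known mode pages. For reference, the matching rule restated as a
 * hypothetical stand-alone predicate:)
 */
#if 0
static int
mode_page_matches(uint8_t known_code, uint8_t known_subpage,
    uint8_t hdr_code, uint8_t hdr_subpage)
{
	if ((known_code & SMPH_PC_MASK) != (hdr_code & SMPH_PC_MASK))
		return (0);
	/* Neither entry uses subpages: the page codes alone decide. */
	if (((known_code & SMPH_SPF) == 0) && ((hdr_code & SMPH_SPF) == 0))
		return (1);
	/* Both use subpages: the subpage numbers must agree as well. */
	if ((known_code & SMPH_SPF) && (hdr_code & SMPH_SPF))
		return (known_subpage == hdr_subpage);
	return (0);
}
#endif
/*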
6327 */ 6328 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6329 6330 if ((control_dev != 0) 6331 && (lun->mode_pages.index[i].page_flags & 6332 CTL_PAGE_FLAG_DISK_ONLY)) 6333 continue; 6334 6335 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) != 6336 (page_header->page_code & SMPH_PC_MASK)) 6337 continue; 6338 6339 /* 6340 * If neither page has a subpage code, then we've got a 6341 * match. 6342 */ 6343 if (((lun->mode_pages.index[i].page_code & SMPH_SPF) == 0) 6344 && ((page_header->page_code & SMPH_SPF) == 0)) { 6345 page_index = &lun->mode_pages.index[i]; 6346 page_len = page_header->page_length; 6347 break; 6348 } 6349 6350 /* 6351 * If both pages have subpages, then the subpage numbers 6352 * have to match. 6353 */ 6354 if ((lun->mode_pages.index[i].page_code & SMPH_SPF) 6355 && (page_header->page_code & SMPH_SPF)) { 6356 struct scsi_mode_page_header_sp *sph; 6357 6358 sph = (struct scsi_mode_page_header_sp *)page_header; 6359 6360 if (lun->mode_pages.index[i].subpage == 6361 sph->subpage) { 6362 page_index = &lun->mode_pages.index[i]; 6363 page_len = scsi_2btoul(sph->page_length); 6364 break; 6365 } 6366 } 6367 } 6368 6369 /* 6370 * If we couldn't find the page, or if we don't have a mode select 6371 * handler for it, send back an error to the user. 6372 */ 6373 if ((page_index == NULL) 6374 || (page_index->select_handler == NULL)) { 6375 ctl_set_invalid_field(ctsio, 6376 /*sks_valid*/ 1, 6377 /*command*/ 0, 6378 /*field*/ *len_used, 6379 /*bit_valid*/ 0, 6380 /*bit*/ 0); 6381 free(ctsio->kern_data_ptr, M_CTL); 6382 ctl_done((union ctl_io *)ctsio); 6383 return (CTL_RETVAL_COMPLETE); 6384 } 6385 6386 if (page_index->page_code & SMPH_SPF) { 6387 page_len_offset = 2; 6388 page_len_size = 2; 6389 } else { 6390 page_len_size = 1; 6391 page_len_offset = 1; 6392 } 6393 6394 /* 6395 * If the length the initiator gives us isn't the one we specify in 6396 * the mode page header, or if they didn't specify enough data in 6397 * the CDB to avoid truncating this page, kick out the request. 6398 */ 6399 if ((page_len != (page_index->page_len - page_len_offset - 6400 page_len_size)) 6401 || (*len_left < page_index->page_len)) { 6402 6403 6404 ctl_set_invalid_field(ctsio, 6405 /*sks_valid*/ 1, 6406 /*command*/ 0, 6407 /*field*/ *len_used + page_len_offset, 6408 /*bit_valid*/ 0, 6409 /*bit*/ 0); 6410 free(ctsio->kern_data_ptr, M_CTL); 6411 ctl_done((union ctl_io *)ctsio); 6412 return (CTL_RETVAL_COMPLETE); 6413 } 6414 6415 /* 6416 * Run through the mode page, checking to make sure that the bits 6417 * the user changed are actually legal for him to change. 6418 */ 6419 for (i = 0; i < page_index->page_len; i++) { 6420 uint8_t *user_byte, *change_mask, *current_byte; 6421 int bad_bit; 6422 int j; 6423 6424 user_byte = (uint8_t *)page_header + i; 6425 change_mask = page_index->page_data + 6426 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; 6427 current_byte = page_index->page_data + 6428 (page_index->page_len * CTL_PAGE_CURRENT) + i; 6429 6430 /* 6431 * Check to see whether the user set any bits in this byte 6432 * that he is not allowed to set. 6433 */ 6434 if ((*user_byte & ~(*change_mask)) == 6435 (*current_byte & ~(*change_mask))) 6436 continue; 6437 6438 /* 6439 * Go through bit by bit to determine which one is illegal. 
6440 */ 6441 bad_bit = 0; 6442 for (j = 7; j >= 0; j--) { 6443 if ((((1 << j) & ~(*change_mask)) & *user_byte) != 6444 (((1 << j) & ~(*change_mask)) & *current_byte)) { 6445 bad_bit = j; 6446 break; 6447 } 6448 } 6449 ctl_set_invalid_field(ctsio, 6450 /*sks_valid*/ 1, 6451 /*command*/ 0, 6452 /*field*/ *len_used + i, 6453 /*bit_valid*/ 1, 6454 /*bit*/ bad_bit); 6455 free(ctsio->kern_data_ptr, M_CTL); 6456 ctl_done((union ctl_io *)ctsio); 6457 return (CTL_RETVAL_COMPLETE); 6458 } 6459 6460 /* 6461 * Decrement these before we call the page handler, since we may 6462 * end up getting called back one way or another before the handler 6463 * returns to this context. 6464 */ 6465 *len_left -= page_index->page_len; 6466 *len_used += page_index->page_len; 6467 6468 retval = page_index->select_handler(ctsio, page_index, 6469 (uint8_t *)page_header); 6470 6471 /* 6472 * If the page handler returns CTL_RETVAL_QUEUED, then we need to 6473 * wait until this queued command completes to finish processing 6474 * the mode page. If it returns anything other than 6475 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have 6476 * already set the sense information, freed the data pointer, and 6477 * completed the io for us. 6478 */ 6479 if (retval != CTL_RETVAL_COMPLETE) 6480 goto bailout_no_done; 6481 6482 /* 6483 * If the initiator sent us more than one page, parse the next one. 6484 */ 6485 if (*len_left > 0) 6486 goto do_next_page; 6487 6488 ctl_set_success(ctsio); 6489 free(ctsio->kern_data_ptr, M_CTL); 6490 ctl_done((union ctl_io *)ctsio); 6491 6492bailout_no_done: 6493 6494 return (CTL_RETVAL_COMPLETE); 6495 6496} 6497 6498int 6499ctl_mode_select(struct ctl_scsiio *ctsio) 6500{ 6501 int param_len, pf, sp; 6502 int header_size, bd_len; 6503 int len_left, len_used; 6504 struct ctl_page_index *page_index; 6505 struct ctl_lun *lun; 6506 int control_dev, page_len; 6507 union ctl_modepage_info *modepage_info; 6508 int retval; 6509 6510 pf = 0; 6511 sp = 0; 6512 page_len = 0; 6513 len_used = 0; 6514 len_left = 0; 6515 retval = 0; 6516 bd_len = 0; 6517 page_index = NULL; 6518 6519 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6520 6521 if (lun->be_lun->lun_type != T_DIRECT) 6522 control_dev = 1; 6523 else 6524 control_dev = 0; 6525 6526 switch (ctsio->cdb[0]) { 6527 case MODE_SELECT_6: { 6528 struct scsi_mode_select_6 *cdb; 6529 6530 cdb = (struct scsi_mode_select_6 *)ctsio->cdb; 6531 6532 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6533 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6534 6535 param_len = cdb->length; 6536 header_size = sizeof(struct scsi_mode_header_6); 6537 break; 6538 } 6539 case MODE_SELECT_10: { 6540 struct scsi_mode_select_10 *cdb; 6541 6542 cdb = (struct scsi_mode_select_10 *)ctsio->cdb; 6543 6544 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6545 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6546 6547 param_len = scsi_2btoul(cdb->length); 6548 header_size = sizeof(struct scsi_mode_header_10); 6549 break; 6550 } 6551 default: 6552 ctl_set_invalid_opcode(ctsio); 6553 ctl_done((union ctl_io *)ctsio); 6554 return (CTL_RETVAL_COMPLETE); 6555 break; /* NOTREACHED */ 6556 } 6557 6558 /* 6559 * From SPC-3: 6560 * "A parameter list length of zero indicates that the Data-Out Buffer 6561 shall be empty. This condition shall not be considered as an error."
6562 */ 6563 if (param_len == 0) { 6564 ctl_set_success(ctsio); 6565 ctl_done((union ctl_io *)ctsio); 6566 return (CTL_RETVAL_COMPLETE); 6567 } 6568 6569 /* 6570 * Since we'll hit this the first time through, prior to 6571 * allocation, we don't need to free a data buffer here. 6572 */ 6573 if (param_len < header_size) { 6574 ctl_set_param_len_error(ctsio); 6575 ctl_done((union ctl_io *)ctsio); 6576 return (CTL_RETVAL_COMPLETE); 6577 } 6578 6579 /* 6580 * Allocate the data buffer and grab the user's data. In theory, 6581 * we shouldn't have to sanity check the parameter list length here 6582 * because the maximum size is 64K. We should be able to malloc 6583 * that much without too many problems. 6584 */ 6585 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6586 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 6587 ctsio->kern_data_len = param_len; 6588 ctsio->kern_total_len = param_len; 6589 ctsio->kern_data_resid = 0; 6590 ctsio->kern_rel_offset = 0; 6591 ctsio->kern_sg_entries = 0; 6592 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6593 ctsio->be_move_done = ctl_config_move_done; 6594 ctl_datamove((union ctl_io *)ctsio); 6595 6596 return (CTL_RETVAL_COMPLETE); 6597 } 6598 6599 switch (ctsio->cdb[0]) { 6600 case MODE_SELECT_6: { 6601 struct scsi_mode_header_6 *mh6; 6602 6603 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; 6604 bd_len = mh6->blk_desc_len; 6605 break; 6606 } 6607 case MODE_SELECT_10: { 6608 struct scsi_mode_header_10 *mh10; 6609 6610 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; 6611 bd_len = scsi_2btoul(mh10->blk_desc_len); 6612 break; 6613 } 6614 default: 6615 panic("Invalid CDB type %#x", ctsio->cdb[0]); 6616 break; 6617 } 6618 6619 if (param_len < (header_size + bd_len)) { 6620 free(ctsio->kern_data_ptr, M_CTL); 6621 ctl_set_param_len_error(ctsio); 6622 ctl_done((union ctl_io *)ctsio); 6623 return (CTL_RETVAL_COMPLETE); 6624 } 6625 6626 /* 6627 * Set the IO_CONT flag, so that if this I/O gets passed to 6628 * ctl_config_write_done(), it'll get passed back to 6629 * ctl_do_mode_select() for further processing, or completion if 6630 * we're all done. 6631 */ 6632 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6633 ctsio->io_cont = ctl_do_mode_select; 6634 6635 modepage_info = (union ctl_modepage_info *) 6636 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6637 6638 memset(modepage_info, 0, sizeof(*modepage_info)); 6639 6640 len_left = param_len - header_size - bd_len; 6641 len_used = header_size + bd_len; 6642 6643 modepage_info->header.len_left = len_left; 6644 modepage_info->header.len_used = len_used; 6645 6646 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6647} 6648 6649int 6650ctl_mode_sense(struct ctl_scsiio *ctsio) 6651{ 6652 struct ctl_lun *lun; 6653 int pc, page_code, dbd, llba, subpage; 6654 int alloc_len, page_len, header_len, total_len; 6655 struct scsi_mode_block_descr *block_desc; 6656 struct ctl_page_index *page_index; 6657 int control_dev; 6658 6659 dbd = 0; 6660 llba = 0; 6661 block_desc = NULL; 6662 page_index = NULL; 6663 6664 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6665 6666 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6667 6668 if (lun->be_lun->lun_type != T_DIRECT) 6669 control_dev = 1; 6670 else 6671 control_dev = 0; 6672 6673 if (lun->flags & CTL_LUN_PR_RESERVED) { 6674 uint32_t residx; 6675 6676 /* 6677 * XXX KDM need a lock here. 
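 *
 * (The check below denies MODE SENSE to non-holders of an exclusive
 * access reservation, and to unregistered nexuses under the
 * registrants-only and all-registrants variants. The same rule as an
 * illustrative stand-alone predicate:)
 */
#if 0
static int
pr_mode_sense_blocked(int res_type, int is_holder, int is_registered)
{
	if (res_type == SPR_TYPE_EX_AC)
		return (!is_holder);
	if (res_type == SPR_TYPE_EX_AC_RO || res_type == SPR_TYPE_EX_AC_AR)
		return (!is_registered);
	return (0);
}
#endif
/*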
6678 */ 6679 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 6680 if ((lun->res_type == SPR_TYPE_EX_AC 6681 && residx != lun->pr_res_idx) 6682 || ((lun->res_type == SPR_TYPE_EX_AC_RO 6683 || lun->res_type == SPR_TYPE_EX_AC_AR) 6684 && !lun->per_res[residx].registered)) { 6685 ctl_set_reservation_conflict(ctsio); 6686 ctl_done((union ctl_io *)ctsio); 6687 return (CTL_RETVAL_COMPLETE); 6688 } 6689 } 6690 6691 switch (ctsio->cdb[0]) { 6692 case MODE_SENSE_6: { 6693 struct scsi_mode_sense_6 *cdb; 6694 6695 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6696 6697 header_len = sizeof(struct scsi_mode_hdr_6); 6698 if (cdb->byte2 & SMS_DBD) 6699 dbd = 1; 6700 else 6701 header_len += sizeof(struct scsi_mode_block_descr); 6702 6703 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6704 page_code = cdb->page & SMS_PAGE_CODE; 6705 subpage = cdb->subpage; 6706 alloc_len = cdb->length; 6707 break; 6708 } 6709 case MODE_SENSE_10: { 6710 struct scsi_mode_sense_10 *cdb; 6711 6712 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6713 6714 header_len = sizeof(struct scsi_mode_hdr_10); 6715 6716 if (cdb->byte2 & SMS_DBD) 6717 dbd = 1; 6718 else 6719 header_len += sizeof(struct scsi_mode_block_descr); 6720 if (cdb->byte2 & SMS10_LLBAA) 6721 llba = 1; 6722 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6723 page_code = cdb->page & SMS_PAGE_CODE; 6724 subpage = cdb->subpage; 6725 alloc_len = scsi_2btoul(cdb->length); 6726 break; 6727 } 6728 default: 6729 ctl_set_invalid_opcode(ctsio); 6730 ctl_done((union ctl_io *)ctsio); 6731 return (CTL_RETVAL_COMPLETE); 6732 break; /* NOTREACHED */ 6733 } 6734 6735 /* 6736 * We have to make a first pass through to calculate the size of 6737 * the pages that match the user's query. Then we allocate enough 6738 * memory to hold it, and actually copy the data into the buffer. 6739 */ 6740 switch (page_code) { 6741 case SMS_ALL_PAGES_PAGE: { 6742 int i; 6743 6744 page_len = 0; 6745 6746 /* 6747 * At the moment, values other than 0 and 0xff here are 6748 * reserved according to SPC-3. 6749 */ 6750 if ((subpage != SMS_SUBPAGE_PAGE_0) 6751 && (subpage != SMS_SUBPAGE_ALL)) { 6752 ctl_set_invalid_field(ctsio, 6753 /*sks_valid*/ 1, 6754 /*command*/ 1, 6755 /*field*/ 3, 6756 /*bit_valid*/ 0, 6757 /*bit*/ 0); 6758 ctl_done((union ctl_io *)ctsio); 6759 return (CTL_RETVAL_COMPLETE); 6760 } 6761 6762 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6763 if ((control_dev != 0) 6764 && (lun->mode_pages.index[i].page_flags & 6765 CTL_PAGE_FLAG_DISK_ONLY)) 6766 continue; 6767 6768 /* 6769 * We don't use this subpage if the user didn't 6770 * request all subpages. 
6771 */ 6772 if ((lun->mode_pages.index[i].subpage != 0) 6773 && (subpage == SMS_SUBPAGE_PAGE_0)) 6774 continue; 6775 6776#if 0 6777 printf("found page %#x len %d\n", 6778 lun->mode_pages.index[i].page_code & 6779 SMPH_PC_MASK, 6780 lun->mode_pages.index[i].page_len); 6781#endif 6782 page_len += lun->mode_pages.index[i].page_len; 6783 } 6784 break; 6785 } 6786 default: { 6787 int i; 6788 6789 page_len = 0; 6790 6791 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6792 /* Look for the right page code */ 6793 if ((lun->mode_pages.index[i].page_code & 6794 SMPH_PC_MASK) != page_code) 6795 continue; 6796 6797 /* Look for the right subpage or the subpage wildcard*/ 6798 if ((lun->mode_pages.index[i].subpage != subpage) 6799 && (subpage != SMS_SUBPAGE_ALL)) 6800 continue; 6801 6802 /* Make sure the page is supported for this dev type */ 6803 if ((control_dev != 0) 6804 && (lun->mode_pages.index[i].page_flags & 6805 CTL_PAGE_FLAG_DISK_ONLY)) 6806 continue; 6807 6808#if 0 6809 printf("found page %#x len %d\n", 6810 lun->mode_pages.index[i].page_code & 6811 SMPH_PC_MASK, 6812 lun->mode_pages.index[i].page_len); 6813#endif 6814 6815 page_len += lun->mode_pages.index[i].page_len; 6816 } 6817 6818 if (page_len == 0) { 6819 ctl_set_invalid_field(ctsio, 6820 /*sks_valid*/ 1, 6821 /*command*/ 1, 6822 /*field*/ 2, 6823 /*bit_valid*/ 1, 6824 /*bit*/ 5); 6825 ctl_done((union ctl_io *)ctsio); 6826 return (CTL_RETVAL_COMPLETE); 6827 } 6828 break; 6829 } 6830 } 6831 6832 total_len = header_len + page_len; 6833#if 0 6834 printf("header_len = %d, page_len = %d, total_len = %d\n", 6835 header_len, page_len, total_len); 6836#endif 6837 6838 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6839 ctsio->kern_sg_entries = 0; 6840 ctsio->kern_data_resid = 0; 6841 ctsio->kern_rel_offset = 0; 6842 if (total_len < alloc_len) { 6843 ctsio->residual = alloc_len - total_len; 6844 ctsio->kern_data_len = total_len; 6845 ctsio->kern_total_len = total_len; 6846 } else { 6847 ctsio->residual = 0; 6848 ctsio->kern_data_len = alloc_len; 6849 ctsio->kern_total_len = alloc_len; 6850 } 6851 6852 switch (ctsio->cdb[0]) { 6853 case MODE_SENSE_6: { 6854 struct scsi_mode_hdr_6 *header; 6855 6856 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6857 6858 header->datalen = ctl_min(total_len - 1, 254); 6859 6860 if (dbd) 6861 header->block_descr_len = 0; 6862 else 6863 header->block_descr_len = 6864 sizeof(struct scsi_mode_block_descr); 6865 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6866 break; 6867 } 6868 case MODE_SENSE_10: { 6869 struct scsi_mode_hdr_10 *header; 6870 int datalen; 6871 6872 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6873 6874 datalen = ctl_min(total_len - 2, 65533); 6875 scsi_ulto2b(datalen, header->datalen); 6876 if (dbd) 6877 scsi_ulto2b(0, header->block_descr_len); 6878 else 6879 scsi_ulto2b(sizeof(struct scsi_mode_block_descr), 6880 header->block_descr_len); 6881 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6882 break; 6883 } 6884 default: 6885 panic("invalid CDB type %#x", ctsio->cdb[0]); 6886 break; /* NOTREACHED */ 6887 } 6888 6889 /* 6890 * If we've got a disk, use its blocksize in the block 6891 * descriptor. Otherwise, just set it to 0. 
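 *
 * (scsi_ulto3b() stores the low 24 bits of its argument big-endian; a
 * minimal equivalent, shown for illustration only:)
 */
#if 0
static void
example_ulto3b(uint32_t val, uint8_t *bytes)
{
	bytes[0] = (val >> 16) & 0xff;
	bytes[1] = (val >> 8) & 0xff;
	bytes[2] = val & 0xff;
}
#endif
/*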
6892 */ 6893 if (dbd == 0) { 6894 if (control_dev == 0) 6895 scsi_ulto3b(lun->be_lun->blocksize, 6896 block_desc->block_len); 6897 else 6898 scsi_ulto3b(0, block_desc->block_len); 6899 } 6900 6901 switch (page_code) { 6902 case SMS_ALL_PAGES_PAGE: { 6903 int i, data_used; 6904 6905 data_used = header_len; 6906 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6907 struct ctl_page_index *page_index; 6908 6909 page_index = &lun->mode_pages.index[i]; 6910 6911 if ((control_dev != 0) 6912 && (page_index->page_flags & 6913 CTL_PAGE_FLAG_DISK_ONLY)) 6914 continue; 6915 6916 /* 6917 * We don't use this subpage if the user didn't 6918 * request all subpages. We already checked (above) 6919 * to make sure the user only specified a subpage 6920 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6921 */ 6922 if ((page_index->subpage != 0) 6923 && (subpage == SMS_SUBPAGE_PAGE_0)) 6924 continue; 6925 6926 /* 6927 * Call the handler, if it exists, to update the 6928 * page to the latest values. 6929 */ 6930 if (page_index->sense_handler != NULL) 6931 page_index->sense_handler(ctsio, page_index,pc); 6932 6933 memcpy(ctsio->kern_data_ptr + data_used, 6934 page_index->page_data + 6935 (page_index->page_len * pc), 6936 page_index->page_len); 6937 data_used += page_index->page_len; 6938 } 6939 break; 6940 } 6941 default: { 6942 int i, data_used; 6943 6944 data_used = header_len; 6945 6946 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6947 struct ctl_page_index *page_index; 6948 6949 page_index = &lun->mode_pages.index[i]; 6950 6951 /* Look for the right page code */ 6952 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6953 continue; 6954 6955 /* Look for the right subpage or the subpage wildcard*/ 6956 if ((page_index->subpage != subpage) 6957 && (subpage != SMS_SUBPAGE_ALL)) 6958 continue; 6959 6960 /* Make sure the page is supported for this dev type */ 6961 if ((control_dev != 0) 6962 && (page_index->page_flags & 6963 CTL_PAGE_FLAG_DISK_ONLY)) 6964 continue; 6965 6966 /* 6967 * Call the handler, if it exists, to update the 6968 * page to the latest values.
6969 */ 6970 if (page_index->sense_handler != NULL) 6971 page_index->sense_handler(ctsio, page_index,pc); 6972 6973 memcpy(ctsio->kern_data_ptr + data_used, 6974 page_index->page_data + 6975 (page_index->page_len * pc), 6976 page_index->page_len); 6977 data_used += page_index->page_len; 6978 } 6979 break; 6980 } 6981 } 6982 6983 ctsio->scsi_status = SCSI_STATUS_OK; 6984 6985 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6986 ctsio->be_move_done = ctl_config_move_done; 6987 ctl_datamove((union ctl_io *)ctsio); 6988 6989 return (CTL_RETVAL_COMPLETE); 6990} 6991 6992int 6993ctl_read_capacity(struct ctl_scsiio *ctsio) 6994{ 6995 struct scsi_read_capacity *cdb; 6996 struct scsi_read_capacity_data *data; 6997 struct ctl_lun *lun; 6998 uint32_t lba; 6999 7000 CTL_DEBUG_PRINT(("ctl_read_capacity\n")); 7001 7002 cdb = (struct scsi_read_capacity *)ctsio->cdb; 7003 7004 lba = scsi_4btoul(cdb->addr); 7005 if (((cdb->pmi & SRC_PMI) == 0) 7006 && (lba != 0)) { 7007 ctl_set_invalid_field(/*ctsio*/ ctsio, 7008 /*sks_valid*/ 1, 7009 /*command*/ 1, 7010 /*field*/ 2, 7011 /*bit_valid*/ 0, 7012 /*bit*/ 0); 7013 ctl_done((union ctl_io *)ctsio); 7014 return (CTL_RETVAL_COMPLETE); 7015 } 7016 7017 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7018 7019 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 7020 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; 7021 ctsio->residual = 0; 7022 ctsio->kern_data_len = sizeof(*data); 7023 ctsio->kern_total_len = sizeof(*data); 7024 ctsio->kern_data_resid = 0; 7025 ctsio->kern_rel_offset = 0; 7026 ctsio->kern_sg_entries = 0; 7027 7028 /* 7029 * If the maximum LBA is greater than 0xfffffffe, the user must 7030 * issue a SERVICE ACTION IN (16) command, with the read capacity 7031 * service action set. 7032 */ 7033 if (lun->be_lun->maxlba > 0xfffffffe) 7034 scsi_ulto4b(0xffffffff, data->addr); 7035 else 7036 scsi_ulto4b(lun->be_lun->maxlba, data->addr); 7037 7038 /* 7039 * XXX KDM this may not be 512 bytes...
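 *
 * (For reference, the clamp above follows SBC: when the last LBA does
 * not fit in 32 bits, READ CAPACITY(10) reports 0xffffffff and the
 * initiator must fall back to READ CAPACITY(16). Illustrative helper:)
 */
#if 0
static uint32_t
rc10_reported_lba(uint64_t maxlba)
{
	return (maxlba > 0xfffffffe ? 0xffffffff : (uint32_t)maxlba);
}
#endif
/*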
7040 */ 7041 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7042 7043 ctsio->scsi_status = SCSI_STATUS_OK; 7044 7045 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7046 ctsio->be_move_done = ctl_config_move_done; 7047 ctl_datamove((union ctl_io *)ctsio); 7048 7049 return (CTL_RETVAL_COMPLETE); 7050} 7051 7052static int 7053ctl_read_capacity_16(struct ctl_scsiio *ctsio) 7054{ 7055 struct scsi_read_capacity_16 *cdb; 7056 struct scsi_read_capacity_data_long *data; 7057 struct ctl_lun *lun; 7058 uint64_t lba; 7059 uint32_t alloc_len; 7060 7061 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 7062 7063 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 7064 7065 alloc_len = scsi_4btoul(cdb->alloc_len); 7066 lba = scsi_8btou64(cdb->addr); 7067 7068 if ((cdb->reladr & SRC16_PMI) 7069 && (lba != 0)) { 7070 ctl_set_invalid_field(/*ctsio*/ ctsio, 7071 /*sks_valid*/ 1, 7072 /*command*/ 1, 7073 /*field*/ 2, 7074 /*bit_valid*/ 0, 7075 /*bit*/ 0); 7076 ctl_done((union ctl_io *)ctsio); 7077 return (CTL_RETVAL_COMPLETE); 7078 } 7079 7080 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7081 7082 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 7083 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 7084 7085 if (sizeof(*data) < alloc_len) { 7086 ctsio->residual = alloc_len - sizeof(*data); 7087 ctsio->kern_data_len = sizeof(*data); 7088 ctsio->kern_total_len = sizeof(*data); 7089 } else { 7090 ctsio->residual = 0; 7091 ctsio->kern_data_len = alloc_len; 7092 ctsio->kern_total_len = alloc_len; 7093 } 7094 ctsio->kern_data_resid = 0; 7095 ctsio->kern_rel_offset = 0; 7096 ctsio->kern_sg_entries = 0; 7097 7098 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 7099 /* XXX KDM this may not be 512 bytes... */ 7100 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7101 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 7102 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); 7103 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 7104 data->lalba_lbp[0] |= SRC16_LBPME; 7105 7106 ctsio->scsi_status = SCSI_STATUS_OK; 7107 7108 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7109 ctsio->be_move_done = ctl_config_move_done; 7110 ctl_datamove((union ctl_io *)ctsio); 7111 7112 return (CTL_RETVAL_COMPLETE); 7113} 7114 7115int 7116ctl_service_action_in(struct ctl_scsiio *ctsio) 7117{ 7118 struct scsi_service_action_in *cdb; 7119 int retval; 7120 7121 CTL_DEBUG_PRINT(("ctl_service_action_in\n")); 7122 7123 cdb = (struct scsi_service_action_in *)ctsio->cdb; 7124 7125 retval = CTL_RETVAL_COMPLETE; 7126 7127 switch (cdb->service_action) { 7128 case SRC16_SERVICE_ACTION: 7129 retval = ctl_read_capacity_16(ctsio); 7130 break; 7131 default: 7132 ctl_set_invalid_field(/*ctsio*/ ctsio, 7133 /*sks_valid*/ 1, 7134 /*command*/ 1, 7135 /*field*/ 1, 7136 /*bit_valid*/ 1, 7137 /*bit*/ 4); 7138 ctl_done((union ctl_io *)ctsio); 7139 break; 7140 } 7141 7142 return (retval); 7143} 7144 7145int 7146ctl_maintenance_in(struct ctl_scsiio *ctsio) 7147{ 7148 struct scsi_maintenance_in *cdb; 7149 int retval; 7150 int alloc_len, total_len = 0; 7151 int num_target_port_groups, single; 7152 struct ctl_lun *lun; 7153 struct ctl_softc *softc; 7154 struct scsi_target_group_data *rtg_ptr; 7155 struct scsi_target_port_group_descriptor *tpg_desc_ptr1, *tpg_desc_ptr2; 7156 struct scsi_target_port_descriptor *tp_desc_ptr1_1, *tp_desc_ptr1_2, 7157 *tp_desc_ptr2_1, *tp_desc_ptr2_2; 7158 7159 CTL_DEBUG_PRINT(("ctl_maintenance_in\n")); 7160 7161 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 
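	/*
	 * (The response below is sized first, then clamped to the CDB
	 * allocation length, with any shortfall recorded as residual --
	 * the same pattern used throughout this file. A hypothetical
	 * stand-alone restatement of that clamp:)
	 */
#if 0
	uint32_t data_len, residual;

	if (total_len < alloc_len) {
		data_len = total_len;		/* everything fits */
		residual = alloc_len - total_len;
	} else {
		data_len = alloc_len;		/* truncate to the CDB limit */
		residual = 0;
	}
#endif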
7162 softc = control_softc; 7163 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7164 7165 retval = CTL_RETVAL_COMPLETE; 7166 7167 if ((cdb->byte2 & SERVICE_ACTION_MASK) != SA_RPRT_TRGT_GRP) { 7168 ctl_set_invalid_field(/*ctsio*/ ctsio, 7169 /*sks_valid*/ 1, 7170 /*command*/ 1, 7171 /*field*/ 1, 7172 /*bit_valid*/ 1, 7173 /*bit*/ 4); 7174 ctl_done((union ctl_io *)ctsio); 7175 return(retval); 7176 } 7177 7178 single = ctl_is_single; 7179 if (single) 7180 num_target_port_groups = NUM_TARGET_PORT_GROUPS - 1; 7181 else 7182 num_target_port_groups = NUM_TARGET_PORT_GROUPS; 7183 7184 total_len = sizeof(struct scsi_target_group_data) + 7185 sizeof(struct scsi_target_port_group_descriptor) * 7186 num_target_port_groups + 7187 sizeof(struct scsi_target_port_descriptor) * 7188 NUM_PORTS_PER_GRP * num_target_port_groups; 7189 7190 alloc_len = scsi_4btoul(cdb->length); 7191 7192 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7193 7194 ctsio->kern_sg_entries = 0; 7195 7196 if (total_len < alloc_len) { 7197 ctsio->residual = alloc_len - total_len; 7198 ctsio->kern_data_len = total_len; 7199 ctsio->kern_total_len = total_len; 7200 } else { 7201 ctsio->residual = 0; 7202 ctsio->kern_data_len = alloc_len; 7203 ctsio->kern_total_len = alloc_len; 7204 } 7205 ctsio->kern_data_resid = 0; 7206 ctsio->kern_rel_offset = 0; 7207 7208 rtg_ptr = (struct scsi_target_group_data *)ctsio->kern_data_ptr; 7209 7210 tpg_desc_ptr1 = &rtg_ptr->groups[0]; 7211 tp_desc_ptr1_1 = &tpg_desc_ptr1->descriptors[0]; 7212 tp_desc_ptr1_2 = (struct scsi_target_port_descriptor *) 7213 &tp_desc_ptr1_1->desc_list[0]; 7214 7215 if (single == 0) { 7216 tpg_desc_ptr2 = (struct scsi_target_port_group_descriptor *) 7217 &tp_desc_ptr1_2->desc_list[0]; 7218 tp_desc_ptr2_1 = &tpg_desc_ptr2->descriptors[0]; 7219 tp_desc_ptr2_2 = (struct scsi_target_port_descriptor *) 7220 &tp_desc_ptr2_1->desc_list[0]; 7221 } else { 7222 tpg_desc_ptr2 = NULL; 7223 tp_desc_ptr2_1 = NULL; 7224 tp_desc_ptr2_2 = NULL; 7225 } 7226 7227 scsi_ulto4b(total_len - 4, rtg_ptr->length); 7228 if (single == 0) { 7229 if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS) { 7230 if (lun->flags & CTL_LUN_PRIMARY_SC) { 7231 tpg_desc_ptr1->pref_state = TPG_PRIMARY; 7232 tpg_desc_ptr2->pref_state = 7233 TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7234 } else { 7235 tpg_desc_ptr1->pref_state = 7236 TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7237 tpg_desc_ptr2->pref_state = TPG_PRIMARY; 7238 } 7239 } else { 7240 if (lun->flags & CTL_LUN_PRIMARY_SC) { 7241 tpg_desc_ptr1->pref_state = 7242 TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7243 tpg_desc_ptr2->pref_state = TPG_PRIMARY; 7244 } else { 7245 tpg_desc_ptr1->pref_state = TPG_PRIMARY; 7246 tpg_desc_ptr2->pref_state = 7247 TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7248 } 7249 } 7250 } else { 7251 tpg_desc_ptr1->pref_state = TPG_PRIMARY; 7252 } 7253 tpg_desc_ptr1->support = 0; 7254 tpg_desc_ptr1->target_port_group[1] = 1; 7255 tpg_desc_ptr1->status = TPG_IMPLICIT; 7256 tpg_desc_ptr1->target_port_count= NUM_PORTS_PER_GRP; 7257 7258 if (single == 0) { 7259 tpg_desc_ptr2->support = 0; 7260 tpg_desc_ptr2->target_port_group[1] = 2; 7261 tpg_desc_ptr2->status = TPG_IMPLICIT; 7262 tpg_desc_ptr2->target_port_count = NUM_PORTS_PER_GRP; 7263 7264 tp_desc_ptr1_1->relative_target_port_identifier[1] = 1; 7265 tp_desc_ptr1_2->relative_target_port_identifier[1] = 2; 7266 7267 tp_desc_ptr2_1->relative_target_port_identifier[1] = 9; 7268 tp_desc_ptr2_2->relative_target_port_identifier[1] = 10; 7269 } else { 7270 if (ctsio->io_hdr.nexus.targ_port 
< CTL_MAX_PORTS) { 7271 tp_desc_ptr1_1->relative_target_port_identifier[1] = 1; 7272 tp_desc_ptr1_2->relative_target_port_identifier[1] = 2; 7273 } else { 7274 tp_desc_ptr1_1->relative_target_port_identifier[1] = 9; 7275 tp_desc_ptr1_2->relative_target_port_identifier[1] = 10; 7276 } 7277 } 7278 7279 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7280 ctsio->be_move_done = ctl_config_move_done; 7281 7282 CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n", 7283 ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1], 7284 ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3], 7285 ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5], 7286 ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7])); 7287 7288 ctl_datamove((union ctl_io *)ctsio); 7289 return(retval); 7290} 7291 7292int 7293ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) 7294{ 7295 struct scsi_per_res_in *cdb; 7296 int alloc_len, total_len = 0; 7297 /* struct scsi_per_res_in_rsrv in_data; */ 7298 struct ctl_lun *lun; 7299 struct ctl_softc *softc; 7300 7301 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); 7302 7303 softc = control_softc; 7304 7305 cdb = (struct scsi_per_res_in *)ctsio->cdb; 7306 7307 alloc_len = scsi_2btoul(cdb->length); 7308 7309 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7310 7311retry: 7312 mtx_lock(&lun->lun_lock); 7313 switch (cdb->action) { 7314 case SPRI_RK: /* read keys */ 7315 total_len = sizeof(struct scsi_per_res_in_keys) + 7316 lun->pr_key_count * 7317 sizeof(struct scsi_per_res_key); 7318 break; 7319 case SPRI_RR: /* read reservation */ 7320 if (lun->flags & CTL_LUN_PR_RESERVED) 7321 total_len = sizeof(struct scsi_per_res_in_rsrv); 7322 else 7323 total_len = sizeof(struct scsi_per_res_in_header); 7324 break; 7325 case SPRI_RC: /* report capabilities */ 7326 total_len = sizeof(struct scsi_per_res_cap); 7327 break; 7328 case SPRI_RS: /* read full status */ 7329 default: 7330 mtx_unlock(&lun->lun_lock); 7331 ctl_set_invalid_field(ctsio, 7332 /*sks_valid*/ 1, 7333 /*command*/ 1, 7334 /*field*/ 1, 7335 /*bit_valid*/ 1, 7336 /*bit*/ 0); 7337 ctl_done((union ctl_io *)ctsio); 7338 return (CTL_RETVAL_COMPLETE); 7339 break; /* NOTREACHED */ 7340 } 7341 mtx_unlock(&lun->lun_lock); 7342 7343 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7344 7345 if (total_len < alloc_len) { 7346 ctsio->residual = alloc_len - total_len; 7347 ctsio->kern_data_len = total_len; 7348 ctsio->kern_total_len = total_len; 7349 } else { 7350 ctsio->residual = 0; 7351 ctsio->kern_data_len = alloc_len; 7352 ctsio->kern_total_len = alloc_len; 7353 } 7354 7355 ctsio->kern_data_resid = 0; 7356 ctsio->kern_rel_offset = 0; 7357 ctsio->kern_sg_entries = 0; 7358 7359 mtx_lock(&lun->lun_lock); 7360 switch (cdb->action) { 7361 case SPRI_RK: { // read keys 7362 struct scsi_per_res_in_keys *res_keys; 7363 int i, key_count; 7364 7365 res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7366 7367 /* 7368 * We had to drop the lock to allocate our buffer, which 7369 * leaves time for someone to come in with another 7370 * persistent reservation. (That is unlikely, though, 7371 * since this should be the only persistent reservation 7372 * command active right now.) 
7373 */ 7374 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7375 (lun->pr_key_count * 7376 sizeof(struct scsi_per_res_key)))){ 7377 mtx_unlock(&lun->lun_lock); 7378 free(ctsio->kern_data_ptr, M_CTL); 7379 printf("%s: reservation length changed, retrying\n", 7380 __func__); 7381 goto retry; 7382 } 7383 7384 scsi_ulto4b(lun->PRGeneration, res_keys->header.generation); 7385 7386 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7387 lun->pr_key_count, res_keys->header.length); 7388 7389 for (i = 0, key_count = 0; i < 2*CTL_MAX_INITIATORS; i++) { 7390 if (!lun->per_res[i].registered) 7391 continue; 7392 7393 /* 7394 * We used lun->pr_key_count to calculate the 7395 * size to allocate. If it turns out the number of 7396 * initiators with the registered flag set is 7397 * larger than that (i.e. they haven't been kept in 7398 * sync), we've got a problem. 7399 */ 7400 if (key_count >= lun->pr_key_count) { 7401#ifdef NEEDTOPORT 7402 csevent_log(CSC_CTL | CSC_SHELF_SW | 7403 CTL_PR_ERROR, 7404 csevent_LogType_Fault, 7405 csevent_AlertLevel_Yellow, 7406 csevent_FRU_ShelfController, 7407 csevent_FRU_Firmware, 7408 csevent_FRU_Unknown, 7409 "registered keys %d >= key " 7410 "count %d", key_count, 7411 lun->pr_key_count); 7412#endif 7413 key_count++; 7414 continue; 7415 } 7416 memcpy(res_keys->keys[key_count].key, 7417 lun->per_res[i].res_key.key, 7418 ctl_min(sizeof(res_keys->keys[key_count].key), 7419 sizeof(lun->per_res[i].res_key))); 7420 key_count++; 7421 } 7422 break; 7423 } 7424 case SPRI_RR: { // read reservation 7425 struct scsi_per_res_in_rsrv *res; 7426 int tmp_len, header_only; 7427 7428 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7429 7430 scsi_ulto4b(lun->PRGeneration, res->header.generation); 7431 7432 if (lun->flags & CTL_LUN_PR_RESERVED) 7433 { 7434 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7435 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7436 res->header.length); 7437 header_only = 0; 7438 } else { 7439 tmp_len = sizeof(struct scsi_per_res_in_header); 7440 scsi_ulto4b(0, res->header.length); 7441 header_only = 1; 7442 } 7443 7444 /* 7445 * We had to drop the lock to allocate our buffer, which 7446 * leaves time for someone to come in with another 7447 * persistent reservation. (That is unlikely, though, 7448 * since this should be the only persistent reservation 7449 * command active right now.) 7450 */ 7451 if (tmp_len != total_len) { 7452 mtx_unlock(&lun->lun_lock); 7453 free(ctsio->kern_data_ptr, M_CTL); 7454 printf("%s: reservation status changed, retrying\n", 7455 __func__); 7456 goto retry; 7457 } 7458 7459 /* 7460 * No reservation held, so we're done. 7461 */ 7462 if (header_only != 0) 7463 break; 7464 7465 /* 7466 * If the registration is an All Registrants type, the key 7467 * is 0, since it doesn't really matter. 
7468 */ 7469 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7470 memcpy(res->data.reservation, 7471 &lun->per_res[lun->pr_res_idx].res_key, 7472 sizeof(struct scsi_per_res_key)); 7473 } 7474 res->data.scopetype = lun->res_type; 7475 break; 7476 } 7477 case SPRI_RC: //report capabilities 7478 { 7479 struct scsi_per_res_cap *res_cap; 7480 uint16_t type_mask; 7481 7482 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7483 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7484 res_cap->flags2 |= SPRI_TMV | SPRI_ALLOW_3; 7485 type_mask = SPRI_TM_WR_EX_AR | 7486 SPRI_TM_EX_AC_RO | 7487 SPRI_TM_WR_EX_RO | 7488 SPRI_TM_EX_AC | 7489 SPRI_TM_WR_EX | 7490 SPRI_TM_EX_AC_AR; 7491 scsi_ulto2b(type_mask, res_cap->type_mask); 7492 break; 7493 } 7494 case SPRI_RS: //read full status 7495 default: 7496 /* 7497 * This is a bug, because we just checked for this above, 7498 * and should have returned an error. 7499 */ 7500 panic("Invalid PR type %x", cdb->action); 7501 break; /* NOTREACHED */ 7502 } 7503 mtx_unlock(&lun->lun_lock); 7504 7505 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7506 ctsio->be_move_done = ctl_config_move_done; 7507 7508 CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n", 7509 ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1], 7510 ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3], 7511 ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5], 7512 ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7])); 7513 7514 ctl_datamove((union ctl_io *)ctsio); 7515 7516 return (CTL_RETVAL_COMPLETE); 7517} 7518 7519/* 7520 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7521 * it should return. 7522 */ 7523static int 7524ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7525 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7526 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7527 struct scsi_per_res_out_parms* param) 7528{ 7529 union ctl_ha_msg persis_io; 7530 int retval, i; 7531 int isc_retval; 7532 7533 retval = 0; 7534 7535 mtx_lock(&lun->lun_lock); 7536 if (sa_res_key == 0) { 7537 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7538 /* validate scope and type */ 7539 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7540 SPR_LU_SCOPE) { 7541 mtx_unlock(&lun->lun_lock); 7542 ctl_set_invalid_field(/*ctsio*/ ctsio, 7543 /*sks_valid*/ 1, 7544 /*command*/ 1, 7545 /*field*/ 2, 7546 /*bit_valid*/ 1, 7547 /*bit*/ 4); 7548 ctl_done((union ctl_io *)ctsio); 7549 return (1); 7550 } 7551 7552 if (type>8 || type==2 || type==4 || type==0) { 7553 mtx_unlock(&lun->lun_lock); 7554 ctl_set_invalid_field(/*ctsio*/ ctsio, 7555 /*sks_valid*/ 1, 7556 /*command*/ 1, 7557 /*field*/ 2, 7558 /*bit_valid*/ 1, 7559 /*bit*/ 0); 7560 ctl_done((union ctl_io *)ctsio); 7561 return (1); 7562 } 7563 7564 /* temporarily unregister this nexus */ 7565 lun->per_res[residx].registered = 0; 7566 7567 /* 7568 * Unregister everybody else and build UA for 7569 * them 7570 */ 7571 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7572 if (lun->per_res[i].registered == 0) 7573 continue; 7574 7575 if (!persis_offset 7576 && i <CTL_MAX_INITIATORS) 7577 lun->pending_sense[i].ua_pending |= 7578 CTL_UA_REG_PREEMPT; 7579 else if (persis_offset 7580 && i >= persis_offset) 7581 lun->pending_sense[i-persis_offset 7582 ].ua_pending |= 7583 CTL_UA_REG_PREEMPT; 7584 lun->per_res[i].registered = 0; 7585 memset(&lun->per_res[i].res_key, 0, 7586 sizeof(struct scsi_per_res_key)); 7587 } 7588 lun->per_res[residx].registered = 1; 7589 lun->pr_key_count = 1; 7590 lun->res_type = type; 7591 if (lun->res_type != 
SPR_TYPE_WR_EX_AR 7592 && lun->res_type != SPR_TYPE_EX_AC_AR) 7593 lun->pr_res_idx = residx; 7594 7595 /* send msg to other side */ 7596 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7597 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7598 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7599 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7600 persis_io.pr.pr_info.res_type = type; 7601 memcpy(persis_io.pr.pr_info.sa_res_key, 7602 param->serv_act_res_key, 7603 sizeof(param->serv_act_res_key)); 7604 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7605 &persis_io, sizeof(persis_io), 0)) > 7606 CTL_HA_STATUS_SUCCESS) { 7607 printf("CTL:Persis Out error returned " 7608 "from ctl_ha_msg_send %d\n", 7609 isc_retval); 7610 } 7611 } else { 7612 /* not all registrants */ 7613 mtx_unlock(&lun->lun_lock); 7614 free(ctsio->kern_data_ptr, M_CTL); 7615 ctl_set_invalid_field(ctsio, 7616 /*sks_valid*/ 1, 7617 /*command*/ 0, 7618 /*field*/ 8, 7619 /*bit_valid*/ 0, 7620 /*bit*/ 0); 7621 ctl_done((union ctl_io *)ctsio); 7622 return (1); 7623 } 7624 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7625 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 7626 int found = 0; 7627 7628 if (res_key == sa_res_key) { 7629 /* special case */ 7630 /* 7631 * The spec implies this is not good but doesn't 7632 * say what to do. There are two choices: either 7633 * generate a reservation conflict, or a check 7634 * condition with illegal field in parameter data. 7635 * Since that is what is done when the sa_res_key is 7636 * zero, I'll take that approach since this has 7637 * to do with the sa_res_key. 7638 */ 7639 mtx_unlock(&lun->lun_lock); 7640 free(ctsio->kern_data_ptr, M_CTL); 7641 ctl_set_invalid_field(ctsio, 7642 /*sks_valid*/ 1, 7643 /*command*/ 0, 7644 /*field*/ 8, 7645 /*bit_valid*/ 0, 7646 /*bit*/ 0); 7647 ctl_done((union ctl_io *)ctsio); 7648 return (1); 7649 } 7650 7651 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7652 if (!lun->per_res[i].registered 7653 || memcmp(param->serv_act_res_key, 7654 lun->per_res[i].res_key.key, 7655 sizeof(struct scsi_per_res_key)) != 0) 7656 continue; 7657 7658 found = 1; 7659 lun->per_res[i].registered = 0; 7660 memset(&lun->per_res[i].res_key, 0, 7661 sizeof(struct scsi_per_res_key)); 7662 lun->pr_key_count--; 7663 7664 if (!persis_offset 7665 && i < CTL_MAX_INITIATORS) 7666 lun->pending_sense[i].ua_pending |= 7667 CTL_UA_REG_PREEMPT; 7668 else if (persis_offset 7669 && i >= persis_offset) 7670 lun->pending_sense[i-persis_offset].ua_pending|= 7671 CTL_UA_REG_PREEMPT; 7672 } 7673 if (!found) { 7674 mtx_unlock(&lun->lun_lock); 7675 free(ctsio->kern_data_ptr, M_CTL); 7676 ctl_set_reservation_conflict(ctsio); 7677 ctl_done((union ctl_io *)ctsio); 7678 return (1); 7679 } 7680 /* send msg to other side */ 7681 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7682 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7683 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7684 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7685 persis_io.pr.pr_info.res_type = type; 7686 memcpy(persis_io.pr.pr_info.sa_res_key, 7687 param->serv_act_res_key, 7688 sizeof(param->serv_act_res_key)); 7689 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7690 &persis_io, sizeof(persis_io), 0)) > 7691 CTL_HA_STATUS_SUCCESS) { 7692 printf("CTL:Persis Out error returned from " 7693 "ctl_ha_msg_send %d\n", isc_retval); 7694 } 7695 } else { 7696 /* Reserved but not all registrants */ 7697 /* sa_res_key is res holder */ 7698 if (memcmp(param->serv_act_res_key, 7699 lun->per_res[lun->pr_res_idx].res_key.key, 7700 sizeof(struct
scsi_per_res_key)) == 0) { 7701 /* validate scope and type */ 7702 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7703 SPR_LU_SCOPE) { 7704 mtx_unlock(&lun->lun_lock); 7705 ctl_set_invalid_field(/*ctsio*/ ctsio, 7706 /*sks_valid*/ 1, 7707 /*command*/ 1, 7708 /*field*/ 2, 7709 /*bit_valid*/ 1, 7710 /*bit*/ 4); 7711 ctl_done((union ctl_io *)ctsio); 7712 return (1); 7713 } 7714 7715 if (type>8 || type==2 || type==4 || type==0) { 7716 mtx_unlock(&lun->lun_lock); 7717 ctl_set_invalid_field(/*ctsio*/ ctsio, 7718 /*sks_valid*/ 1, 7719 /*command*/ 1, 7720 /*field*/ 2, 7721 /*bit_valid*/ 1, 7722 /*bit*/ 0); 7723 ctl_done((union ctl_io *)ctsio); 7724 return (1); 7725 } 7726 7727 /* 7728 * Do the following: 7729 * If sa_res_key != res_key, remove all 7730 * registrants with sa_res_key and generate a UA 7731 * for those registrants (Registrations 7732 * Preempted). If it wasn't an exclusive 7733 * reservation, generate a UA (Reservations 7734 * Preempted) for all other registered nexuses 7735 * if the type has changed. Establish the new 7736 * reservation and holder. If res_key and 7737 * sa_res_key are the same, do the above 7738 * except don't unregister the res holder. 7739 */ 7740 7741 /* 7742 * Temporarily unregister so it won't get 7743 * removed or UA generated 7744 */ 7745 lun->per_res[residx].registered = 0; 7746 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7747 if (lun->per_res[i].registered == 0) 7748 continue; 7749 7750 if (memcmp(param->serv_act_res_key, 7751 lun->per_res[i].res_key.key, 7752 sizeof(struct scsi_per_res_key)) == 0) { 7753 lun->per_res[i].registered = 0; 7754 memset(&lun->per_res[i].res_key, 7755 0, 7756 sizeof(struct scsi_per_res_key)); 7757 lun->pr_key_count--; 7758 7759 if (!persis_offset 7760 && i < CTL_MAX_INITIATORS) 7761 lun->pending_sense[i 7762 ].ua_pending |= 7763 CTL_UA_REG_PREEMPT; 7764 else if (persis_offset 7765 && i >= persis_offset) 7766 lun->pending_sense[ 7767 i-persis_offset].ua_pending |= 7768 CTL_UA_REG_PREEMPT; 7769 } else if (type != lun->res_type 7770 && (lun->res_type == SPR_TYPE_WR_EX_RO 7771 || lun->res_type ==SPR_TYPE_EX_AC_RO)){ 7772 if (!persis_offset 7773 && i < CTL_MAX_INITIATORS) 7774 lun->pending_sense[i 7775 ].ua_pending |= 7776 CTL_UA_RES_RELEASE; 7777 else if (persis_offset 7778 && i >= persis_offset) 7779 lun->pending_sense[ 7780 i-persis_offset 7781 ].ua_pending |= 7782 CTL_UA_RES_RELEASE; 7783 } 7784 } 7785 lun->per_res[residx].registered = 1; 7786 lun->res_type = type; 7787 if (lun->res_type != SPR_TYPE_WR_EX_AR 7788 && lun->res_type != SPR_TYPE_EX_AC_AR) 7789 lun->pr_res_idx = residx; 7790 else 7791 lun->pr_res_idx = 7792 CTL_PR_ALL_REGISTRANTS; 7793 7794 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7795 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7796 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7797 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7798 persis_io.pr.pr_info.res_type = type; 7799 memcpy(persis_io.pr.pr_info.sa_res_key, 7800 param->serv_act_res_key, 7801 sizeof(param->serv_act_res_key)); 7802 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7803 &persis_io, sizeof(persis_io), 0)) > 7804 CTL_HA_STATUS_SUCCESS) { 7805 printf("CTL:Persis Out error returned " 7806 "from ctl_ha_msg_send %d\n", 7807 isc_retval); 7808 } 7809 } else { 7810 /* 7811 * sa_res_key is not the res holder; just 7812 * remove registrants 7813 */ 7814 int found=0; 7815 7816 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7817 if (memcmp(param->serv_act_res_key, 7818 lun->per_res[i].res_key.key, 7819 sizeof(struct scsi_per_res_key)) != 0) 7820 continue; 7821 7822 found = 1; 7823
lun->per_res[i].registered = 0; 7824 memset(&lun->per_res[i].res_key, 0, 7825 sizeof(struct scsi_per_res_key)); 7826 lun->pr_key_count--; 7827 7828 if (!persis_offset 7829 && i < CTL_MAX_INITIATORS) 7830 lun->pending_sense[i].ua_pending |= 7831 CTL_UA_REG_PREEMPT; 7832 else if (persis_offset 7833 && i >= persis_offset) 7834 lun->pending_sense[ 7835 i-persis_offset].ua_pending |= 7836 CTL_UA_REG_PREEMPT; 7837 } 7838 7839 if (!found) { 7840 mtx_unlock(&lun->lun_lock); 7841 free(ctsio->kern_data_ptr, M_CTL); 7842 ctl_set_reservation_conflict(ctsio); 7843 ctl_done((union ctl_io *)ctsio); 7844 return (1); 7845 } 7846 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7847 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7848 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7849 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7850 persis_io.pr.pr_info.res_type = type; 7851 memcpy(persis_io.pr.pr_info.sa_res_key, 7852 param->serv_act_res_key, 7853 sizeof(param->serv_act_res_key)); 7854 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7855 &persis_io, sizeof(persis_io), 0)) > 7856 CTL_HA_STATUS_SUCCESS) { 7857 printf("CTL:Persis Out error returned " 7858 "from ctl_ha_msg_send %d\n", 7859 isc_retval); 7860 } 7861 } 7862 } 7863 7864 lun->PRGeneration++; 7865 mtx_unlock(&lun->lun_lock); 7866 7867 return (retval); 7868} 7869 7870static void 7871ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 7872{ 7873 int i; 7874 7875 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7876 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 7877 || memcmp(&lun->per_res[lun->pr_res_idx].res_key, 7878 msg->pr.pr_info.sa_res_key, 7879 sizeof(struct scsi_per_res_key)) != 0) { 7880 uint64_t sa_res_key; 7881 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 7882 7883 if (sa_res_key == 0) { 7884 /* temporarily unregister this nexus */ 7885 lun->per_res[msg->pr.pr_info.residx].registered = 0; 7886 7887 /* 7888 * Unregister everybody else and build UA for 7889 * them 7890 */ 7891 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7892 if (lun->per_res[i].registered == 0) 7893 continue; 7894 7895 if (!persis_offset 7896 && i < CTL_MAX_INITIATORS) 7897 lun->pending_sense[i].ua_pending |= 7898 CTL_UA_REG_PREEMPT; 7899 else if (persis_offset && i >= persis_offset) 7900 lun->pending_sense[i - 7901 persis_offset].ua_pending |= 7902 CTL_UA_REG_PREEMPT; 7903 lun->per_res[i].registered = 0; 7904 memset(&lun->per_res[i].res_key, 0, 7905 sizeof(struct scsi_per_res_key)); 7906 } 7907 7908 lun->per_res[msg->pr.pr_info.residx].registered = 1; 7909 lun->pr_key_count = 1; 7910 lun->res_type = msg->pr.pr_info.res_type; 7911 if (lun->res_type != SPR_TYPE_WR_EX_AR 7912 && lun->res_type != SPR_TYPE_EX_AC_AR) 7913 lun->pr_res_idx = msg->pr.pr_info.residx; 7914 } else { 7915 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7916 if (memcmp(msg->pr.pr_info.sa_res_key, 7917 lun->per_res[i].res_key.key, 7918 sizeof(struct scsi_per_res_key)) != 0) 7919 continue; 7920 7921 lun->per_res[i].registered = 0; 7922 memset(&lun->per_res[i].res_key, 0, 7923 sizeof(struct scsi_per_res_key)); 7924 lun->pr_key_count--; 7925 7926 if (!persis_offset 7927 && i < CTL_MAX_INITIATORS) 7928 lun->pending_sense[i].ua_pending |= 7929 CTL_UA_REG_PREEMPT; 7930 else if (persis_offset 7931 && i >= persis_offset) 7932 lun->pending_sense[i - 7933 persis_offset].ua_pending |= 7934 CTL_UA_REG_PREEMPT; 7935 } 7936 } 7937 } else { 7938 /* 7939 * Temporarily unregister so it won't get removed 7940 * or UA generated 7941 */ 7942 lun->per_res[msg->pr.pr_info.residx].registered = 0; 7943 for (i=0; i <
2*CTL_MAX_INITIATORS; i++) { 7944 if (lun->per_res[i].registered == 0) 7945 continue; 7946 7947 if (memcmp(msg->pr.pr_info.sa_res_key, 7948 lun->per_res[i].res_key.key, 7949 sizeof(struct scsi_per_res_key)) == 0) { 7950 lun->per_res[i].registered = 0; 7951 memset(&lun->per_res[i].res_key, 0, 7952 sizeof(struct scsi_per_res_key)); 7953 lun->pr_key_count--; 7954 if (!persis_offset 7955 && i < CTL_MAX_INITIATORS) 7956 lun->pending_sense[i].ua_pending |= 7957 CTL_UA_REG_PREEMPT; 7958 else if (persis_offset 7959 && i >= persis_offset) 7960 lun->pending_sense[i - 7961 persis_offset].ua_pending |= 7962 CTL_UA_REG_PREEMPT; 7963 } else if (msg->pr.pr_info.res_type != lun->res_type 7964 && (lun->res_type == SPR_TYPE_WR_EX_RO 7965 || lun->res_type == SPR_TYPE_EX_AC_RO)) { 7966 if (!persis_offset 7967 && i < CTL_MAX_INITIATORS) 7968 lun->pending_sense[i 7969 ].ua_pending |= 7970 CTL_UA_RES_RELEASE; 7971 else if (persis_offset 7972 && i >= persis_offset) 7973 lun->pending_sense[i - 7974 persis_offset].ua_pending |= 7975 CTL_UA_RES_RELEASE; 7976 } 7977 } 7978 lun->per_res[msg->pr.pr_info.residx].registered = 1; 7979 lun->res_type = msg->pr.pr_info.res_type; 7980 if (lun->res_type != SPR_TYPE_WR_EX_AR 7981 && lun->res_type != SPR_TYPE_EX_AC_AR) 7982 lun->pr_res_idx = msg->pr.pr_info.residx; 7983 else 7984 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 7985 } 7986 lun->PRGeneration++; 7987 7988} 7989 7990 7991int 7992ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 7993{ 7994 int retval; 7995 int isc_retval; 7996 u_int32_t param_len; 7997 struct scsi_per_res_out *cdb; 7998 struct ctl_lun *lun; 7999 struct scsi_per_res_out_parms* param; 8000 struct ctl_softc *softc; 8001 uint32_t residx; 8002 uint64_t res_key, sa_res_key; 8003 uint8_t type; 8004 union ctl_ha_msg persis_io; 8005 int i; 8006 8007 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 8008 8009 retval = CTL_RETVAL_COMPLETE; 8010 8011 softc = control_softc; 8012 8013 cdb = (struct scsi_per_res_out *)ctsio->cdb; 8014 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8015 8016 /* 8017 * We only support whole-LUN scope. The scope & type are ignored for 8018 * register, register and ignore existing key and clear. 8019 * We sometimes ignore scope and type on preempts too!! 8020 * Verify reservation type here as well.
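 *
 * (The "type>8 || type==2 || type==4 || type==0" test below admits
 * exactly the six persistent reservation types SPC-3 defines: 1h WR_EX,
 * 3h EX_AC, 5h WR_EX_RO, 6h EX_AC_RO, 7h WR_EX_AR and 8h EX_AC_AR;
 * 0h, 2h, 4h and everything above 8h are reserved. The same predicate,
 * spelled out for illustration:)
 */
#if 0
static int
pr_type_valid(int type)
{
	switch (type) {
	case 1: case 3: case 5: case 6: case 7: case 8:
		return (1);
	default:
		return (0);
	}
}
#endif
/*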
8021 */ 8022 type = cdb->scope_type & SPR_TYPE_MASK; 8023 if ((cdb->action == SPRO_RESERVE) 8024 || (cdb->action == SPRO_RELEASE)) { 8025 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 8026 ctl_set_invalid_field(/*ctsio*/ ctsio, 8027 /*sks_valid*/ 1, 8028 /*command*/ 1, 8029 /*field*/ 2, 8030 /*bit_valid*/ 1, 8031 /*bit*/ 4); 8032 ctl_done((union ctl_io *)ctsio); 8033 return (CTL_RETVAL_COMPLETE); 8034 } 8035 8036 if (type>8 || type==2 || type==4 || type==0) { 8037 ctl_set_invalid_field(/*ctsio*/ ctsio, 8038 /*sks_valid*/ 1, 8039 /*command*/ 1, 8040 /*field*/ 2, 8041 /*bit_valid*/ 1, 8042 /*bit*/ 0); 8043 ctl_done((union ctl_io *)ctsio); 8044 return (CTL_RETVAL_COMPLETE); 8045 } 8046 } 8047 8048 switch (cdb->action & SPRO_ACTION_MASK) { 8049 case SPRO_REGISTER: 8050 case SPRO_RESERVE: 8051 case SPRO_RELEASE: 8052 case SPRO_CLEAR: 8053 case SPRO_PREEMPT: 8054 case SPRO_REG_IGNO: 8055 break; 8056 case SPRO_REG_MOVE: 8057 case SPRO_PRE_ABO: 8058 default: 8059 ctl_set_invalid_field(/*ctsio*/ ctsio, 8060 /*sks_valid*/ 1, 8061 /*command*/ 1, 8062 /*field*/ 1, 8063 /*bit_valid*/ 1, 8064 /*bit*/ 0); 8065 ctl_done((union ctl_io *)ctsio); 8066 return (CTL_RETVAL_COMPLETE); 8067 break; /* NOTREACHED */ 8068 } 8069 8070 param_len = scsi_4btoul(cdb->length); 8071 8072 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 8073 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 8074 ctsio->kern_data_len = param_len; 8075 ctsio->kern_total_len = param_len; 8076 ctsio->kern_data_resid = 0; 8077 ctsio->kern_rel_offset = 0; 8078 ctsio->kern_sg_entries = 0; 8079 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 8080 ctsio->be_move_done = ctl_config_move_done; 8081 ctl_datamove((union ctl_io *)ctsio); 8082 8083 return (CTL_RETVAL_COMPLETE); 8084 } 8085 8086 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 8087 8088 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 8089 res_key = scsi_8btou64(param->res_key.key); 8090 sa_res_key = scsi_8btou64(param->serv_act_res_key); 8091 8092 /* 8093 * Validate the reservation key here except for SPRO_REG_IGNO 8094 * This must be done for all other service actions 8095 */ 8096 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 8097 mtx_lock(&lun->lun_lock); 8098 if (lun->per_res[residx].registered) { 8099 if (memcmp(param->res_key.key, 8100 lun->per_res[residx].res_key.key, 8101 ctl_min(sizeof(param->res_key), 8102 sizeof(lun->per_res[residx].res_key))) != 0) { 8103 /* 8104 * The current key passed in doesn't match 8105 * the one the initiator previously 8106 * registered. 8107 */ 8108 mtx_unlock(&lun->lun_lock); 8109 free(ctsio->kern_data_ptr, M_CTL); 8110 ctl_set_reservation_conflict(ctsio); 8111 ctl_done((union ctl_io *)ctsio); 8112 return (CTL_RETVAL_COMPLETE); 8113 } 8114 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 8115 /* 8116 * We are not registered 8117 */ 8118 mtx_unlock(&lun->lun_lock); 8119 free(ctsio->kern_data_ptr, M_CTL); 8120 ctl_set_reservation_conflict(ctsio); 8121 ctl_done((union ctl_io *)ctsio); 8122 return (CTL_RETVAL_COMPLETE); 8123 } else if (res_key != 0) { 8124 /* 8125 * We are not registered and trying to register but 8126 * the register key isn't zero. 
8127 */ 8128 mtx_unlock(&lun->lun_lock); 8129 free(ctsio->kern_data_ptr, M_CTL); 8130 ctl_set_reservation_conflict(ctsio); 8131 ctl_done((union ctl_io *)ctsio); 8132 return (CTL_RETVAL_COMPLETE); 8133 } 8134 mtx_unlock(&lun->lun_lock); 8135 } 8136 8137 switch (cdb->action & SPRO_ACTION_MASK) { 8138 case SPRO_REGISTER: 8139 case SPRO_REG_IGNO: { 8140 8141#if 0 8142 printf("Registration received\n"); 8143#endif 8144 8145 /* 8146 * We don't support any of these options, as we report in 8147 * the read capabilities request (see 8148 * ctl_persistent_reserve_in(), above). 8149 */ 8150 if ((param->flags & SPR_SPEC_I_PT) 8151 || (param->flags & SPR_ALL_TG_PT) 8152 || (param->flags & SPR_APTPL)) { 8153 int bit_ptr; 8154 8155 if (param->flags & SPR_APTPL) 8156 bit_ptr = 0; 8157 else if (param->flags & SPR_ALL_TG_PT) 8158 bit_ptr = 2; 8159 else /* SPR_SPEC_I_PT */ 8160 bit_ptr = 3; 8161 8162 free(ctsio->kern_data_ptr, M_CTL); 8163 ctl_set_invalid_field(ctsio, 8164 /*sks_valid*/ 1, 8165 /*command*/ 0, 8166 /*field*/ 20, 8167 /*bit_valid*/ 1, 8168 /*bit*/ bit_ptr); 8169 ctl_done((union ctl_io *)ctsio); 8170 return (CTL_RETVAL_COMPLETE); 8171 } 8172 8173 mtx_lock(&lun->lun_lock); 8174 8175 /* 8176 * The initiator wants to clear the 8177 * key/unregister. 8178 */ 8179 if (sa_res_key == 0) { 8180 if ((res_key == 0 8181 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 8182 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 8183 && !lun->per_res[residx].registered)) { 8184 mtx_unlock(&lun->lun_lock); 8185 goto done; 8186 } 8187 8188 lun->per_res[residx].registered = 0; 8189 memset(&lun->per_res[residx].res_key, 8190 0, sizeof(lun->per_res[residx].res_key)); 8191 lun->pr_key_count--; 8192 8193 if (residx == lun->pr_res_idx) { 8194 lun->flags &= ~CTL_LUN_PR_RESERVED; 8195 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8196 8197 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8198 || lun->res_type == SPR_TYPE_EX_AC_RO) 8199 && lun->pr_key_count) { 8200 /* 8201 * If the reservation is a registrants 8202 * only type we need to generate a UA 8203 * for other registered inits. The 8204 * sense code should be RESERVATIONS 8205 * RELEASED 8206 */ 8207 8208 for (i = 0; i < CTL_MAX_INITIATORS;i++){ 8209 if (lun->per_res[ 8210 i+persis_offset].registered 8211 == 0) 8212 continue; 8213 lun->pending_sense[i 8214 ].ua_pending |= 8215 CTL_UA_RES_RELEASE; 8216 } 8217 } 8218 lun->res_type = 0; 8219 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8220 if (lun->pr_key_count==0) { 8221 lun->flags &= ~CTL_LUN_PR_RESERVED; 8222 lun->res_type = 0; 8223 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8224 } 8225 } 8226 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8227 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8228 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8229 persis_io.pr.pr_info.residx = residx; 8230 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8231 &persis_io, sizeof(persis_io), 0 )) > 8232 CTL_HA_STATUS_SUCCESS) { 8233 printf("CTL:Persis Out error returned from " 8234 "ctl_ha_msg_send %d\n", isc_retval); 8235 } 8236 } else /* sa_res_key != 0 */ { 8237 8238 /* 8239 * If we aren't registered currently then increment 8240 * the key count and set the registered flag. 
8241 */ 8242 if (!lun->per_res[residx].registered) { 8243 lun->pr_key_count++; 8244 lun->per_res[residx].registered = 1; 8245 } 8246 8247 memcpy(&lun->per_res[residx].res_key, 8248 param->serv_act_res_key, 8249 ctl_min(sizeof(param->serv_act_res_key), 8250 sizeof(lun->per_res[residx].res_key))); 8251 8252 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8253 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8254 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8255 persis_io.pr.pr_info.residx = residx; 8256 memcpy(persis_io.pr.pr_info.sa_res_key, 8257 param->serv_act_res_key, 8258 sizeof(param->serv_act_res_key)); 8259 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8260 &persis_io, sizeof(persis_io), 0)) > 8261 CTL_HA_STATUS_SUCCESS) { 8262 printf("CTL:Persis Out error returned from " 8263 "ctl_ha_msg_send %d\n", isc_retval); 8264 } 8265 } 8266 lun->PRGeneration++; 8267 mtx_unlock(&lun->lun_lock); 8268 8269 break; 8270 } 8271 case SPRO_RESERVE: 8272#if 0 8273 printf("Reserve executed type %d\n", type); 8274#endif 8275 mtx_lock(&lun->lun_lock); 8276 if (lun->flags & CTL_LUN_PR_RESERVED) { 8277 /* 8278 * if this isn't the reservation holder and it's 8279 * not a "all registrants" type or if the type is 8280 * different then we have a conflict 8281 */ 8282 if ((lun->pr_res_idx != residx 8283 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) 8284 || lun->res_type != type) { 8285 mtx_unlock(&lun->lun_lock); 8286 free(ctsio->kern_data_ptr, M_CTL); 8287 ctl_set_reservation_conflict(ctsio); 8288 ctl_done((union ctl_io *)ctsio); 8289 return (CTL_RETVAL_COMPLETE); 8290 } 8291 mtx_unlock(&lun->lun_lock); 8292 } else /* create a reservation */ { 8293 /* 8294 * If it's not an "all registrants" type record 8295 * reservation holder 8296 */ 8297 if (type != SPR_TYPE_WR_EX_AR 8298 && type != SPR_TYPE_EX_AC_AR) 8299 lun->pr_res_idx = residx; /* Res holder */ 8300 else 8301 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8302 8303 lun->flags |= CTL_LUN_PR_RESERVED; 8304 lun->res_type = type; 8305 8306 mtx_unlock(&lun->lun_lock); 8307 8308 /* send msg to other side */ 8309 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8310 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8311 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8312 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8313 persis_io.pr.pr_info.res_type = type; 8314 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8315 &persis_io, sizeof(persis_io), 0)) > 8316 CTL_HA_STATUS_SUCCESS) { 8317 printf("CTL:Persis Out error returned from " 8318 "ctl_ha_msg_send %d\n", isc_retval); 8319 } 8320 } 8321 break; 8322 8323 case SPRO_RELEASE: 8324 mtx_lock(&lun->lun_lock); 8325 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8326 /* No reservation exists return good status */ 8327 mtx_unlock(&lun->lun_lock); 8328 goto done; 8329 } 8330 /* 8331 * Is this nexus a reservation holder? 8332 */ 8333 if (lun->pr_res_idx != residx 8334 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 8335 /* 8336 * not a res holder return good status but 8337 * do nothing 8338 */ 8339 mtx_unlock(&lun->lun_lock); 8340 goto done; 8341 } 8342 8343 if (lun->res_type != type) { 8344 mtx_unlock(&lun->lun_lock); 8345 free(ctsio->kern_data_ptr, M_CTL); 8346 ctl_set_illegal_pr_release(ctsio); 8347 ctl_done((union ctl_io *)ctsio); 8348 return (CTL_RETVAL_COMPLETE); 8349 } 8350 8351 /* okay to release */ 8352 lun->flags &= ~CTL_LUN_PR_RESERVED; 8353 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8354 lun->res_type = 0; 8355 8356 /* 8357 * if this isn't an exclusive access 8358 * res generate UA for all other 8359 * registrants. 
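 */

/*
 * The rule the block below implements, restated as a standalone
 * predicate (example_release_notifies_others() is hypothetical):
 * WR_EX and EX_AC reservations are private to the holder, so only
 * the other reservation types broadcast a RESERVATIONS RELEASED UA
 * to the remaining registrants when released.
 */
static int
example_release_notifies_others(uint8_t type)
{
	return (type != SPR_TYPE_EX_AC && type != SPR_TYPE_WR_EX);
}

/*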
8360 */
8361 if (type != SPR_TYPE_EX_AC
8362 && type != SPR_TYPE_WR_EX) {
8363 /*
8364 * temporarily unregister so we don't generate UA
8365 */
8366 lun->per_res[residx].registered = 0;
8367
8368 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
8369 if (lun->per_res[i+persis_offset].registered
8370 == 0)
8371 continue;
8372 lun->pending_sense[i].ua_pending |=
8373 CTL_UA_RES_RELEASE;
8374 }
8375
8376 lun->per_res[residx].registered = 1;
8377 }
8378 mtx_unlock(&lun->lun_lock);
8379 /* Send msg to other side */
8380 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8381 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8382 persis_io.pr.pr_info.action = CTL_PR_RELEASE;
8383 if ((isc_retval=ctl_ha_msg_send( CTL_HA_CHAN_CTL, &persis_io,
8384 sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) {
8385 printf("CTL:Persis Out error returned from "
8386 "ctl_ha_msg_send %d\n", isc_retval);
8387 }
8388 break;
8389
8390 case SPRO_CLEAR:
8391 /* send msg to other side */
8392
8393 mtx_lock(&lun->lun_lock);
8394 lun->flags &= ~CTL_LUN_PR_RESERVED;
8395 lun->res_type = 0;
8396 lun->pr_key_count = 0;
8397 lun->pr_res_idx = CTL_PR_NO_RESERVATION;
8398
8399
8400 memset(&lun->per_res[residx].res_key,
8401 0, sizeof(lun->per_res[residx].res_key));
8402 lun->per_res[residx].registered = 0;
8403
8404 for (i=0; i < 2*CTL_MAX_INITIATORS; i++)
8405 if (lun->per_res[i].registered) {
8406 if (!persis_offset && i < CTL_MAX_INITIATORS)
8407 lun->pending_sense[i].ua_pending |=
8408 CTL_UA_RES_PREEMPT;
8409 else if (persis_offset && i >= persis_offset)
8410 lun->pending_sense[i-persis_offset
8411 ].ua_pending |= CTL_UA_RES_PREEMPT;
8412
8413 memset(&lun->per_res[i].res_key,
8414 0, sizeof(struct scsi_per_res_key));
8415 lun->per_res[i].registered = 0;
8416 }
8417 lun->PRGeneration++;
8418 mtx_unlock(&lun->lun_lock);
8419 persis_io.hdr.nexus = ctsio->io_hdr.nexus;
8420 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
8421 persis_io.pr.pr_info.action = CTL_PR_CLEAR;
8422 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
8423 sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) {
8424 printf("CTL:Persis Out error returned from "
8425 "ctl_ha_msg_send %d\n", isc_retval);
8426 }
8427 break;
8428
8429 case SPRO_PREEMPT: {
8430 int nretval;
8431
8432 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type,
8433 residx, ctsio, cdb, param);
8434 if (nretval != 0)
8435 return (CTL_RETVAL_COMPLETE);
8436 break;
8437 }
8438 case SPRO_REG_MOVE:
8439 case SPRO_PRE_ABO:
8440 default:
8441 free(ctsio->kern_data_ptr, M_CTL);
8442 ctl_set_invalid_field(/*ctsio*/ ctsio,
8443 /*sks_valid*/ 1,
8444 /*command*/ 1,
8445 /*field*/ 1,
8446 /*bit_valid*/ 1,
8447 /*bit*/ 0);
8448 ctl_done((union ctl_io *)ctsio);
8449 return (CTL_RETVAL_COMPLETE);
8450 break; /* NOTREACHED */
8451 }
8452
8453done:
8454 free(ctsio->kern_data_ptr, M_CTL);
8455 ctl_set_success(ctsio);
8456 ctl_done((union ctl_io *)ctsio);
8457
8458 return (retval);
8459}
8460
8461/*
8462 * This routine is for handling a message from the other SC pertaining to
8463 * persistent reserve out. All the error checking will have been done
8464 * so only performing the action needs to be done here to keep the
8465 * two SCs in sync.
8466 */ 8467static void 8468ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg) 8469{ 8470 struct ctl_lun *lun; 8471 struct ctl_softc *softc; 8472 int i; 8473 uint32_t targ_lun; 8474 8475 softc = control_softc; 8476 8477 targ_lun = msg->hdr.nexus.targ_mapped_lun; 8478 lun = softc->ctl_luns[targ_lun]; 8479 mtx_lock(&lun->lun_lock); 8480 switch(msg->pr.pr_info.action) { 8481 case CTL_PR_REG_KEY: 8482 if (!lun->per_res[msg->pr.pr_info.residx].registered) { 8483 lun->per_res[msg->pr.pr_info.residx].registered = 1; 8484 lun->pr_key_count++; 8485 } 8486 lun->PRGeneration++; 8487 memcpy(&lun->per_res[msg->pr.pr_info.residx].res_key, 8488 msg->pr.pr_info.sa_res_key, 8489 sizeof(struct scsi_per_res_key)); 8490 break; 8491 8492 case CTL_PR_UNREG_KEY: 8493 lun->per_res[msg->pr.pr_info.residx].registered = 0; 8494 memset(&lun->per_res[msg->pr.pr_info.residx].res_key, 8495 0, sizeof(struct scsi_per_res_key)); 8496 lun->pr_key_count--; 8497 8498 /* XXX Need to see if the reservation has been released */ 8499 /* if so do we need to generate UA? */ 8500 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8501 lun->flags &= ~CTL_LUN_PR_RESERVED; 8502 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8503 8504 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8505 || lun->res_type == SPR_TYPE_EX_AC_RO) 8506 && lun->pr_key_count) { 8507 /* 8508 * If the reservation is a registrants 8509 * only type we need to generate a UA 8510 * for other registered inits. The 8511 * sense code should be RESERVATIONS 8512 * RELEASED 8513 */ 8514 8515 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8516 if (lun->per_res[i+ 8517 persis_offset].registered == 0) 8518 continue; 8519 8520 lun->pending_sense[i 8521 ].ua_pending |= 8522 CTL_UA_RES_RELEASE; 8523 } 8524 } 8525 lun->res_type = 0; 8526 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8527 if (lun->pr_key_count==0) { 8528 lun->flags &= ~CTL_LUN_PR_RESERVED; 8529 lun->res_type = 0; 8530 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8531 } 8532 } 8533 lun->PRGeneration++; 8534 break; 8535 8536 case CTL_PR_RESERVE: 8537 lun->flags |= CTL_LUN_PR_RESERVED; 8538 lun->res_type = msg->pr.pr_info.res_type; 8539 lun->pr_res_idx = msg->pr.pr_info.residx; 8540 8541 break; 8542 8543 case CTL_PR_RELEASE: 8544 /* 8545 * if this isn't an exclusive access res generate UA for all 8546 * other registrants. 
8547 */ 8548 if (lun->res_type != SPR_TYPE_EX_AC 8549 && lun->res_type != SPR_TYPE_WR_EX) { 8550 for (i = 0; i < CTL_MAX_INITIATORS; i++) 8551 if (lun->per_res[i+persis_offset].registered) 8552 lun->pending_sense[i].ua_pending |= 8553 CTL_UA_RES_RELEASE; 8554 } 8555 8556 lun->flags &= ~CTL_LUN_PR_RESERVED; 8557 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8558 lun->res_type = 0; 8559 break; 8560 8561 case CTL_PR_PREEMPT: 8562 ctl_pro_preempt_other(lun, msg); 8563 break; 8564 case CTL_PR_CLEAR: 8565 lun->flags &= ~CTL_LUN_PR_RESERVED; 8566 lun->res_type = 0; 8567 lun->pr_key_count = 0; 8568 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8569 8570 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8571 if (lun->per_res[i].registered == 0) 8572 continue; 8573 if (!persis_offset 8574 && i < CTL_MAX_INITIATORS) 8575 lun->pending_sense[i].ua_pending |= 8576 CTL_UA_RES_PREEMPT; 8577 else if (persis_offset 8578 && i >= persis_offset) 8579 lun->pending_sense[i-persis_offset].ua_pending|= 8580 CTL_UA_RES_PREEMPT; 8581 memset(&lun->per_res[i].res_key, 0, 8582 sizeof(struct scsi_per_res_key)); 8583 lun->per_res[i].registered = 0; 8584 } 8585 lun->PRGeneration++; 8586 break; 8587 } 8588 8589 mtx_unlock(&lun->lun_lock); 8590} 8591 8592int 8593ctl_read_write(struct ctl_scsiio *ctsio) 8594{ 8595 struct ctl_lun *lun; 8596 struct ctl_lba_len_flags *lbalen; 8597 uint64_t lba; 8598 uint32_t num_blocks; 8599 int reladdr, fua, dpo, ebp; 8600 int retval; 8601 int isread; 8602 8603 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8604 8605 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); 8606 8607 reladdr = 0; 8608 fua = 0; 8609 dpo = 0; 8610 ebp = 0; 8611 8612 retval = CTL_RETVAL_COMPLETE; 8613 8614 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 8615 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; 8616 if (lun->flags & CTL_LUN_PR_RESERVED && isread) { 8617 uint32_t residx; 8618 8619 /* 8620 * XXX KDM need a lock here. 8621 */ 8622 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 8623 if ((lun->res_type == SPR_TYPE_EX_AC 8624 && residx != lun->pr_res_idx) 8625 || ((lun->res_type == SPR_TYPE_EX_AC_RO 8626 || lun->res_type == SPR_TYPE_EX_AC_AR) 8627 && !lun->per_res[residx].registered)) { 8628 ctl_set_reservation_conflict(ctsio); 8629 ctl_done((union ctl_io *)ctsio); 8630 return (CTL_RETVAL_COMPLETE); 8631 } 8632 } 8633 8634 switch (ctsio->cdb[0]) { 8635 case READ_6: 8636 case WRITE_6: { 8637 struct scsi_rw_6 *cdb; 8638 8639 cdb = (struct scsi_rw_6 *)ctsio->cdb; 8640 8641 lba = scsi_3btoul(cdb->addr); 8642 /* only 5 bits are valid in the most significant address byte */ 8643 lba &= 0x1fffff; 8644 num_blocks = cdb->length; 8645 /* 8646 * This is correct according to SBC-2. 8647 */ 8648 if (num_blocks == 0) 8649 num_blocks = 256; 8650 break; 8651 } 8652 case READ_10: 8653 case WRITE_10: { 8654 struct scsi_rw_10 *cdb; 8655 8656 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8657 8658 if (cdb->byte2 & SRW10_RELADDR) 8659 reladdr = 1; 8660 if (cdb->byte2 & SRW10_FUA) 8661 fua = 1; 8662 if (cdb->byte2 & SRW10_DPO) 8663 dpo = 1; 8664 8665 if ((cdb->opcode == WRITE_10) 8666 && (cdb->byte2 & SRW10_EBP)) 8667 ebp = 1; 8668 8669 lba = scsi_4btoul(cdb->addr); 8670 num_blocks = scsi_2btoul(cdb->length); 8671 break; 8672 } 8673 case WRITE_VERIFY_10: { 8674 struct scsi_write_verify_10 *cdb; 8675 8676 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 8677 8678 /* 8679 * XXX KDM we should do actual write verify support at some 8680 * point. 
This is obviously fake, we're just translating 8681 * things to a write. So we don't even bother checking the 8682 * BYTCHK field, since we don't do any verification. If 8683 * the user asks for it, we'll just pretend we did it. 8684 */ 8685 if (cdb->byte2 & SWV_DPO) 8686 dpo = 1; 8687 8688 lba = scsi_4btoul(cdb->addr); 8689 num_blocks = scsi_2btoul(cdb->length); 8690 break; 8691 } 8692 case READ_12: 8693 case WRITE_12: { 8694 struct scsi_rw_12 *cdb; 8695 8696 cdb = (struct scsi_rw_12 *)ctsio->cdb; 8697 8698 if (cdb->byte2 & SRW12_RELADDR) 8699 reladdr = 1; 8700 if (cdb->byte2 & SRW12_FUA) 8701 fua = 1; 8702 if (cdb->byte2 & SRW12_DPO) 8703 dpo = 1; 8704 lba = scsi_4btoul(cdb->addr); 8705 num_blocks = scsi_4btoul(cdb->length); 8706 break; 8707 } 8708 case WRITE_VERIFY_12: { 8709 struct scsi_write_verify_12 *cdb; 8710 8711 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 8712 8713 if (cdb->byte2 & SWV_DPO) 8714 dpo = 1; 8715 8716 lba = scsi_4btoul(cdb->addr); 8717 num_blocks = scsi_4btoul(cdb->length); 8718 8719 break; 8720 } 8721 case READ_16: 8722 case WRITE_16: { 8723 struct scsi_rw_16 *cdb; 8724 8725 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8726 8727 if (cdb->byte2 & SRW12_RELADDR) 8728 reladdr = 1; 8729 if (cdb->byte2 & SRW12_FUA) 8730 fua = 1; 8731 if (cdb->byte2 & SRW12_DPO) 8732 dpo = 1; 8733 8734 lba = scsi_8btou64(cdb->addr); 8735 num_blocks = scsi_4btoul(cdb->length); 8736 break; 8737 } 8738 case WRITE_VERIFY_16: { 8739 struct scsi_write_verify_16 *cdb; 8740 8741 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 8742 8743 if (cdb->byte2 & SWV_DPO) 8744 dpo = 1; 8745 8746 lba = scsi_8btou64(cdb->addr); 8747 num_blocks = scsi_4btoul(cdb->length); 8748 break; 8749 } 8750 default: 8751 /* 8752 * We got a command we don't support. This shouldn't 8753 * happen, commands should be filtered out above us. 8754 */ 8755 ctl_set_invalid_opcode(ctsio); 8756 ctl_done((union ctl_io *)ctsio); 8757 8758 return (CTL_RETVAL_COMPLETE); 8759 break; /* NOTREACHED */ 8760 } 8761 8762 /* 8763 * XXX KDM what do we do with the DPO and FUA bits? FUA might be 8764 * interesting for us, but if RAIDCore is in write-back mode, 8765 * getting it to do write-through for a particular transaction may 8766 * not be possible. 8767 */ 8768 /* 8769 * We don't support relative addressing. That also requires 8770 * supporting linked commands, which we don't do. 8771 */ 8772 if (reladdr != 0) { 8773 ctl_set_invalid_field(ctsio, 8774 /*sks_valid*/ 1, 8775 /*command*/ 1, 8776 /*field*/ 1, 8777 /*bit_valid*/ 1, 8778 /*bit*/ 0); 8779 ctl_done((union ctl_io *)ctsio); 8780 return (CTL_RETVAL_COMPLETE); 8781 } 8782 8783 /* 8784 * The first check is to make sure we're in bounds, the second 8785 * check is to catch wrap-around problems. If the lba + num blocks 8786 * is less than the lba, then we've wrapped around and the block 8787 * range is invalid anyway. 8788 */ 8789 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8790 || ((lba + num_blocks) < lba)) { 8791 ctl_set_lba_out_of_range(ctsio); 8792 ctl_done((union ctl_io *)ctsio); 8793 return (CTL_RETVAL_COMPLETE); 8794 } 8795 8796 /* 8797 * According to SBC-3, a transfer length of 0 is not an error. 8798 * Note that this cannot happen with WRITE(6) or READ(6), since 0 8799 * translates to 256 blocks for those commands. 
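 */

/*
 * A minimal sketch of the READ(6)/WRITE(6) decode rules used above:
 * the LBA is 21 bits (the low 5 bits of byte 1 plus bytes 2 and 3),
 * and a transfer length of 0 means 256 blocks, per SBC-2.
 * example_decode_rw6() is a hypothetical helper, not a CTL function.
 */
static void
example_decode_rw6(const uint8_t cdb[6], uint64_t *lba, uint32_t *num_blocks)
{
	*lba = ((uint64_t)(cdb[1] & 0x1f) << 16) |
	    ((uint64_t)cdb[2] << 8) | cdb[3];
	*num_blocks = (cdb[4] != 0) ? cdb[4] : 256;
}

/*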
8800 */ 8801 if (num_blocks == 0) { 8802 ctl_set_success(ctsio); 8803 ctl_done((union ctl_io *)ctsio); 8804 return (CTL_RETVAL_COMPLETE); 8805 } 8806 8807 lbalen = (struct ctl_lba_len_flags *) 8808 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8809 lbalen->lba = lba; 8810 lbalen->len = num_blocks; 8811 lbalen->flags = isread ? CTL_LLF_READ : CTL_LLF_WRITE; 8812 8813 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8814 ctsio->kern_rel_offset = 0; 8815 8816 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 8817 8818 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8819 8820 return (retval); 8821} 8822 8823static int 8824ctl_cnw_cont(union ctl_io *io) 8825{ 8826 struct ctl_scsiio *ctsio; 8827 struct ctl_lun *lun; 8828 struct ctl_lba_len_flags *lbalen; 8829 int retval; 8830 8831 ctsio = &io->scsiio; 8832 ctsio->io_hdr.status = CTL_STATUS_NONE; 8833 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 8834 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8835 lbalen = (struct ctl_lba_len_flags *) 8836 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8837 lbalen->flags = CTL_LLF_WRITE; 8838 8839 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); 8840 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8841 return (retval); 8842} 8843 8844int 8845ctl_cnw(struct ctl_scsiio *ctsio) 8846{ 8847 struct ctl_lun *lun; 8848 struct ctl_lba_len_flags *lbalen; 8849 uint64_t lba; 8850 uint32_t num_blocks; 8851 int fua, dpo; 8852 int retval; 8853 8854 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8855 8856 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); 8857 8858 fua = 0; 8859 dpo = 0; 8860 8861 retval = CTL_RETVAL_COMPLETE; 8862 8863 switch (ctsio->cdb[0]) { 8864 case COMPARE_AND_WRITE: { 8865 struct scsi_compare_and_write *cdb; 8866 8867 cdb = (struct scsi_compare_and_write *)ctsio->cdb; 8868 8869 if (cdb->byte2 & SRW10_FUA) 8870 fua = 1; 8871 if (cdb->byte2 & SRW10_DPO) 8872 dpo = 1; 8873 lba = scsi_8btou64(cdb->addr); 8874 num_blocks = cdb->length; 8875 break; 8876 } 8877 default: 8878 /* 8879 * We got a command we don't support. This shouldn't 8880 * happen, commands should be filtered out above us. 8881 */ 8882 ctl_set_invalid_opcode(ctsio); 8883 ctl_done((union ctl_io *)ctsio); 8884 8885 return (CTL_RETVAL_COMPLETE); 8886 break; /* NOTREACHED */ 8887 } 8888 8889 /* 8890 * XXX KDM what do we do with the DPO and FUA bits? FUA might be 8891 * interesting for us, but if RAIDCore is in write-back mode, 8892 * getting it to do write-through for a particular transaction may 8893 * not be possible. 8894 */ 8895 8896 /* 8897 * The first check is to make sure we're in bounds, the second 8898 * check is to catch wrap-around problems. If the lba + num blocks 8899 * is less than the lba, then we've wrapped around and the block 8900 * range is invalid anyway. 8901 */ 8902 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8903 || ((lba + num_blocks) < lba)) { 8904 ctl_set_lba_out_of_range(ctsio); 8905 ctl_done((union ctl_io *)ctsio); 8906 return (CTL_RETVAL_COMPLETE); 8907 } 8908 8909 /* 8910 * According to SBC-3, a transfer length of 0 is not an error. 
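 */

/*
 * The bounds test above, as a standalone predicate: the second
 * comparison catches 64-bit wraparound of lba + num_blocks.
 * example_range_ok() is hypothetical and assumes maxlba, the last
 * valid LBA, is well below UINT64_MAX.
 */
static int
example_range_ok(uint64_t lba, uint32_t num_blocks, uint64_t maxlba)
{
	uint64_t end = lba + num_blocks;	/* one past the last block */

	return (end >= lba && end <= maxlba + 1);
}

/*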
8911 */
8912 if (num_blocks == 0) {
8913 ctl_set_success(ctsio);
8914 ctl_done((union ctl_io *)ctsio);
8915 return (CTL_RETVAL_COMPLETE);
8916 }
8917
8918 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize;
8919 ctsio->kern_rel_offset = 0;
8920
8921 /*
8922 * Set the IO_CONT flag, so that if this I/O gets passed to
8923 * ctl_data_submit_done(), it'll get passed back to
8924 * ctl_cnw_cont() for further processing.
8925 */
8926 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
8927 ctsio->io_cont = ctl_cnw_cont;
8928
8929 lbalen = (struct ctl_lba_len_flags *)
8930 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
8931 lbalen->lba = lba;
8932 lbalen->len = num_blocks;
8933 lbalen->flags = CTL_LLF_COMPARE;
8934
8935 CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n"));
8936 retval = lun->backend->data_submit((union ctl_io *)ctsio);
8937 return (retval);
8938}
8939
8940int
8941ctl_verify(struct ctl_scsiio *ctsio)
8942{
8943 struct ctl_lun *lun;
8944 struct ctl_lba_len_flags *lbalen;
8945 uint64_t lba;
8946 uint32_t num_blocks;
8947 int bytchk, dpo;
8948 int retval;
8949
8950 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
8951
8952 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0]));
8953
8954 bytchk = 0;
8955 dpo = 0;
8956 retval = CTL_RETVAL_COMPLETE;
8957
8958 switch (ctsio->cdb[0]) {
8959 case VERIFY_10: {
8960 struct scsi_verify_10 *cdb;
8961
8962 cdb = (struct scsi_verify_10 *)ctsio->cdb;
8963 if (cdb->byte2 & SVFY_BYTCHK)
8964 bytchk = 1;
8965 if (cdb->byte2 & SVFY_DPO)
8966 dpo = 1;
8967 lba = scsi_4btoul(cdb->addr);
8968 num_blocks = scsi_2btoul(cdb->length);
8969 break;
8970 }
8971 case VERIFY_12: {
8972 struct scsi_verify_12 *cdb;
8973
8974 cdb = (struct scsi_verify_12 *)ctsio->cdb;
8975 if (cdb->byte2 & SVFY_BYTCHK)
8976 bytchk = 1;
8977 if (cdb->byte2 & SVFY_DPO)
8978 dpo = 1;
8979 lba = scsi_4btoul(cdb->addr);
8980 num_blocks = scsi_4btoul(cdb->length);
8981 break;
8982 }
8983 case VERIFY_16: {
8984 struct scsi_rw_16 *cdb;
8985
8986 cdb = (struct scsi_rw_16 *)ctsio->cdb;
8987 if (cdb->byte2 & SVFY_BYTCHK)
8988 bytchk = 1;
8989 if (cdb->byte2 & SVFY_DPO)
8990 dpo = 1;
8991 lba = scsi_8btou64(cdb->addr);
8992 num_blocks = scsi_4btoul(cdb->length);
8993 break;
8994 }
8995 default:
8996 /*
8997 * We got a command we don't support. This shouldn't
8998 * happen, commands should be filtered out above us.
8999 */
9000 ctl_set_invalid_opcode(ctsio);
9001 ctl_done((union ctl_io *)ctsio);
9002 return (CTL_RETVAL_COMPLETE);
9003 }
9004
9005 /*
9006 * The first check is to make sure we're in bounds, the second
9007 * check is to catch wrap-around problems. If the lba + num blocks
9008 * is less than the lba, then we've wrapped around and the block
9009 * range is invalid anyway.
9010 */
9011 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
9012 || ((lba + num_blocks) < lba)) {
9013 ctl_set_lba_out_of_range(ctsio);
9014 ctl_done((union ctl_io *)ctsio);
9015 return (CTL_RETVAL_COMPLETE);
9016 }
9017
9018 /*
9019 * According to SBC-3, a transfer length of 0 is not an error.
9020 */ 9021 if (num_blocks == 0) { 9022 ctl_set_success(ctsio); 9023 ctl_done((union ctl_io *)ctsio); 9024 return (CTL_RETVAL_COMPLETE); 9025 } 9026 9027 lbalen = (struct ctl_lba_len_flags *) 9028 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9029 lbalen->lba = lba; 9030 lbalen->len = num_blocks; 9031 if (bytchk) { 9032 lbalen->flags = CTL_LLF_COMPARE; 9033 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 9034 } else { 9035 lbalen->flags = CTL_LLF_VERIFY; 9036 ctsio->kern_total_len = 0; 9037 } 9038 ctsio->kern_rel_offset = 0; 9039 9040 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); 9041 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9042 return (retval); 9043} 9044 9045int 9046ctl_report_luns(struct ctl_scsiio *ctsio) 9047{ 9048 struct scsi_report_luns *cdb; 9049 struct scsi_report_luns_data *lun_data; 9050 struct ctl_lun *lun, *request_lun; 9051 int num_luns, retval; 9052 uint32_t alloc_len, lun_datalen; 9053 int num_filled, well_known; 9054 uint32_t initidx, targ_lun_id, lun_id; 9055 9056 retval = CTL_RETVAL_COMPLETE; 9057 well_known = 0; 9058 9059 cdb = (struct scsi_report_luns *)ctsio->cdb; 9060 9061 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 9062 9063 mtx_lock(&control_softc->ctl_lock); 9064 num_luns = control_softc->num_luns; 9065 mtx_unlock(&control_softc->ctl_lock); 9066 9067 switch (cdb->select_report) { 9068 case RPL_REPORT_DEFAULT: 9069 case RPL_REPORT_ALL: 9070 break; 9071 case RPL_REPORT_WELLKNOWN: 9072 well_known = 1; 9073 num_luns = 0; 9074 break; 9075 default: 9076 ctl_set_invalid_field(ctsio, 9077 /*sks_valid*/ 1, 9078 /*command*/ 1, 9079 /*field*/ 2, 9080 /*bit_valid*/ 0, 9081 /*bit*/ 0); 9082 ctl_done((union ctl_io *)ctsio); 9083 return (retval); 9084 break; /* NOTREACHED */ 9085 } 9086 9087 alloc_len = scsi_4btoul(cdb->length); 9088 /* 9089 * The initiator has to allocate at least 16 bytes for this request, 9090 * so he can at least get the header and the first LUN. Otherwise 9091 * we reject the request (per SPC-3 rev 14, section 6.21). 9092 */ 9093 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 9094 sizeof(struct scsi_report_luns_lundata))) { 9095 ctl_set_invalid_field(ctsio, 9096 /*sks_valid*/ 1, 9097 /*command*/ 1, 9098 /*field*/ 6, 9099 /*bit_valid*/ 0, 9100 /*bit*/ 0); 9101 ctl_done((union ctl_io *)ctsio); 9102 return (retval); 9103 } 9104 9105 request_lun = (struct ctl_lun *) 9106 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9107 9108 lun_datalen = sizeof(*lun_data) + 9109 (num_luns * sizeof(struct scsi_report_luns_lundata)); 9110 9111 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 9112 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 9113 ctsio->kern_sg_entries = 0; 9114 9115 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9116 9117 mtx_lock(&control_softc->ctl_lock); 9118 for (targ_lun_id = 0, num_filled = 0; targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns; targ_lun_id++) { 9119 lun_id = targ_lun_id; 9120 if (ctsio->io_hdr.nexus.lun_map_fn != NULL) 9121 lun_id = ctsio->io_hdr.nexus.lun_map_fn(ctsio->io_hdr.nexus.lun_map_arg, lun_id); 9122 if (lun_id >= CTL_MAX_LUNS) 9123 continue; 9124 lun = control_softc->ctl_luns[lun_id]; 9125 if (lun == NULL) 9126 continue; 9127 9128 if (targ_lun_id <= 0xff) { 9129 /* 9130 * Peripheral addressing method, bus number 0. 
9131 */ 9132 lun_data->luns[num_filled].lundata[0] = 9133 RPL_LUNDATA_ATYP_PERIPH; 9134 lun_data->luns[num_filled].lundata[1] = targ_lun_id; 9135 num_filled++; 9136 } else if (targ_lun_id <= 0x3fff) { 9137 /* 9138 * Flat addressing method. 9139 */ 9140 lun_data->luns[num_filled].lundata[0] = 9141 RPL_LUNDATA_ATYP_FLAT | 9142 (targ_lun_id & RPL_LUNDATA_FLAT_LUN_MASK); 9143#ifdef OLDCTLHEADERS 9144 (SRLD_ADDR_FLAT << SRLD_ADDR_SHIFT) | 9145 (targ_lun_id & SRLD_BUS_LUN_MASK); 9146#endif 9147 lun_data->luns[num_filled].lundata[1] = 9148#ifdef OLDCTLHEADERS 9149 targ_lun_id >> SRLD_BUS_LUN_BITS; 9150#endif 9151 targ_lun_id >> RPL_LUNDATA_FLAT_LUN_BITS; 9152 num_filled++; 9153 } else { 9154 printf("ctl_report_luns: bogus LUN number %jd, " 9155 "skipping\n", (intmax_t)targ_lun_id); 9156 } 9157 /* 9158 * According to SPC-3, rev 14 section 6.21: 9159 * 9160 * "The execution of a REPORT LUNS command to any valid and 9161 * installed logical unit shall clear the REPORTED LUNS DATA 9162 * HAS CHANGED unit attention condition for all logical 9163 * units of that target with respect to the requesting 9164 * initiator. A valid and installed logical unit is one 9165 * having a PERIPHERAL QUALIFIER of 000b in the standard 9166 * INQUIRY data (see 6.4.2)." 9167 * 9168 * If request_lun is NULL, the LUN this report luns command 9169 * was issued to is either disabled or doesn't exist. In that 9170 * case, we shouldn't clear any pending lun change unit 9171 * attention. 9172 */ 9173 if (request_lun != NULL) { 9174 mtx_lock(&lun->lun_lock); 9175 lun->pending_sense[initidx].ua_pending &= 9176 ~CTL_UA_LUN_CHANGE; 9177 mtx_unlock(&lun->lun_lock); 9178 } 9179 } 9180 mtx_unlock(&control_softc->ctl_lock); 9181 9182 /* 9183 * It's quite possible that we've returned fewer LUNs than we allocated 9184 * space for. Trim it. 9185 */ 9186 lun_datalen = sizeof(*lun_data) + 9187 (num_filled * sizeof(struct scsi_report_luns_lundata)); 9188 9189 if (lun_datalen < alloc_len) { 9190 ctsio->residual = alloc_len - lun_datalen; 9191 ctsio->kern_data_len = lun_datalen; 9192 ctsio->kern_total_len = lun_datalen; 9193 } else { 9194 ctsio->residual = 0; 9195 ctsio->kern_data_len = alloc_len; 9196 ctsio->kern_total_len = alloc_len; 9197 } 9198 ctsio->kern_data_resid = 0; 9199 ctsio->kern_rel_offset = 0; 9200 ctsio->kern_sg_entries = 0; 9201 9202 /* 9203 * We set this to the actual data length, regardless of how much 9204 * space we actually have to return results. If the user looks at 9205 * this value, he'll know whether or not he allocated enough space 9206 * and reissue the command if necessary. We don't support well 9207 * known logical units, so if the user asks for that, return none. 9208 */ 9209 scsi_ulto4b(lun_datalen - 8, lun_data->length); 9210 9211 /* 9212 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 9213 * this request. 
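 */

/*
 * The single-level LUN encoding built in the loop above, compressed
 * into one standalone helper: peripheral addressing (bus 0) for IDs
 * up to 255, flat addressing for IDs up to 16383. The 0x40 literal
 * is the flat-addressing method selector that RPL_LUNDATA_ATYP_FLAT
 * spells symbolically; example_encode_lun() itself is hypothetical.
 */
static void
example_encode_lun(uint32_t lun_id, uint8_t lundata[8])
{
	memset(lundata, 0, 8);
	if (lun_id <= 0xff) {
		lundata[1] = lun_id;			/* peripheral, bus 0 */
	} else {
		lundata[0] = 0x40 | ((lun_id >> 8) & 0x3f); /* flat */
		lundata[1] = lun_id & 0xff;
	}
}

/*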
9214 */ 9215 ctsio->scsi_status = SCSI_STATUS_OK; 9216 9217 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9218 ctsio->be_move_done = ctl_config_move_done; 9219 ctl_datamove((union ctl_io *)ctsio); 9220 9221 return (retval); 9222} 9223 9224int 9225ctl_request_sense(struct ctl_scsiio *ctsio) 9226{ 9227 struct scsi_request_sense *cdb; 9228 struct scsi_sense_data *sense_ptr; 9229 struct ctl_lun *lun; 9230 uint32_t initidx; 9231 int have_error; 9232 scsi_sense_data_type sense_format; 9233 9234 cdb = (struct scsi_request_sense *)ctsio->cdb; 9235 9236 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9237 9238 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 9239 9240 /* 9241 * Determine which sense format the user wants. 9242 */ 9243 if (cdb->byte2 & SRS_DESC) 9244 sense_format = SSD_TYPE_DESC; 9245 else 9246 sense_format = SSD_TYPE_FIXED; 9247 9248 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 9249 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 9250 ctsio->kern_sg_entries = 0; 9251 9252 /* 9253 * struct scsi_sense_data, which is currently set to 256 bytes, is 9254 * larger than the largest allowed value for the length field in the 9255 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 9256 */ 9257 ctsio->residual = 0; 9258 ctsio->kern_data_len = cdb->length; 9259 ctsio->kern_total_len = cdb->length; 9260 9261 ctsio->kern_data_resid = 0; 9262 ctsio->kern_rel_offset = 0; 9263 ctsio->kern_sg_entries = 0; 9264 9265 /* 9266 * If we don't have a LUN, we don't have any pending sense. 9267 */ 9268 if (lun == NULL) 9269 goto no_sense; 9270 9271 have_error = 0; 9272 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9273 /* 9274 * Check for pending sense, and then for pending unit attentions. 9275 * Pending sense gets returned first, then pending unit attentions. 9276 */ 9277 mtx_lock(&lun->lun_lock); 9278 if (ctl_is_set(lun->have_ca, initidx)) { 9279 scsi_sense_data_type stored_format; 9280 9281 /* 9282 * Check to see which sense format was used for the stored 9283 * sense data. 9284 */ 9285 stored_format = scsi_sense_type( 9286 &lun->pending_sense[initidx].sense); 9287 9288 /* 9289 * If the user requested a different sense format than the 9290 * one we stored, then we need to convert it to the other 9291 * format. If we're going from descriptor to fixed format 9292 * sense data, we may lose things in translation, depending 9293 * on what options were used. 9294 * 9295 * If the stored format is SSD_TYPE_NONE (i.e. invalid), 9296 * for some reason we'll just copy it out as-is. 
9297 */ 9298 if ((stored_format == SSD_TYPE_FIXED) 9299 && (sense_format == SSD_TYPE_DESC)) 9300 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 9301 &lun->pending_sense[initidx].sense, 9302 (struct scsi_sense_data_desc *)sense_ptr); 9303 else if ((stored_format == SSD_TYPE_DESC) 9304 && (sense_format == SSD_TYPE_FIXED)) 9305 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 9306 &lun->pending_sense[initidx].sense, 9307 (struct scsi_sense_data_fixed *)sense_ptr); 9308 else 9309 memcpy(sense_ptr, &lun->pending_sense[initidx].sense, 9310 ctl_min(sizeof(*sense_ptr), 9311 sizeof(lun->pending_sense[initidx].sense))); 9312 9313 ctl_clear_mask(lun->have_ca, initidx); 9314 have_error = 1; 9315 } else if (lun->pending_sense[initidx].ua_pending != CTL_UA_NONE) { 9316 ctl_ua_type ua_type; 9317 9318 ua_type = ctl_build_ua(lun->pending_sense[initidx].ua_pending, 9319 sense_ptr, sense_format); 9320 if (ua_type != CTL_UA_NONE) { 9321 have_error = 1; 9322 /* We're reporting this UA, so clear it */ 9323 lun->pending_sense[initidx].ua_pending &= ~ua_type; 9324 } 9325 } 9326 mtx_unlock(&lun->lun_lock); 9327 9328 /* 9329 * We already have a pending error, return it. 9330 */ 9331 if (have_error != 0) { 9332 /* 9333 * We report the SCSI status as OK, since the status of the 9334 * request sense command itself is OK. 9335 */ 9336 ctsio->scsi_status = SCSI_STATUS_OK; 9337 9338 /* 9339 * We report 0 for the sense length, because we aren't doing 9340 * autosense in this case. We're reporting sense as 9341 * parameter data. 9342 */ 9343 ctsio->sense_len = 0; 9344 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9345 ctsio->be_move_done = ctl_config_move_done; 9346 ctl_datamove((union ctl_io *)ctsio); 9347 9348 return (CTL_RETVAL_COMPLETE); 9349 } 9350 9351no_sense: 9352 9353 /* 9354 * No sense information to report, so we report that everything is 9355 * okay. 9356 */ 9357 ctl_set_sense_data(sense_ptr, 9358 lun, 9359 sense_format, 9360 /*current_error*/ 1, 9361 /*sense_key*/ SSD_KEY_NO_SENSE, 9362 /*asc*/ 0x00, 9363 /*ascq*/ 0x00, 9364 SSD_ELEM_NONE); 9365 9366 ctsio->scsi_status = SCSI_STATUS_OK; 9367 9368 /* 9369 * We report 0 for the sense length, because we aren't doing 9370 * autosense in this case. We're reporting sense as parameter data. 
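 */

/*
 * For reference, the shape of the fixed-format "no sense" data that
 * ctl_set_sense_data() is asked to build above, assuming the SPC-3
 * fixed sense layout; example_fixed_no_sense() is hypothetical.
 */
static void
example_fixed_no_sense(uint8_t sense[18])
{
	memset(sense, 0, 18);
	sense[0] = 0x70;	/* current error, fixed format */
	sense[2] = 0x00;	/* sense key: NO SENSE */
	sense[7] = 10;		/* additional length: bytes past byte 7 */
}

/*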
9371 */ 9372 ctsio->sense_len = 0; 9373 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9374 ctsio->be_move_done = ctl_config_move_done; 9375 ctl_datamove((union ctl_io *)ctsio); 9376 9377 return (CTL_RETVAL_COMPLETE); 9378} 9379 9380int 9381ctl_tur(struct ctl_scsiio *ctsio) 9382{ 9383 struct ctl_lun *lun; 9384 9385 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9386 9387 CTL_DEBUG_PRINT(("ctl_tur\n")); 9388 9389 if (lun == NULL) 9390 return (EINVAL); 9391 9392 ctsio->scsi_status = SCSI_STATUS_OK; 9393 ctsio->io_hdr.status = CTL_SUCCESS; 9394 9395 ctl_done((union ctl_io *)ctsio); 9396 9397 return (CTL_RETVAL_COMPLETE); 9398} 9399 9400#ifdef notyet 9401static int 9402ctl_cmddt_inquiry(struct ctl_scsiio *ctsio) 9403{ 9404 9405} 9406#endif 9407 9408static int 9409ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9410{ 9411 struct scsi_vpd_supported_pages *pages; 9412 int sup_page_size; 9413 struct ctl_lun *lun; 9414 9415 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9416 9417 sup_page_size = sizeof(struct scsi_vpd_supported_pages) * 9418 SCSI_EVPD_NUM_SUPPORTED_PAGES; 9419 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 9420 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 9421 ctsio->kern_sg_entries = 0; 9422 9423 if (sup_page_size < alloc_len) { 9424 ctsio->residual = alloc_len - sup_page_size; 9425 ctsio->kern_data_len = sup_page_size; 9426 ctsio->kern_total_len = sup_page_size; 9427 } else { 9428 ctsio->residual = 0; 9429 ctsio->kern_data_len = alloc_len; 9430 ctsio->kern_total_len = alloc_len; 9431 } 9432 ctsio->kern_data_resid = 0; 9433 ctsio->kern_rel_offset = 0; 9434 ctsio->kern_sg_entries = 0; 9435 9436 /* 9437 * The control device is always connected. The disk device, on the 9438 * other hand, may not be online all the time. Need to change this 9439 * to figure out whether the disk device is actually online or not. 
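 */

/*
 * The allocation-length dance just above repeats in every handler in
 * this file; factored out as a sketch (example_clip_alloc_len() is
 * hypothetical): transfer min(data_len, alloc_len) bytes and record
 * any shortfall in the allocation as the residual.
 */
static void
example_clip_alloc_len(uint32_t data_len, uint32_t alloc_len,
		       uint32_t *xfer_len, uint32_t *residual)
{
	if (data_len < alloc_len) {
		*xfer_len = data_len;
		*residual = alloc_len - data_len;
	} else {
		*xfer_len = alloc_len;
		*residual = 0;
	}
}

/*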
9440 */ 9441 if (lun != NULL) 9442 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 9443 lun->be_lun->lun_type; 9444 else 9445 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9446 9447 pages->length = SCSI_EVPD_NUM_SUPPORTED_PAGES; 9448 /* Supported VPD pages */ 9449 pages->page_list[0] = SVPD_SUPPORTED_PAGES; 9450 /* Serial Number */ 9451 pages->page_list[1] = SVPD_UNIT_SERIAL_NUMBER; 9452 /* Device Identification */ 9453 pages->page_list[2] = SVPD_DEVICE_ID; 9454 /* Block limits */ 9455 pages->page_list[3] = SVPD_BLOCK_LIMITS; 9456 /* Logical Block Provisioning */ 9457 pages->page_list[4] = SVPD_LBP; 9458 9459 ctsio->scsi_status = SCSI_STATUS_OK; 9460 9461 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9462 ctsio->be_move_done = ctl_config_move_done; 9463 ctl_datamove((union ctl_io *)ctsio); 9464 9465 return (CTL_RETVAL_COMPLETE); 9466} 9467 9468static int 9469ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 9470{ 9471 struct scsi_vpd_unit_serial_number *sn_ptr; 9472 struct ctl_lun *lun; 9473 9474 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9475 9476 ctsio->kern_data_ptr = malloc(sizeof(*sn_ptr), M_CTL, M_WAITOK | M_ZERO); 9477 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9478 ctsio->kern_sg_entries = 0; 9479 9480 if (sizeof(*sn_ptr) < alloc_len) { 9481 ctsio->residual = alloc_len - sizeof(*sn_ptr); 9482 ctsio->kern_data_len = sizeof(*sn_ptr); 9483 ctsio->kern_total_len = sizeof(*sn_ptr); 9484 } else { 9485 ctsio->residual = 0; 9486 ctsio->kern_data_len = alloc_len; 9487 ctsio->kern_total_len = alloc_len; 9488 } 9489 ctsio->kern_data_resid = 0; 9490 ctsio->kern_rel_offset = 0; 9491 ctsio->kern_sg_entries = 0; 9492 9493 /* 9494 * The control device is always connected. The disk device, on the 9495 * other hand, may not be online all the time. Need to change this 9496 * to figure out whether the disk device is actually online or not. 9497 */ 9498 if (lun != NULL) 9499 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9500 lun->be_lun->lun_type; 9501 else 9502 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9503 9504 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9505 sn_ptr->length = ctl_min(sizeof(*sn_ptr) - 4, CTL_SN_LEN); 9506 /* 9507 * If we don't have a LUN, we just leave the serial number as 9508 * all spaces. 
9509 */ 9510 memset(sn_ptr->serial_num, 0x20, sizeof(sn_ptr->serial_num)); 9511 if (lun != NULL) { 9512 strncpy((char *)sn_ptr->serial_num, 9513 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9514 } 9515 ctsio->scsi_status = SCSI_STATUS_OK; 9516 9517 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9518 ctsio->be_move_done = ctl_config_move_done; 9519 ctl_datamove((union ctl_io *)ctsio); 9520 9521 return (CTL_RETVAL_COMPLETE); 9522} 9523 9524 9525static int 9526ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9527{ 9528 struct scsi_vpd_device_id *devid_ptr; 9529 struct scsi_vpd_id_descriptor *desc, *desc1; 9530 struct scsi_vpd_id_descriptor *desc2, *desc3; /* for types 4h and 5h */ 9531 struct scsi_vpd_id_t10 *t10id; 9532 struct ctl_softc *ctl_softc; 9533 struct ctl_lun *lun; 9534 struct ctl_frontend *fe; 9535 char *val; 9536 int data_len, devid_len; 9537 9538 ctl_softc = control_softc; 9539 9540 fe = ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]; 9541 9542 if (fe->devid != NULL) 9543 return ((fe->devid)(ctsio, alloc_len)); 9544 9545 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9546 9547 if (lun == NULL) { 9548 devid_len = CTL_DEVID_MIN_LEN; 9549 } else { 9550 devid_len = max(CTL_DEVID_MIN_LEN, 9551 strnlen(lun->be_lun->device_id, CTL_DEVID_LEN)); 9552 } 9553 9554 data_len = sizeof(struct scsi_vpd_device_id) + 9555 sizeof(struct scsi_vpd_id_descriptor) + 9556 sizeof(struct scsi_vpd_id_t10) + devid_len + 9557 sizeof(struct scsi_vpd_id_descriptor) + CTL_WWPN_LEN + 9558 sizeof(struct scsi_vpd_id_descriptor) + 9559 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9560 sizeof(struct scsi_vpd_id_descriptor) + 9561 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9562 9563 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9564 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9565 ctsio->kern_sg_entries = 0; 9566 9567 if (data_len < alloc_len) { 9568 ctsio->residual = alloc_len - data_len; 9569 ctsio->kern_data_len = data_len; 9570 ctsio->kern_total_len = data_len; 9571 } else { 9572 ctsio->residual = 0; 9573 ctsio->kern_data_len = alloc_len; 9574 ctsio->kern_total_len = alloc_len; 9575 } 9576 ctsio->kern_data_resid = 0; 9577 ctsio->kern_rel_offset = 0; 9578 ctsio->kern_sg_entries = 0; 9579 9580 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9581 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 9582 desc1 = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9583 sizeof(struct scsi_vpd_id_t10) + devid_len); 9584 desc2 = (struct scsi_vpd_id_descriptor *)(&desc1->identifier[0] + 9585 CTL_WWPN_LEN); 9586 desc3 = (struct scsi_vpd_id_descriptor *)(&desc2->identifier[0] + 9587 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 9588 9589 /* 9590 * The control device is always connected. The disk device, on the 9591 * other hand, may not be online all the time. 
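 */

/*
 * SPC ASCII fields, like the serial number above and the T10 vendor
 * ID below, are space padded and never NUL terminated; a sketch of
 * that fill (example_ascii_field() is hypothetical).
 */
static void
example_ascii_field(uint8_t *dst, size_t len, const char *src)
{
	size_t n = strnlen(src, len);

	memcpy(dst, src, n);
	memset(dst + n, ' ', len - n);	/* pad with 0x20, no NUL */
}

/*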
9592 */
9593 if (lun != NULL)
9594 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
9595 lun->be_lun->lun_type;
9596 else
9597 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
9598
9599 devid_ptr->page_code = SVPD_DEVICE_ID;
9600
9601 scsi_ulto2b(data_len - 4, devid_ptr->length);
9602
9603 /*
9604 * For Fibre Channel ports, advertise the FC protocol; everything
9605 * else is treated as SPI.
9606 */
9606 if (fe->port_type == CTL_PORT_FC)
9607 {
9608 desc->proto_codeset = (SCSI_PROTO_FC << 4) |
9609 SVPD_ID_CODESET_ASCII;
9610 desc1->proto_codeset = (SCSI_PROTO_FC << 4) |
9611 SVPD_ID_CODESET_BINARY;
9612 }
9613 else
9614 {
9615 desc->proto_codeset = (SCSI_PROTO_SPI << 4) |
9616 SVPD_ID_CODESET_ASCII;
9617 desc1->proto_codeset = (SCSI_PROTO_SPI << 4) |
9618 SVPD_ID_CODESET_BINARY;
9619 }
9620 desc2->proto_codeset = desc3->proto_codeset = desc1->proto_codeset;
9621
9622 /*
9623 * We're using a LUN association here, i.e. this device ID is a
9624 * per-LUN identifier.
9625 */
9626 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10;
9627 desc->length = sizeof(*t10id) + devid_len;
9628 if (lun == NULL || (val = ctl_get_opt(lun->be_lun, "vendor")) == NULL) {
9629 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor));
9630 } else {
9631 memset(t10id->vendor, ' ', sizeof(t10id->vendor));
9632 strncpy(t10id->vendor, val,
9633 min(sizeof(t10id->vendor), strlen(val)));
9634 }
9635
9636 /*
9637 * desc1 is for the WWPN, which is a port association.
9638 */
9639 desc1->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_NAA;
9640 desc1->length = CTL_WWPN_LEN;
9641 /* XXX Call Reggie's get_WWNN func here then add port # to the end */
9642 /* For testing just create the WWPN */
9643#if 0
9644 ddb_GetWWNN((char *)desc1->identifier);
9645
9646 /* NOTE: if the port is 0 or 8 we don't want to subtract 1 */
9647 /* This is so Copancontrol will return something sane */
9648 if (ctsio->io_hdr.nexus.targ_port!=0 &&
9649 ctsio->io_hdr.nexus.targ_port!=8)
9650 desc1->identifier[7] += ctsio->io_hdr.nexus.targ_port-1;
9651 else
9652 desc1->identifier[7] += ctsio->io_hdr.nexus.targ_port;
9653#endif
9654
9655 be64enc(desc1->identifier, fe->wwpn);
9656
9657 /*
9658 * desc2 is for the Relative Target Port (type 4h) identifier
9659 */
9660 desc2->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT
9661 | SVPD_ID_TYPE_RELTARG;
9662 desc2->length = 4;
9663//#if 0
9664 /* NOTE: if the port is 0 or 8 we don't want to subtract 1 */
9665 /* This is so Copancontrol will return something sane */
9666 if (ctsio->io_hdr.nexus.targ_port!=0 &&
9667 ctsio->io_hdr.nexus.targ_port!=8)
9668 desc2->identifier[3] = ctsio->io_hdr.nexus.targ_port - 1;
9669 else
9670 desc2->identifier[3] = ctsio->io_hdr.nexus.targ_port;
9671//#endif
9672
9673 /*
9674 * desc3 is for the Target Port Group (type 5h) identifier
9675 */
9676 desc3->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT
9677 | SVPD_ID_TYPE_TPORTGRP;
9678 desc3->length = 4;
9679 if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS || ctl_is_single)
9680 desc3->identifier[3] = 1;
9681 else
9682 desc3->identifier[3] = 2;
9683
9684 /*
9685 * If we've actually got a backend, copy the device id from the
9686 * per-LUN data. Otherwise, set it to all spaces.
9687 */
9688 if (lun != NULL) {
9689 /*
9690 * Copy the backend's LUN ID.
9691 */
9692 strncpy((char *)t10id->vendor_spec_id,
9693 (char *)lun->be_lun->device_id, devid_len);
9694 } else {
9695 /*
9696 * No backend, set this to spaces.
9697 */ 9698 memset(t10id->vendor_spec_id, 0x20, devid_len); 9699 } 9700 9701 ctsio->scsi_status = SCSI_STATUS_OK; 9702 9703 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9704 ctsio->be_move_done = ctl_config_move_done; 9705 ctl_datamove((union ctl_io *)ctsio); 9706 9707 return (CTL_RETVAL_COMPLETE); 9708} 9709 9710static int 9711ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) 9712{ 9713 struct scsi_vpd_block_limits *bl_ptr; 9714 struct ctl_lun *lun; 9715 int bs; 9716 9717 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9718 bs = lun->be_lun->blocksize; 9719 9720 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); 9721 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; 9722 ctsio->kern_sg_entries = 0; 9723 9724 if (sizeof(*bl_ptr) < alloc_len) { 9725 ctsio->residual = alloc_len - sizeof(*bl_ptr); 9726 ctsio->kern_data_len = sizeof(*bl_ptr); 9727 ctsio->kern_total_len = sizeof(*bl_ptr); 9728 } else { 9729 ctsio->residual = 0; 9730 ctsio->kern_data_len = alloc_len; 9731 ctsio->kern_total_len = alloc_len; 9732 } 9733 ctsio->kern_data_resid = 0; 9734 ctsio->kern_rel_offset = 0; 9735 ctsio->kern_sg_entries = 0; 9736 9737 /* 9738 * The control device is always connected. The disk device, on the 9739 * other hand, may not be online all the time. Need to change this 9740 * to figure out whether the disk device is actually online or not. 9741 */ 9742 if (lun != NULL) 9743 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9744 lun->be_lun->lun_type; 9745 else 9746 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9747 9748 bl_ptr->page_code = SVPD_BLOCK_LIMITS; 9749 scsi_ulto2b(sizeof(*bl_ptr), bl_ptr->page_length); 9750 bl_ptr->max_cmp_write_len = 0xff; 9751 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); 9752 scsi_ulto4b(MAXPHYS / bs, bl_ptr->opt_txfer_len); 9753 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9754 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_lba_cnt); 9755 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_blk_cnt); 9756 } 9757 scsi_u64to8b(UINT64_MAX, bl_ptr->max_write_same_length); 9758 9759 ctsio->scsi_status = SCSI_STATUS_OK; 9760 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9761 ctsio->be_move_done = ctl_config_move_done; 9762 ctl_datamove((union ctl_io *)ctsio); 9763 9764 return (CTL_RETVAL_COMPLETE); 9765} 9766 9767static int 9768ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) 9769{ 9770 struct scsi_vpd_logical_block_prov *lbp_ptr; 9771 struct ctl_lun *lun; 9772 int bs; 9773 9774 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9775 bs = lun->be_lun->blocksize; 9776 9777 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); 9778 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; 9779 ctsio->kern_sg_entries = 0; 9780 9781 if (sizeof(*lbp_ptr) < alloc_len) { 9782 ctsio->residual = alloc_len - sizeof(*lbp_ptr); 9783 ctsio->kern_data_len = sizeof(*lbp_ptr); 9784 ctsio->kern_total_len = sizeof(*lbp_ptr); 9785 } else { 9786 ctsio->residual = 0; 9787 ctsio->kern_data_len = alloc_len; 9788 ctsio->kern_total_len = alloc_len; 9789 } 9790 ctsio->kern_data_resid = 0; 9791 ctsio->kern_rel_offset = 0; 9792 ctsio->kern_sg_entries = 0; 9793 9794 /* 9795 * The control device is always connected. The disk device, on the 9796 * other hand, may not be online all the time. Need to change this 9797 * to figure out whether the disk device is actually online or not. 
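 */

/*
 * The scsi_ulto2b()/scsi_ulto4b() calls above store multi-byte
 * fields MSB first, since SCSI wire data is big endian; a minimal
 * sketch of the 4-byte case (example_ulto4b() is a hypothetical
 * stand-in for the real scsi_ulto4b()).
 */
static void
example_ulto4b(uint32_t val, uint8_t bytes[4])
{
	bytes[0] = (val >> 24) & 0xff;
	bytes[1] = (val >> 16) & 0xff;
	bytes[2] = (val >> 8) & 0xff;
	bytes[3] = val & 0xff;
}

/*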
9798 */ 9799 if (lun != NULL) 9800 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9801 lun->be_lun->lun_type; 9802 else 9803 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9804 9805 lbp_ptr->page_code = SVPD_LBP; 9806 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 9807 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | SVPD_LBP_WS10; 9808 9809 ctsio->scsi_status = SCSI_STATUS_OK; 9810 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9811 ctsio->be_move_done = ctl_config_move_done; 9812 ctl_datamove((union ctl_io *)ctsio); 9813 9814 return (CTL_RETVAL_COMPLETE); 9815} 9816 9817static int 9818ctl_inquiry_evpd(struct ctl_scsiio *ctsio) 9819{ 9820 struct scsi_inquiry *cdb; 9821 struct ctl_lun *lun; 9822 int alloc_len, retval; 9823 9824 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9825 cdb = (struct scsi_inquiry *)ctsio->cdb; 9826 9827 retval = CTL_RETVAL_COMPLETE; 9828 9829 alloc_len = scsi_2btoul(cdb->length); 9830 9831 switch (cdb->page_code) { 9832 case SVPD_SUPPORTED_PAGES: 9833 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); 9834 break; 9835 case SVPD_UNIT_SERIAL_NUMBER: 9836 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); 9837 break; 9838 case SVPD_DEVICE_ID: 9839 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); 9840 break; 9841 case SVPD_BLOCK_LIMITS: 9842 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); 9843 break; 9844 case SVPD_LBP: 9845 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); 9846 break; 9847 default: 9848 ctl_set_invalid_field(ctsio, 9849 /*sks_valid*/ 1, 9850 /*command*/ 1, 9851 /*field*/ 2, 9852 /*bit_valid*/ 0, 9853 /*bit*/ 0); 9854 ctl_done((union ctl_io *)ctsio); 9855 retval = CTL_RETVAL_COMPLETE; 9856 break; 9857 } 9858 9859 return (retval); 9860} 9861 9862static int 9863ctl_inquiry_std(struct ctl_scsiio *ctsio) 9864{ 9865 struct scsi_inquiry_data *inq_ptr; 9866 struct scsi_inquiry *cdb; 9867 struct ctl_softc *ctl_softc; 9868 struct ctl_lun *lun; 9869 char *val; 9870 uint32_t alloc_len; 9871 int is_fc; 9872 9873 ctl_softc = control_softc; 9874 9875 /* 9876 * Figure out whether we're talking to a Fibre Channel port or not. 9877 * We treat the ioctl front end, and any SCSI adapters, as packetized 9878 * SCSI front ends. 9879 */ 9880 if (ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type != 9881 CTL_PORT_FC) 9882 is_fc = 0; 9883 else 9884 is_fc = 1; 9885 9886 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9887 cdb = (struct scsi_inquiry *)ctsio->cdb; 9888 alloc_len = scsi_2btoul(cdb->length); 9889 9890 /* 9891 * We malloc the full inquiry data size here and fill it 9892 * in. If the user only asks for less, we'll give him 9893 * that much. 9894 */ 9895 ctsio->kern_data_ptr = malloc(sizeof(*inq_ptr), M_CTL, M_WAITOK | M_ZERO); 9896 inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; 9897 ctsio->kern_sg_entries = 0; 9898 ctsio->kern_data_resid = 0; 9899 ctsio->kern_rel_offset = 0; 9900 9901 if (sizeof(*inq_ptr) < alloc_len) { 9902 ctsio->residual = alloc_len - sizeof(*inq_ptr); 9903 ctsio->kern_data_len = sizeof(*inq_ptr); 9904 ctsio->kern_total_len = sizeof(*inq_ptr); 9905 } else { 9906 ctsio->residual = 0; 9907 ctsio->kern_data_len = alloc_len; 9908 ctsio->kern_total_len = alloc_len; 9909 } 9910 9911 /* 9912 * If we have a LUN configured, report it as connected. Otherwise, 9913 * report that it is offline or no device is supported, depending 9914 * on the value of inquiry_pq_no_lun. 
9915 * 9916 * According to the spec (SPC-4 r34), the peripheral qualifier 9917 * SID_QUAL_LU_OFFLINE (001b) is used in the following scenario: 9918 * 9919 * "A peripheral device having the specified peripheral device type 9920 * is not connected to this logical unit. However, the device 9921 * server is capable of supporting the specified peripheral device 9922 * type on this logical unit." 9923 * 9924 * According to the same spec, the peripheral qualifier 9925 * SID_QUAL_BAD_LU (011b) is used in this scenario: 9926 * 9927 * "The device server is not capable of supporting a peripheral 9928 * device on this logical unit. For this peripheral qualifier the 9929 * peripheral device type shall be set to 1Fh. All other peripheral 9930 * device type values are reserved for this peripheral qualifier." 9931 * 9932 * Given the text, it would seem that we probably want to report that 9933 * the LUN is offline here. There is no LUN connected, but we can 9934 * support a LUN at the given LUN number. 9935 * 9936 * In the real world, though, it sounds like things are a little 9937 * different: 9938 * 9939 * - Linux, when presented with a LUN with the offline peripheral 9940 * qualifier, will create an sg driver instance for it. So when 9941 * you attach it to CTL, you wind up with a ton of sg driver 9942 * instances. (One for every LUN that Linux bothered to probe.) 9943 * Linux does this despite the fact that it issues a REPORT LUNs 9944 * to LUN 0 to get the inventory of supported LUNs. 9945 * 9946 * - There is other anecdotal evidence (from Emulex folks) about 9947 * arrays that use the offline peripheral qualifier for LUNs that 9948 * are on the "passive" path in an active/passive array. 9949 * 9950 * So the solution is provide a hopefully reasonable default 9951 * (return bad/no LUN) and allow the user to change the behavior 9952 * with a tunable/sysctl variable. 9953 */ 9954 if (lun != NULL) 9955 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9956 lun->be_lun->lun_type; 9957 else if (ctl_softc->inquiry_pq_no_lun == 0) 9958 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9959 else 9960 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; 9961 9962 /* RMB in byte 2 is 0 */ 9963 inq_ptr->version = SCSI_REV_SPC3; 9964 9965 /* 9966 * According to SAM-3, even if a device only supports a single 9967 * level of LUN addressing, it should still set the HISUP bit: 9968 * 9969 * 4.9.1 Logical unit numbers overview 9970 * 9971 * All logical unit number formats described in this standard are 9972 * hierarchical in structure even when only a single level in that 9973 * hierarchy is used. The HISUP bit shall be set to one in the 9974 * standard INQUIRY data (see SPC-2) when any logical unit number 9975 * format described in this standard is used. Non-hierarchical 9976 * formats are outside the scope of this standard. 9977 * 9978 * Therefore we set the HiSup bit here. 9979 * 9980 * The reponse format is 2, per SPC-3. 
9981 */ 9982 inq_ptr->response_format = SID_HiSup | 2; 9983 9984 inq_ptr->additional_length = sizeof(*inq_ptr) - 4; 9985 CTL_DEBUG_PRINT(("additional_length = %d\n", 9986 inq_ptr->additional_length)); 9987 9988 inq_ptr->spc3_flags = SPC3_SID_TPGS_IMPLICIT; 9989 /* 16 bit addressing */ 9990 if (is_fc == 0) 9991 inq_ptr->spc2_flags = SPC2_SID_ADDR16; 9992 /* XXX set the SID_MultiP bit here if we're actually going to 9993 respond on multiple ports */ 9994 inq_ptr->spc2_flags |= SPC2_SID_MultiP; 9995 9996 /* 16 bit data bus, synchronous transfers */ 9997 /* XXX these flags don't apply for FC */ 9998 if (is_fc == 0) 9999 inq_ptr->flags = SID_WBus16 | SID_Sync; 10000 /* 10001 * XXX KDM do we want to support tagged queueing on the control 10002 * device at all? 10003 */ 10004 if ((lun == NULL) 10005 || (lun->be_lun->lun_type != T_PROCESSOR)) 10006 inq_ptr->flags |= SID_CmdQue; 10007 /* 10008 * Per SPC-3, unused bytes in ASCII strings are filled with spaces. 10009 * We have 8 bytes for the vendor name, and 16 bytes for the device 10010 * name and 4 bytes for the revision. 10011 */ 10012 if (lun == NULL || (val = ctl_get_opt(lun->be_lun, "vendor")) == NULL) { 10013 strcpy(inq_ptr->vendor, CTL_VENDOR); 10014 } else { 10015 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); 10016 strncpy(inq_ptr->vendor, val, 10017 min(sizeof(inq_ptr->vendor), strlen(val))); 10018 } 10019 if (lun == NULL) { 10020 strcpy(inq_ptr->product, CTL_DIRECT_PRODUCT); 10021 } else if ((val = ctl_get_opt(lun->be_lun, "product")) == NULL) { 10022 switch (lun->be_lun->lun_type) { 10023 case T_DIRECT: 10024 strcpy(inq_ptr->product, CTL_DIRECT_PRODUCT); 10025 break; 10026 case T_PROCESSOR: 10027 strcpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT); 10028 break; 10029 default: 10030 strcpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT); 10031 break; 10032 } 10033 } else { 10034 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); 10035 strncpy(inq_ptr->product, val, 10036 min(sizeof(inq_ptr->product), strlen(val))); 10037 } 10038 10039 /* 10040 * XXX make this a macro somewhere so it automatically gets 10041 * incremented when we make changes. 10042 */ 10043 if (lun == NULL || (val = ctl_get_opt(lun->be_lun, "revision")) == NULL) { 10044 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); 10045 } else { 10046 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); 10047 strncpy(inq_ptr->revision, val, 10048 min(sizeof(inq_ptr->revision), strlen(val))); 10049 } 10050 10051 /* 10052 * For parallel SCSI, we support double transition and single 10053 * transition clocking. We also support QAS (Quick Arbitration 10054 * and Selection) and Information Unit transfers on both the 10055 * control and array devices. 10056 */ 10057 if (is_fc == 0) 10058 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | 10059 SID_SPI_IUS; 10060 10061 /* SAM-3 */ 10062 scsi_ulto2b(0x0060, inq_ptr->version1); 10063 /* SPC-3 (no version claimed) XXX should we claim a version? */ 10064 scsi_ulto2b(0x0300, inq_ptr->version2); 10065 if (is_fc) { 10066 /* FCP-2 ANSI INCITS.350:2003 */ 10067 scsi_ulto2b(0x0917, inq_ptr->version3); 10068 } else { 10069 /* SPI-4 ANSI INCITS.362:200x */ 10070 scsi_ulto2b(0x0B56, inq_ptr->version3); 10071 } 10072 10073 if (lun == NULL) { 10074 /* SBC-2 (no version claimed) XXX should we claim a version? */ 10075 scsi_ulto2b(0x0320, inq_ptr->version4); 10076 } else { 10077 switch (lun->be_lun->lun_type) { 10078 case T_DIRECT: 10079 /* 10080 * SBC-2 (no version claimed) XXX should we claim a 10081 * version? 
10082 */ 10083 scsi_ulto2b(0x0320, inq_ptr->version4); 10084 break; 10085 case T_PROCESSOR: 10086 default: 10087 break; 10088 } 10089 } 10090 10091 ctsio->scsi_status = SCSI_STATUS_OK; 10092 if (ctsio->kern_data_len > 0) { 10093 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10094 ctsio->be_move_done = ctl_config_move_done; 10095 ctl_datamove((union ctl_io *)ctsio); 10096 } else { 10097 ctsio->io_hdr.status = CTL_SUCCESS; 10098 ctl_done((union ctl_io *)ctsio); 10099 } 10100 10101 return (CTL_RETVAL_COMPLETE); 10102} 10103 10104int 10105ctl_inquiry(struct ctl_scsiio *ctsio) 10106{ 10107 struct scsi_inquiry *cdb; 10108 int retval; 10109 10110 cdb = (struct scsi_inquiry *)ctsio->cdb; 10111 10112 retval = 0; 10113 10114 CTL_DEBUG_PRINT(("ctl_inquiry\n")); 10115 10116 /* 10117 * Right now, we don't support the CmdDt inquiry information. 10118 * This would be nice to support in the future. When we do 10119 * support it, we should change this test so that it checks to make 10120 * sure SI_EVPD and SI_CMDDT aren't both set at the same time. 10121 */ 10122#ifdef notyet 10123 if (((cdb->byte2 & SI_EVPD) 10124 && (cdb->byte2 & SI_CMDDT))) 10125#endif 10126 if (cdb->byte2 & SI_CMDDT) { 10127 /* 10128 * Point to the SI_CMDDT bit. We might change this 10129 * when we support SI_CMDDT, but since both bits would be 10130 * "wrong", this should probably just stay as-is then. 10131 */ 10132 ctl_set_invalid_field(ctsio, 10133 /*sks_valid*/ 1, 10134 /*command*/ 1, 10135 /*field*/ 1, 10136 /*bit_valid*/ 1, 10137 /*bit*/ 1); 10138 ctl_done((union ctl_io *)ctsio); 10139 return (CTL_RETVAL_COMPLETE); 10140 } 10141 if (cdb->byte2 & SI_EVPD) 10142 retval = ctl_inquiry_evpd(ctsio); 10143#ifdef notyet 10144 else if (cdb->byte2 & SI_CMDDT) 10145 retval = ctl_inquiry_cmddt(ctsio); 10146#endif 10147 else 10148 retval = ctl_inquiry_std(ctsio); 10149 10150 return (retval); 10151} 10152 10153/* 10154 * For known CDB types, parse the LBA and length. 
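 *
 * For example, READ(10) and WRITE(10) carry a 32-bit big-endian
 * LBA in CDB bytes 2-5 and a 16-bit big-endian transfer length in
 * bytes 7-8, which is what the scsi_4btoul()/scsi_2btoul() calls
 * below pull out of struct scsi_rw_10. The 6-byte variants have
 * only a 21-bit LBA, hence the 0x1fffff mask in that case.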
10155 */ 10156static int 10157ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint32_t *len) 10158{ 10159 if (io->io_hdr.io_type != CTL_IO_SCSI) 10160 return (1); 10161 10162 switch (io->scsiio.cdb[0]) { 10163 case COMPARE_AND_WRITE: { 10164 struct scsi_compare_and_write *cdb; 10165 10166 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; 10167 10168 *lba = scsi_8btou64(cdb->addr); 10169 *len = cdb->length; 10170 break; 10171 } 10172 case READ_6: 10173 case WRITE_6: { 10174 struct scsi_rw_6 *cdb; 10175 10176 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; 10177 10178 *lba = scsi_3btoul(cdb->addr); 10179 /* only 5 bits are valid in the most significant address byte */ 10180 *lba &= 0x1fffff; 10181 *len = cdb->length; 10182 break; 10183 } 10184 case READ_10: 10185 case WRITE_10: { 10186 struct scsi_rw_10 *cdb; 10187 10188 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; 10189 10190 *lba = scsi_4btoul(cdb->addr); 10191 *len = scsi_2btoul(cdb->length); 10192 break; 10193 } 10194 case WRITE_VERIFY_10: { 10195 struct scsi_write_verify_10 *cdb; 10196 10197 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; 10198 10199 *lba = scsi_4btoul(cdb->addr); 10200 *len = scsi_2btoul(cdb->length); 10201 break; 10202 } 10203 case READ_12: 10204 case WRITE_12: { 10205 struct scsi_rw_12 *cdb; 10206 10207 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; 10208 10209 *lba = scsi_4btoul(cdb->addr); 10210 *len = scsi_4btoul(cdb->length); 10211 break; 10212 } 10213 case WRITE_VERIFY_12: { 10214 struct scsi_write_verify_12 *cdb; 10215 10216 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; 10217 10218 *lba = scsi_4btoul(cdb->addr); 10219 *len = scsi_4btoul(cdb->length); 10220 break; 10221 } 10222 case READ_16: 10223 case WRITE_16: { 10224 struct scsi_rw_16 *cdb; 10225 10226 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; 10227 10228 *lba = scsi_8btou64(cdb->addr); 10229 *len = scsi_4btoul(cdb->length); 10230 break; 10231 } 10232 case WRITE_VERIFY_16: { 10233 struct scsi_write_verify_16 *cdb; 10234 10235 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; 10236 10237 10238 *lba = scsi_8btou64(cdb->addr); 10239 *len = scsi_4btoul(cdb->length); 10240 break; 10241 } 10242 case WRITE_SAME_10: { 10243 struct scsi_write_same_10 *cdb; 10244 10245 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb; 10246 10247 *lba = scsi_4btoul(cdb->addr); 10248 *len = scsi_2btoul(cdb->length); 10249 break; 10250 } 10251 case WRITE_SAME_16: { 10252 struct scsi_write_same_16 *cdb; 10253 10254 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; 10255 10256 *lba = scsi_8btou64(cdb->addr); 10257 *len = scsi_4btoul(cdb->length); 10258 break; 10259 } 10260 case VERIFY_10: { 10261 struct scsi_verify_10 *cdb; 10262 10263 cdb = (struct scsi_verify_10 *)io->scsiio.cdb; 10264 10265 *lba = scsi_4btoul(cdb->addr); 10266 *len = scsi_2btoul(cdb->length); 10267 break; 10268 } 10269 case VERIFY_12: { 10270 struct scsi_verify_12 *cdb; 10271 10272 cdb = (struct scsi_verify_12 *)io->scsiio.cdb; 10273 10274 *lba = scsi_4btoul(cdb->addr); 10275 *len = scsi_4btoul(cdb->length); 10276 break; 10277 } 10278 case VERIFY_16: { 10279 struct scsi_verify_16 *cdb; 10280 10281 cdb = (struct scsi_verify_16 *)io->scsiio.cdb; 10282 10283 *lba = scsi_8btou64(cdb->addr); 10284 *len = scsi_4btoul(cdb->length); 10285 break; 10286 } 10287 default: 10288 return (1); 10289 break; /* NOTREACHED */ 10290 } 10291 10292 return (0); 10293} 10294 10295static ctl_action 10296ctl_extent_check_lba(uint64_t lba1, uint32_t len1, uint64_t lba2, uint32_t len2) 10297{ 10298 uint64_t endlba1, endlba2; 10299 
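 /*
 * A worked example of the check below: lba1 = 0, len1 = 8 covers
 * blocks [0,7] (endlba1 = 7) and lba2 = 8, len2 = 4 covers [8,11];
 * endlba1 < lba2, so the extents are disjoint and the I/Os may
 * pass one another. With lba2 = 4 instead, the extents share
 * [4,7] and the second I/O must block behind the first.
 */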
10300 endlba1 = lba1 + len1 - 1; 10301 endlba2 = lba2 + len2 - 1; 10302 10303 if ((endlba1 < lba2) 10304 || (endlba2 < lba1)) 10305 return (CTL_ACTION_PASS); 10306 else 10307 return (CTL_ACTION_BLOCK); 10308} 10309 10310static ctl_action 10311ctl_extent_check(union ctl_io *io1, union ctl_io *io2) 10312{ 10313 uint64_t lba1, lba2; 10314 uint32_t len1, len2; 10315 int retval; 10316 10317 retval = ctl_get_lba_len(io1, &lba1, &len1); 10318 if (retval != 0) 10319 return (CTL_ACTION_ERROR); 10320 10321 retval = ctl_get_lba_len(io2, &lba2, &len2); 10322 if (retval != 0) 10323 return (CTL_ACTION_ERROR); 10324 10325 return (ctl_extent_check_lba(lba1, len1, lba2, len2)); 10326} 10327 10328static ctl_action 10329ctl_check_for_blockage(union ctl_io *pending_io, union ctl_io *ooa_io) 10330{ 10331 struct ctl_cmd_entry *pending_entry, *ooa_entry; 10332 ctl_serialize_action *serialize_row; 10333 10334 /* 10335 * The initiator attempted multiple untagged commands at the same 10336 * time. Can't do that. 10337 */ 10338 if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10339 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10340 && ((pending_io->io_hdr.nexus.targ_port == 10341 ooa_io->io_hdr.nexus.targ_port) 10342 && (pending_io->io_hdr.nexus.initid.id == 10343 ooa_io->io_hdr.nexus.initid.id)) 10344 && ((ooa_io->io_hdr.flags & CTL_FLAG_ABORT) == 0)) 10345 return (CTL_ACTION_OVERLAP); 10346 10347 /* 10348 * The initiator attempted to send multiple tagged commands with 10349 * the same ID. (It's fine if different initiators have the same 10350 * tag ID.) 10351 * 10352 * Even if all of those conditions are true, we don't kill the I/O 10353 * if the command ahead of us has been aborted. We won't end up 10354 * sending it to the FETD, and it's perfectly legal to resend a 10355 * command with the same tag number as long as the previous 10356 * instance of this tag number has been aborted somehow. 10357 */ 10358 if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10359 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10360 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) 10361 && ((pending_io->io_hdr.nexus.targ_port == 10362 ooa_io->io_hdr.nexus.targ_port) 10363 && (pending_io->io_hdr.nexus.initid.id == 10364 ooa_io->io_hdr.nexus.initid.id)) 10365 && ((ooa_io->io_hdr.flags & CTL_FLAG_ABORT) == 0)) 10366 return (CTL_ACTION_OVERLAP_TAG); 10367 10368 /* 10369 * If we get a head of queue tag, SAM-3 says that we should 10370 * immediately execute it. 10371 * 10372 * What happens if this command would normally block for some other 10373 * reason? e.g. a request sense with a head of queue tag 10374 * immediately after a write. Normally that would block, but this 10375 * will result in its getting executed immediately... 10376 * 10377 * We currently return "pass" instead of "skip", so we'll end up 10378 * going through the rest of the queue to check for overlapped tags. 10379 * 10380 * XXX KDM check for other types of blockage first?? 10381 */ 10382 if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10383 return (CTL_ACTION_PASS); 10384 10385 /* 10386 * Ordered tags have to block until all items ahead of them 10387 * have completed. If we get called with an ordered tag, we always 10388 * block, if something else is ahead of us in the queue. 10389 */ 10390 if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED) 10391 return (CTL_ACTION_BLOCK); 10392 10393 /* 10394 * Simple tags get blocked until all head of queue and ordered tags 10395 * ahead of them have completed. 
I'm lumping untagged commands in 10396 * with simple tags here. XXX KDM is that the right thing to do? 10397 */ 10398 if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10399 || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE)) 10400 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10401 || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED))) 10402 return (CTL_ACTION_BLOCK); 10403 10404 pending_entry = &ctl_cmd_table[pending_io->scsiio.cdb[0]]; 10405 ooa_entry = &ctl_cmd_table[ooa_io->scsiio.cdb[0]]; 10406 10407 serialize_row = ctl_serialize_table[ooa_entry->seridx]; 10408 10409 switch (serialize_row[pending_entry->seridx]) { 10410 case CTL_SER_BLOCK: 10411 return (CTL_ACTION_BLOCK); 10412 break; /* NOTREACHED */ 10413 case CTL_SER_EXTENT: 10414 return (ctl_extent_check(pending_io, ooa_io)); 10415 break; /* NOTREACHED */ 10416 case CTL_SER_PASS: 10417 return (CTL_ACTION_PASS); 10418 break; /* NOTREACHED */ 10419 case CTL_SER_SKIP: 10420 return (CTL_ACTION_SKIP); 10421 break; 10422 default: 10423 panic("invalid serialization value %d", 10424 serialize_row[pending_entry->seridx]); 10425 break; /* NOTREACHED */ 10426 } 10427 10428 return (CTL_ACTION_ERROR); 10429} 10430 10431/* 10432 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. 10433 * Assumptions: 10434 * - pending_io is generally either incoming, or on the blocked queue 10435 * - starting I/O is the I/O we want to start the check with. 10436 */ 10437static ctl_action 10438ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 10439 union ctl_io *starting_io) 10440{ 10441 union ctl_io *ooa_io; 10442 ctl_action action; 10443 10444 mtx_assert(&lun->lun_lock, MA_OWNED); 10445 10446 /* 10447 * Run back along the OOA queue, starting with the current 10448 * blocked I/O and going through every I/O before it on the 10449 * queue. If starting_io is NULL, we'll just end up returning 10450 * CTL_ACTION_PASS. 10451 */ 10452 for (ooa_io = starting_io; ooa_io != NULL; 10453 ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq, 10454 ooa_links)){ 10455 10456 /* 10457 * This routine just checks to see whether 10458 * cur_blocked is blocked by ooa_io, which is ahead 10459 * of it in the queue. It doesn't queue/dequeue 10460 * cur_blocked. 10461 */ 10462 action = ctl_check_for_blockage(pending_io, ooa_io); 10463 switch (action) { 10464 case CTL_ACTION_BLOCK: 10465 case CTL_ACTION_OVERLAP: 10466 case CTL_ACTION_OVERLAP_TAG: 10467 case CTL_ACTION_SKIP: 10468 case CTL_ACTION_ERROR: 10469 return (action); 10470 break; /* NOTREACHED */ 10471 case CTL_ACTION_PASS: 10472 break; 10473 default: 10474 panic("invalid action %d", action); 10475 break; /* NOTREACHED */ 10476 } 10477 } 10478 10479 return (CTL_ACTION_PASS); 10480} 10481 10482/* 10483 * Assumptions: 10484 * - An I/O has just completed, and has been removed from the per-LUN OOA 10485 * queue, so some items on the blocked queue may now be unblocked. 10486 */ 10487static int 10488ctl_check_blocked(struct ctl_lun *lun) 10489{ 10490 union ctl_io *cur_blocked, *next_blocked; 10491 10492 mtx_assert(&lun->lun_lock, MA_OWNED); 10493 10494 /* 10495 * Run forward from the head of the blocked queue, checking each 10496 * entry against the I/Os prior to it on the OOA queue to see if 10497 * there is still any blockage. 10498 * 10499 * We cannot use the TAILQ_FOREACH() macro, because it can't deal 10500 * with our removing a variable on it while it is traversing the 10501 * list. 
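 *
 * (The loop below hand-rolls what queue(3) provides as
 * TAILQ_FOREACH_SAFE(): cache the next pointer before the body
 * runs, so that removing the current entry is safe. A rough
 * sketch, ignoring the union ctl_io casts used here:
 *
 *	TAILQ_FOREACH_SAFE(hdr, &lun->blocked_queue, blocked_links,
 *	    next_hdr) {
 *		... body may TAILQ_REMOVE() hdr ...
 *	}
 *
 * The explicit form below also accommodates those casts.)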
10502 */ 10503 for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); 10504 cur_blocked != NULL; cur_blocked = next_blocked) { 10505 union ctl_io *prev_ooa; 10506 ctl_action action; 10507 10508 next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr, 10509 blocked_links); 10510 10511 prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr, 10512 ctl_ooaq, ooa_links); 10513 10514 /* 10515 * If cur_blocked happens to be the first item in the OOA 10516 * queue now, prev_ooa will be NULL, and the action 10517 * returned will just be CTL_ACTION_PASS. 10518 */ 10519 action = ctl_check_ooa(lun, cur_blocked, prev_ooa); 10520 10521 switch (action) { 10522 case CTL_ACTION_BLOCK: 10523 /* Nothing to do here, still blocked */ 10524 break; 10525 case CTL_ACTION_OVERLAP: 10526 case CTL_ACTION_OVERLAP_TAG: 10527 /* 10528 * This shouldn't happen! In theory we've already 10529 * checked this command for overlap... 10530 */ 10531 break; 10532 case CTL_ACTION_PASS: 10533 case CTL_ACTION_SKIP: { 10534 struct ctl_softc *softc; 10535 struct ctl_cmd_entry *entry; 10536 uint32_t initidx; 10537 uint8_t opcode; 10538 int isc_retval; 10539 10540 /* 10541 * The skip case shouldn't happen, this transaction 10542 * should have never made it onto the blocked queue. 10543 */ 10544 /* 10545 * This I/O is no longer blocked, we can remove it 10546 * from the blocked queue. Since this is a TAILQ 10547 * (doubly linked list), we can do O(1) removals 10548 * from any place on the list. 10549 */ 10550 TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr, 10551 blocked_links); 10552 cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 10553 10554 if (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC){ 10555 /* 10556 * Need to send IO back to original side to 10557 * run 10558 */ 10559 union ctl_ha_msg msg_info; 10560 10561 msg_info.hdr.original_sc = 10562 cur_blocked->io_hdr.original_sc; 10563 msg_info.hdr.serializing_sc = cur_blocked; 10564 msg_info.hdr.msg_type = CTL_MSG_R2R; 10565 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 10566 &msg_info, sizeof(msg_info), 0)) > 10567 CTL_HA_STATUS_SUCCESS) { 10568 printf("CTL:Check Blocked error from " 10569 "ctl_ha_msg_send %d\n", 10570 isc_retval); 10571 } 10572 break; 10573 } 10574 opcode = cur_blocked->scsiio.cdb[0]; 10575 entry = &ctl_cmd_table[opcode]; 10576 softc = control_softc; 10577 10578 initidx = ctl_get_initindex(&cur_blocked->io_hdr.nexus); 10579 10580 /* 10581 * Check this I/O for LUN state changes that may 10582 * have happened while this command was blocked. 10583 * The LUN state may have been changed by a command 10584 * ahead of us in the queue, so we need to re-check 10585 * for any states that can be caused by SCSI 10586 * commands. 10587 */ 10588 if (ctl_scsiio_lun_check(softc, lun, entry, 10589 &cur_blocked->scsiio) == 0) { 10590 cur_blocked->io_hdr.flags |= 10591 CTL_FLAG_IS_WAS_ON_RTR; 10592 ctl_enqueue_rtr(cur_blocked); 10593 } else 10594 ctl_done(cur_blocked); 10595 break; 10596 } 10597 default: 10598 /* 10599 * This probably shouldn't happen -- we shouldn't 10600 * get CTL_ACTION_ERROR, or anything else. 10601 */ 10602 break; 10603 } 10604 } 10605 10606 return (CTL_RETVAL_COMPLETE); 10607} 10608 10609/* 10610 * This routine (with one exception) checks LUN flags that can be set by 10611 * commands ahead of us in the OOA queue. These flags have to be checked 10612 * when a command initially comes in, and when we pull a command off the 10613 * blocked queue and are preparing to execute it. 
The reason we have to 10614 * check these flags for commands on the blocked queue is that the LUN 10615 * state may have been changed by a command ahead of us while we're on the 10616 * blocked queue. 10617 * 10618 * Ordering is somewhat important with these checks, so please pay 10619 * careful attention to the placement of any new checks. 10620 */ 10621static int 10622ctl_scsiio_lun_check(struct ctl_softc *ctl_softc, struct ctl_lun *lun, 10623 struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) 10624{ 10625 int retval; 10626 10627 retval = 0; 10628 10629 mtx_assert(&lun->lun_lock, MA_OWNED); 10630 10631 /* 10632 * If this shelf is a secondary shelf controller, we have to reject 10633 * any media access commands. 10634 */ 10635#if 0 10636 /* No longer needed for HA */ 10637 if (((ctl_softc->flags & CTL_FLAG_MASTER_SHELF) == 0) 10638 && ((entry->flags & CTL_CMD_FLAG_OK_ON_SECONDARY) == 0)) { 10639 ctl_set_lun_standby(ctsio); 10640 retval = 1; 10641 goto bailout; 10642 } 10643#endif 10644 10645 /* 10646 * Check for a reservation conflict. If this command isn't allowed 10647 * even on reserved LUNs, and if this initiator isn't the one who 10648 * reserved us, reject the command with a reservation conflict. 10649 */ 10650 if ((lun->flags & CTL_LUN_RESERVED) 10651 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 10652 if ((ctsio->io_hdr.nexus.initid.id != lun->rsv_nexus.initid.id) 10653 || (ctsio->io_hdr.nexus.targ_port != lun->rsv_nexus.targ_port) 10654 || (ctsio->io_hdr.nexus.targ_target.id != 10655 lun->rsv_nexus.targ_target.id)) { 10656 ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT; 10657 ctsio->io_hdr.status = CTL_SCSI_ERROR; 10658 retval = 1; 10659 goto bailout; 10660 } 10661 } 10662 10663 if ( (lun->flags & CTL_LUN_PR_RESERVED) 10664 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV) == 0)) { 10665 uint32_t residx; 10666 10667 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 10668 /* 10669 * if we aren't registered or it's a res holder type 10670 * reservation and this isn't the res holder then set a 10671 * conflict. 10672 * NOTE: Commands which might be allowed on write exclusive 10673 * type reservations are checked in the particular command 10674 * for a conflict. Read and SSU are the only ones. 10675 */ 10676 if (!lun->per_res[residx].registered 10677 || (residx != lun->pr_res_idx && lun->res_type < 4)) { 10678 ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT; 10679 ctsio->io_hdr.status = CTL_SCSI_ERROR; 10680 retval = 1; 10681 goto bailout; 10682 } 10683 10684 } 10685 10686 if ((lun->flags & CTL_LUN_OFFLINE) 10687 && ((entry->flags & CTL_CMD_FLAG_OK_ON_OFFLINE) == 0)) { 10688 ctl_set_lun_not_ready(ctsio); 10689 retval = 1; 10690 goto bailout; 10691 } 10692 10693 /* 10694 * If the LUN is stopped, see if this particular command is allowed 10695 * for a stopped lun. Otherwise, reject it with 0x04,0x02. 10696 */ 10697 if ((lun->flags & CTL_LUN_STOPPED) 10698 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) { 10699 /* "Logical unit not ready, initializing cmd. 
required" */ 10700 ctl_set_lun_stopped(ctsio); 10701 retval = 1; 10702 goto bailout; 10703 } 10704 10705 if ((lun->flags & CTL_LUN_INOPERABLE) 10706 && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) { 10707 /* "Medium format corrupted" */ 10708 ctl_set_medium_format_corrupted(ctsio); 10709 retval = 1; 10710 goto bailout; 10711 } 10712 10713bailout: 10714 return (retval); 10715 10716} 10717 10718static void 10719ctl_failover_io(union ctl_io *io, int have_lock) 10720{ 10721 ctl_set_busy(&io->scsiio); 10722 ctl_done(io); 10723} 10724 10725static void 10726ctl_failover(void) 10727{ 10728 struct ctl_lun *lun; 10729 struct ctl_softc *ctl_softc; 10730 union ctl_io *next_io, *pending_io; 10731 union ctl_io *io; 10732 int lun_idx; 10733 int i; 10734 10735 ctl_softc = control_softc; 10736 10737 mtx_lock(&ctl_softc->ctl_lock); 10738 /* 10739 * Remove any cmds from the other SC from the rtr queue. These 10740 * will obviously only be for LUNs for which we're the primary. 10741 * We can't send status or get/send data for these commands. 10742 * Since they haven't been executed yet, we can just remove them. 10743 * We'll either abort them or delete them below, depending on 10744 * which HA mode we're in. 10745 */ 10746#ifdef notyet 10747 mtx_lock(&ctl_softc->queue_lock); 10748 for (io = (union ctl_io *)STAILQ_FIRST(&ctl_softc->rtr_queue); 10749 io != NULL; io = next_io) { 10750 next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links); 10751 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 10752 STAILQ_REMOVE(&ctl_softc->rtr_queue, &io->io_hdr, 10753 ctl_io_hdr, links); 10754 } 10755 mtx_unlock(&ctl_softc->queue_lock); 10756#endif 10757 10758 for (lun_idx=0; lun_idx < ctl_softc->num_luns; lun_idx++) { 10759 lun = ctl_softc->ctl_luns[lun_idx]; 10760 if (lun==NULL) 10761 continue; 10762 10763 /* 10764 * Processor LUNs are primary on both sides. 10765 * XXX will this always be true? 10766 */ 10767 if (lun->be_lun->lun_type == T_PROCESSOR) 10768 continue; 10769 10770 if ((lun->flags & CTL_LUN_PRIMARY_SC) 10771 && (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)) { 10772 printf("FAILOVER: primary lun %d\n", lun_idx); 10773 /* 10774 * Remove all commands from the other SC. First from the 10775 * blocked queue then from the ooa queue. Once we have 10776 * removed them. Call ctl_check_blocked to see if there 10777 * is anything that can run. 10778 */ 10779 for (io = (union ctl_io *)TAILQ_FIRST( 10780 &lun->blocked_queue); io != NULL; io = next_io) { 10781 10782 next_io = (union ctl_io *)TAILQ_NEXT( 10783 &io->io_hdr, blocked_links); 10784 10785 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) { 10786 TAILQ_REMOVE(&lun->blocked_queue, 10787 &io->io_hdr,blocked_links); 10788 io->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 10789 TAILQ_REMOVE(&lun->ooa_queue, 10790 &io->io_hdr, ooa_links); 10791 10792 ctl_free_io(io); 10793 } 10794 } 10795 10796 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 10797 io != NULL; io = next_io) { 10798 10799 next_io = (union ctl_io *)TAILQ_NEXT( 10800 &io->io_hdr, ooa_links); 10801 10802 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) { 10803 10804 TAILQ_REMOVE(&lun->ooa_queue, 10805 &io->io_hdr, 10806 ooa_links); 10807 10808 ctl_free_io(io); 10809 } 10810 } 10811 ctl_check_blocked(lun); 10812 } else if ((lun->flags & CTL_LUN_PRIMARY_SC) 10813 && (ctl_softc->ha_mode == CTL_HA_MODE_XFER)) { 10814 10815 printf("FAILOVER: primary lun %d\n", lun_idx); 10816 /* 10817 * Abort all commands from the other SC. We can't 10818 * send status back for them now. 
These should get 10819 * cleaned up when they are completed or come out 10820 * for a datamove operation. 10821 */ 10822 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 10823 io != NULL; io = next_io) { 10824 next_io = (union ctl_io *)TAILQ_NEXT( 10825 &io->io_hdr, ooa_links); 10826 10827 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 10828 io->io_hdr.flags |= CTL_FLAG_ABORT; 10829 } 10830 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0) 10831 && (ctl_softc->ha_mode == CTL_HA_MODE_XFER)) { 10832 10833 printf("FAILOVER: secondary lun %d\n", lun_idx); 10834 10835 lun->flags |= CTL_LUN_PRIMARY_SC; 10836 10837 /* 10838 * We send all I/O that was sent to this controller 10839 * and redirected to the other side back with 10840 * busy status, and have the initiator retry it. 10841 * Figuring out how much data has been transferred, 10842 * etc. and picking up where we left off would be 10843 * very tricky. 10844 * 10845 * XXX KDM need to remove I/O from the blocked 10846 * queue as well! 10847 */ 10848 for (pending_io = (union ctl_io *)TAILQ_FIRST( 10849 &lun->ooa_queue); pending_io != NULL; 10850 pending_io = next_io) { 10851 10852 next_io = (union ctl_io *)TAILQ_NEXT( 10853 &pending_io->io_hdr, ooa_links); 10854 10855 pending_io->io_hdr.flags &= 10856 ~CTL_FLAG_SENT_2OTHER_SC; 10857 10858 if (pending_io->io_hdr.flags & 10859 CTL_FLAG_IO_ACTIVE) { 10860 pending_io->io_hdr.flags |= 10861 CTL_FLAG_FAILOVER; 10862 } else { 10863 ctl_set_busy(&pending_io->scsiio); 10864 ctl_done(pending_io); 10865 } 10866 } 10867 10868 /* 10869 * Build Unit Attention 10870 */ 10871 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 10872 lun->pending_sense[i].ua_pending |= 10873 CTL_UA_ASYM_ACC_CHANGE; 10874 } 10875 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0) 10876 && (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)) { 10877 printf("FAILOVER: secondary lun %d\n", lun_idx); 10878 /* 10879 * if the first io on the OOA is not on the RtR queue 10880 * add it. 
10881 */ 10882 lun->flags |= CTL_LUN_PRIMARY_SC; 10883 10884 pending_io = (union ctl_io *)TAILQ_FIRST( 10885 &lun->ooa_queue); 10886 if (pending_io==NULL) { 10887 printf("Nothing on OOA queue\n"); 10888 continue; 10889 } 10890 10891 pending_io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 10892 if ((pending_io->io_hdr.flags & 10893 CTL_FLAG_IS_WAS_ON_RTR) == 0) { 10894 pending_io->io_hdr.flags |= 10895 CTL_FLAG_IS_WAS_ON_RTR; 10896 ctl_enqueue_rtr(pending_io); 10897 } 10898#if 0 10899 else 10900 { 10901 printf("Tag 0x%04x is running\n", 10902 pending_io->scsiio.tag_num); 10903 } 10904#endif 10905 10906 next_io = (union ctl_io *)TAILQ_NEXT( 10907 &pending_io->io_hdr, ooa_links); 10908 for (pending_io=next_io; pending_io != NULL; 10909 pending_io = next_io) { 10910 pending_io->io_hdr.flags &= 10911 ~CTL_FLAG_SENT_2OTHER_SC; 10912 next_io = (union ctl_io *)TAILQ_NEXT( 10913 &pending_io->io_hdr, ooa_links); 10914 if (pending_io->io_hdr.flags & 10915 CTL_FLAG_IS_WAS_ON_RTR) { 10916#if 0 10917 printf("Tag 0x%04x is running\n", 10918 pending_io->scsiio.tag_num); 10919#endif 10920 continue; 10921 } 10922 10923 switch (ctl_check_ooa(lun, pending_io, 10924 (union ctl_io *)TAILQ_PREV( 10925 &pending_io->io_hdr, ctl_ooaq, 10926 ooa_links))) { 10927 10928 case CTL_ACTION_BLOCK: 10929 TAILQ_INSERT_TAIL(&lun->blocked_queue, 10930 &pending_io->io_hdr, 10931 blocked_links); 10932 pending_io->io_hdr.flags |= 10933 CTL_FLAG_BLOCKED; 10934 break; 10935 case CTL_ACTION_PASS: 10936 case CTL_ACTION_SKIP: 10937 pending_io->io_hdr.flags |= 10938 CTL_FLAG_IS_WAS_ON_RTR; 10939 ctl_enqueue_rtr(pending_io); 10940 break; 10941 case CTL_ACTION_OVERLAP: 10942 ctl_set_overlapped_cmd( 10943 (struct ctl_scsiio *)pending_io); 10944 ctl_done(pending_io); 10945 break; 10946 case CTL_ACTION_OVERLAP_TAG: 10947 ctl_set_overlapped_tag( 10948 (struct ctl_scsiio *)pending_io, 10949 pending_io->scsiio.tag_num & 0xff); 10950 ctl_done(pending_io); 10951 break; 10952 case CTL_ACTION_ERROR: 10953 default: 10954 ctl_set_internal_failure( 10955 (struct ctl_scsiio *)pending_io, 10956 0, // sks_valid 10957 0); //retry count 10958 ctl_done(pending_io); 10959 break; 10960 } 10961 } 10962 10963 /* 10964 * Build Unit Attention 10965 */ 10966 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 10967 lun->pending_sense[i].ua_pending |= 10968 CTL_UA_ASYM_ACC_CHANGE; 10969 } 10970 } else { 10971 panic("Unhandled HA mode failover, LUN flags = %#x, " 10972 "ha_mode = #%x", lun->flags, ctl_softc->ha_mode); 10973 } 10974 } 10975 ctl_pause_rtr = 0; 10976 mtx_unlock(&ctl_softc->ctl_lock); 10977} 10978 10979static int 10980ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio) 10981{ 10982 struct ctl_lun *lun; 10983 struct ctl_cmd_entry *entry; 10984 uint8_t opcode; 10985 uint32_t initidx, targ_lun; 10986 int retval; 10987 10988 retval = 0; 10989 10990 lun = NULL; 10991 10992 opcode = ctsio->cdb[0]; 10993 10994 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 10995 if ((targ_lun < CTL_MAX_LUNS) 10996 && (ctl_softc->ctl_luns[targ_lun] != NULL)) { 10997 lun = ctl_softc->ctl_luns[targ_lun]; 10998 /* 10999 * If the LUN is invalid, pretend that it doesn't exist. 11000 * It will go away as soon as all pending I/O has been 11001 * completed. 
11002 */ 11003 if (lun->flags & CTL_LUN_DISABLED) { 11004 lun = NULL; 11005 } else { 11006 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun; 11007 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = 11008 lun->be_lun; 11009 if (lun->be_lun->lun_type == T_PROCESSOR) { 11010 ctsio->io_hdr.flags |= CTL_FLAG_CONTROL_DEV; 11011 } 11012 } 11013 } else { 11014 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL; 11015 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL; 11016 } 11017 11018 entry = &ctl_cmd_table[opcode]; 11019 11020 ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 11021 ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; 11022 11023 /* 11024 * Check to see whether we can send this command to LUNs that don't 11025 * exist. This should pretty much only be the case for inquiry 11026 * and request sense. Further checks, below, really require having 11027 * a LUN, so we can't really check the command anymore. Just put 11028 * it on the rtr queue. 11029 */ 11030 if (lun == NULL) { 11031 if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) { 11032 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11033 ctl_enqueue_rtr((union ctl_io *)ctsio); 11034 return (retval); 11035 } 11036 11037 ctl_set_unsupported_lun(ctsio); 11038 ctl_done((union ctl_io *)ctsio); 11039 CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n")); 11040 return (retval); 11041 } else { 11042 mtx_lock(&lun->lun_lock); 11043 11044 /* 11045 * Every I/O goes into the OOA queue for a particular LUN, and 11046 * stays there until completion. 11047 */ 11048 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 11049 11050 /* 11051 * Make sure we support this particular command on this LUN. 11052 * e.g., we don't support writes to the control LUN. 11053 */ 11054 switch (lun->be_lun->lun_type) { 11055 case T_PROCESSOR: 11056 if (((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) 11057 && ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) 11058 == 0)) { 11059 mtx_unlock(&lun->lun_lock); 11060 ctl_set_invalid_opcode(ctsio); 11061 ctl_done((union ctl_io *)ctsio); 11062 return (retval); 11063 } 11064 break; 11065 case T_DIRECT: 11066 if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0) 11067 && ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) 11068 == 0)){ 11069 mtx_unlock(&lun->lun_lock); 11070 ctl_set_invalid_opcode(ctsio); 11071 ctl_done((union ctl_io *)ctsio); 11072 return (retval); 11073 } 11074 break; 11075 default: 11076 mtx_unlock(&lun->lun_lock); 11077 panic("Unsupported CTL LUN type %d\n", 11078 lun->be_lun->lun_type); 11079 } 11080 } 11081 11082 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11083 11084 /* 11085 * If we've got a request sense, it'll clear the contingent 11086 * allegiance condition. Otherwise, if we have a CA condition for 11087 * this initiator, clear it, because it sent down a command other 11088 * than request sense. 11089 */ 11090 if ((opcode != REQUEST_SENSE) 11091 && (ctl_is_set(lun->have_ca, initidx))) 11092 ctl_clear_mask(lun->have_ca, initidx); 11093 11094 /* 11095 * If the command has this flag set, it handles its own unit 11096 * attention reporting, we shouldn't do anything. Otherwise we 11097 * check for any pending unit attentions, and send them back to the 11098 * initiator. We only do this when a command initially comes in, 11099 * not when we pull it off the blocked queue. 
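 *
 * (INQUIRY is the classic example of a command that handles its
 * own unit attention reporting: SPC-3 requires the device server
 * to execute an INQUIRY and leave any pending unit attention
 * condition in place rather than reporting it.)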
11100 * 11101 * According to SAM-3, section 5.3.2, the order that things get 11102 * presented back to the host is basically unit attentions caused 11103 * by some sort of reset event, busy status, reservation conflicts 11104 * or task set full, and finally any other status. 11105 * 11106 * One issue here is that some of the unit attentions we report 11107 * don't fall into the "reset" category (e.g. "reported luns data 11108 * has changed"). So reporting it here, before the reservation 11109 * check, may be technically wrong. I guess the only thing to do 11110 * would be to check for and report the reset events here, and then 11111 * check for the other unit attention types after we check for a 11112 * reservation conflict. 11113 * 11114 * XXX KDM need to fix this 11115 */ 11116 if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { 11117 ctl_ua_type ua_type; 11118 11119 ua_type = lun->pending_sense[initidx].ua_pending; 11120 if (ua_type != CTL_UA_NONE) { 11121 scsi_sense_data_type sense_format; 11122 11123 if (lun != NULL) 11124 sense_format = (lun->flags & 11125 CTL_LUN_SENSE_DESC) ? SSD_TYPE_DESC : 11126 SSD_TYPE_FIXED; 11127 else 11128 sense_format = SSD_TYPE_FIXED; 11129 11130 ua_type = ctl_build_ua(ua_type, &ctsio->sense_data, 11131 sense_format); 11132 if (ua_type != CTL_UA_NONE) { 11133 ctsio->scsi_status = SCSI_STATUS_CHECK_COND; 11134 ctsio->io_hdr.status = CTL_SCSI_ERROR | 11135 CTL_AUTOSENSE; 11136 ctsio->sense_len = SSD_FULL_SIZE; 11137 lun->pending_sense[initidx].ua_pending &= 11138 ~ua_type; 11139 mtx_unlock(&lun->lun_lock); 11140 ctl_done((union ctl_io *)ctsio); 11141 return (retval); 11142 } 11143 } 11144 } 11145 11146 11147 if (ctl_scsiio_lun_check(ctl_softc, lun, entry, ctsio) != 0) { 11148 mtx_unlock(&lun->lun_lock); 11149 ctl_done((union ctl_io *)ctsio); 11150 return (retval); 11151 } 11152 11153 /* 11154 * XXX CHD this is where we want to send IO to other side if 11155 * this LUN is secondary on this SC. We will need to make a copy 11156 * of the IO and flag the IO on this side as SENT_2OTHER and the flag 11157 * the copy we send as FROM_OTHER. 11158 * We also need to stuff the address of the original IO so we can 11159 * find it easily. Something similar will need be done on the other 11160 * side so when we are done we can find the copy. 11161 */ 11162 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { 11163 union ctl_ha_msg msg_info; 11164 int isc_retval; 11165 11166 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11167 11168 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 11169 msg_info.hdr.original_sc = (union ctl_io *)ctsio; 11170#if 0 11171 printf("1. ctsio %p\n", ctsio); 11172#endif 11173 msg_info.hdr.serializing_sc = NULL; 11174 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 11175 msg_info.scsi.tag_num = ctsio->tag_num; 11176 msg_info.scsi.tag_type = ctsio->tag_type; 11177 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 11178 11179 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11180 11181 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11182 (void *)&msg_info, sizeof(msg_info), 0)) > 11183 CTL_HA_STATUS_SUCCESS) { 11184 printf("CTL:precheck, ctl_ha_msg_send returned %d\n", 11185 isc_retval); 11186 printf("CTL:opcode is %x\n",opcode); 11187 } else { 11188#if 0 11189 printf("CTL:Precheck sent msg, opcode is %x\n",opcode); 11190#endif 11191 } 11192 11193 /* 11194 * XXX KDM this I/O is off the incoming queue, but hasn't 11195 * been inserted on any other queue. 
We may need to come 11196 * up with a holding queue while we wait for serialization 11197 * so that we have an idea of what we're waiting for from 11198 * the other side. 11199 */ 11200 mtx_unlock(&lun->lun_lock); 11201 return (retval); 11202 } 11203 11204 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 11205 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, 11206 ctl_ooaq, ooa_links))) { 11207 case CTL_ACTION_BLOCK: 11208 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 11209 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 11210 blocked_links); 11211 mtx_unlock(&lun->lun_lock); 11212 return (retval); 11213 case CTL_ACTION_PASS: 11214 case CTL_ACTION_SKIP: 11215 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11216 mtx_unlock(&lun->lun_lock); 11217 ctl_enqueue_rtr((union ctl_io *)ctsio); 11218 break; 11219 case CTL_ACTION_OVERLAP: 11220 mtx_unlock(&lun->lun_lock); 11221 ctl_set_overlapped_cmd(ctsio); 11222 ctl_done((union ctl_io *)ctsio); 11223 break; 11224 case CTL_ACTION_OVERLAP_TAG: 11225 mtx_unlock(&lun->lun_lock); 11226 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 11227 ctl_done((union ctl_io *)ctsio); 11228 break; 11229 case CTL_ACTION_ERROR: 11230 default: 11231 mtx_unlock(&lun->lun_lock); 11232 ctl_set_internal_failure(ctsio, 11233 /*sks_valid*/ 0, 11234 /*retry_count*/ 0); 11235 ctl_done((union ctl_io *)ctsio); 11236 break; 11237 } 11238 return (retval); 11239} 11240 11241static int 11242ctl_scsiio(struct ctl_scsiio *ctsio) 11243{ 11244 int retval; 11245 struct ctl_cmd_entry *entry; 11246 11247 retval = CTL_RETVAL_COMPLETE; 11248 11249 CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); 11250 11251 entry = &ctl_cmd_table[ctsio->cdb[0]]; 11252 11253 /* 11254 * If this I/O has been aborted, just send it straight to 11255 * ctl_done() without executing it. 11256 */ 11257 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { 11258 ctl_done((union ctl_io *)ctsio); 11259 goto bailout; 11260 } 11261 11262 /* 11263 * All the checks should have been handled by ctl_scsiio_precheck(). 11264 * We should be clear now to just execute the I/O. 11265 */ 11266 retval = entry->execute(ctsio); 11267 11268bailout: 11269 return (retval); 11270} 11271 11272/* 11273 * Since we only implement one target right now, a bus reset simply resets 11274 * our single target. 11275 */ 11276static int 11277ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io) 11278{ 11279 return(ctl_target_reset(ctl_softc, io, CTL_UA_BUS_RESET)); 11280} 11281 11282static int 11283ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io, 11284 ctl_ua_type ua_type) 11285{ 11286 struct ctl_lun *lun; 11287 int retval; 11288 11289 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11290 union ctl_ha_msg msg_info; 11291 11292 io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11293 msg_info.hdr.nexus = io->io_hdr.nexus; 11294 if (ua_type==CTL_UA_TARG_RESET) 11295 msg_info.task.task_action = CTL_TASK_TARGET_RESET; 11296 else 11297 msg_info.task.task_action = CTL_TASK_BUS_RESET; 11298 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11299 msg_info.hdr.original_sc = NULL; 11300 msg_info.hdr.serializing_sc = NULL; 11301 if (CTL_HA_STATUS_SUCCESS != ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11302 (void *)&msg_info, sizeof(msg_info), 0)) { 11303 } 11304 } 11305 retval = 0; 11306 11307 mtx_lock(&ctl_softc->ctl_lock); 11308 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) 11309 retval += ctl_lun_reset(lun, io, ua_type); 11310 mtx_unlock(&ctl_softc->ctl_lock); 11311 11312 return (retval); 11313} 11314 11315/* 11316 * The LUN should always be set. 
The I/O is optional, and is used to
11317 * distinguish between I/Os sent by this initiator, and by other
11318 * initiators. We set unit attention for initiators other than this one.
11319 * SAM-3 is vague on this point. It does say that a unit attention should
11320 * be established for other initiators when a LUN is reset (see section
11321 * 5.7.3), but it doesn't specifically say that the unit attention should
11322 * be established for this particular initiator when a LUN is reset. Here
11323 * is the relevant text, from SAM-3 rev 8:
11324 *
11325 * 5.7.2 When a SCSI initiator port aborts its own tasks
11326 *
11327 * When a SCSI initiator port causes its own task(s) to be aborted, no
11328 * notification that the task(s) have been aborted shall be returned to
11329 * the SCSI initiator port other than the completion response for the
11330 * command or task management function action that caused the task(s) to
11331 * be aborted and notification(s) associated with related effects of the
11332 * action (e.g., a reset unit attention condition).
11333 *
11334 * XXX KDM for now, we're setting unit attention for all initiators.
11335 */
11336static int
11337ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
11338{
11339 union ctl_io *xio;
11340#if 0
11341 uint32_t initindex;
11342#endif
11343 int i;
11344
11345 mtx_lock(&lun->lun_lock);
11346 /*
11347 * Run through the OOA queue and abort each I/O.
11348 */
11349#if 0
11350 TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) {
11351#endif
11352 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
11353 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
11354 xio->io_hdr.flags |= CTL_FLAG_ABORT;
11355 }
11356
11357 /*
11358 * This version sets unit attention for every initiator except the one that issued the reset.
11359 */
11360#if 0
11361 initindex = ctl_get_initindex(&io->io_hdr.nexus);
11362 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
11363 if (initindex == i)
11364 continue;
11365 lun->pending_sense[i].ua_pending |= ua_type;
11366 }
11367#endif
11368
11369 /*
11370 * A reset (any kind, really) clears reservations established with
11371 * RESERVE/RELEASE. It does not clear reservations established
11372 * with PERSISTENT RESERVE OUT, but we don't support that at the
11373 * moment anyway. See SPC-2, section 5.6. SPC-3 doesn't address
11374 * reservations made with the RESERVE/RELEASE commands, because
11375 * those commands are obsolete in SPC-3.
11376 */
11377 lun->flags &= ~CTL_LUN_RESERVED;
11378
11379 for (i = 0; i < CTL_MAX_INITIATORS; i++) {
11380 ctl_clear_mask(lun->have_ca, i);
11381 lun->pending_sense[i].ua_pending |= ua_type;
11382 }
11383 mtx_unlock(&lun->lun_lock);
11384
11385 return (0);
11386}
11387
11388static int
11389ctl_abort_task(union ctl_io *io)
11390{
11391 union ctl_io *xio;
11392 struct ctl_lun *lun;
11393 struct ctl_softc *ctl_softc;
11394#if 0
11395 struct sbuf sb;
11396 char printbuf[128];
11397#endif
11398 int found;
11399 uint32_t targ_lun;
11400
11401 ctl_softc = control_softc;
11402 found = 0;
11403
11404 /*
11405 * Look up the LUN. 
11406 */ 11407 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11408 mtx_lock(&ctl_softc->ctl_lock); 11409 if ((targ_lun < CTL_MAX_LUNS) 11410 && (ctl_softc->ctl_luns[targ_lun] != NULL)) 11411 lun = ctl_softc->ctl_luns[targ_lun]; 11412 else { 11413 mtx_unlock(&ctl_softc->ctl_lock); 11414 goto bailout; 11415 } 11416 11417#if 0 11418 printf("ctl_abort_task: called for lun %lld, tag %d type %d\n", 11419 lun->lun, io->taskio.tag_num, io->taskio.tag_type); 11420#endif 11421 11422 mtx_lock(&lun->lun_lock); 11423 mtx_unlock(&ctl_softc->ctl_lock); 11424 /* 11425 * Run through the OOA queue and attempt to find the given I/O. 11426 * The target port, initiator ID, tag type and tag number have to 11427 * match the values that we got from the initiator. If we have an 11428 * untagged command to abort, simply abort the first untagged command 11429 * we come to. We only allow one untagged command at a time of course. 11430 */ 11431#if 0 11432 TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) { 11433#endif 11434 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11435 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11436#if 0 11437 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN); 11438 11439 sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ", 11440 lun->lun, xio->scsiio.tag_num, 11441 xio->scsiio.tag_type, 11442 (xio->io_hdr.blocked_links.tqe_prev 11443 == NULL) ? "" : " BLOCKED", 11444 (xio->io_hdr.flags & 11445 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 11446 (xio->io_hdr.flags & 11447 CTL_FLAG_ABORT) ? " ABORT" : "", 11448 (xio->io_hdr.flags & 11449 CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : "")); 11450 ctl_scsi_command_string(&xio->scsiio, NULL, &sb); 11451 sbuf_finish(&sb); 11452 printf("%s\n", sbuf_data(&sb)); 11453#endif 11454 11455 if ((xio->io_hdr.nexus.targ_port == io->io_hdr.nexus.targ_port) 11456 && (xio->io_hdr.nexus.initid.id == 11457 io->io_hdr.nexus.initid.id)) { 11458 /* 11459 * If the abort says that the task is untagged, the 11460 * task in the queue must be untagged. Otherwise, 11461 * we just check to see whether the tag numbers 11462 * match. This is because the QLogic firmware 11463 * doesn't pass back the tag type in an abort 11464 * request. 11465 */ 11466#if 0 11467 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) 11468 && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) 11469 || (xio->scsiio.tag_num == io->taskio.tag_num)) { 11470#endif 11471 /* 11472 * XXX KDM we've got problems with FC, because it 11473 * doesn't send down a tag type with aborts. So we 11474 * can only really go by the tag number... 11475 * This may cause problems with parallel SCSI. 11476 * Need to figure that out!! 
11477 */ 11478 if (xio->scsiio.tag_num == io->taskio.tag_num) { 11479 xio->io_hdr.flags |= CTL_FLAG_ABORT; 11480 found = 1; 11481 if ((io->io_hdr.flags & 11482 CTL_FLAG_FROM_OTHER_SC) == 0 && 11483 !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11484 union ctl_ha_msg msg_info; 11485 11486 io->io_hdr.flags |= 11487 CTL_FLAG_SENT_2OTHER_SC; 11488 msg_info.hdr.nexus = io->io_hdr.nexus; 11489 msg_info.task.task_action = 11490 CTL_TASK_ABORT_TASK; 11491 msg_info.task.tag_num = 11492 io->taskio.tag_num; 11493 msg_info.task.tag_type = 11494 io->taskio.tag_type; 11495 msg_info.hdr.msg_type = 11496 CTL_MSG_MANAGE_TASKS; 11497 msg_info.hdr.original_sc = NULL; 11498 msg_info.hdr.serializing_sc = NULL; 11499#if 0 11500 printf("Sent Abort to other side\n"); 11501#endif 11502 if (CTL_HA_STATUS_SUCCESS != 11503 ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11504 (void *)&msg_info, 11505 sizeof(msg_info), 0)) { 11506 } 11507 } 11508#if 0 11509 printf("ctl_abort_task: found I/O to abort\n"); 11510#endif 11511 break; 11512 } 11513 } 11514 } 11515 mtx_unlock(&lun->lun_lock); 11516 11517bailout: 11518 11519 if (found == 0) { 11520 /* 11521 * This isn't really an error. It's entirely possible for 11522 * the abort and command completion to cross on the wire. 11523 * This is more of an informative/diagnostic error. 11524 */ 11525#if 0 11526 printf("ctl_abort_task: ABORT sent for nonexistent I/O: " 11527 "%d:%d:%d:%d tag %d type %d\n", 11528 io->io_hdr.nexus.initid.id, 11529 io->io_hdr.nexus.targ_port, 11530 io->io_hdr.nexus.targ_target.id, 11531 io->io_hdr.nexus.targ_lun, io->taskio.tag_num, 11532 io->taskio.tag_type); 11533#endif 11534 return (1); 11535 } else 11536 return (0); 11537} 11538 11539/* 11540 * This routine cannot block! It must be callable from an interrupt 11541 * handler as well as from the work thread. 
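 *
 * "Cannot block" means the usual interrupt-context rules apply:
 * nothing in here may sleep, so no mtx_sleep(9) and no M_WAITOK
 * allocations. That should hold for the body below, which only
 * flags existing I/O, sends HA messages and hands the completed
 * task to ctl_done().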
11542 */
11543static void
11544ctl_run_task(union ctl_io *io)
11545{
11546 struct ctl_softc *ctl_softc;
11547 int retval;
11548 const char *task_desc;
11549
11550 CTL_DEBUG_PRINT(("ctl_run_task\n"));
11551
11552 ctl_softc = control_softc;
11553 retval = 0;
11554
11555 KASSERT(io->io_hdr.io_type == CTL_IO_TASK,
11556 ("ctl_run_task: Unexpected io_type %d\n",
11557 io->io_hdr.io_type));
11558
11559 task_desc = ctl_scsi_task_string(&io->taskio);
11560 if (task_desc != NULL) {
11561#ifdef NEEDTOPORT
11562 csevent_log(CSC_CTL | CSC_SHELF_SW |
11563 CTL_TASK_REPORT,
11564 csevent_LogType_Trace,
11565 csevent_Severity_Information,
11566 csevent_AlertLevel_Green,
11567 csevent_FRU_Firmware,
11568 csevent_FRU_Unknown,
11569 "CTL: received task: %s",task_desc);
11570#endif
11571 } else {
11572#ifdef NEEDTOPORT
11573 csevent_log(CSC_CTL | CSC_SHELF_SW |
11574 CTL_TASK_REPORT,
11575 csevent_LogType_Trace,
11576 csevent_Severity_Information,
11577 csevent_AlertLevel_Green,
11578 csevent_FRU_Firmware,
11579 csevent_FRU_Unknown,
11580 "CTL: received unknown task "
11581 "type: %d (%#x)",
11582 io->taskio.task_action,
11583 io->taskio.task_action);
11584#endif
11585 }
11586 switch (io->taskio.task_action) {
11587 case CTL_TASK_ABORT_TASK:
11588 retval = ctl_abort_task(io);
11589 break;
11590 case CTL_TASK_ABORT_TASK_SET:
11591 break;
11592 case CTL_TASK_CLEAR_ACA:
11593 break;
11594 case CTL_TASK_CLEAR_TASK_SET:
11595 break;
11596 case CTL_TASK_LUN_RESET: {
11597 struct ctl_lun *lun;
11598 uint32_t targ_lun;
11599
11600
11601 targ_lun = io->io_hdr.nexus.targ_mapped_lun;
11602 mtx_lock(&ctl_softc->ctl_lock);
11603 if ((targ_lun < CTL_MAX_LUNS)
11604 && (ctl_softc->ctl_luns[targ_lun] != NULL))
11605 lun = ctl_softc->ctl_luns[targ_lun];
11606 else {
11607 mtx_unlock(&ctl_softc->ctl_lock);
11608 retval = 1;
11609 break;
11610 }
11611
11612 if (!(io->io_hdr.flags &
11613 CTL_FLAG_FROM_OTHER_SC)) {
11614 union ctl_ha_msg msg_info;
11615
11616 io->io_hdr.flags |=
11617 CTL_FLAG_SENT_2OTHER_SC;
11618 msg_info.hdr.msg_type =
11619 CTL_MSG_MANAGE_TASKS;
11620 msg_info.hdr.nexus = io->io_hdr.nexus;
11621 msg_info.task.task_action =
11622 CTL_TASK_LUN_RESET;
11623 msg_info.hdr.original_sc = NULL;
11624 msg_info.hdr.serializing_sc = NULL;
11625 if (CTL_HA_STATUS_SUCCESS !=
11626 ctl_ha_msg_send(CTL_HA_CHAN_CTL,
11627 (void *)&msg_info,
11628 sizeof(msg_info), 0)) {
11629 }
11630 }
11631
11632 retval = ctl_lun_reset(lun, io,
11633 CTL_UA_LUN_RESET);
11634 mtx_unlock(&ctl_softc->ctl_lock);
11635 break;
11636 }
11637 case CTL_TASK_TARGET_RESET:
11638 retval = ctl_target_reset(ctl_softc, io, CTL_UA_TARG_RESET);
11639 break;
11640 case CTL_TASK_BUS_RESET:
11641 retval = ctl_bus_reset(ctl_softc, io);
11642 break;
11643 case CTL_TASK_PORT_LOGIN:
11644 break;
11645 case CTL_TASK_PORT_LOGOUT:
11646 break;
11647 default:
11648 printf("ctl_run_task: got unknown task management event %d\n",
11649 io->taskio.task_action);
11650 break;
11651 }
11652 if (retval == 0)
11653 io->io_hdr.status = CTL_SUCCESS;
11654 else
11655 io->io_hdr.status = CTL_ERROR;
11656
11657 /*
11658 * This will queue this I/O to the done queue, but the
11659 * work thread won't be able to process it until we
11660 * return and the lock is released.
11661 */
11662 ctl_done(io);
11663}
11664
11665/*
11666 * For HA operation. Handle commands that come in from the other
11667 * controller. 
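 *
 * The dispatch below mirrors the two HA modes: in CTL_HA_MODE_XFER
 * the full I/O, including the data transfer, is shipped across the
 * link (CTL_MSG_DATAMOVE and CTL_MSG_DATAMOVE_DONE are only used
 * in that mode), while in CTL_HA_MODE_SER_ONLY only serialization
 * traffic such as CTL_MSG_SERIALIZE and CTL_MSG_R2R crosses, and
 * the data moves below CTL on the side that owns the I/O.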
11668 */ 11669static void 11670ctl_handle_isc(union ctl_io *io) 11671{ 11672 int free_io; 11673 struct ctl_lun *lun; 11674 struct ctl_softc *ctl_softc; 11675 uint32_t targ_lun; 11676 11677 ctl_softc = control_softc; 11678 11679 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11680 lun = ctl_softc->ctl_luns[targ_lun]; 11681 11682 switch (io->io_hdr.msg_type) { 11683 case CTL_MSG_SERIALIZE: 11684 free_io = ctl_serialize_other_sc_cmd(&io->scsiio); 11685 break; 11686 case CTL_MSG_R2R: { 11687 uint8_t opcode; 11688 struct ctl_cmd_entry *entry; 11689 11690 /* 11691 * This is only used in SER_ONLY mode. 11692 */ 11693 free_io = 0; 11694 opcode = io->scsiio.cdb[0]; 11695 entry = &ctl_cmd_table[opcode]; 11696 mtx_lock(&lun->lun_lock); 11697 if (ctl_scsiio_lun_check(ctl_softc, lun, 11698 entry, (struct ctl_scsiio *)io) != 0) { 11699 mtx_unlock(&lun->lun_lock); 11700 ctl_done(io); 11701 break; 11702 } 11703 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11704 mtx_unlock(&lun->lun_lock); 11705 ctl_enqueue_rtr(io); 11706 break; 11707 } 11708 case CTL_MSG_FINISH_IO: 11709 if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 11710 free_io = 0; 11711 ctl_done(io); 11712 } else { 11713 free_io = 1; 11714 mtx_lock(&lun->lun_lock); 11715 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, 11716 ooa_links); 11717 ctl_check_blocked(lun); 11718 mtx_unlock(&lun->lun_lock); 11719 } 11720 break; 11721 case CTL_MSG_PERS_ACTION: 11722 ctl_hndl_per_res_out_on_other_sc( 11723 (union ctl_ha_msg *)&io->presio.pr_msg); 11724 free_io = 1; 11725 break; 11726 case CTL_MSG_BAD_JUJU: 11727 free_io = 0; 11728 ctl_done(io); 11729 break; 11730 case CTL_MSG_DATAMOVE: 11731 /* Only used in XFER mode */ 11732 free_io = 0; 11733 ctl_datamove_remote(io); 11734 break; 11735 case CTL_MSG_DATAMOVE_DONE: 11736 /* Only used in XFER mode */ 11737 free_io = 0; 11738 io->scsiio.be_move_done(io); 11739 break; 11740 default: 11741 free_io = 1; 11742 printf("%s: Invalid message type %d\n", 11743 __func__, io->io_hdr.msg_type); 11744 break; 11745 } 11746 if (free_io) 11747 ctl_free_io(io); 11748 11749} 11750 11751 11752/* 11753 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 11754 * there is no match. 11755 */ 11756static ctl_lun_error_pattern 11757ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) 11758{ 11759 struct ctl_cmd_entry *entry; 11760 ctl_lun_error_pattern filtered_pattern, pattern; 11761 uint8_t opcode; 11762 11763 pattern = desc->error_pattern; 11764 11765 /* 11766 * XXX KDM we need more data passed into this function to match a 11767 * custom pattern, and we actually need to implement custom pattern 11768 * matching. 11769 */ 11770 if (pattern & CTL_LUN_PAT_CMD) 11771 return (CTL_LUN_PAT_CMD); 11772 11773 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) 11774 return (CTL_LUN_PAT_ANY); 11775 11776 opcode = ctsio->cdb[0]; 11777 entry = &ctl_cmd_table[opcode]; 11778 11779 filtered_pattern = entry->pattern & pattern; 11780 11781 /* 11782 * If the user requested specific flags in the pattern (e.g. 11783 * CTL_LUN_PAT_RANGE), make sure the command supports all of those 11784 * flags. 11785 * 11786 * If the user did not specify any flags, it doesn't matter whether 11787 * or not the command supports the flags. 11788 */ 11789 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != 11790 (pattern & ~CTL_LUN_PAT_MASK)) 11791 return (CTL_LUN_PAT_NONE); 11792 11793 /* 11794 * If the user asked for a range check, see if the requested LBA 11795 * range overlaps with this command's LBA range. 
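 *
 * For example, an injected descriptor covering {lba 100, len 16}
 * (blocks [100,115]) matches a WRITE(10) at LBA 108 for 8 blocks,
 * since the two ranges share [108,115], but not one at LBA 116,
 * where ctl_extent_check_lba() returns CTL_ACTION_PASS and the
 * pattern is rejected below.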
11796 */ 11797 if (filtered_pattern & CTL_LUN_PAT_RANGE) { 11798 uint64_t lba1; 11799 uint32_t len1; 11800 ctl_action action; 11801 int retval; 11802 11803 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); 11804 if (retval != 0) 11805 return (CTL_LUN_PAT_NONE); 11806 11807 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, 11808 desc->lba_range.len); 11809 /* 11810 * A "pass" means that the LBA ranges don't overlap, so 11811 * this doesn't match the user's range criteria. 11812 */ 11813 if (action == CTL_ACTION_PASS) 11814 return (CTL_LUN_PAT_NONE); 11815 } 11816 11817 return (filtered_pattern); 11818} 11819 11820static void 11821ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) 11822{ 11823 struct ctl_error_desc *desc, *desc2; 11824 11825 mtx_assert(&lun->lun_lock, MA_OWNED); 11826 11827 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 11828 ctl_lun_error_pattern pattern; 11829 /* 11830 * Check to see whether this particular command matches 11831 * the pattern in the descriptor. 11832 */ 11833 pattern = ctl_cmd_pattern_match(&io->scsiio, desc); 11834 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) 11835 continue; 11836 11837 switch (desc->lun_error & CTL_LUN_INJ_TYPE) { 11838 case CTL_LUN_INJ_ABORTED: 11839 ctl_set_aborted(&io->scsiio); 11840 break; 11841 case CTL_LUN_INJ_MEDIUM_ERR: 11842 ctl_set_medium_error(&io->scsiio); 11843 break; 11844 case CTL_LUN_INJ_UA: 11845 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET 11846 * OCCURRED */ 11847 ctl_set_ua(&io->scsiio, 0x29, 0x00); 11848 break; 11849 case CTL_LUN_INJ_CUSTOM: 11850 /* 11851 * We're assuming the user knows what he is doing. 11852 * Just copy the sense information without doing 11853 * checks. 11854 */ 11855 bcopy(&desc->custom_sense, &io->scsiio.sense_data, 11856 ctl_min(sizeof(desc->custom_sense), 11857 sizeof(io->scsiio.sense_data))); 11858 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; 11859 io->scsiio.sense_len = SSD_FULL_SIZE; 11860 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 11861 break; 11862 case CTL_LUN_INJ_NONE: 11863 default: 11864 /* 11865 * If this is an error injection type we don't know 11866 * about, clear the continuous flag (if it is set) 11867 * so it will get deleted below. 
11868 */ 11869 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 11870 break; 11871 } 11872 /* 11873 * By default, each error injection action is a one-shot 11874 */ 11875 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 11876 continue; 11877 11878 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 11879 11880 free(desc, M_CTL); 11881 } 11882} 11883 11884#ifdef CTL_IO_DELAY 11885static void 11886ctl_datamove_timer_wakeup(void *arg) 11887{ 11888 union ctl_io *io; 11889 11890 io = (union ctl_io *)arg; 11891 11892 ctl_datamove(io); 11893} 11894#endif /* CTL_IO_DELAY */ 11895 11896void 11897ctl_datamove(union ctl_io *io) 11898{ 11899 void (*fe_datamove)(union ctl_io *io); 11900 11901 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED); 11902 11903 CTL_DEBUG_PRINT(("ctl_datamove\n")); 11904 11905#ifdef CTL_TIME_IO 11906 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 11907 char str[256]; 11908 char path_str[64]; 11909 struct sbuf sb; 11910 11911 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 11912 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 11913 11914 sbuf_cat(&sb, path_str); 11915 switch (io->io_hdr.io_type) { 11916 case CTL_IO_SCSI: 11917 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 11918 sbuf_printf(&sb, "\n"); 11919 sbuf_cat(&sb, path_str); 11920 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 11921 io->scsiio.tag_num, io->scsiio.tag_type); 11922 break; 11923 case CTL_IO_TASK: 11924 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 11925 "Tag Type: %d\n", io->taskio.task_action, 11926 io->taskio.tag_num, io->taskio.tag_type); 11927 break; 11928 default: 11929 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 11930 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 11931 break; 11932 } 11933 sbuf_cat(&sb, path_str); 11934 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", 11935 (intmax_t)time_uptime - io->io_hdr.start_time); 11936 sbuf_finish(&sb); 11937 printf("%s", sbuf_data(&sb)); 11938 } 11939#endif /* CTL_TIME_IO */ 11940 11941#ifdef CTL_IO_DELAY 11942 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 11943 struct ctl_lun *lun; 11944 11945 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 11946 11947 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 11948 } else { 11949 struct ctl_lun *lun; 11950 11951 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 11952 if ((lun != NULL) 11953 && (lun->delay_info.datamove_delay > 0)) { 11954 struct callout *callout; 11955 11956 callout = (struct callout *)&io->io_hdr.timer_bytes; 11957 callout_init(callout, /*mpsafe*/ 1); 11958 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 11959 callout_reset(callout, 11960 lun->delay_info.datamove_delay * hz, 11961 ctl_datamove_timer_wakeup, io); 11962 if (lun->delay_info.datamove_type == 11963 CTL_DELAY_TYPE_ONESHOT) 11964 lun->delay_info.datamove_delay = 0; 11965 return; 11966 } 11967 } 11968#endif 11969 11970 /* 11971 * This command has been aborted. Set the port status, so we fail 11972 * the data move. 11973 */ 11974 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 11975 printf("ctl_datamove: tag 0x%04x on (%ju:%d:%ju:%d) aborted\n", 11976 io->scsiio.tag_num,(uintmax_t)io->io_hdr.nexus.initid.id, 11977 io->io_hdr.nexus.targ_port, 11978 (uintmax_t)io->io_hdr.nexus.targ_target.id, 11979 io->io_hdr.nexus.targ_lun); 11980 io->io_hdr.status = CTL_CMD_ABORTED; 11981 io->io_hdr.port_status = 31337; 11982 /* 11983 * Note that the backend, in this case, will get the 11984 * callback in its context. 
In other cases it may get 11985 * called in the frontend's interrupt thread context. 11986 */ 11987 io->scsiio.be_move_done(io); 11988 return; 11989 } 11990 11991 /* 11992 * If we're in XFER mode and this I/O is from the other shelf 11993 * controller, we need to send the DMA to the other side to 11994 * actually transfer the data to/from the host. In serialize only 11995 * mode the transfer happens below CTL and ctl_datamove() is only 11996 * called on the machine that originally received the I/O. 11997 */ 11998 if ((control_softc->ha_mode == CTL_HA_MODE_XFER) 11999 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 12000 union ctl_ha_msg msg; 12001 uint32_t sg_entries_sent; 12002 int do_sg_copy; 12003 int i; 12004 12005 memset(&msg, 0, sizeof(msg)); 12006 msg.hdr.msg_type = CTL_MSG_DATAMOVE; 12007 msg.hdr.original_sc = io->io_hdr.original_sc; 12008 msg.hdr.serializing_sc = io; 12009 msg.hdr.nexus = io->io_hdr.nexus; 12010 msg.dt.flags = io->io_hdr.flags; 12011 /* 12012 * We convert everything into a S/G list here. We can't 12013 * pass by reference, only by value between controllers. 12014 * So we can't pass a pointer to the S/G list, only as many 12015 * S/G entries as we can fit in here. If it's possible for 12016 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, 12017 * then we need to break this up into multiple transfers. 12018 */ 12019 if (io->scsiio.kern_sg_entries == 0) { 12020 msg.dt.kern_sg_entries = 1; 12021 /* 12022 * If this is in cached memory, flush the cache 12023 * before we send the DMA request to the other 12024 * controller. We want to do this in either the 12025 * read or the write case. The read case is 12026 * straightforward. In the write case, we want to 12027 * make sure nothing is in the local cache that 12028 * could overwrite the DMAed data. 12029 */ 12030 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 12031 /* 12032 * XXX KDM use bus_dmamap_sync() here. 12033 */ 12034 } 12035 12036 /* 12037 * Convert to a physical address if this is a 12038 * virtual address. 12039 */ 12040 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 12041 msg.dt.sg_list[0].addr = 12042 io->scsiio.kern_data_ptr; 12043 } else { 12044 /* 12045 * XXX KDM use busdma here! 12046 */ 12047#if 0 12048 msg.dt.sg_list[0].addr = (void *) 12049 vtophys(io->scsiio.kern_data_ptr); 12050#endif 12051 } 12052 12053 msg.dt.sg_list[0].len = io->scsiio.kern_data_len; 12054 do_sg_copy = 0; 12055 } else { 12056 struct ctl_sg_entry *sgl; 12057 12058 do_sg_copy = 1; 12059 msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; 12060 sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; 12061 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 12062 /* 12063 * XXX KDM use bus_dmamap_sync() here. 12064 */ 12065 } 12066 } 12067 12068 msg.dt.kern_data_len = io->scsiio.kern_data_len; 12069 msg.dt.kern_total_len = io->scsiio.kern_total_len; 12070 msg.dt.kern_data_resid = io->scsiio.kern_data_resid; 12071 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; 12072 msg.dt.sg_sequence = 0; 12073 12074 /* 12075 * Loop until we've sent all of the S/G entries. 
On the
12076 * other end, we'll recompose these S/G entries into one
12077 * contiguous list before passing it to the data transfer code.
12078 */
12079 for (sg_entries_sent = 0; sg_entries_sent <
12080 msg.dt.kern_sg_entries; msg.dt.sg_sequence++) {
12081 msg.dt.cur_sg_entries = ctl_min((sizeof(msg.dt.sg_list)/
12082 sizeof(msg.dt.sg_list[0])),
12083 msg.dt.kern_sg_entries - sg_entries_sent);
12084
12085 if (do_sg_copy != 0) {
12086 struct ctl_sg_entry *sgl;
12087 int j;
12088
12089 sgl = (struct ctl_sg_entry *)
12090 io->scsiio.kern_data_ptr;
12091 /*
12092 * If this is in cached memory, flush the cache
12093 * before we send the DMA request to the other
12094 * controller. We want to do this in either
12095 * the read or the write case. The read
12096 * case is straightforward. In the write
12097 * case, we want to make sure nothing is
12098 * in the local cache that could overwrite
12099 * the DMAed data.
12100 */
12101
12102 for (i = sg_entries_sent, j = 0;
12103 i < sg_entries_sent + msg.dt.cur_sg_entries; i++, j++) {
12104 if ((io->io_hdr.flags &
12105 CTL_FLAG_NO_DATASYNC) == 0) {
12106 /*
12107 * XXX KDM use bus_dmamap_sync()
12108 */
12109 }
12110 if ((io->io_hdr.flags &
12111 CTL_FLAG_BUS_ADDR) == 0) {
12112 /*
12113 * XXX KDM use busdma.
12114 */
12115#if 0
12116 msg.dt.sg_list[j].addr =(void *)
12117 vtophys(sgl[i].addr);
12118#endif
12119 } else {
12120 msg.dt.sg_list[j].addr =
12121 sgl[i].addr;
12122 }
12123 msg.dt.sg_list[j].len = sgl[i].len;
12124 }
12125 }
12126
12127 sg_entries_sent += msg.dt.cur_sg_entries;
12128 if (sg_entries_sent >= msg.dt.kern_sg_entries)
12129 msg.dt.sg_last = 1;
12130 else
12131 msg.dt.sg_last = 0;
12132
12133 /*
12134 * XXX KDM drop and reacquire the lock here?
12135 */
12136 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
12137 sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) {
12138 /*
12139 * XXX do something here.
12140 */
12141 }
12142
12143 msg.dt.sent_sg_entries = sg_entries_sent;
12144 }
12145 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
12146 if (io->io_hdr.flags & CTL_FLAG_FAILOVER)
12147 ctl_failover_io(io, /*have_lock*/ 0);
12148
12149 } else {
12150
12151 /*
12152 * Lookup the fe_datamove() function for this particular
12153 * front end.
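 * ctl_port_idx() maps the nexus target port number onto an index
 * into the softc's ctl_ports[] array; each frontend supplies its
 * own fe_datamove handler when it registers its port with CTL.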
12154 */
12155 fe_datamove =
12156 control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
12157
12158 fe_datamove(io);
12159 }
12160}
12161
12162static void
12163ctl_send_datamove_done(union ctl_io *io, int have_lock)
12164{
12165 union ctl_ha_msg msg;
12166 int isc_status;
12167
12168 memset(&msg, 0, sizeof(msg));
12169
12170 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
12171 msg.hdr.original_sc = io;
12172 msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
12173 msg.hdr.nexus = io->io_hdr.nexus;
12174 msg.hdr.status = io->io_hdr.status;
12175 msg.scsi.tag_num = io->scsiio.tag_num;
12176 msg.scsi.tag_type = io->scsiio.tag_type;
12177 msg.scsi.scsi_status = io->scsiio.scsi_status;
12178 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
12179 sizeof(io->scsiio.sense_data));
12180 msg.scsi.sense_len = io->scsiio.sense_len;
12181 msg.scsi.sense_residual = io->scsiio.sense_residual;
12182 msg.scsi.fetd_status = io->io_hdr.port_status;
12183 msg.scsi.residual = io->scsiio.residual;
12184 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
12185
12186 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
12187 ctl_failover_io(io, /*have_lock*/ have_lock);
12188 return;
12189 }
12190
12191 isc_status = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0);
12192 if (isc_status > CTL_HA_STATUS_SUCCESS) {
12193 /* XXX do something if this fails */
12194 }
12195
12196}
12197
12198/*
12199 * The DMA to the remote side is done, now we need to tell the other side
12200 * we're done so it can continue with its data movement.
12201 */
12202static void
12203ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq)
12204{
12205 union ctl_io *io;
12206
12207 io = rq->context;
12208
12209 if (rq->ret != CTL_HA_STATUS_SUCCESS) {
12210 printf("%s: ISC DMA write failed with error %d\n", __func__,
12211 rq->ret);
12212 ctl_set_internal_failure(&io->scsiio,
12213 /*sks_valid*/ 1,
12214 /*retry_count*/ rq->ret);
12215 }
12216
12217 ctl_dt_req_free(rq);
12218
12219 /*
12220 * In this case, we had to malloc the memory locally. Free it.
12221 */
12222 if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) {
12223 int i;
12224 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
12225 free(io->io_hdr.local_sglist[i].addr, M_CTL);
12226 }
12227 /*
12228 * The data is in local and remote memory, so now we need to send
12229 * status (good or bad) back to the other side.
12230 */
12231 ctl_send_datamove_done(io, /*have_lock*/ 0);
12232}
12233
12234/*
12235 * We've moved the data from the host/controller into local memory. Now we
12236 * need to push it over to the remote controller's memory.
12237 */
12238static int
12239ctl_datamove_remote_dm_write_cb(union ctl_io *io)
12240{
12241 int retval;
12242
12243 retval = 0;
12244
12245 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE,
12246 ctl_datamove_remote_write_cb);
12247
12248 return (retval);
12249}
12250
12251static void
12252ctl_datamove_remote_write(union ctl_io *io)
12253{
12254 int retval;
12255 void (*fe_datamove)(union ctl_io *io);
12256
12257 /*
12258 * - Get the data from the host/HBA into local memory.
12259 * - DMA memory from the local controller to the remote controller.
12260 * - Send status back to the remote controller.
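 *
 * Step 1 happens through the fe_datamove() call below; steps 2 and
 * 3 are driven by ctl_datamove_remote_dm_write_cb() and
 * ctl_datamove_remote_write_cb() once the frontend has finished
 * moving the data in.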
12261 */
12262
12263 retval = ctl_datamove_remote_sgl_setup(io);
12264 if (retval != 0)
12265 return;
12266
12267 /* Switch the pointer over so the FETD knows what to do */
12268 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist;
12269
12270 /*
12271 * Use a custom move done callback, since we need to send completion
12272 * back to the other controller, not to the backend on this side.
12273 */
12274 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb;
12275
12276 fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;
12277
12278 fe_datamove(io);
12279
12280 return;
12281
12282}
12283
12284static int
12285ctl_datamove_remote_dm_read_cb(union ctl_io *io)
12286{
12287#if 0
12288 char str[256];
12289 char path_str[64];
12290 struct sbuf sb;
12291#endif
12292
12293 /*
12294 * In this case, we had to malloc the memory locally. Free it.
12295 */
12296 if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) {
12297 int i;
12298 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
12299 free(io->io_hdr.local_sglist[i].addr, M_CTL);
12300 }
12301
12302#if 0
12303 ctl_scsi_path_string(io, path_str, sizeof(path_str));
12304 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
12305 sbuf_cat(&sb, path_str);
12306 ctl_scsi_command_string(&io->scsiio, NULL, &sb);
12307 sbuf_printf(&sb, "\n");
12308 sbuf_cat(&sb, path_str);
12309 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
12310 io->scsiio.tag_num, io->scsiio.tag_type);
12311 sbuf_cat(&sb, path_str);
12312 sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__,
12313 io->io_hdr.flags, io->io_hdr.status);
12314 sbuf_finish(&sb);
12315 printf("%s", sbuf_data(&sb));
12316#endif
12317
12318
12319 /*
12320 * The read is done, now we need to send status (good or bad) back
12321 * to the other side.
12322 */
12323 ctl_send_datamove_done(io, /*have_lock*/ 0);
12324
12325 return (0);
12326}
12327
12328static void
12329ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq)
12330{
12331 union ctl_io *io;
12332 void (*fe_datamove)(union ctl_io *io);
12333
12334 io = rq->context;
12335
12336 if (rq->ret != CTL_HA_STATUS_SUCCESS) {
12337 printf("%s: ISC DMA read failed with error %d\n", __func__,
12338 rq->ret);
12339 ctl_set_internal_failure(&io->scsiio,
12340 /*sks_valid*/ 1,
12341 /*retry_count*/ rq->ret);
12342 }
12343
12344 ctl_dt_req_free(rq);
12345
12346 /* Switch the pointer over so the FETD knows what to do */
12347 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist;
12348
12349 /*
12350 * Use a custom move done callback, since we need to send completion
12351 * back to the other controller, not to the backend on this side.
12352 */
12353 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb;
12354
12355 /* XXX KDM add checks like the ones in ctl_datamove?
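 * (Presumably the CTL_FLAG_ABORT check and the CTL_IO_DELAY and
 * CTL_TIME_IO handling that ctl_datamove() performs before handing
 * the I/O to the frontend.)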
*/ 12356 12357 fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 12358 12359 fe_datamove(io); 12360} 12361 12362static int 12363ctl_datamove_remote_sgl_setup(union ctl_io *io) 12364{ 12365 struct ctl_sg_entry *local_sglist, *remote_sglist; 12366 struct ctl_sg_entry *local_dma_sglist, *remote_dma_sglist; 12367 struct ctl_softc *softc; 12368 int retval; 12369 int i; 12370 12371 retval = 0; 12372 softc = control_softc; 12373 12374 local_sglist = io->io_hdr.local_sglist; 12375 local_dma_sglist = io->io_hdr.local_dma_sglist; 12376 remote_sglist = io->io_hdr.remote_sglist; 12377 remote_dma_sglist = io->io_hdr.remote_dma_sglist; 12378 12379 if (io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) { 12380 for (i = 0; i < io->scsiio.kern_sg_entries; i++) { 12381 local_sglist[i].len = remote_sglist[i].len; 12382 12383 /* 12384 * XXX Detect the situation where the RS-level I/O 12385 * redirector on the other side has already read the 12386 * data off of the AOR RS on this side, and 12387 * transferred it to remote (mirror) memory on the 12388 * other side. Since we already have the data in 12389 * memory here, we just need to use it. 12390 * 12391 * XXX KDM this can probably be removed once we 12392 * get the cache device code in and take the 12393 * current AOR implementation out. 12394 */ 12395#ifdef NEEDTOPORT 12396 if ((remote_sglist[i].addr >= 12397 (void *)vtophys(softc->mirr->addr)) 12398 && (remote_sglist[i].addr < 12399 ((void *)vtophys(softc->mirr->addr) + 12400 CacheMirrorOffset))) { 12401 local_sglist[i].addr = remote_sglist[i].addr - 12402 CacheMirrorOffset; 12403 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 12404 CTL_FLAG_DATA_IN) 12405 io->io_hdr.flags |= CTL_FLAG_REDIR_DONE; 12406 } else { 12407 local_sglist[i].addr = remote_sglist[i].addr + 12408 CacheMirrorOffset; 12409 } 12410#endif 12411#if 0 12412 printf("%s: local %p, remote %p, len %d\n", 12413 __func__, local_sglist[i].addr, 12414 remote_sglist[i].addr, local_sglist[i].len); 12415#endif 12416 } 12417 } else { 12418 uint32_t len_to_go; 12419 12420 /* 12421 * In this case, we don't have automatically allocated 12422 * memory for this I/O on this controller. This typically 12423 * happens with internal CTL I/O -- e.g. inquiry, mode 12424 * sense, etc. Anything coming from RAIDCore will have 12425 * a mirror area available. 12426 */ 12427 len_to_go = io->scsiio.kern_data_len; 12428 12429 /* 12430 * Clear the no datasync flag, we have to use malloced 12431 * buffers. 12432 */ 12433 io->io_hdr.flags &= ~CTL_FLAG_NO_DATASYNC; 12434 12435 /* 12436 * The difficult thing here is that the size of the various 12437 * S/G segments may be different than the size from the 12438 * remote controller. That'll make it harder when DMAing 12439 * the data back to the other side. 
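 *
 * For example (illustrative numbers only): if the remote side
 * described the transfer as a single 200KB segment, the loop below
 * allocates one 128KB and one 72KB local segment, so one remote
 * segment maps onto two local ones. The independent i/j walk in
 * ctl_datamove_remote_xfer() is what copes with that mismatch.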
12440 */
12441 for (i = 0; (i < sizeof(io->io_hdr.remote_sglist) /
12442 sizeof(io->io_hdr.remote_sglist[0])) &&
12443 (len_to_go > 0); i++) {
12444 local_sglist[i].len = ctl_min(len_to_go, 131072);
12445 CTL_SIZE_8B(local_dma_sglist[i].len,
12446 local_sglist[i].len);
12447 local_sglist[i].addr =
12448 malloc(local_dma_sglist[i].len, M_CTL, M_WAITOK);
12449
12450 local_dma_sglist[i].addr = local_sglist[i].addr;
12451
12452 if (local_sglist[i].addr == NULL) {
12453 int j;
12454
12455 printf("malloc failed for %zd bytes!\n",
12456 local_dma_sglist[i].len);
12457 for (j = 0; j < i; j++) {
12458 free(local_sglist[j].addr, M_CTL);
12459 }
12460 ctl_set_internal_failure(&io->scsiio,
12461 /*sks_valid*/ 1,
12462 /*retry_count*/ 4857);
12463 retval = 1;
12464 goto bailout_error;
12465
12466 }
12467 /* XXX KDM do we need a sync here? */
12468
12469 len_to_go -= local_sglist[i].len;
12470 }
12471 /*
12472 * Reset the number of S/G entries accordingly. The
12473 * original number of S/G entries is available in
12474 * rem_sg_entries.
12475 */
12476 io->scsiio.kern_sg_entries = i;
12477
12478#if 0
12479 printf("%s: kern_sg_entries = %d\n", __func__,
12480 io->scsiio.kern_sg_entries);
12481 for (i = 0; i < io->scsiio.kern_sg_entries; i++)
12482 printf("%s: sg[%d] = %p, %d (DMA: %d)\n", __func__, i,
12483 local_sglist[i].addr, local_sglist[i].len,
12484 local_dma_sglist[i].len);
12485#endif
12486
12487
12488 return (retval);
12489
12490bailout_error:
12491
12492 ctl_send_datamove_done(io, /*have_lock*/ 0);
12493
12494 return (retval);
12495}
12496
12497static int
12498ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
12499 ctl_ha_dt_cb callback)
12500{
12501 struct ctl_ha_dt_req *rq;
12502 struct ctl_sg_entry *remote_sglist, *local_sglist;
12503 struct ctl_sg_entry *remote_dma_sglist, *local_dma_sglist;
12504 uint32_t local_used, remote_used, total_used;
12505 int retval;
12506 int i, j;
12507
12508 retval = 0;
12509
12510 rq = ctl_dt_req_alloc();
12511
12512 /*
12513 * If we failed to allocate the request, and if the DMA didn't fail
12514 * anyway, set busy status. This is just a resource allocation
12515 * failure.
12516 */
12517 if ((rq == NULL)
12518 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
12519 ctl_set_busy(&io->scsiio);
12520
12521 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) {
12522
12523 if (rq != NULL)
12524 ctl_dt_req_free(rq);
12525
12526 /*
12527 * The data move failed. We need to return status back
12528 * to the other controller. No point in trying to DMA
12529 * data to the remote controller.
12530 */
12531
12532 ctl_send_datamove_done(io, /*have_lock*/ 0);
12533
12534 retval = 1;
12535
12536 goto bailout;
12537 }
12538
12539 local_sglist = io->io_hdr.local_sglist;
12540
12541 local_dma_sglist = io->io_hdr.local_dma_sglist;
12542 remote_sglist = io->io_hdr.remote_sglist;
12543 remote_dma_sglist = io->io_hdr.remote_dma_sglist;
12544 local_used = 0;
12545 remote_used = 0;
12546 total_used = 0;
12547
12548 if (io->io_hdr.flags & CTL_FLAG_REDIR_DONE) {
12549 rq->ret = CTL_HA_STATUS_SUCCESS;
12550 rq->context = io;
12551 callback(rq);
12552 goto bailout;
12553 }
12554
12555 /*
12556 * Pull/push the data over the wire from/to the other controller.
12557 * This takes into account the possibility that the local and
12558 * remote sglists may not be identical in terms of the size of
12559 * the elements and the number of elements.
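 * cur_len below is therefore always the minimum of what remains in
 * the current local segment and the current remote segment, and
 * the i/j indices advance independently as each side's segment is
 * consumed.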
12560 *
12561 * One fundamental assumption here is that the length allocated for
12562 * both the local and remote sglists is identical. Otherwise, we've
12563 * essentially got a coding error of some sort.
12564 */
12565 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) {
12566 int isc_ret;
12567 uint32_t cur_len, dma_length;
12568 uint8_t *tmp_ptr;
12569
12570 rq->id = CTL_HA_DATA_CTL;
12571 rq->command = command;
12572 rq->context = io;
12573
12574 /*
12575 * Both pointers should be aligned. But it is possible
12576 * that the allocation length is not. They should both
12577 * also have enough slack left over at the end, though,
12578 * to round up to the next 8 byte boundary.
12579 */
12580 cur_len = ctl_min(local_sglist[i].len - local_used,
12581 remote_sglist[j].len - remote_used);
12582
12583 /*
12584 * In this case, we have a size issue and need to decrease
12585 * the size, except in the case where we actually have less
12586 * than 8 bytes left. In that case, we need to increase
12587 * the DMA length to get the last bit.
12588 */
12589 if ((cur_len & 0x7) != 0) {
12590 if (cur_len > 0x7) {
12591 cur_len = cur_len - (cur_len & 0x7);
12592 dma_length = cur_len;
12593 } else {
12594 CTL_SIZE_8B(dma_length, cur_len);
12595 }
12596
12597 } else
12598 dma_length = cur_len;
12599
12600 /*
12601 * If we had to allocate memory for this I/O, instead of using
12602 * the non-cached mirror memory, we'll need to flush the cache
12603 * before trying to DMA to the other controller.
12604 *
12605 * We could end up doing this multiple times for the same
12606 * segment if we have a larger local segment than remote
12607 * segment. That shouldn't be an issue.
12608 */
12609 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
12610 /*
12611 * XXX KDM use bus_dmamap_sync() here.
12612 */
12613 }
12614
12615 rq->size = dma_length;
12616
12617 tmp_ptr = (uint8_t *)local_sglist[i].addr;
12618 tmp_ptr += local_used;
12619
12620 /* Use physical addresses when talking to ISC hardware */
12621 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) {
12622 /* XXX KDM use busdma */
12623#if 0
12624 rq->local = vtophys(tmp_ptr);
12625#endif
12626 } else
12627 rq->local = tmp_ptr;
12628
12629 tmp_ptr = (uint8_t *)remote_sglist[j].addr;
12630 tmp_ptr += remote_used;
12631 rq->remote = tmp_ptr;
12632
12633 rq->callback = NULL;
12634
12635 local_used += cur_len;
12636 if (local_used >= local_sglist[i].len) {
12637 i++;
12638 local_used = 0;
12639 }
12640
12641 remote_used += cur_len;
12642 if (remote_used >= remote_sglist[j].len) {
12643 j++;
12644 remote_used = 0;
12645 }
12646 total_used += cur_len;
12647
12648 if (total_used >= io->scsiio.kern_data_len)
12649 rq->callback = callback;
12650
12651 if ((rq->size & 0x7) != 0) {
12652 printf("%s: warning: size %d is not on 8b boundary\n",
12653 __func__, rq->size);
12654 }
12655 if (((uintptr_t)rq->local & 0x7) != 0) {
12656 printf("%s: warning: local %p not on 8b boundary\n",
12657 __func__, rq->local);
12658 }
12659 if (((uintptr_t)rq->remote & 0x7) != 0) {
12660 printf("%s: warning: remote %p not on 8b boundary\n",
12661 __func__, rq->remote);
12662 }
12663#if 0
12664 printf("%s: %s: local %#x remote %#x size %d\n", __func__,
12665 (command == CTL_HA_DT_CMD_WRITE) ?
"WRITE" : "READ", 12666 rq->local, rq->remote, rq->size); 12667#endif 12668 12669 isc_ret = ctl_dt_single(rq); 12670 if (isc_ret == CTL_HA_STATUS_WAIT) 12671 continue; 12672 12673 if (isc_ret == CTL_HA_STATUS_DISCONNECT) { 12674 rq->ret = CTL_HA_STATUS_SUCCESS; 12675 } else { 12676 rq->ret = isc_ret; 12677 } 12678 callback(rq); 12679 goto bailout; 12680 } 12681 12682bailout: 12683 return (retval); 12684 12685} 12686 12687static void 12688ctl_datamove_remote_read(union ctl_io *io) 12689{ 12690 int retval; 12691 int i; 12692 12693 /* 12694 * This will send an error to the other controller in the case of a 12695 * failure. 12696 */ 12697 retval = ctl_datamove_remote_sgl_setup(io); 12698 if (retval != 0) 12699 return; 12700 12701 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 12702 ctl_datamove_remote_read_cb); 12703 if ((retval != 0) 12704 && ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0)) { 12705 /* 12706 * Make sure we free memory if there was an error.. The 12707 * ctl_datamove_remote_xfer() function will send the 12708 * datamove done message, or call the callback with an 12709 * error if there is a problem. 12710 */ 12711 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12712 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12713 } 12714 12715 return; 12716} 12717 12718/* 12719 * Process a datamove request from the other controller. This is used for 12720 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 12721 * first. Once that is complete, the data gets DMAed into the remote 12722 * controller's memory. For reads, we DMA from the remote controller's 12723 * memory into our memory first, and then move it out to the FETD. 12724 */ 12725static void 12726ctl_datamove_remote(union ctl_io *io) 12727{ 12728 struct ctl_softc *softc; 12729 12730 softc = control_softc; 12731 12732 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 12733 12734 /* 12735 * Note that we look for an aborted I/O here, but don't do some of 12736 * the other checks that ctl_datamove() normally does. We don't 12737 * need to run the task queue, because this I/O is on the ISC 12738 * queue, which is executed by the work thread after the task queue. 12739 * We don't need to run the datamove delay code, since that should 12740 * have been done if need be on the other controller. 
12741 */
12742 if (io->io_hdr.flags & CTL_FLAG_ABORT) {
12743
12744 printf("%s: tag 0x%04x on (%ju:%d:%ju:%d) aborted\n", __func__,
12745 io->scsiio.tag_num, (uintmax_t)io->io_hdr.nexus.initid.id,
12746 io->io_hdr.nexus.targ_port,
12747 (uintmax_t)io->io_hdr.nexus.targ_target.id,
12748 io->io_hdr.nexus.targ_lun);
12749 io->io_hdr.status = CTL_CMD_ABORTED;
12750 io->io_hdr.port_status = 31338;
12751
12752 ctl_send_datamove_done(io, /*have_lock*/ 0);
12753
12754 return;
12755 }
12756
12757 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) {
12758 ctl_datamove_remote_write(io);
12759 } else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) {
12760 ctl_datamove_remote_read(io);
12761 } else {
12762 union ctl_ha_msg msg;
12763 struct scsi_sense_data *sense;
12764 uint8_t sks[3];
12765 int retry_count;
12766
12767 memset(&msg, 0, sizeof(msg));
12768
12769 msg.hdr.msg_type = CTL_MSG_BAD_JUJU;
12770 msg.hdr.status = CTL_SCSI_ERROR;
12771 msg.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
12772
12773 retry_count = 4243;
12774
12775 sense = &msg.scsi.sense_data;
12776 sks[0] = SSD_SCS_VALID;
12777 sks[1] = (retry_count >> 8) & 0xff;
12778 sks[2] = retry_count & 0xff;
12779
12780 /* "Internal target failure" */
12781 scsi_set_sense_data(sense,
12782 /*sense_format*/ SSD_TYPE_NONE,
12783 /*current_error*/ 1,
12784 /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
12785 /*asc*/ 0x44,
12786 /*ascq*/ 0x00,
12787 /*type*/ SSD_ELEM_SKS,
12788 /*size*/ sizeof(sks),
12789 /*data*/ sks,
12790 SSD_ELEM_NONE);
12791
12792 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
12793 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
12794 ctl_failover_io(io, /*have_lock*/ 1);
12795 return;
12796 }
12797
12798 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0) >
12799 CTL_HA_STATUS_SUCCESS) {
12800 /* XXX KDM what to do if this fails?
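 * (The same pattern -- a ctl_ha_msg_send() failure that is noted
 * but not acted upon -- appears in ctl_send_datamove_done() and
 * ctl_process_done() as well.)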
*/ 12801 } 12802 return; 12803 } 12804 12805} 12806 12807static int 12808ctl_process_done(union ctl_io *io) 12809{ 12810 struct ctl_lun *lun; 12811 struct ctl_softc *ctl_softc; 12812 void (*fe_done)(union ctl_io *io); 12813 uint32_t targ_port = ctl_port_idx(io->io_hdr.nexus.targ_port); 12814 12815 CTL_DEBUG_PRINT(("ctl_process_done\n")); 12816 12817 fe_done = 12818 control_softc->ctl_ports[targ_port]->fe_done; 12819 12820#ifdef CTL_TIME_IO 12821 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12822 char str[256]; 12823 char path_str[64]; 12824 struct sbuf sb; 12825 12826 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12827 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12828 12829 sbuf_cat(&sb, path_str); 12830 switch (io->io_hdr.io_type) { 12831 case CTL_IO_SCSI: 12832 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12833 sbuf_printf(&sb, "\n"); 12834 sbuf_cat(&sb, path_str); 12835 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12836 io->scsiio.tag_num, io->scsiio.tag_type); 12837 break; 12838 case CTL_IO_TASK: 12839 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12840 "Tag Type: %d\n", io->taskio.task_action, 12841 io->taskio.tag_num, io->taskio.tag_type); 12842 break; 12843 default: 12844 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12845 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12846 break; 12847 } 12848 sbuf_cat(&sb, path_str); 12849 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", 12850 (intmax_t)time_uptime - io->io_hdr.start_time); 12851 sbuf_finish(&sb); 12852 printf("%s", sbuf_data(&sb)); 12853 } 12854#endif /* CTL_TIME_IO */ 12855 12856 switch (io->io_hdr.io_type) { 12857 case CTL_IO_SCSI: 12858 break; 12859 case CTL_IO_TASK: 12860 if (bootverbose || verbose > 0) 12861 ctl_io_error_print(io, NULL); 12862 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 12863 ctl_free_io(io); 12864 else 12865 fe_done(io); 12866 return (CTL_RETVAL_COMPLETE); 12867 break; 12868 default: 12869 printf("ctl_process_done: invalid io type %d\n", 12870 io->io_hdr.io_type); 12871 panic("ctl_process_done: invalid io type %d\n", 12872 io->io_hdr.io_type); 12873 break; /* NOTREACHED */ 12874 } 12875 12876 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12877 if (lun == NULL) { 12878 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", 12879 io->io_hdr.nexus.targ_mapped_lun)); 12880 fe_done(io); 12881 goto bailout; 12882 } 12883 ctl_softc = lun->ctl_softc; 12884 12885 mtx_lock(&lun->lun_lock); 12886 12887 /* 12888 * Check to see if we have any errors to inject here. We only 12889 * inject errors for commands that don't already have errors set. 12890 */ 12891 if ((STAILQ_FIRST(&lun->error_list) != NULL) 12892 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) 12893 ctl_inject_error(lun, io); 12894 12895 /* 12896 * XXX KDM how do we treat commands that aren't completed 12897 * successfully? 12898 * 12899 * XXX KDM should we also track I/O latency? 
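 * (When CTL_TIME_IO is defined, the block below does accumulate
 * elapsed time and DMA time per port and per I/O type via bintime;
 * note that only successfully completed SCSI I/O is counted.)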
12900 */
12901 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS &&
12902 io->io_hdr.io_type == CTL_IO_SCSI) {
12903#ifdef CTL_TIME_IO
12904 struct bintime cur_bt;
12905#endif
12906 int type;
12907
12908 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
12909 CTL_FLAG_DATA_IN)
12910 type = CTL_STATS_READ;
12911 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
12912 CTL_FLAG_DATA_OUT)
12913 type = CTL_STATS_WRITE;
12914 else
12915 type = CTL_STATS_NO_IO;
12916
12917 lun->stats.ports[targ_port].bytes[type] +=
12918 io->scsiio.kern_total_len;
12919 lun->stats.ports[targ_port].operations[type]++;
12920#ifdef CTL_TIME_IO
12921 bintime_add(&lun->stats.ports[targ_port].dma_time[type],
12922 &io->io_hdr.dma_bt);
12923 lun->stats.ports[targ_port].num_dmas[type] +=
12924 io->io_hdr.num_dmas;
12925 getbintime(&cur_bt);
12926 bintime_sub(&cur_bt, &io->io_hdr.start_bt);
12927 bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt);
12928#endif
12929 }
12930
12931 /*
12932 * Remove this from the OOA queue.
12933 */
12934 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
12935
12936 /*
12937 * Run through the blocked queue on this LUN and see if anything
12938 * has become unblocked, now that this transaction is done.
12939 */
12940 ctl_check_blocked(lun);
12941
12942 /*
12943 * If the LUN has been invalidated, free it if there is nothing
12944 * left on its OOA queue.
12945 */
12946 if ((lun->flags & CTL_LUN_INVALID)
12947 && TAILQ_EMPTY(&lun->ooa_queue)) {
12948 mtx_unlock(&lun->lun_lock);
12949 mtx_lock(&ctl_softc->ctl_lock);
12950 ctl_free_lun(lun);
12951 mtx_unlock(&ctl_softc->ctl_lock);
12952 } else
12953 mtx_unlock(&lun->lun_lock);
12954
12955 /*
12956 * If this command has been aborted, make sure we set the status
12957 * properly. The FETD is responsible for freeing the I/O and doing
12958 * whatever it needs to do to clean up its state.
12959 */
12960 if (io->io_hdr.flags & CTL_FLAG_ABORT)
12961 io->io_hdr.status = CTL_CMD_ABORTED;
12962
12963 /*
12964 * Status is printed for every task management command; that
12965 * happens above, in the CTL_IO_TASK case, since task management
12966 * commands never make it down here. For SCSI commands we filter
12967 * out unit attention errors: they happen on every boot and would
12968 * clutter up the log.
12969 */
12970 switch (io->io_hdr.io_type) {
12971 case CTL_IO_SCSI: {
12972 int error_code, sense_key, asc, ascq;
12973
12974 sense_key = 0;
12975
12976 if (((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR)
12977 && (io->scsiio.scsi_status == SCSI_STATUS_CHECK_COND)) {
12978 /*
12979 * Since this is just for printing, no need to
12980 * show errors here.
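 * (Hence show_errors is passed as 0 to scsi_extract_sense_len()
 * below.)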
12981 */ 12982 scsi_extract_sense_len(&io->scsiio.sense_data, 12983 io->scsiio.sense_len, 12984 &error_code, 12985 &sense_key, 12986 &asc, 12987 &ascq, 12988 /*show_errors*/ 0); 12989 } 12990 12991 if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) 12992 && (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SCSI_ERROR) 12993 || (io->scsiio.scsi_status != SCSI_STATUS_CHECK_COND) 12994 || (sense_key != SSD_KEY_UNIT_ATTENTION))) { 12995 12996 if ((time_uptime - ctl_softc->last_print_jiffies) <= 0){ 12997 ctl_softc->skipped_prints++; 12998 } else { 12999 uint32_t skipped_prints; 13000 13001 skipped_prints = ctl_softc->skipped_prints; 13002 13003 ctl_softc->skipped_prints = 0; 13004 ctl_softc->last_print_jiffies = time_uptime; 13005 13006 if (skipped_prints > 0) { 13007#ifdef NEEDTOPORT 13008 csevent_log(CSC_CTL | CSC_SHELF_SW | 13009 CTL_ERROR_REPORT, 13010 csevent_LogType_Trace, 13011 csevent_Severity_Information, 13012 csevent_AlertLevel_Green, 13013 csevent_FRU_Firmware, 13014 csevent_FRU_Unknown, 13015 "High CTL error volume, %d prints " 13016 "skipped", skipped_prints); 13017#endif 13018 } 13019 if (bootverbose || verbose > 0) 13020 ctl_io_error_print(io, NULL); 13021 } 13022 } 13023 break; 13024 } 13025 case CTL_IO_TASK: 13026 if (bootverbose || verbose > 0) 13027 ctl_io_error_print(io, NULL); 13028 break; 13029 default: 13030 break; 13031 } 13032 13033 /* 13034 * Tell the FETD or the other shelf controller we're done with this 13035 * command. Note that only SCSI commands get to this point. Task 13036 * management commands are completed above. 13037 * 13038 * We only send status to the other controller if we're in XFER 13039 * mode. In SER_ONLY mode, the I/O is done on the controller that 13040 * received the I/O (from CTL's perspective), and so the status is 13041 * generated there. 13042 * 13043 * XXX KDM if we hold the lock here, we could cause a deadlock 13044 * if the frontend comes back in in this context to queue 13045 * something. 13046 */ 13047 if ((ctl_softc->ha_mode == CTL_HA_MODE_XFER) 13048 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 13049 union ctl_ha_msg msg; 13050 13051 memset(&msg, 0, sizeof(msg)); 13052 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 13053 msg.hdr.original_sc = io->io_hdr.original_sc; 13054 msg.hdr.nexus = io->io_hdr.nexus; 13055 msg.hdr.status = io->io_hdr.status; 13056 msg.scsi.scsi_status = io->scsiio.scsi_status; 13057 msg.scsi.tag_num = io->scsiio.tag_num; 13058 msg.scsi.tag_type = io->scsiio.tag_type; 13059 msg.scsi.sense_len = io->scsiio.sense_len; 13060 msg.scsi.sense_residual = io->scsiio.sense_residual; 13061 msg.scsi.residual = io->scsiio.residual; 13062 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 13063 sizeof(io->scsiio.sense_data)); 13064 /* 13065 * We copy this whether or not this is an I/O-related 13066 * command. Otherwise, we'd have to go and check to see 13067 * whether it's a read/write command, and it really isn't 13068 * worth it. 13069 */ 13070 memcpy(&msg.scsi.lbalen, 13071 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 13072 sizeof(msg.scsi.lbalen)); 13073 13074 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 13075 sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) { 13076 /* XXX do something here */ 13077 } 13078 13079 ctl_free_io(io); 13080 } else 13081 fe_done(io); 13082 13083bailout: 13084 13085 return (CTL_RETVAL_COMPLETE); 13086} 13087 13088/* 13089 * Front end should call this if it doesn't do autosense. When the request 13090 * sense comes back in from the initiator, we'll dequeue this and send it. 
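 *
 * A hypothetical sketch of the caller side (sense_io is an
 * illustrative name for an I/O whose sense_data the frontend has
 * filled in itself):
 *
 *	if ((sense_io->io_hdr.status & CTL_AUTOSENSE) == 0)
 *		ctl_queue_sense(sense_io);
 *
 * ctl_queue_sense() then latches the sense bytes as a contingent
 * allegiance condition for that initiator and LUN, to be returned
 * on a later REQUEST SENSE.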
13091 */ 13092int 13093ctl_queue_sense(union ctl_io *io) 13094{ 13095 struct ctl_lun *lun; 13096 struct ctl_softc *ctl_softc; 13097 uint32_t initidx, targ_lun; 13098 13099 ctl_softc = control_softc; 13100 13101 CTL_DEBUG_PRINT(("ctl_queue_sense\n")); 13102 13103 /* 13104 * LUN lookup will likely move to the ctl_work_thread() once we 13105 * have our new queueing infrastructure (that doesn't put things on 13106 * a per-LUN queue initially). That is so that we can handle 13107 * things like an INQUIRY to a LUN that we don't have enabled. We 13108 * can't deal with that right now. 13109 */ 13110 mtx_lock(&ctl_softc->ctl_lock); 13111 13112 /* 13113 * If we don't have a LUN for this, just toss the sense 13114 * information. 13115 */ 13116 targ_lun = io->io_hdr.nexus.targ_lun; 13117 if (io->io_hdr.nexus.lun_map_fn != NULL) 13118 targ_lun = io->io_hdr.nexus.lun_map_fn(io->io_hdr.nexus.lun_map_arg, targ_lun); 13119 if ((targ_lun < CTL_MAX_LUNS) 13120 && (ctl_softc->ctl_luns[targ_lun] != NULL)) 13121 lun = ctl_softc->ctl_luns[targ_lun]; 13122 else 13123 goto bailout; 13124 13125 initidx = ctl_get_initindex(&io->io_hdr.nexus); 13126 13127 mtx_lock(&lun->lun_lock); 13128 /* 13129 * Already have CA set for this LUN...toss the sense information. 13130 */ 13131 if (ctl_is_set(lun->have_ca, initidx)) { 13132 mtx_unlock(&lun->lun_lock); 13133 goto bailout; 13134 } 13135 13136 memcpy(&lun->pending_sense[initidx].sense, &io->scsiio.sense_data, 13137 ctl_min(sizeof(lun->pending_sense[initidx].sense), 13138 sizeof(io->scsiio.sense_data))); 13139 ctl_set_mask(lun->have_ca, initidx); 13140 mtx_unlock(&lun->lun_lock); 13141 13142bailout: 13143 mtx_unlock(&ctl_softc->ctl_lock); 13144 13145 ctl_free_io(io); 13146 13147 return (CTL_RETVAL_COMPLETE); 13148} 13149 13150/* 13151 * Primary command inlet from frontend ports. All SCSI and task I/O 13152 * requests must go through this function. 13153 */ 13154int 13155ctl_queue(union ctl_io *io) 13156{ 13157 struct ctl_softc *ctl_softc; 13158 13159 CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0])); 13160 13161 ctl_softc = control_softc; 13162 13163#ifdef CTL_TIME_IO 13164 io->io_hdr.start_time = time_uptime; 13165 getbintime(&io->io_hdr.start_bt); 13166#endif /* CTL_TIME_IO */ 13167 13168 /* Map FE-specific LUN ID into global one. */ 13169 if (io->io_hdr.nexus.lun_map_fn != NULL) 13170 io->io_hdr.nexus.targ_mapped_lun = io->io_hdr.nexus.lun_map_fn( 13171 io->io_hdr.nexus.lun_map_arg, io->io_hdr.nexus.targ_lun); 13172 else 13173 io->io_hdr.nexus.targ_mapped_lun = io->io_hdr.nexus.targ_lun; 13174 13175 switch (io->io_hdr.io_type) { 13176 case CTL_IO_SCSI: 13177 ctl_enqueue_incoming(io); 13178 break; 13179 case CTL_IO_TASK: 13180 ctl_run_task(io); 13181 break; 13182 default: 13183 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type); 13184 return (EINVAL); 13185 } 13186 13187 return (CTL_RETVAL_COMPLETE); 13188} 13189 13190#ifdef CTL_IO_DELAY 13191static void 13192ctl_done_timer_wakeup(void *arg) 13193{ 13194 union ctl_io *io; 13195 13196 io = (union ctl_io *)arg; 13197 ctl_done(io); 13198} 13199#endif /* CTL_IO_DELAY */ 13200 13201void 13202ctl_done(union ctl_io *io) 13203{ 13204 struct ctl_softc *ctl_softc; 13205 13206 ctl_softc = control_softc; 13207 13208 /* 13209 * Enable this to catch duplicate completion issues. 
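 * (Change the #if 0 below to #if 1 to turn it on; the check relies
 * on the CTL_FLAG_ALREADY_DONE flag that this block sets.)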
13210 */ 13211#if 0 13212 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) { 13213 printf("%s: type %d msg %d cdb %x iptl: " 13214 "%d:%d:%d:%d tag 0x%04x " 13215 "flag %#x status %x\n", 13216 __func__, 13217 io->io_hdr.io_type, 13218 io->io_hdr.msg_type, 13219 io->scsiio.cdb[0], 13220 io->io_hdr.nexus.initid.id, 13221 io->io_hdr.nexus.targ_port, 13222 io->io_hdr.nexus.targ_target.id, 13223 io->io_hdr.nexus.targ_lun, 13224 (io->io_hdr.io_type == 13225 CTL_IO_TASK) ? 13226 io->taskio.tag_num : 13227 io->scsiio.tag_num, 13228 io->io_hdr.flags, 13229 io->io_hdr.status); 13230 } else 13231 io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE; 13232#endif 13233 13234 /* 13235 * This is an internal copy of an I/O, and should not go through 13236 * the normal done processing logic. 13237 */ 13238 if (io->io_hdr.flags & CTL_FLAG_INT_COPY) 13239 return; 13240 13241 /* 13242 * We need to send a msg to the serializing shelf to finish the IO 13243 * as well. We don't send a finish message to the other shelf if 13244 * this is a task management command. Task management commands 13245 * aren't serialized in the OOA queue, but rather just executed on 13246 * both shelf controllers for commands that originated on that 13247 * controller. 13248 */ 13249 if ((io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC) 13250 && (io->io_hdr.io_type != CTL_IO_TASK)) { 13251 union ctl_ha_msg msg_io; 13252 13253 msg_io.hdr.msg_type = CTL_MSG_FINISH_IO; 13254 msg_io.hdr.serializing_sc = io->io_hdr.serializing_sc; 13255 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_io, 13256 sizeof(msg_io), 0 ) != CTL_HA_STATUS_SUCCESS) { 13257 } 13258 /* continue on to finish IO */ 13259 } 13260#ifdef CTL_IO_DELAY 13261 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 13262 struct ctl_lun *lun; 13263 13264 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13265 13266 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 13267 } else { 13268 struct ctl_lun *lun; 13269 13270 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13271 13272 if ((lun != NULL) 13273 && (lun->delay_info.done_delay > 0)) { 13274 struct callout *callout; 13275 13276 callout = (struct callout *)&io->io_hdr.timer_bytes; 13277 callout_init(callout, /*mpsafe*/ 1); 13278 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 13279 callout_reset(callout, 13280 lun->delay_info.done_delay * hz, 13281 ctl_done_timer_wakeup, io); 13282 if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT) 13283 lun->delay_info.done_delay = 0; 13284 return; 13285 } 13286 } 13287#endif /* CTL_IO_DELAY */ 13288 13289 ctl_enqueue_done(io); 13290} 13291 13292int 13293ctl_isc(struct ctl_scsiio *ctsio) 13294{ 13295 struct ctl_lun *lun; 13296 int retval; 13297 13298 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13299 13300 CTL_DEBUG_PRINT(("ctl_isc: command: %02x\n", ctsio->cdb[0])); 13301 13302 CTL_DEBUG_PRINT(("ctl_isc: calling data_submit()\n")); 13303 13304 retval = lun->backend->data_submit((union ctl_io *)ctsio); 13305 13306 return (retval); 13307} 13308 13309 13310static void 13311ctl_work_thread(void *arg) 13312{ 13313 struct ctl_thread *thr = (struct ctl_thread *)arg; 13314 struct ctl_softc *softc = thr->ctl_softc; 13315 union ctl_io *io; 13316 int retval; 13317 13318 CTL_DEBUG_PRINT(("ctl_work_thread starting\n")); 13319 13320 for (;;) { 13321 retval = 0; 13322 13323 /* 13324 * We handle the queues in this order: 13325 * - ISC 13326 * - done queue (to free up resources, unblock other commands) 13327 * - RtR queue 13328 * - incoming queue 13329 * 13330 * If those queues are empty, we break 
out of the loop and 13331 * go to sleep. 13332 */ 13333 mtx_lock(&thr->queue_lock); 13334 io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue); 13335 if (io != NULL) { 13336 STAILQ_REMOVE_HEAD(&thr->isc_queue, links); 13337 mtx_unlock(&thr->queue_lock); 13338 ctl_handle_isc(io); 13339 continue; 13340 } 13341 io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue); 13342 if (io != NULL) { 13343 STAILQ_REMOVE_HEAD(&thr->done_queue, links); 13344 /* clear any blocked commands, call fe_done */ 13345 mtx_unlock(&thr->queue_lock); 13346 retval = ctl_process_done(io); 13347 continue; 13348 } 13349 if (!ctl_pause_rtr) { 13350 io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue); 13351 if (io != NULL) { 13352 STAILQ_REMOVE_HEAD(&thr->rtr_queue, links); 13353 mtx_unlock(&thr->queue_lock); 13354 retval = ctl_scsiio(&io->scsiio); 13355 if (retval != CTL_RETVAL_COMPLETE) 13356 CTL_DEBUG_PRINT(("ctl_scsiio failed\n")); 13357 continue; 13358 } 13359 } 13360 io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue); 13361 if (io != NULL) { 13362 STAILQ_REMOVE_HEAD(&thr->incoming_queue, links); 13363 mtx_unlock(&thr->queue_lock); 13364 ctl_scsiio_precheck(softc, &io->scsiio); 13365 continue; 13366 } 13367 13368 /* Sleep until we have something to do. */ 13369 mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0); 13370 } 13371} 13372 13373static void 13374ctl_lun_thread(void *arg) 13375{ 13376 struct ctl_softc *softc = (struct ctl_softc *)arg; 13377 struct ctl_be_lun *be_lun; 13378 int retval; 13379 13380 CTL_DEBUG_PRINT(("ctl_lun_thread starting\n")); 13381 13382 for (;;) { 13383 retval = 0; 13384 mtx_lock(&softc->ctl_lock); 13385 be_lun = STAILQ_FIRST(&softc->pending_lun_queue); 13386 if (be_lun != NULL) { 13387 STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links); 13388 mtx_unlock(&softc->ctl_lock); 13389 ctl_create_lun(be_lun); 13390 continue; 13391 } 13392 13393 /* Sleep until we have something to do. 
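 * The matching wakeup() uses &softc->pending_lun_queue as its
 * sleep channel and is issued when a new LUN is queued for
 * creation.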
*/
13394 mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock,
13395 PDROP | PRIBIO, "-", 0);
13396 }
13397}
13398
13399static void
13400ctl_enqueue_incoming(union ctl_io *io)
13401{
13402 struct ctl_softc *softc = control_softc;
13403 struct ctl_thread *thr;
13404
13405 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
13406 mtx_lock(&thr->queue_lock);
13407 STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
13408 mtx_unlock(&thr->queue_lock);
13409 wakeup(thr);
13410}
13411
13412static void
13413ctl_enqueue_rtr(union ctl_io *io)
13414{
13415 struct ctl_softc *softc = control_softc;
13416 struct ctl_thread *thr;
13417
13418 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
13419 mtx_lock(&thr->queue_lock);
13420 STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
13421 mtx_unlock(&thr->queue_lock);
13422 wakeup(thr);
13423}
13424
13425static void
13426ctl_enqueue_done(union ctl_io *io)
13427{
13428 struct ctl_softc *softc = control_softc;
13429 struct ctl_thread *thr;
13430
13431 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
13432 mtx_lock(&thr->queue_lock);
13433 STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
13434 mtx_unlock(&thr->queue_lock);
13435 wakeup(thr);
13436}
13437
13438static void
13439ctl_enqueue_isc(union ctl_io *io)
13440{
13441 struct ctl_softc *softc = control_softc;
13442 struct ctl_thread *thr;
13443
13444 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
13445 mtx_lock(&thr->queue_lock);
13446 STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
13447 mtx_unlock(&thr->queue_lock);
13448 wakeup(thr);
13449}
13450
13451/* Initialization and failover */
13452
13453void
13454ctl_init_isc_msg(void)
13455{
13456 printf("CTL: Still calling this thing\n");
13457}
13458
13459/*
13460 * Init component
13461 * Initializes the component into the configuration defined by bootMode
13462 * (see hasc-sv.c)
13463 * returns hasc_Status:
13464 * OK
13465 * ERROR - fatal error
13466 */
13467static ctl_ha_comp_status
13468ctl_isc_init(struct ctl_ha_component *c)
13469{
13470 ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;
13471
13472 c->status = ret;
13473 return ret;
13474}
13475
13476/* Start component
13477 * Starts the component in the state requested. If the component starts
13478 * successfully, it must set its own state to the requested state.
13479 * When requested state is HASC_STATE_HA, the component may refine it
13480 * by adding _SLAVE or _MASTER flags.
Currently allowed state transitions are:
13482 * UNKNOWN->HA - initial startup
13483 * UNKNOWN->SINGLE - initial startup when no partner detected
13484 * HA->SINGLE - failover
13485 * returns ctl_ha_comp_status:
13486 * OK - component successfully started in requested state
13487 * FAILED - could not start in the requested state; failover may
13488 * be possible
13489 * ERROR - fatal error detected, no future startup possible
13490 */
13491static ctl_ha_comp_status
13492ctl_isc_start(struct ctl_ha_component *c, ctl_ha_state state)
13493{
13494 ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;
13495
13496 printf("%s: go\n", __func__);
13497
13498 // UNKNOWN->HA or UNKNOWN->SINGLE (bootstrap)
13499 if (c->state == CTL_HA_STATE_UNKNOWN) {
13500 ctl_is_single = 0;
13501 if (ctl_ha_msg_create(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
13502 != CTL_HA_STATUS_SUCCESS) {
13503 printf("ctl_isc_start: ctl_ha_msg_create failed.\n");
13504 ret = CTL_HA_COMP_STATUS_ERROR;
13505 }
13506 } else if (CTL_HA_STATE_IS_HA(c->state)
13507 && CTL_HA_STATE_IS_SINGLE(state)) {
13508 // HA->SINGLE transition
13509 ctl_failover();
13510 ctl_is_single = 1;
13511 } else {
13512 printf("ctl_isc_start: Invalid state transition %X->%X\n",
13513 c->state, state);
13514 ret = CTL_HA_COMP_STATUS_ERROR;
13515 }
13516 if (CTL_HA_STATE_IS_SINGLE(state))
13517 ctl_is_single = 1;
13518
13519 c->state = state;
13520 c->status = ret;
13521 return ret;
13522}
13523
13524/*
13525 * Quiesce component
13526 * The component must clear any error conditions (set status to OK) and
13527 * prepare itself for another Start call
13528 * returns ctl_ha_comp_status:
13529 * OK
13530 * ERROR
13531 */
13532static ctl_ha_comp_status
13533ctl_isc_quiesce(struct ctl_ha_component *c)
13534{
13535 int ret = CTL_HA_COMP_STATUS_OK;
13536
13537 ctl_pause_rtr = 1;
13538 c->status = ret;
13539 return ret;
13540}
13541
13542struct ctl_ha_component ctl_ha_component_ctlisc =
13543{
13544 .name = "CTL ISC",
13545 .state = CTL_HA_STATE_UNKNOWN,
13546 .init = ctl_isc_init,
13547 .start = ctl_isc_start,
13548 .quiesce = ctl_isc_quiesce
13549};
13550
13551/*
13552 * vim: ts=8
13553 */
13554