ctl.c revision 268556
/*-
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl.c#8 $
 */
/*
 * CAM Target Layer, a SCSI device emulation subsystem.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#define _CTL_C

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl.c 268556 2014-07-12 04:34:39Z mav $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/endian.h>
#include <sys/sysctl.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

struct ctl_softc *control_softc = NULL;

/*
 * Use the serial number and device ID provided by the backend, rather than
 * making up our own.
 */
#define CTL_USE_BACKEND_SN

/*
 * Size and alignment macros needed for Copan-specific HA hardware.  These
 * can go away when the HA code is re-written, and uses busdma for any
 * hardware.
 */
#define CTL_ALIGN_8B(target, source, type)				\
	if (((uint32_t)source & 0x7) != 0)				\
		target = (type)(source + (0x8 - ((uint32_t)source & 0x7)));\
	else								\
		target = (type)source;

#define CTL_SIZE_8B(target, size)					\
	if ((size & 0x7) != 0)						\
		target = size + (0x8 - (size & 0x7));			\
	else								\
		target = size;

#define CTL_ALIGN_8B_MARGIN	16
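/*
 * Usage sketch (hypothetical caller): both macros round up to the next
 * 8-byte boundary.  Given an unaligned buffer pointer "buf" and a length
 * "len", a caller might write:
 *
 *	uint8_t *abuf;
 *	uint32_t alen;
 *
 *	CTL_ALIGN_8B(abuf, buf, uint8_t *);	(0x1003 becomes 0x1008)
 *	CTL_SIZE_8B(alen, len);			(13 becomes 16)
 *
 * CTL_ALIGN_8B_MARGIN is the extra slack a caller allocates so that the
 * rounded-up address still fits inside the buffer.
 */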
/*
 * Template mode pages.
 */

/*
 * Note that these are default values only.  The actual values will be
 * filled in when the user does a mode sense.
 */
static struct copan_power_subpage power_page_default = {
	/*page_code*/ PWR_PAGE_CODE | SMPH_SPF,
	/*subpage*/ PWR_SUBPAGE_CODE,
	/*page_length*/ {(sizeof(struct copan_power_subpage) - 4) & 0xff00,
			 (sizeof(struct copan_power_subpage) - 4) & 0x00ff},
	/*page_version*/ PWR_VERSION,
	/* total_luns */ 26,
	/* max_active_luns*/ PWR_DFLT_MAX_LUNS,
	/*reserved*/ {0, 0, 0, 0, 0, 0, 0, 0, 0,
		      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		      0, 0, 0, 0, 0, 0}
};

static struct copan_power_subpage power_page_changeable = {
	/*page_code*/ PWR_PAGE_CODE | SMPH_SPF,
	/*subpage*/ PWR_SUBPAGE_CODE,
	/*page_length*/ {(sizeof(struct copan_power_subpage) - 4) & 0xff00,
			 (sizeof(struct copan_power_subpage) - 4) & 0x00ff},
	/*page_version*/ 0,
	/* total_luns */ 0,
	/* max_active_luns*/ 0,
	/*reserved*/ {0, 0, 0, 0, 0, 0, 0, 0, 0,
		      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		      0, 0, 0, 0, 0, 0}
};
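/*
 * Note on the convention used here: the "_changeable" variants of these
 * pages serve as masks for MODE SENSE/MODE SELECT processing -- a non-zero
 * field in a changeable page marks that field as modifiable by the
 * initiator, while the "_default" variants hold the power-on values.
 */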
static struct copan_aps_subpage aps_page_default = {
	APS_PAGE_CODE | SMPH_SPF, //page_code
	APS_SUBPAGE_CODE, //subpage
	{(sizeof(struct copan_aps_subpage) - 4) & 0xff00,
	 (sizeof(struct copan_aps_subpage) - 4) & 0x00ff}, //page_length
	APS_VERSION, //page_version
	0, //lock_active
	{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0} //reserved
};

static struct copan_aps_subpage aps_page_changeable = {
	APS_PAGE_CODE | SMPH_SPF, //page_code
	APS_SUBPAGE_CODE, //subpage
	{(sizeof(struct copan_aps_subpage) - 4) & 0xff00,
	 (sizeof(struct copan_aps_subpage) - 4) & 0x00ff}, //page_length
	0, //page_version
	0, //lock_active
	{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0} //reserved
};

static struct copan_debugconf_subpage debugconf_page_default = {
	DBGCNF_PAGE_CODE | SMPH_SPF,	/* page_code */
	DBGCNF_SUBPAGE_CODE,		/* subpage */
	{(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
	 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */
	DBGCNF_VERSION,			/* page_version */
	{CTL_TIME_IO_DEFAULT_SECS>>8,
	 CTL_TIME_IO_DEFAULT_SECS>>0},	/* ctl_time_io_secs */
};

static struct copan_debugconf_subpage debugconf_page_changeable = {
	DBGCNF_PAGE_CODE | SMPH_SPF,	/* page_code */
	DBGCNF_SUBPAGE_CODE,		/* subpage */
	{(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
	 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */
	0,				/* page_version */
	{0xff,0xff},			/* ctl_time_io_secs */
};

static struct scsi_format_page format_page_default = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff,
			       CTL_DEFAULT_SECTORS_PER_TRACK & 0xff},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ SFP_HSEC,
	/*reserved*/ {0, 0, 0}
};

static struct scsi_format_page format_page_changeable = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {0, 0},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ 0,
	/*reserved*/ {0, 0, 0}
};

static struct scsi_rigid_disk_page rigid_disk_page_default = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ CTL_DEFAULT_HEADS,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ SRDP_RPL_DISABLED,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff,
			   CTL_DEFAULT_ROTATION_RATE & 0xff},
	/*reserved2*/ {0, 0}
};

static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ 0,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ 0,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {0, 0},
	/*reserved2*/ {0, 0}
};

static struct scsi_caching_page caching_page_default = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_DISC | SCP_WCE,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0xff, 0xff},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0xff, 0xff},
	/*max_pf_ceiling*/ {0xff, 0xff},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

static struct scsi_caching_page caching_page_changeable = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ 0,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0, 0},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0, 0},
	/*max_pf_ceiling*/ {0, 0},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

static struct scsi_control_page control_page_default = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/0,
	/*queue_flags*/0,
	/*eca_and_aen*/0,
	/*reserved*/0,
	/*aen_holdoff_period*/{0, 0}
};

static struct scsi_control_page control_page_changeable = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/SCP_DSENSE,
	/*queue_flags*/0,
	/*eca_and_aen*/0,
	/*reserved*/0,
	/*aen_holdoff_period*/{0, 0}
};


/*
 * XXX KDM move these into the softc.
 */
static int rcv_sync_msg;
static int persis_offset;
static uint8_t ctl_pause_rtr;
static int ctl_is_single = 1;
static int index_to_aps_page;

SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
static int worker_threads = -1;
TUNABLE_INT("kern.cam.ctl.worker_threads", &worker_threads);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
    &worker_threads, 1, "Number of worker threads");
static int verbose = 0;
TUNABLE_INT("kern.cam.ctl.verbose", &verbose);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, verbose, CTLFLAG_RWTUN,
    &verbose, 0, "Show SCSI errors returned to initiator");
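/*
 * Example (hypothetical /boot/loader.conf settings for the tunables
 * declared above):
 *
 *	kern.cam.ctl.worker_threads=4
 *	kern.cam.ctl.verbose=1
 *
 * When worker_threads is left at -1, ctl_init() below picks
 * max(1, mp_ncpus / 4), clamped to CTL_MAX_THREADS.
 */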
/*
 * Serial number (0x80), device id (0x83), supported pages (0x00),
 * Block limits (0xB0) and Logical Block Provisioning (0xB2)
 */
#define SCSI_EVPD_NUM_SUPPORTED_PAGES	5

static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event,
				  int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
static int ctl_init(void);
void ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static void ctl_ioctl_online(void *arg);
static void ctl_ioctl_offline(void *arg);
static int ctl_ioctl_targ_enable(void *arg, struct ctl_id targ_id);
static int ctl_ioctl_targ_disable(void *arg, struct ctl_id targ_id);
static int ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id);
static int ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id);
static int ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio);
static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static int ctl_ioctl_submit_wait(union ctl_io *io);
static void ctl_ioctl_datamove(union ctl_io *io);
static void ctl_ioctl_done(union ctl_io *io);
static void ctl_ioctl_hard_startstop_callback(void *arg,
					      struct cfi_metatask *metatask);
static void ctl_ioctl_bbrread_callback(void *arg,struct cfi_metatask *metatask);
static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
			      struct ctl_ooa *ooa_hdr,
			      struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
		     struct thread *td);
uint32_t ctl_get_resindex(struct ctl_nexus *nexus);
uint32_t ctl_port_idx(int port_num);
#ifdef unused
static union ctl_io *ctl_malloc_io(ctl_io_type io_type, uint32_t targ_port,
				   uint32_t targ_target, uint32_t targ_lun,
				   int can_wait);
static void ctl_kfree_io(union ctl_io *io);
#endif /* unused */
static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
			 struct ctl_be_lun *be_lun, struct ctl_id target_id);
static int ctl_free_lun(struct ctl_lun *lun);
static void ctl_create_lun(struct ctl_be_lun *be_lun);
/**
static void ctl_failover_change_pages(struct ctl_softc *softc,
				      struct ctl_scsiio *ctsio, int master);
**/

static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
			   uint64_t res_key, uint64_t sa_res_key,
			   uint8_t type, uint32_t residx,
			   struct ctl_scsiio *ctsio,
			   struct scsi_per_res_out *cdb,
			   struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
				  union ctl_ha_msg *msg);
static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
					 int alloc_len);
static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint32_t *len);
static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(union ctl_io *pending_io,
					 union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
				union ctl_io *starting_io);
static int ctl_check_blocked(struct ctl_lun *lun);
static int ctl_scsiio_lun_check(struct ctl_softc *ctl_softc,
				struct ctl_lun *lun,
				struct ctl_cmd_entry *entry,
				struct ctl_scsiio *ctsio);
//static int ctl_check_rtr(union ctl_io *pending_io, struct ctl_softc *softc);
static void ctl_failover(void);
static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
			       struct ctl_scsiio *ctsio);
static int ctl_scsiio(struct ctl_scsiio *ctsio);

static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
			    ctl_ua_type ua_type);
static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io,
			 ctl_ua_type ua_type);
static int ctl_abort_task(union ctl_io *io);
static void ctl_run_task(union ctl_io *io);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
static void ctl_done_timer_wakeup(void *arg);
#endif /* CTL_IO_DELAY */

static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_dm_write_cb(union ctl_io *io);
static void ctl_datamove_remote_write(union ctl_io *io);
static int ctl_datamove_remote_dm_read_cb(union ctl_io *io);
static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
				    ctl_ha_dt_cb callback);
static void ctl_datamove_remote_read(union ctl_io *io);
static void ctl_datamove_remote(union ctl_io *io);
static int ctl_process_done(union ctl_io *io);
static void ctl_lun_thread(void *arg);
static void ctl_work_thread(void *arg);
static void ctl_enqueue_incoming(union ctl_io *io);
static void ctl_enqueue_rtr(union ctl_io *io);
static void ctl_enqueue_done(union ctl_io *io);
static void ctl_enqueue_isc(union ctl_io *io);

/*
 * Load the serialization table.  This isn't very pretty, but is probably
 * the easiest way to do it.
 */
#include "ctl_ser_table.c"
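/*
 * The included file (see ctl_ser_table.c for the authoritative layout)
 * defines a two-dimensional table indexed by the serialization indexes of
 * a pending command and an older, still-outstanding command; each entry
 * yields the action to take (pass, block, extent check, error), which the
 * ordering code in ctl_check_for_blockage() consults.
 */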
/*
 * We only need to define open, close and ioctl routines for this driver.
 */
static struct cdevsw ctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	ctl_open,
	.d_close =	ctl_close,
	.d_ioctl =	ctl_ioctl,
	.d_name =	"ctl",
};


MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");
MALLOC_DEFINE(M_CTLIO, "ctlio", "Memory used for CTL requests");

static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t ctl_moduledata = {
	"ctl",
	ctl_module_event_handler,
	NULL
};

DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);

static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
			    union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.original_sc == NULL) {
		printf("%s: original_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.original_sc->scsiio;
	ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctsio->io_hdr.status = msg_info->hdr.status;
	ctsio->scsi_status = msg_info->scsi.scsi_status;
	ctsio->sense_len = msg_info->scsi.sense_len;
	ctsio->sense_residual = msg_info->scsi.sense_residual;
	ctsio->residual = msg_info->scsi.residual;
	memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
	       sizeof(ctsio->sense_data));
	memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
	       &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen));
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

static void
ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
				union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.serializing_sc == NULL) {
		printf("%s: serializing_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.serializing_sc->scsiio;
#if 0
	/*
	 * Attempt to catch the situation where an I/O has
	 * been freed, and we're using it again.
	 */
	if (ctsio->io_hdr.io_type == 0xff) {
		union ctl_io *tmp_io;
		tmp_io = (union ctl_io *)ctsio;
		printf("%s: %p use after free!\n", __func__,
		       ctsio);
		printf("%s: type %d msg %d cdb %x iptl: "
		       "%d:%d:%d:%d tag 0x%04x "
		       "flag %#x status %x\n",
			__func__,
			tmp_io->io_hdr.io_type,
			tmp_io->io_hdr.msg_type,
			tmp_io->scsiio.cdb[0],
			tmp_io->io_hdr.nexus.initid.id,
			tmp_io->io_hdr.nexus.targ_port,
			tmp_io->io_hdr.nexus.targ_target.id,
			tmp_io->io_hdr.nexus.targ_lun,
			(tmp_io->io_hdr.io_type ==
			CTL_IO_TASK) ?
			tmp_io->taskio.tag_num :
			tmp_io->scsiio.tag_num,
			tmp_io->io_hdr.flags,
			tmp_io->io_hdr.status);
	}
#endif
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctl_enqueue_isc((union ctl_io *)ctsio);
}
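/*
 * Summary of the two finish paths above: in XFER mode the remote side ran
 * the command, so the full completion state (status, sense data, residuals)
 * is copied back into the original I/O before it is requeued; in SER_ONLY
 * mode the command executed locally, and only the completion event itself
 * needs to be delivered to the serializing I/O.
 */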
/*
 * ISC (Inter Shelf Communication) event handler.  Events from the HA
 * subsystem come in here.
 */
static void
ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
{
	struct ctl_softc *ctl_softc;
	union ctl_io *io;
	struct ctl_prio *presio;
	ctl_ha_status isc_status;

	ctl_softc = control_softc;
	io = NULL;


#if 0
	printf("CTL: Isc Msg event %d\n", event);
#endif
	if (event == CTL_HA_EVT_MSG_RECV) {
		union ctl_ha_msg msg_info;

		isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
					     sizeof(msg_info), /*wait*/ 0);
#if 0
		printf("CTL: msg_type %d\n", msg_info.msg_type);
#endif
		if (isc_status != 0) {
			printf("Error receiving message, status = %d\n",
			       isc_status);
			return;
		}

		switch (msg_info.hdr.msg_type) {
		case CTL_MSG_SERIALIZE:
#if 0
			printf("Serialize\n");
#endif
			io = ctl_alloc_io((void *)ctl_softc->othersc_pool);
			if (io == NULL) {
				printf("ctl_isc_event_handler: can't allocate "
				       "ctl_io!\n");
				/* Bad Juju */
				/* Need to set busy and send msg back */
				msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
				msg_info.hdr.status = CTL_SCSI_ERROR;
				msg_info.scsi.scsi_status = SCSI_STATUS_BUSY;
				msg_info.scsi.sense_len = 0;
				if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
				    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS){
				}
				goto bailout;
			}
			ctl_zero_io(io);
			// populate ctsio from msg_info
			io->io_hdr.io_type = CTL_IO_SCSI;
			io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
			io->io_hdr.original_sc = msg_info.hdr.original_sc;
#if 0
			printf("pOrig %x\n", (int)msg_info.original_sc);
#endif
			io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
					    CTL_FLAG_IO_ACTIVE;
			/*
			 * If we're in serialization-only mode, we don't
			 * want to go through full done processing.  Thus
			 * the COPY flag.
			 *
			 * XXX KDM add another flag that is more specific.
			 */
			if (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)
				io->io_hdr.flags |= CTL_FLAG_INT_COPY;
			io->io_hdr.nexus = msg_info.hdr.nexus;
#if 0
			printf("targ %d, port %d, iid %d, lun %d\n",
			       io->io_hdr.nexus.targ_target.id,
			       io->io_hdr.nexus.targ_port,
			       io->io_hdr.nexus.initid.id,
			       io->io_hdr.nexus.targ_lun);
#endif
			io->scsiio.tag_num = msg_info.scsi.tag_num;
			io->scsiio.tag_type = msg_info.scsi.tag_type;
			memcpy(io->scsiio.cdb, msg_info.scsi.cdb,
			       CTL_MAX_CDBLEN);
			if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
				struct ctl_cmd_entry *entry;
				uint8_t opcode;

				opcode = io->scsiio.cdb[0];
				entry = &ctl_cmd_table[opcode];
				io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
				io->io_hdr.flags |=
					entry->flags & CTL_FLAG_DATA_MASK;
			}
			ctl_enqueue_isc(io);
			break;

		/* Performed on the Originating SC, XFER mode only */
		case CTL_MSG_DATAMOVE: {
			struct ctl_sg_entry *sgl;
			int i, j;

			io = msg_info.hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n", __func__);
				/* XXX KDM do something here */
				break;
			}
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			/*
			 * Keep track of this, we need to send it back over
			 * when the datamove is complete.
			 */
			io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;

			if (msg_info.dt.sg_sequence == 0) {
				/*
				 * XXX KDM we use the preallocated S/G list
				 * here, but we'll need to change this to
				 * dynamic allocation if we need larger S/G
				 * lists.
				 */
				if (msg_info.dt.kern_sg_entries >
				    sizeof(io->io_hdr.remote_sglist) /
				    sizeof(io->io_hdr.remote_sglist[0])) {
					printf("%s: number of S/G entries "
					    "needed %u > allocated num %zd\n",
					    __func__,
					    msg_info.dt.kern_sg_entries,
					    sizeof(io->io_hdr.remote_sglist)/
					    sizeof(io->io_hdr.remote_sglist[0]));

					/*
					 * XXX KDM send a message back to
					 * the other side to shut down the
					 * DMA.  The error will come back
					 * through via the normal channel.
					 */
					break;
				}
				sgl = io->io_hdr.remote_sglist;
				memset(sgl, 0,
				       sizeof(io->io_hdr.remote_sglist));

				io->scsiio.kern_data_ptr = (uint8_t *)sgl;

				io->scsiio.kern_sg_entries =
					msg_info.dt.kern_sg_entries;
				io->scsiio.rem_sg_entries =
					msg_info.dt.kern_sg_entries;
				io->scsiio.kern_data_len =
					msg_info.dt.kern_data_len;
				io->scsiio.kern_total_len =
					msg_info.dt.kern_total_len;
				io->scsiio.kern_data_resid =
					msg_info.dt.kern_data_resid;
				io->scsiio.kern_rel_offset =
					msg_info.dt.kern_rel_offset;
				/*
				 * Clear out per-DMA flags.
				 */
				io->io_hdr.flags &= ~CTL_FLAG_RDMA_MASK;
				/*
				 * Add per-DMA flags that are set for this
				 * particular DMA request.
				 */
				io->io_hdr.flags |= msg_info.dt.flags &
						    CTL_FLAG_RDMA_MASK;
			} else
				sgl = (struct ctl_sg_entry *)
					io->scsiio.kern_data_ptr;

			for (i = msg_info.dt.sent_sg_entries, j = 0;
			     i < (msg_info.dt.sent_sg_entries +
			     msg_info.dt.cur_sg_entries); i++, j++) {
				sgl[i].addr = msg_info.dt.sg_list[j].addr;
				sgl[i].len = msg_info.dt.sg_list[j].len;

#if 0
				printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n",
				       __func__,
				       msg_info.dt.sg_list[j].addr,
				       msg_info.dt.sg_list[j].len,
				       sgl[i].addr, sgl[i].len, j, i);
#endif
			}
#if 0
			memcpy(&sgl[msg_info.dt.sent_sg_entries],
			       msg_info.dt.sg_list,
			       sizeof(*sgl) * msg_info.dt.cur_sg_entries);
#endif

			/*
			 * If this is the last piece of the I/O, we've got
			 * the full S/G list.  Queue processing in the thread.
			 * Otherwise wait for the next piece.
			 */
			if (msg_info.dt.sg_last != 0)
				ctl_enqueue_isc(io);
			break;
		}
		/* Performed on the Serializing (primary) SC, XFER mode only */
		case CTL_MSG_DATAMOVE_DONE: {
			if (msg_info.hdr.serializing_sc == NULL) {
				printf("%s: serializing_sc == NULL!\n",
				       __func__);
				/* XXX KDM now what? */
				break;
			}
			/*
			 * We grab the sense information here in case
			 * there was a failure, so we can return status
			 * back to the initiator.
			 */
			io = msg_info.hdr.serializing_sc;
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
			io->io_hdr.status = msg_info.hdr.status;
			io->scsiio.scsi_status = msg_info.scsi.scsi_status;
			io->scsiio.sense_len = msg_info.scsi.sense_len;
			io->scsiio.sense_residual =msg_info.scsi.sense_residual;
			io->io_hdr.port_status = msg_info.scsi.fetd_status;
			io->scsiio.residual = msg_info.scsi.residual;
			memcpy(&io->scsiio.sense_data,&msg_info.scsi.sense_data,
			       sizeof(io->scsiio.sense_data));
			ctl_enqueue_isc(io);
			break;
		}

		/* Performed on Originating SC, SER_ONLY mode */
		case CTL_MSG_R2R:
			io = msg_info.hdr.original_sc;
			if (io == NULL) {
				printf("%s: Major Bummer\n", __func__);
				return;
			} else {
#if 0
				printf("pOrig %x\n",(int) ctsio);
#endif
			}
			io->io_hdr.msg_type = CTL_MSG_R2R;
			io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;
			ctl_enqueue_isc(io);
			break;

		/*
		 * Performed on the Serializing (i.e. primary) SC in SER_ONLY
		 * mode.
		 * Performed on the Originating (i.e. secondary) SC in XFER
		 * mode.
		 */
		case CTL_MSG_FINISH_IO:
			if (ctl_softc->ha_mode == CTL_HA_MODE_XFER)
				ctl_isc_handler_finish_xfer(ctl_softc,
							    &msg_info);
			else
				ctl_isc_handler_finish_ser_only(ctl_softc,
								&msg_info);
			break;

		/* Performed on Originating SC */
		case CTL_MSG_BAD_JUJU:
			io = msg_info.hdr.original_sc;
			if (io == NULL) {
				printf("%s: Bad JUJU!, original_sc is NULL!\n",
				       __func__);
				break;
			}
			ctl_copy_sense_data(&msg_info, io);
			/*
			 * IO should have already been cleaned up on other
			 * SC so clear this flag so we won't send a message
			 * back to finish the IO there.
			 */
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;

			/* io = msg_info.hdr.serializing_sc; */
			io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
			ctl_enqueue_isc(io);
			break;

		/* Handle resets sent from the other side */
		case CTL_MSG_MANAGE_TASKS: {
			struct ctl_taskio *taskio;
			taskio = (struct ctl_taskio *)ctl_alloc_io(
				(void *)ctl_softc->othersc_pool);
			if (taskio == NULL) {
				printf("ctl_isc_event_handler: can't allocate "
				       "ctl_io!\n");
				/* Bad Juju */
				/* should I just call the proper reset func
				   here??? */
				goto bailout;
			}
			ctl_zero_io((union ctl_io *)taskio);
			taskio->io_hdr.io_type = CTL_IO_TASK;
			taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
			taskio->io_hdr.nexus = msg_info.hdr.nexus;
			taskio->task_action = msg_info.task.task_action;
			taskio->tag_num = msg_info.task.tag_num;
			taskio->tag_type = msg_info.task.tag_type;
#ifdef CTL_TIME_IO
			taskio->io_hdr.start_time = time_uptime;
			getbintime(&taskio->io_hdr.start_bt);
#if 0
			cs_prof_gettime(&taskio->io_hdr.start_ticks);
#endif
#endif /* CTL_TIME_IO */
			ctl_run_task((union ctl_io *)taskio);
			break;
		}
		/* Persistent Reserve action which needs attention */
		case CTL_MSG_PERS_ACTION:
			presio = (struct ctl_prio *)ctl_alloc_io(
				(void *)ctl_softc->othersc_pool);
			if (presio == NULL) {
				printf("ctl_isc_event_handler: can't allocate "
				       "ctl_io!\n");
				/* Bad Juju */
				/* Need to set busy and send msg back */
				goto bailout;
			}
			ctl_zero_io((union ctl_io *)presio);
			presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
			presio->pr_msg = msg_info.pr;
			ctl_enqueue_isc((union ctl_io *)presio);
			break;
		case CTL_MSG_SYNC_FE:
			rcv_sync_msg = 1;
			break;
		case CTL_MSG_APS_LOCK: {
			// It's quicker to execute this than to
			// queue it.
			struct ctl_lun *lun;
			struct ctl_page_index *page_index;
			struct copan_aps_subpage *current_sp;
			uint32_t targ_lun;

			targ_lun = msg_info.hdr.nexus.targ_mapped_lun;
			lun = ctl_softc->ctl_luns[targ_lun];
			mtx_lock(&lun->lun_lock);
			page_index = &lun->mode_pages.index[index_to_aps_page];
			current_sp = (struct copan_aps_subpage *)
				(page_index->page_data +
				(page_index->page_len * CTL_PAGE_CURRENT));

			current_sp->lock_active = msg_info.aps.lock_flag;
			mtx_unlock(&lun->lun_lock);
			break;
		}
		default:
			printf("How did I get here?\n");
		}
	} else if (event == CTL_HA_EVT_MSG_SENT) {
		if (param != CTL_HA_STATUS_SUCCESS) {
			printf("Bad status from ctl_ha_msg_send status %d\n",
			       param);
		}
		return;
	} else if (event == CTL_HA_EVT_DISCONNECT) {
		printf("CTL: Got a disconnect from Isc\n");
		return;
	} else {
		printf("ctl_isc_event_handler: Unknown event %d\n", event);
		return;
	}

bailout:
	return;
}

static void
ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
{
	struct scsi_sense_data *sense;

	sense = &dest->scsiio.sense_data;
	bcopy(&src->scsi.sense_data, sense, sizeof(*sense));
	dest->scsiio.scsi_status = src->scsi.scsi_status;
	dest->scsiio.sense_len = src->scsi.sense_len;
	dest->io_hdr.status = src->hdr.status;
}

static int
ctl_init(void)
{
	struct ctl_softc *softc;
	struct ctl_io_pool *internal_pool, *emergency_pool, *other_pool;
	struct ctl_frontend *fe;
	uint8_t sc_id = 0;
	int i, error, retval;
	//int isc_retval;

	retval = 0;
	ctl_pause_rtr = 0;
	rcv_sync_msg = 0;

	control_softc = malloc(sizeof(*control_softc), M_DEVBUF,
			       M_WAITOK | M_ZERO);
	softc = control_softc;

	softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600,
			      "cam/ctl");

	softc->dev->si_drv1 = softc;

	/*
	 * By default, return a "bad LUN" peripheral qualifier for unknown
	 * LUNs.  The user can override this default using the tunable or
	 * sysctl.  See the comment in ctl_inquiry_std() for more details.
	 */
	softc->inquiry_pq_no_lun = 1;
	TUNABLE_INT_FETCH("kern.cam.ctl.inquiry_pq_no_lun",
			  &softc->inquiry_pq_no_lun);
	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl",
		CTLFLAG_RD, 0, "CAM Target Layer");

	if (softc->sysctl_tree == NULL) {
		printf("%s: unable to allocate sysctl tree\n", __func__);
		destroy_dev(softc->dev);
		free(control_softc, M_DEVBUF);
		control_softc = NULL;
		return (ENOMEM);
	}

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		       "inquiry_pq_no_lun", CTLFLAG_RW,
		       &softc->inquiry_pq_no_lun, 0,
		       "Report no lun possible for invalid LUNs");

	mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
	mtx_init(&softc->pool_lock, "CTL pool mutex", NULL, MTX_DEF);
	softc->open_count = 0;

	/*
	 * Default to actually sending a SYNCHRONIZE CACHE command down to
	 * the drive.
	 */
	softc->flags = CTL_FLAG_REAL_SYNC;

	/*
	 * In Copan's HA scheme, the "master" and "slave" roles are
	 * figured out through the slot the controller is in.  Although it
	 * is an active/active system, someone has to be in charge.
	 */
#ifdef NEEDTOPORT
	scmicro_rw(SCMICRO_GET_SHELF_ID, &sc_id);
#endif

	if (sc_id == 0) {
		softc->flags |= CTL_FLAG_MASTER_SHELF;
		persis_offset = 0;
	} else
		persis_offset = CTL_MAX_INITIATORS;

	/*
	 * XXX KDM need to figure out where we want to get our target ID
	 * and WWID.  Is it different on each port?
	 */
	softc->target.id = 0;
	softc->target.wwid[0] = 0x12345678;
	softc->target.wwid[1] = 0x87654321;
	STAILQ_INIT(&softc->lun_list);
	STAILQ_INIT(&softc->pending_lun_queue);
	STAILQ_INIT(&softc->fe_list);
	STAILQ_INIT(&softc->be_list);
	STAILQ_INIT(&softc->io_pools);

	if (ctl_pool_create(softc, CTL_POOL_INTERNAL, CTL_POOL_ENTRIES_INTERNAL,
			    &internal_pool)!= 0){
		printf("ctl: can't allocate %d entry internal pool, "
		       "exiting\n", CTL_POOL_ENTRIES_INTERNAL);
		return (ENOMEM);
	}

	if (ctl_pool_create(softc, CTL_POOL_EMERGENCY,
			    CTL_POOL_ENTRIES_EMERGENCY, &emergency_pool) != 0) {
		printf("ctl: can't allocate %d entry emergency pool, "
		       "exiting\n", CTL_POOL_ENTRIES_EMERGENCY);
		ctl_pool_free(internal_pool);
		return (ENOMEM);
	}

	if (ctl_pool_create(softc, CTL_POOL_4OTHERSC, CTL_POOL_ENTRIES_OTHER_SC,
	                    &other_pool) != 0)
	{
		printf("ctl: can't allocate %d entry other SC pool, "
		       "exiting\n", CTL_POOL_ENTRIES_OTHER_SC);
		ctl_pool_free(internal_pool);
		ctl_pool_free(emergency_pool);
		return (ENOMEM);
	}

	softc->internal_pool = internal_pool;
	softc->emergency_pool = emergency_pool;
	softc->othersc_pool = other_pool;

	if (worker_threads <= 0)
		worker_threads = max(1, mp_ncpus / 4);
	if (worker_threads > CTL_MAX_THREADS)
		worker_threads = CTL_MAX_THREADS;

	for (i = 0; i < worker_threads; i++) {
		struct ctl_thread *thr = &softc->threads[i];

		mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF);
		thr->ctl_softc = softc;
		STAILQ_INIT(&thr->incoming_queue);
		STAILQ_INIT(&thr->rtr_queue);
		STAILQ_INIT(&thr->done_queue);
		STAILQ_INIT(&thr->isc_queue);

		error = kproc_kthread_add(ctl_work_thread, thr,
		    &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i);
		if (error != 0) {
			printf("error creating CTL work thread!\n");
			ctl_pool_free(internal_pool);
			ctl_pool_free(emergency_pool);
			ctl_pool_free(other_pool);
			return (error);
		}
	}
	error = kproc_kthread_add(ctl_lun_thread, softc,
	    &softc->ctl_proc, NULL, 0, 0, "ctl", "lun");
	if (error != 0) {
		printf("error creating CTL lun thread!\n");
		ctl_pool_free(internal_pool);
		ctl_pool_free(emergency_pool);
		ctl_pool_free(other_pool);
		return (error);
	}
	if (bootverbose)
		printf("ctl: CAM Target Layer loaded\n");

	/*
	 * Initialize the initiator and portname mappings
	 */
	memset(softc->wwpn_iid, 0, sizeof(softc->wwpn_iid));

	/*
	 * Initialize the ioctl front end.
	 */
	fe = &softc->ioctl_info.fe;
	sprintf(softc->ioctl_info.port_name, "CTL ioctl");
	fe->port_type = CTL_PORT_IOCTL;
	fe->num_requested_ctl_io = 100;
	fe->port_name = softc->ioctl_info.port_name;
	fe->port_online = ctl_ioctl_online;
	fe->port_offline = ctl_ioctl_offline;
	fe->onoff_arg = &softc->ioctl_info;
	fe->targ_enable = ctl_ioctl_targ_enable;
	fe->targ_disable = ctl_ioctl_targ_disable;
	fe->lun_enable = ctl_ioctl_lun_enable;
	fe->lun_disable = ctl_ioctl_lun_disable;
	fe->targ_lun_arg = &softc->ioctl_info;
	fe->fe_datamove = ctl_ioctl_datamove;
	fe->fe_done = ctl_ioctl_done;
	fe->max_targets = 15;
	fe->max_target_id = 15;

	if (ctl_frontend_register(&softc->ioctl_info.fe,
	                  (softc->flags & CTL_FLAG_MASTER_SHELF)) != 0) {
		printf("ctl: ioctl front end registration failed, will "
		       "continue anyway\n");
	}

#ifdef CTL_IO_DELAY
	if (sizeof(struct callout) > CTL_TIMER_BYTES) {
		printf("sizeof(struct callout) %zd > CTL_TIMER_BYTES %zd\n",
		       sizeof(struct callout), CTL_TIMER_BYTES);
		return (EINVAL);
	}
#endif /* CTL_IO_DELAY */

	return (0);
}

void
ctl_shutdown(void)
{
	struct ctl_softc *softc;
	struct ctl_lun *lun, *next_lun;
	struct ctl_io_pool *pool;

	softc = (struct ctl_softc *)control_softc;

	if (ctl_frontend_deregister(&softc->ioctl_info.fe) != 0)
		printf("ctl: ioctl front end deregistration failed\n");

	mtx_lock(&softc->ctl_lock);

	/*
	 * Free up each LUN.
	 */
	for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){
		next_lun = STAILQ_NEXT(lun, links);
		ctl_free_lun(lun);
	}

	mtx_unlock(&softc->ctl_lock);

	/*
	 * This will rip the rug out from under any FETDs or anyone else
	 * that has a pool allocated.  Since we increment our module
	 * refcount any time someone outside the main CTL module allocates
	 * a pool, we shouldn't have any problems here.  The user won't be
	 * able to unload the CTL module until client modules have
	 * successfully unloaded.
	 */
	while ((pool = STAILQ_FIRST(&softc->io_pools)) != NULL)
		ctl_pool_free(pool);

#if 0
	ctl_shutdown_thread(softc->work_thread);
	mtx_destroy(&softc->queue_lock);
#endif

	mtx_destroy(&softc->pool_lock);
	mtx_destroy(&softc->ctl_lock);

	destroy_dev(softc->dev);

	sysctl_ctx_free(&softc->sysctl_ctx);

	free(control_softc, M_DEVBUF);
	control_softc = NULL;

	if (bootverbose)
		printf("ctl: CAM Target Layer unloaded\n");
}

static int
ctl_module_event_handler(module_t mod, int what, void *arg)
{

	switch (what) {
	case MOD_LOAD:
		return (ctl_init());
	case MOD_UNLOAD:
		return (EBUSY);
	default:
		return (EOPNOTSUPP);
	}
}

/*
 * XXX KDM should we do some access checks here?  Bump a reference count to
 * prevent a CTL module from being unloaded while someone has it open?
 */
static int
ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}

static int
ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}

int
ctl_port_enable(ctl_port_type port_type)
{
	struct ctl_softc *softc;
	struct ctl_frontend *fe;

	if (ctl_is_single == 0) {
		union ctl_ha_msg msg_info;
		int isc_retval;

#if 0
		printf("%s: HA mode, synchronizing frontend enable\n",
		       __func__);
#endif
		msg_info.hdr.msg_type = CTL_MSG_SYNC_FE;
		if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		        sizeof(msg_info), 1 )) > CTL_HA_STATUS_SUCCESS) {
			printf("Sync msg send error retval %d\n", isc_retval);
		}
		if (!rcv_sync_msg) {
			isc_retval=ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
			        sizeof(msg_info), 1);
		}
#if 0
		printf("CTL:Frontend Enable\n");
	} else {
		printf("%s: single mode, skipping frontend synchronization\n",
			__func__);
#endif
	}

	softc = control_softc;

	STAILQ_FOREACH(fe, &softc->fe_list, links) {
		if (port_type & fe->port_type)
		{
#if 0
			printf("port %d\n", fe->targ_port);
#endif
			ctl_frontend_online(fe);
		}
	}

	return (0);
}

int
ctl_port_disable(ctl_port_type port_type)
{
	struct ctl_softc *softc;
	struct ctl_frontend *fe;

	softc = control_softc;

	STAILQ_FOREACH(fe, &softc->fe_list, links) {
		if (port_type & fe->port_type)
			ctl_frontend_offline(fe);
	}

	return (0);
}
/*
 * Returns 0 for success, 1 for failure.
 * Currently the only failure mode is if there aren't enough entries
 * allocated.  So, in case of a failure, look at num_entries_dropped,
 * reallocate and try again.
 */
int
ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced,
	      int *num_entries_filled, int *num_entries_dropped,
	      ctl_port_type port_type, int no_virtual)
{
	struct ctl_softc *softc;
	struct ctl_frontend *fe;
	int entries_dropped, entries_filled;
	int retval;
	int i;

	softc = control_softc;

	retval = 0;
	entries_filled = 0;
	entries_dropped = 0;

	i = 0;
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(fe, &softc->fe_list, links) {
		struct ctl_port_entry *entry;

		if ((fe->port_type & port_type) == 0)
			continue;

		if ((no_virtual != 0)
		 && (fe->virtual_port != 0))
			continue;

		if (entries_filled >= num_entries_alloced) {
			entries_dropped++;
			continue;
		}
		entry = &entries[i];

		entry->port_type = fe->port_type;
		strlcpy(entry->port_name, fe->port_name,
			sizeof(entry->port_name));
		entry->physical_port = fe->physical_port;
		entry->virtual_port = fe->virtual_port;
		entry->wwnn = fe->wwnn;
		entry->wwpn = fe->wwpn;

		i++;
		entries_filled++;
	}

	mtx_unlock(&softc->ctl_lock);

	if (entries_dropped > 0)
		retval = 1;

	*num_entries_dropped = entries_dropped;
	*num_entries_filled = entries_filled;

	return (retval);
}
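/*
 * Usage sketch (hypothetical caller) of the retry-on-drop protocol
 * described above -- the names "alloc", "filled" and "dropped" are
 * illustrative, and CTL_PORT_ALL is assumed to be the catch-all port
 * type from the CTL headers:
 *
 *	alloc = 8;
 *	entries = malloc(alloc * sizeof(*entries), M_TEMP, M_WAITOK);
 *	while (ctl_port_list(entries, alloc, &filled, &dropped,
 *	    CTL_PORT_ALL, 0) != 0) {
 *		alloc += dropped;
 *		entries = realloc(entries, alloc * sizeof(*entries),
 *		    M_TEMP, M_WAITOK);
 *	}
 *
 * A non-zero return only means the caller's array was too small by
 * "dropped" entries; it is not a hard error.
 */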
static void
ctl_ioctl_online(void *arg)
{
	struct ctl_ioctl_info *ioctl_info;

	ioctl_info = (struct ctl_ioctl_info *)arg;

	ioctl_info->flags |= CTL_IOCTL_FLAG_ENABLED;
}

static void
ctl_ioctl_offline(void *arg)
{
	struct ctl_ioctl_info *ioctl_info;

	ioctl_info = (struct ctl_ioctl_info *)arg;

	ioctl_info->flags &= ~CTL_IOCTL_FLAG_ENABLED;
}

/*
 * Remove an initiator by port number and initiator ID.
 * Returns 0 for success, 1 for failure.
 */
int
ctl_remove_initiator(int32_t targ_port, uint32_t iid)
{
	struct ctl_softc *softc;

	softc = control_softc;

	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);

	if ((targ_port < 0)
	 || (targ_port > CTL_MAX_PORTS)) {
		printf("%s: invalid port number %d\n", __func__, targ_port);
		return (1);
	}
	if (iid > CTL_MAX_INIT_PER_PORT) {
		printf("%s: initiator ID %u > maximum %u!\n",
		       __func__, iid, CTL_MAX_INIT_PER_PORT);
		return (1);
	}

	mtx_lock(&softc->ctl_lock);

	softc->wwpn_iid[targ_port][iid].in_use = 0;

	mtx_unlock(&softc->ctl_lock);

	return (0);
}

/*
 * Add an initiator to the initiator map.
 * Returns 0 for success, 1 for failure.
 */
int
ctl_add_initiator(uint64_t wwpn, int32_t targ_port, uint32_t iid)
{
	struct ctl_softc *softc;
	int retval;

	softc = control_softc;

	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);

	retval = 0;

	if ((targ_port < 0)
	 || (targ_port > CTL_MAX_PORTS)) {
		printf("%s: invalid port number %d\n", __func__, targ_port);
		return (1);
	}
	if (iid > CTL_MAX_INIT_PER_PORT) {
		printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n",
		       __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT);
		return (1);
	}

	mtx_lock(&softc->ctl_lock);

	if (softc->wwpn_iid[targ_port][iid].in_use != 0) {
		/*
		 * We don't treat this as an error.
		 */
		if (softc->wwpn_iid[targ_port][iid].wwpn == wwpn) {
			printf("%s: port %d iid %u WWPN %#jx arrived again?\n",
			       __func__, targ_port, iid, (uintmax_t)wwpn);
			goto bailout;
		}

		/*
		 * This is an error, but what do we do about it?  The
		 * driver is telling us we have a new WWPN for this
		 * initiator ID, so we pretty much need to use it.
		 */
		printf("%s: port %d iid %u WWPN %#jx arrived, WWPN %#jx is "
		       "still at that address\n", __func__, targ_port, iid,
		       (uintmax_t)wwpn,
		       (uintmax_t)softc->wwpn_iid[targ_port][iid].wwpn);

		/*
		 * XXX KDM clear have_ca and ua_pending on each LUN for
		 * this initiator.
		 */
	}
	softc->wwpn_iid[targ_port][iid].in_use = 1;
	softc->wwpn_iid[targ_port][iid].iid = iid;
	softc->wwpn_iid[targ_port][iid].wwpn = wwpn;
	softc->wwpn_iid[targ_port][iid].port = targ_port;

bailout:

	mtx_unlock(&softc->ctl_lock);

	return (retval);
}
/*
 * XXX KDM should we pretend to do something in the target/lun
 * enable/disable functions?
 */
static int
ctl_ioctl_targ_enable(void *arg, struct ctl_id targ_id)
{
	return (0);
}

static int
ctl_ioctl_targ_disable(void *arg, struct ctl_id targ_id)
{
	return (0);
}

static int
ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id)
{
	return (0);
}

static int
ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id)
{
	return (0);
}

/*
 * Data movement routine for the CTL ioctl frontend port.
 */
static int
ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
{
	struct ctl_sg_entry *ext_sglist, *kern_sglist;
	struct ctl_sg_entry ext_entry, kern_entry;
	int ext_sglen, ext_sg_entries, kern_sg_entries;
	int ext_sg_start, ext_offset;
	int len_to_copy, len_copied;
	int kern_watermark, ext_watermark;
	int ext_sglist_malloced;
	int i, j;

	ext_sglist_malloced = 0;
	ext_sg_start = 0;
	ext_offset = 0;

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n"));

	/*
	 * If this flag is set, fake the data transfer.
	 */
	if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) {
		ctsio->ext_data_filled = ctsio->ext_data_len;
		goto bailout;
	}

	/*
	 * To simplify things here, if we have a single buffer, stick it in
	 * a S/G entry and just make it a single entry S/G list.
	 */
	if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) {
		int len_seen;

		ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);

		ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL,
							   M_WAITOK);
		ext_sglist_malloced = 1;
		if (copyin(ctsio->ext_data_ptr, ext_sglist,
			   ext_sglen) != 0) {
			ctl_set_internal_failure(ctsio,
						 /*sks_valid*/ 0,
						 /*retry_count*/ 0);
			goto bailout;
		}
		ext_sg_entries = ctsio->ext_sg_entries;
		len_seen = 0;
		for (i = 0; i < ext_sg_entries; i++) {
			if ((len_seen + ext_sglist[i].len) >=
			     ctsio->ext_data_filled) {
				ext_sg_start = i;
				ext_offset = ctsio->ext_data_filled - len_seen;
				break;
			}
			len_seen += ext_sglist[i].len;
		}
	} else {
		ext_sglist = &ext_entry;
		ext_sglist->addr = ctsio->ext_data_ptr;
		ext_sglist->len = ctsio->ext_data_len;
		ext_sg_entries = 1;
		ext_sg_start = 0;
		ext_offset = ctsio->ext_data_filled;
	}

	if (ctsio->kern_sg_entries > 0) {
		kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
		kern_sg_entries = ctsio->kern_sg_entries;
	} else {
		kern_sglist = &kern_entry;
		kern_sglist->addr = ctsio->kern_data_ptr;
		kern_sglist->len = ctsio->kern_data_len;
		kern_sg_entries = 1;
	}


	kern_watermark = 0;
	ext_watermark = ext_offset;
	len_copied = 0;
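	/*
	 * The loop below walks the external (user) and kernel S/G lists in
	 * lockstep: the two "watermark" counters track how far into the
	 * current entry on each side we are, each pass copies
	 * min(remaining-ext, remaining-kern) bytes, and whichever entry is
	 * exhausted advances to that list's next entry.
	 */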
	for (i = ext_sg_start, j = 0;
	     i < ext_sg_entries && j < kern_sg_entries;) {
		uint8_t *ext_ptr, *kern_ptr;

		len_to_copy = ctl_min(ext_sglist[i].len - ext_watermark,
				      kern_sglist[j].len - kern_watermark);

		ext_ptr = (uint8_t *)ext_sglist[i].addr;
		ext_ptr = ext_ptr + ext_watermark;
		if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
			/*
			 * XXX KDM fix this!
			 */
			panic("need to implement bus address support");
#if 0
			kern_ptr = bus_to_virt(kern_sglist[j].addr);
#endif
		} else
			kern_ptr = (uint8_t *)kern_sglist[j].addr;
		kern_ptr = kern_ptr + kern_watermark;

		kern_watermark += len_to_copy;
		ext_watermark += len_to_copy;

		if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		     CTL_FLAG_DATA_IN) {
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
					 "bytes to user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
					 "to %p\n", kern_ptr, ext_ptr));
			if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) {
				ctl_set_internal_failure(ctsio,
							 /*sks_valid*/ 0,
							 /*retry_count*/ 0);
				goto bailout;
			}
		} else {
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
					 "bytes from user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
					 "to %p\n", ext_ptr, kern_ptr));
			if (copyin(ext_ptr, kern_ptr, len_to_copy)!= 0){
				ctl_set_internal_failure(ctsio,
							 /*sks_valid*/ 0,
							 /*retry_count*/0);
				goto bailout;
			}
		}

		len_copied += len_to_copy;

		if (ext_sglist[i].len == ext_watermark) {
			i++;
			ext_watermark = 0;
		}

		if (kern_sglist[j].len == kern_watermark) {
			j++;
			kern_watermark = 0;
		}
	}

	ctsio->ext_data_filled += len_copied;

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, "
			 "kern_sg_entries: %d\n", ext_sg_entries,
			 kern_sg_entries));
	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, "
			 "kern_data_len = %d\n", ctsio->ext_data_len,
			 ctsio->kern_data_len));


	/* XXX KDM set residual?? */
bailout:

	if (ext_sglist_malloced != 0)
		free(ext_sglist, M_CTL);

	return (CTL_RETVAL_COMPLETE);
}

/*
 * Serialize a command that went down the "wrong" side, and so was sent to
 * this controller for execution.  The logic is a little different than the
 * standard case in ctl_scsiio_precheck().  Errors in this case need to get
 * sent back to the other side, but in the success case, we execute the
 * command on this side (XFER mode) or tell the other side to execute it
 * (SER_ONLY mode).
 */
static int
ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *ctl_softc;
	union ctl_ha_msg msg_info;
	struct ctl_lun *lun;
	int retval = 0;
	uint32_t targ_lun;

	ctl_softc = control_softc;

	targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
	lun = ctl_softc->ctl_luns[targ_lun];
	if (lun == NULL) {
		/*
		 * Why isn't LUN defined?  The other side wouldn't
		 * send a cmd if the LUN is undefined.
		 */
		printf("%s: Bad JUJU!, LUN is NULL!\n", __func__);

		/* "Logical unit not supported" */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
				   /*asc*/ 0x25,
				   /*ascq*/ 0x00,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
		}
		return (1);
	}

	mtx_lock(&lun->lun_lock);
	TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);

	switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
		(union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq,
		 ooa_links))) {
	case CTL_ACTION_BLOCK:
		ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
		TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
				  blocked_links);
		break;
	case CTL_ACTION_PASS:
	case CTL_ACTION_SKIP:
		if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
			ctl_enqueue_rtr((union ctl_io *)ctsio);
		} else {

			/* send msg back to other side */
			msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
			msg_info.hdr.serializing_sc = (union ctl_io *)ctsio;
			msg_info.hdr.msg_type = CTL_MSG_R2R;
#if 0
			printf("2. pOrig %x\n", (int)msg_info.hdr.original_sc);
#endif
			if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
			    sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
			}
		}
		break;
	case CTL_ACTION_OVERLAP:
		/* OVERLAPPED COMMANDS ATTEMPTED */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
				   /*asc*/ 0x4E,
				   /*ascq*/ 0x00,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
#if 0
		printf("BAD JUJU:Major Bummer Overlap\n");
#endif
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		retval = 1;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
		}
		break;
	case CTL_ACTION_OVERLAP_TAG:
		/* TAGGED OVERLAPPED COMMANDS (NN = QUEUE TAG) */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
				   /*asc*/ 0x4D,
				   /*ascq*/ ctsio->tag_num & 0xff,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
#if 0
		printf("BAD JUJU:Major Bummer Overlap Tag\n");
#endif
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		retval = 1;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
		}
		break;
	case CTL_ACTION_ERROR:
	default:
		/* "Internal target failure" */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
				   /*asc*/ 0x44,
				   /*ascq*/ 0x00,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
#if 0
		printf("BAD JUJU:Major Bummer HW Error\n");
#endif
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		retval = 1;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
		}
		break;
	}
	mtx_unlock(&lun->lun_lock);
	return (retval);
}

static int
ctl_ioctl_submit_wait(union ctl_io *io)
{
	struct ctl_fe_ioctl_params params;
	ctl_fe_ioctl_state last_state;
	int done, retval;

	retval = 0;

	bzero(&params, sizeof(params));

	mtx_init(&params.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF);
	cv_init(&params.sem, "ctlioccv");
	params.state = CTL_IOCTL_INPROG;
	last_state = params.state;

	io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = &params;

	CTL_DEBUG_PRINT(("ctl_ioctl_submit_wait\n"));

	/* This shouldn't happen */
	if ((retval = ctl_queue(io)) != CTL_RETVAL_COMPLETE)
		return (retval);

	done = 0;

	do {
		mtx_lock(&params.ioctl_mtx);
		/*
		 * Check the state here, and don't sleep if the state has
		 * already changed (i.e. wakeup has already occurred, but we
		 * weren't waiting yet).
		 */
		if (params.state == last_state) {
			/* XXX KDM cv_wait_sig instead? */
			cv_wait(&params.sem, &params.ioctl_mtx);
		}
		last_state = params.state;

		switch (params.state) {
		case CTL_IOCTL_INPROG:
			/* Why did we wake up? */
			/* XXX KDM error here? */
			mtx_unlock(&params.ioctl_mtx);
			break;
		case CTL_IOCTL_DATAMOVE:
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n"));

			/*
			 * change last_state back to INPROG to avoid
			 * deadlock on subsequent data moves.
			 */
			params.state = last_state = CTL_IOCTL_INPROG;

			mtx_unlock(&params.ioctl_mtx);
			ctl_ioctl_do_datamove(&io->scsiio);
			/*
			 * Note that in some cases, most notably writes,
			 * this will queue the I/O and call us back later.
			 * In other cases, generally reads, this routine
			 * will immediately call back and wake us up,
			 * probably using our own context.
			 */
			io->scsiio.be_move_done(io);
			break;
		case CTL_IOCTL_DONE:
			mtx_unlock(&params.ioctl_mtx);
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n"));
			done = 1;
			break;
		default:
			mtx_unlock(&params.ioctl_mtx);
			/* XXX KDM error here? */
			break;
		}
	} while (done == 0);

	mtx_destroy(&params.ioctl_mtx);
	cv_destroy(&params.sem);

	return (CTL_RETVAL_COMPLETE);
}
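/*
 * The two callbacks below are the other half of the handshake driven by
 * ctl_ioctl_submit_wait() above: each one simply records the new state
 * under the shared mutex and broadcasts the condition variable so that
 * the waiting ioctl thread re-examines params->state.
 */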
*/ 1886 break; 1887 } 1888 } while (done == 0); 1889 1890 mtx_destroy(&params.ioctl_mtx); 1891 cv_destroy(&params.sem); 1892 1893 return (CTL_RETVAL_COMPLETE); 1894} 1895 1896static void 1897ctl_ioctl_datamove(union ctl_io *io) 1898{ 1899 struct ctl_fe_ioctl_params *params; 1900 1901 params = (struct ctl_fe_ioctl_params *) 1902 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; 1903 1904 mtx_lock(&params->ioctl_mtx); 1905 params->state = CTL_IOCTL_DATAMOVE; 1906 cv_broadcast(&params->sem); 1907 mtx_unlock(&params->ioctl_mtx); 1908} 1909 1910static void 1911ctl_ioctl_done(union ctl_io *io) 1912{ 1913 struct ctl_fe_ioctl_params *params; 1914 1915 params = (struct ctl_fe_ioctl_params *) 1916 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; 1917 1918 mtx_lock(&params->ioctl_mtx); 1919 params->state = CTL_IOCTL_DONE; 1920 cv_broadcast(&params->sem); 1921 mtx_unlock(&params->ioctl_mtx); 1922} 1923 1924static void 1925ctl_ioctl_hard_startstop_callback(void *arg, struct cfi_metatask *metatask) 1926{ 1927 struct ctl_fe_ioctl_startstop_info *sd_info; 1928 1929 sd_info = (struct ctl_fe_ioctl_startstop_info *)arg; 1930 1931 sd_info->hs_info.status = metatask->status; 1932 sd_info->hs_info.total_luns = metatask->taskinfo.startstop.total_luns; 1933 sd_info->hs_info.luns_complete = 1934 metatask->taskinfo.startstop.luns_complete; 1935 sd_info->hs_info.luns_failed = metatask->taskinfo.startstop.luns_failed; 1936 1937 cv_broadcast(&sd_info->sem); 1938} 1939 1940static void 1941ctl_ioctl_bbrread_callback(void *arg, struct cfi_metatask *metatask) 1942{ 1943 struct ctl_fe_ioctl_bbrread_info *fe_bbr_info; 1944 1945 fe_bbr_info = (struct ctl_fe_ioctl_bbrread_info *)arg; 1946 1947 mtx_lock(fe_bbr_info->lock); 1948 fe_bbr_info->bbr_info->status = metatask->status; 1949 fe_bbr_info->bbr_info->bbr_status = metatask->taskinfo.bbrread.status; 1950 fe_bbr_info->wakeup_done = 1; 1951 mtx_unlock(fe_bbr_info->lock); 1952 1953 cv_broadcast(&fe_bbr_info->sem); 1954} 1955 1956/* 1957 * Returns 0 for success, errno for failure. 1958 */ 1959static int 1960ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 1961 struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries) 1962{ 1963 union ctl_io *io; 1964 int retval; 1965 1966 retval = 0; 1967 1968 mtx_lock(&lun->lun_lock); 1969 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL); 1970 (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 1971 ooa_links)) { 1972 struct ctl_ooa_entry *entry; 1973 1974 /* 1975 * If we've got more than we can fit, just count the 1976 * remaining entries. 
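 * The full count still accumulates in *cur_fill_num, so the caller can
 * tell userland how many entries were dropped.  A userland consumer
 * would then typically retry with a larger buffer, along these lines
 * (an illustrative sketch, not code from this driver):
 *
 *	while (ioctl(fd, CTL_GET_OOA, &ooa) == 0 &&
 *	       ooa.status == CTL_OOA_NEED_MORE_SPACE) {
 *		ooa.alloc_num += ooa.dropped_num;
 *		ooa.alloc_len = ooa.alloc_num * sizeof(struct ctl_ooa_entry);
 *		ooa.entries = realloc(ooa.entries, ooa.alloc_len);
 *	}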
1977 */ 1978 if (*cur_fill_num >= ooa_hdr->alloc_num) 1979 continue; 1980 1981 entry = &kern_entries[*cur_fill_num]; 1982 1983 entry->tag_num = io->scsiio.tag_num; 1984 entry->lun_num = lun->lun; 1985#ifdef CTL_TIME_IO 1986 entry->start_bt = io->io_hdr.start_bt; 1987#endif 1988 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); 1989 entry->cdb_len = io->scsiio.cdb_len; 1990 if (io->io_hdr.flags & CTL_FLAG_BLOCKED) 1991 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; 1992 1993 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) 1994 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; 1995 1996 if (io->io_hdr.flags & CTL_FLAG_ABORT) 1997 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; 1998 1999 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) 2000 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; 2001 2002 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 2003 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; 2004 } 2005 mtx_unlock(&lun->lun_lock); 2006 2007 return (retval); 2008} 2009 2010static void * 2011ctl_copyin_alloc(void *user_addr, int len, char *error_str, 2012 size_t error_str_len) 2013{ 2014 void *kptr; 2015 2016 kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO); 2017 2018 if (copyin(user_addr, kptr, len) != 0) { 2019 snprintf(error_str, error_str_len, "Error copying %d bytes " 2020 "from user address %p to kernel address %p", len, 2021 user_addr, kptr); 2022 free(kptr, M_CTL); 2023 return (NULL); 2024 } 2025 2026 return (kptr); 2027} 2028 2029static void 2030ctl_free_args(int num_be_args, struct ctl_be_arg *be_args) 2031{ 2032 int i; 2033 2034 if (be_args == NULL) 2035 return; 2036 2037 for (i = 0; i < num_be_args; i++) { 2038 free(be_args[i].kname, M_CTL); 2039 free(be_args[i].kvalue, M_CTL); 2040 } 2041 2042 free(be_args, M_CTL); 2043} 2044 2045static struct ctl_be_arg * 2046ctl_copyin_args(int num_be_args, struct ctl_be_arg *be_args, 2047 char *error_str, size_t error_str_len) 2048{ 2049 struct ctl_be_arg *args; 2050 int i; 2051 2052 args = ctl_copyin_alloc(be_args, num_be_args * sizeof(*be_args), 2053 error_str, error_str_len); 2054 2055 if (args == NULL) 2056 goto bailout; 2057 2058 for (i = 0; i < num_be_args; i++) { 2059 args[i].kname = NULL; 2060 args[i].kvalue = NULL; 2061 } 2062 2063 for (i = 0; i < num_be_args; i++) { 2064 uint8_t *tmpptr; 2065 2066 args[i].kname = ctl_copyin_alloc(args[i].name, 2067 args[i].namelen, error_str, error_str_len); 2068 if (args[i].kname == NULL) 2069 goto bailout; 2070 2071 if (args[i].kname[args[i].namelen - 1] != '\0') { 2072 snprintf(error_str, error_str_len, "Argument %d " 2073 "name is not NUL-terminated", i); 2074 goto bailout; 2075 } 2076 2077 args[i].kvalue = NULL; 2078 2079 tmpptr = ctl_copyin_alloc(args[i].value, 2080 args[i].vallen, error_str, error_str_len); 2081 if (tmpptr == NULL) 2082 goto bailout; 2083 2084 args[i].kvalue = tmpptr; 2085 2086 if ((args[i].flags & CTL_BEARG_ASCII) 2087 && (tmpptr[args[i].vallen - 1] != '\0')) { 2088 snprintf(error_str, error_str_len, "Argument %d " 2089 "value is not NUL-terminated", i); 2090 goto bailout; 2091 } 2092 } 2093 2094 return (args); 2095bailout: 2096 2097 ctl_free_args(num_be_args, args); 2098 2099 return (NULL); 2100} 2101 2102/* 2103 * Escape characters that are illegal or not recommended in XML. 
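 * Only the three characters that can break XML parsing are rewritten;
 * everything else passes through unchanged:
 *
 *	'&' -> "&amp;"    '<' -> "&lt;"    '>' -> "&gt;"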
2104 */ 2105int 2106ctl_sbuf_printf_esc(struct sbuf *sb, char *str) 2107{ 2108 int retval; 2109 2110 retval = 0; 2111 2112 for (; *str; str++) { 2113 switch (*str) { 2114 case '&': 2115 retval = sbuf_printf(sb, "&amp;"); 2116 break; 2117 case '>': 2118 retval = sbuf_printf(sb, "&gt;"); 2119 break; 2120 case '<': 2121 retval = sbuf_printf(sb, "&lt;"); 2122 break; 2123 default: 2124 retval = sbuf_putc(sb, *str); 2125 break; 2126 } 2127 2128 if (retval != 0) 2129 break; 2130 2131 } 2132 2133 return (retval); 2134} 2135 2136static int 2137ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 2138 struct thread *td) 2139{ 2140 struct ctl_softc *softc; 2141 int retval; 2142 2143 softc = control_softc; 2144 2145 retval = 0; 2146 2147 switch (cmd) { 2148 case CTL_IO: { 2149 union ctl_io *io; 2150 void *pool_tmp; 2151 2152 /* 2153 * If we haven't been "enabled", don't allow any SCSI I/O 2154 * to this FETD. 2155 */ 2156 if ((softc->ioctl_info.flags & CTL_IOCTL_FLAG_ENABLED) == 0) { 2157 retval = EPERM; 2158 break; 2159 } 2160 2161 io = ctl_alloc_io(softc->ioctl_info.fe.ctl_pool_ref); 2162 if (io == NULL) { 2163 printf("ctl_ioctl: can't allocate ctl_io!\n"); 2164 retval = ENOSPC; 2165 break; 2166 } 2167 2168 /* 2169 * Need to save the pool reference so it doesn't get 2170 * spammed by the user's ctl_io. 2171 */ 2172 pool_tmp = io->io_hdr.pool; 2173 2174 memcpy(io, (void *)addr, sizeof(*io)); 2175 2176 io->io_hdr.pool = pool_tmp; 2177 /* 2178 * No status yet, so make sure the status is set properly. 2179 */ 2180 io->io_hdr.status = CTL_STATUS_NONE; 2181 2182 /* 2183 * The user sets the initiator ID, target and LUN IDs. 2184 */ 2185 io->io_hdr.nexus.targ_port = softc->ioctl_info.fe.targ_port; 2186 io->io_hdr.flags |= CTL_FLAG_USER_REQ; 2187 if ((io->io_hdr.io_type == CTL_IO_SCSI) 2188 && (io->scsiio.tag_type != CTL_TAG_UNTAGGED)) 2189 io->scsiio.tag_num = softc->ioctl_info.cur_tag_num++; 2190 2191 retval = ctl_ioctl_submit_wait(io); 2192 2193 if (retval != 0) { 2194 ctl_free_io(io); 2195 break; 2196 } 2197 2198 memcpy((void *)addr, io, sizeof(*io)); 2199 2200 /* return this to our pool */ 2201 ctl_free_io(io); 2202 2203 break; 2204 } 2205 case CTL_ENABLE_PORT: 2206 case CTL_DISABLE_PORT: 2207 case CTL_SET_PORT_WWNS: { 2208 struct ctl_frontend *fe; 2209 struct ctl_port_entry *entry; 2210 2211 entry = (struct ctl_port_entry *)addr; 2212 2213 mtx_lock(&softc->ctl_lock); 2214 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2215 int action, done; 2216 2217 action = 0; 2218 done = 0; 2219 2220 if ((entry->port_type == CTL_PORT_NONE) 2221 && (entry->targ_port == fe->targ_port)) { 2222 /* 2223 * If the user only wants to enable or 2224 * disable or set WWNs on a specific port, 2225 * do the operation and we're done. 2226 */ 2227 action = 1; 2228 done = 1; 2229 } else if (entry->port_type & fe->port_type) { 2230 /* 2231 * Compare the user's type mask with the 2232 * particular frontend type to see if we 2233 * have a match. 2234 */ 2235 action = 1; 2236 done = 0; 2237 2238 /* 2239 * Make sure the user isn't trying to set 2240 * WWNs on multiple ports at the same time. 2241 */ 2242 if (cmd == CTL_SET_PORT_WWNS) { 2243 printf("%s: Can't set WWNs on " 2244 "multiple ports\n", __func__); 2245 retval = EINVAL; 2246 break; 2247 } 2248 } 2249 if (action != 0) { 2250 /* 2251 * XXX KDM we have to drop the lock here, 2252 * because the online/offline operations 2253 * can potentially block. 
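 * (While the lock is dropped, nothing prevents the frontend list itself
 * from changing underneath us, either.)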
We need to 2254 * reference count the frontends so they 2255 * can't go away, 2256 */ 2257 mtx_unlock(&softc->ctl_lock); 2258 2259 if (cmd == CTL_ENABLE_PORT) { 2260 struct ctl_lun *lun; 2261 2262 STAILQ_FOREACH(lun, &softc->lun_list, 2263 links) { 2264 fe->lun_enable(fe->targ_lun_arg, 2265 lun->target, 2266 lun->lun); 2267 } 2268 2269 ctl_frontend_online(fe); 2270 } else if (cmd == CTL_DISABLE_PORT) { 2271 struct ctl_lun *lun; 2272 2273 ctl_frontend_offline(fe); 2274 2275 STAILQ_FOREACH(lun, &softc->lun_list, 2276 links) { 2277 fe->lun_disable( 2278 fe->targ_lun_arg, 2279 lun->target, 2280 lun->lun); 2281 } 2282 } 2283 2284 mtx_lock(&softc->ctl_lock); 2285 2286 if (cmd == CTL_SET_PORT_WWNS) 2287 ctl_frontend_set_wwns(fe, 2288 (entry->flags & CTL_PORT_WWNN_VALID) ? 2289 1 : 0, entry->wwnn, 2290 (entry->flags & CTL_PORT_WWPN_VALID) ? 2291 1 : 0, entry->wwpn); 2292 } 2293 if (done != 0) 2294 break; 2295 } 2296 mtx_unlock(&softc->ctl_lock); 2297 break; 2298 } 2299 case CTL_GET_PORT_LIST: { 2300 struct ctl_frontend *fe; 2301 struct ctl_port_list *list; 2302 int i; 2303 2304 list = (struct ctl_port_list *)addr; 2305 2306 if (list->alloc_len != (list->alloc_num * 2307 sizeof(struct ctl_port_entry))) { 2308 printf("%s: CTL_GET_PORT_LIST: alloc_len %u != " 2309 "alloc_num %u * sizeof(struct ctl_port_entry) " 2310 "%zu\n", __func__, list->alloc_len, 2311 list->alloc_num, sizeof(struct ctl_port_entry)); 2312 retval = EINVAL; 2313 break; 2314 } 2315 list->fill_len = 0; 2316 list->fill_num = 0; 2317 list->dropped_num = 0; 2318 i = 0; 2319 mtx_lock(&softc->ctl_lock); 2320 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2321 struct ctl_port_entry entry, *list_entry; 2322 2323 if (list->fill_num >= list->alloc_num) { 2324 list->dropped_num++; 2325 continue; 2326 } 2327 2328 entry.port_type = fe->port_type; 2329 strlcpy(entry.port_name, fe->port_name, 2330 sizeof(entry.port_name)); 2331 entry.targ_port = fe->targ_port; 2332 entry.physical_port = fe->physical_port; 2333 entry.virtual_port = fe->virtual_port; 2334 entry.wwnn = fe->wwnn; 2335 entry.wwpn = fe->wwpn; 2336 if (fe->status & CTL_PORT_STATUS_ONLINE) 2337 entry.online = 1; 2338 else 2339 entry.online = 0; 2340 2341 list_entry = &list->entries[i]; 2342 2343 retval = copyout(&entry, list_entry, sizeof(entry)); 2344 if (retval != 0) { 2345 printf("%s: CTL_GET_PORT_LIST: copyout " 2346 "returned %d\n", __func__, retval); 2347 break; 2348 } 2349 i++; 2350 list->fill_num++; 2351 list->fill_len += sizeof(entry); 2352 } 2353 mtx_unlock(&softc->ctl_lock); 2354 2355 /* 2356 * If this is non-zero, we had a copyout fault, so there's 2357 * probably no point in attempting to set the status inside 2358 * the structure. 2359 */ 2360 if (retval != 0) 2361 break; 2362 2363 if (list->dropped_num > 0) 2364 list->status = CTL_PORT_LIST_NEED_MORE_SPACE; 2365 else 2366 list->status = CTL_PORT_LIST_OK; 2367 break; 2368 } 2369 case CTL_DUMP_OOA: { 2370 struct ctl_lun *lun; 2371 union ctl_io *io; 2372 char printbuf[128]; 2373 struct sbuf sb; 2374 2375 mtx_lock(&softc->ctl_lock); 2376 printf("Dumping OOA queues:\n"); 2377 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2378 mtx_lock(&lun->lun_lock); 2379 for (io = (union ctl_io *)TAILQ_FIRST( 2380 &lun->ooa_queue); io != NULL; 2381 io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2382 ooa_links)) { 2383 sbuf_new(&sb, printbuf, sizeof(printbuf), 2384 SBUF_FIXEDLEN); 2385 sbuf_printf(&sb, "LUN %jd tag 0x%04x%s%s%s%s: ", 2386 (intmax_t)lun->lun, 2387 io->scsiio.tag_num, 2388 (io->io_hdr.flags & 2389 CTL_FLAG_BLOCKED) ? 
"" : " BLOCKED", 2390 (io->io_hdr.flags & 2391 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 2392 (io->io_hdr.flags & 2393 CTL_FLAG_ABORT) ? " ABORT" : "", 2394 (io->io_hdr.flags & 2395 CTL_FLAG_IS_WAS_ON_RTR) ? " RTR" : ""); 2396 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 2397 sbuf_finish(&sb); 2398 printf("%s\n", sbuf_data(&sb)); 2399 } 2400 mtx_unlock(&lun->lun_lock); 2401 } 2402 printf("OOA queues dump done\n"); 2403 mtx_unlock(&softc->ctl_lock); 2404 break; 2405 } 2406 case CTL_GET_OOA: { 2407 struct ctl_lun *lun; 2408 struct ctl_ooa *ooa_hdr; 2409 struct ctl_ooa_entry *entries; 2410 uint32_t cur_fill_num; 2411 2412 ooa_hdr = (struct ctl_ooa *)addr; 2413 2414 if ((ooa_hdr->alloc_len == 0) 2415 || (ooa_hdr->alloc_num == 0)) { 2416 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2417 "must be non-zero\n", __func__, 2418 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2419 retval = EINVAL; 2420 break; 2421 } 2422 2423 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2424 sizeof(struct ctl_ooa_entry))) { 2425 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2426 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2427 __func__, ooa_hdr->alloc_len, 2428 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2429 retval = EINVAL; 2430 break; 2431 } 2432 2433 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2434 if (entries == NULL) { 2435 printf("%s: could not allocate %d bytes for OOA " 2436 "dump\n", __func__, ooa_hdr->alloc_len); 2437 retval = ENOMEM; 2438 break; 2439 } 2440 2441 mtx_lock(&softc->ctl_lock); 2442 if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0) 2443 && ((ooa_hdr->lun_num > CTL_MAX_LUNS) 2444 || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) { 2445 mtx_unlock(&softc->ctl_lock); 2446 free(entries, M_CTL); 2447 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2448 __func__, (uintmax_t)ooa_hdr->lun_num); 2449 retval = EINVAL; 2450 break; 2451 } 2452 2453 cur_fill_num = 0; 2454 2455 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2456 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2457 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2458 ooa_hdr, entries); 2459 if (retval != 0) 2460 break; 2461 } 2462 if (retval != 0) { 2463 mtx_unlock(&softc->ctl_lock); 2464 free(entries, M_CTL); 2465 break; 2466 } 2467 } else { 2468 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2469 2470 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,ooa_hdr, 2471 entries); 2472 } 2473 mtx_unlock(&softc->ctl_lock); 2474 2475 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2476 ooa_hdr->fill_len = ooa_hdr->fill_num * 2477 sizeof(struct ctl_ooa_entry); 2478 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2479 if (retval != 0) { 2480 printf("%s: error copying out %d bytes for OOA dump\n", 2481 __func__, ooa_hdr->fill_len); 2482 } 2483 2484 getbintime(&ooa_hdr->cur_bt); 2485 2486 if (cur_fill_num > ooa_hdr->alloc_num) { 2487 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2488 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2489 } else { 2490 ooa_hdr->dropped_num = 0; 2491 ooa_hdr->status = CTL_OOA_OK; 2492 } 2493 2494 free(entries, M_CTL); 2495 break; 2496 } 2497 case CTL_CHECK_OOA: { 2498 union ctl_io *io; 2499 struct ctl_lun *lun; 2500 struct ctl_ooa_info *ooa_info; 2501 2502 2503 ooa_info = (struct ctl_ooa_info *)addr; 2504 2505 if (ooa_info->lun_id >= CTL_MAX_LUNS) { 2506 ooa_info->status = CTL_OOA_INVALID_LUN; 2507 break; 2508 } 2509 mtx_lock(&softc->ctl_lock); 2510 lun = softc->ctl_luns[ooa_info->lun_id]; 2511 if (lun == NULL) { 2512 mtx_unlock(&softc->ctl_lock); 2513 
ooa_info->status = CTL_OOA_INVALID_LUN; 2514 break; 2515 } 2516 mtx_lock(&lun->lun_lock); 2517 mtx_unlock(&softc->ctl_lock); 2518 ooa_info->num_entries = 0; 2519 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 2520 io != NULL; io = (union ctl_io *)TAILQ_NEXT( 2521 &io->io_hdr, ooa_links)) { 2522 ooa_info->num_entries++; 2523 } 2524 mtx_unlock(&lun->lun_lock); 2525 2526 ooa_info->status = CTL_OOA_SUCCESS; 2527 2528 break; 2529 } 2530 case CTL_HARD_START: 2531 case CTL_HARD_STOP: { 2532 struct ctl_fe_ioctl_startstop_info ss_info; 2533 struct cfi_metatask *metatask; 2534 struct mtx hs_mtx; 2535 2536 mtx_init(&hs_mtx, "HS Mutex", NULL, MTX_DEF); 2537 2538 cv_init(&ss_info.sem, "hard start/stop cv" ); 2539 2540 metatask = cfi_alloc_metatask(/*can_wait*/ 1); 2541 if (metatask == NULL) { 2542 retval = ENOMEM; 2543 mtx_destroy(&hs_mtx); 2544 break; 2545 } 2546 2547 if (cmd == CTL_HARD_START) 2548 metatask->tasktype = CFI_TASK_STARTUP; 2549 else 2550 metatask->tasktype = CFI_TASK_SHUTDOWN; 2551 2552 metatask->callback = ctl_ioctl_hard_startstop_callback; 2553 metatask->callback_arg = &ss_info; 2554 2555 cfi_action(metatask); 2556 2557 /* Wait for the callback */ 2558 mtx_lock(&hs_mtx); 2559 cv_wait_sig(&ss_info.sem, &hs_mtx); 2560 mtx_unlock(&hs_mtx); 2561 2562 /* 2563 * All information has been copied from the metatask by the 2564 * time cv_broadcast() is called, so we free the metatask here. 2565 */ 2566 cfi_free_metatask(metatask); 2567 2568 memcpy((void *)addr, &ss_info.hs_info, sizeof(ss_info.hs_info)); 2569 2570 mtx_destroy(&hs_mtx); 2571 break; 2572 } 2573 case CTL_BBRREAD: { 2574 struct ctl_bbrread_info *bbr_info; 2575 struct ctl_fe_ioctl_bbrread_info fe_bbr_info; 2576 struct mtx bbr_mtx; 2577 struct cfi_metatask *metatask; 2578 2579 bbr_info = (struct ctl_bbrread_info *)addr; 2580 2581 bzero(&fe_bbr_info, sizeof(fe_bbr_info)); 2582 2583 bzero(&bbr_mtx, sizeof(bbr_mtx)); 2584 mtx_init(&bbr_mtx, "BBR Mutex", NULL, MTX_DEF); 2585 2586 fe_bbr_info.bbr_info = bbr_info; 2587 fe_bbr_info.lock = &bbr_mtx; 2588 2589 cv_init(&fe_bbr_info.sem, "BBR read cv"); 2590 metatask = cfi_alloc_metatask(/*can_wait*/ 1); 2591 2592 if (metatask == NULL) { 2593 mtx_destroy(&bbr_mtx); 2594 cv_destroy(&fe_bbr_info.sem); 2595 retval = ENOMEM; 2596 break; 2597 } 2598 metatask->tasktype = CFI_TASK_BBRREAD; 2599 metatask->callback = ctl_ioctl_bbrread_callback; 2600 metatask->callback_arg = &fe_bbr_info; 2601 metatask->taskinfo.bbrread.lun_num = bbr_info->lun_num; 2602 metatask->taskinfo.bbrread.lba = bbr_info->lba; 2603 metatask->taskinfo.bbrread.len = bbr_info->len; 2604 2605 cfi_action(metatask); 2606 2607 mtx_lock(&bbr_mtx); 2608 while (fe_bbr_info.wakeup_done == 0) 2609 cv_wait_sig(&fe_bbr_info.sem, &bbr_mtx); 2610 mtx_unlock(&bbr_mtx); 2611 2612 bbr_info->status = metatask->status; 2613 bbr_info->bbr_status = metatask->taskinfo.bbrread.status; 2614 bbr_info->scsi_status = metatask->taskinfo.bbrread.scsi_status; 2615 memcpy(&bbr_info->sense_data, 2616 &metatask->taskinfo.bbrread.sense_data, 2617 ctl_min(sizeof(bbr_info->sense_data), 2618 sizeof(metatask->taskinfo.bbrread.sense_data))); 2619 2620 cfi_free_metatask(metatask); 2621 2622 mtx_destroy(&bbr_mtx); 2623 cv_destroy(&fe_bbr_info.sem); 2624 2625 break; 2626 } 2627 case CTL_DELAY_IO: { 2628 struct ctl_io_delay_info *delay_info; 2629#ifdef CTL_IO_DELAY 2630 struct ctl_lun *lun; 2631#endif /* CTL_IO_DELAY */ 2632 2633 delay_info = (struct ctl_io_delay_info *)addr; 2634 2635#ifdef CTL_IO_DELAY 2636 mtx_lock(&softc->ctl_lock); 2637 2638 if 
((delay_info->lun_id >= CTL_MAX_LUNS) 2639 || (softc->ctl_luns[delay_info->lun_id] == NULL)) { 2640 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2641 } else { 2642 lun = softc->ctl_luns[delay_info->lun_id]; 2643 mtx_lock(&lun->lun_lock); 2644 2645 delay_info->status = CTL_DELAY_STATUS_OK; 2646 2647 switch (delay_info->delay_type) { 2648 case CTL_DELAY_TYPE_CONT: 2649 break; 2650 case CTL_DELAY_TYPE_ONESHOT: 2651 break; 2652 default: 2653 delay_info->status = 2654 CTL_DELAY_STATUS_INVALID_TYPE; 2655 break; 2656 } 2657 2658 switch (delay_info->delay_loc) { 2659 case CTL_DELAY_LOC_DATAMOVE: 2660 lun->delay_info.datamove_type = 2661 delay_info->delay_type; 2662 lun->delay_info.datamove_delay = 2663 delay_info->delay_secs; 2664 break; 2665 case CTL_DELAY_LOC_DONE: 2666 lun->delay_info.done_type = 2667 delay_info->delay_type; 2668 lun->delay_info.done_delay = 2669 delay_info->delay_secs; 2670 break; 2671 default: 2672 delay_info->status = 2673 CTL_DELAY_STATUS_INVALID_LOC; 2674 break; 2675 } 2676 mtx_unlock(&lun->lun_lock); 2677 } 2678 2679 mtx_unlock(&softc->ctl_lock); 2680#else 2681 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2682#endif /* CTL_IO_DELAY */ 2683 break; 2684 } 2685 case CTL_REALSYNC_SET: { 2686 int *syncstate; 2687 2688 syncstate = (int *)addr; 2689 2690 mtx_lock(&softc->ctl_lock); 2691 switch (*syncstate) { 2692 case 0: 2693 softc->flags &= ~CTL_FLAG_REAL_SYNC; 2694 break; 2695 case 1: 2696 softc->flags |= CTL_FLAG_REAL_SYNC; 2697 break; 2698 default: 2699 retval = EINVAL; 2700 break; 2701 } 2702 mtx_unlock(&softc->ctl_lock); 2703 break; 2704 } 2705 case CTL_REALSYNC_GET: { 2706 int *syncstate; 2707 2708 syncstate = (int*)addr; 2709 2710 mtx_lock(&softc->ctl_lock); 2711 if (softc->flags & CTL_FLAG_REAL_SYNC) 2712 *syncstate = 1; 2713 else 2714 *syncstate = 0; 2715 mtx_unlock(&softc->ctl_lock); 2716 2717 break; 2718 } 2719 case CTL_SETSYNC: 2720 case CTL_GETSYNC: { 2721 struct ctl_sync_info *sync_info; 2722 struct ctl_lun *lun; 2723 2724 sync_info = (struct ctl_sync_info *)addr; 2725 2726 mtx_lock(&softc->ctl_lock); 2727 lun = softc->ctl_luns[sync_info->lun_id]; 2728 if (lun == NULL) { 2729 mtx_unlock(&softc->ctl_lock); 2730 sync_info->status = CTL_GS_SYNC_NO_LUN; break; 2731 } 2732 /* 2733 * Get or set the sync interval. We're not bounds checking 2734 * in the set case, hopefully the user won't do something 2735 * silly. 2736 */ 2737 mtx_lock(&lun->lun_lock); 2738 mtx_unlock(&softc->ctl_lock); 2739 if (cmd == CTL_GETSYNC) 2740 sync_info->sync_interval = lun->sync_interval; 2741 else 2742 lun->sync_interval = sync_info->sync_interval; 2743 mtx_unlock(&lun->lun_lock); 2744 2745 sync_info->status = CTL_GS_SYNC_OK; 2746 2747 break; 2748 } 2749 case CTL_GETSTATS: { 2750 struct ctl_stats *stats; 2751 struct ctl_lun *lun; 2752 int i; 2753 2754 stats = (struct ctl_stats *)addr; 2755 2756 if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) > 2757 stats->alloc_len) { 2758 stats->status = CTL_SS_NEED_MORE_SPACE; 2759 stats->num_luns = softc->num_luns; 2760 break; 2761 } 2762 /* 2763 * XXX KDM no locking here. If the LUN list changes, 2764 * things can blow up. 
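 * A safer variant (a sketch only; note that copyout() can fault and
 * sleep, so simply holding ctl_lock across the existing loop would be
 * no better) would snapshot the stats under the lock and copy them
 * out afterwards:
 *
 *	tmp = malloc(len, M_CTL, M_WAITOK);
 *	mtx_lock(&softc->ctl_lock);
 *	... gather each lun->stats into tmp ...
 *	mtx_unlock(&softc->ctl_lock);
 *	retval = copyout(tmp, stats->lun_stats, len);
 *	free(tmp, M_CTL);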
2765 */ 2766 for (i = 0, lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; 2767 i++, lun = STAILQ_NEXT(lun, links)) { 2768 retval = copyout(&lun->stats, &stats->lun_stats[i], 2769 sizeof(lun->stats)); 2770 if (retval != 0) 2771 break; 2772 } 2773 stats->num_luns = softc->num_luns; 2774 stats->fill_len = sizeof(struct ctl_lun_io_stats) * 2775 softc->num_luns; 2776 stats->status = CTL_SS_OK; 2777#ifdef CTL_TIME_IO 2778 stats->flags = CTL_STATS_FLAG_TIME_VALID; 2779#else 2780 stats->flags = CTL_STATS_FLAG_NONE; 2781#endif 2782 getnanouptime(&stats->timestamp); 2783 break; 2784 } 2785 case CTL_ERROR_INJECT: { 2786 struct ctl_error_desc *err_desc, *new_err_desc; 2787 struct ctl_lun *lun; 2788 2789 err_desc = (struct ctl_error_desc *)addr; 2790 2791 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2792 M_WAITOK | M_ZERO); 2793 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 2794 2795 mtx_lock(&softc->ctl_lock); 2796 lun = softc->ctl_luns[err_desc->lun_id]; 2797 if (lun == NULL) { 2798 mtx_unlock(&softc->ctl_lock); 2799 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 2800 __func__, (uintmax_t)err_desc->lun_id); 2801 retval = EINVAL; 2802 break; 2803 } 2804 mtx_lock(&lun->lun_lock); 2805 mtx_unlock(&softc->ctl_lock); 2806 2807 /* 2808 * We could do some checking here to verify the validity 2809 * of the request, but given the complexity of error 2810 * injection requests, the checking logic would be fairly 2811 * complex. 2812 * 2813 * For now, if the request is invalid, it just won't get 2814 * executed and might get deleted. 2815 */ 2816 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 2817 2818 /* 2819 * XXX KDM check to make sure the serial number is unique, 2820 * in case we somehow manage to wrap. That shouldn't 2821 * happen for a very long time, but it's the right thing to 2822 * do. 
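 * A minimal (hypothetical) check would scan the list before assigning,
 * something like:
 *
 *	STAILQ_FOREACH(desc, &lun->error_list, links)
 *		if (desc->serial == lun->error_serial)
 *			... pick a different serial ...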
2823 */ 2824 new_err_desc->serial = lun->error_serial; 2825 err_desc->serial = lun->error_serial; 2826 lun->error_serial++; 2827 2828 mtx_unlock(&lun->lun_lock); 2829 break; 2830 } 2831 case CTL_ERROR_INJECT_DELETE: { 2832 struct ctl_error_desc *delete_desc, *desc, *desc2; 2833 struct ctl_lun *lun; 2834 int delete_done; 2835 2836 delete_desc = (struct ctl_error_desc *)addr; 2837 delete_done = 0; 2838 2839 mtx_lock(&softc->ctl_lock); 2840 lun = softc->ctl_luns[delete_desc->lun_id]; 2841 if (lun == NULL) { 2842 mtx_unlock(&softc->ctl_lock); 2843 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 2844 __func__, (uintmax_t)delete_desc->lun_id); 2845 retval = EINVAL; 2846 break; 2847 } 2848 mtx_lock(&lun->lun_lock); 2849 mtx_unlock(&softc->ctl_lock); 2850 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 2851 if (desc->serial != delete_desc->serial) 2852 continue; 2853 2854 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 2855 links); 2856 free(desc, M_CTL); 2857 delete_done = 1; 2858 } 2859 mtx_unlock(&lun->lun_lock); 2860 if (delete_done == 0) { 2861 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 2862 "error serial %ju on LUN %u\n", __func__, 2863 delete_desc->serial, delete_desc->lun_id); 2864 retval = EINVAL; 2865 break; 2866 } 2867 break; 2868 } 2869 case CTL_DUMP_STRUCTS: { 2870 int i, j, k; 2871 struct ctl_frontend *fe; 2872 2873 printf("CTL IID to WWPN map start:\n"); 2874 for (i = 0; i < CTL_MAX_PORTS; i++) { 2875 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 2876 if (softc->wwpn_iid[i][j].in_use == 0) 2877 continue; 2878 2879 printf("port %d iid %u WWPN %#jx\n", 2880 softc->wwpn_iid[i][j].port, 2881 softc->wwpn_iid[i][j].iid, 2882 (uintmax_t)softc->wwpn_iid[i][j].wwpn); 2883 } 2884 } 2885 printf("CTL IID to WWPN map end\n"); 2886 printf("CTL Persistent Reservation information start:\n"); 2887 for (i = 0; i < CTL_MAX_LUNS; i++) { 2888 struct ctl_lun *lun; 2889 2890 lun = softc->ctl_luns[i]; 2891 2892 if ((lun == NULL) 2893 || ((lun->flags & CTL_LUN_DISABLED) != 0)) 2894 continue; 2895 2896 for (j = 0; j < (CTL_MAX_PORTS * 2); j++) { 2897 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2898 if (lun->per_res[j+k].registered == 0) 2899 continue; 2900 printf("LUN %d port %d iid %d key " 2901 "%#jx\n", i, j, k, 2902 (uintmax_t)scsi_8btou64( 2903 lun->per_res[j+k].res_key.key)); 2904 } 2905 } 2906 } 2907 printf("CTL Persistent Reservation information end\n"); 2908 printf("CTL Frontends:\n"); 2909 /* 2910 * XXX KDM calling this without a lock. We'd likely want 2911 * to drop the lock before calling the frontend's dump 2912 * routine anyway. 2913 */ 2914 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2915 printf("Frontend %s Type %u pport %d vport %d WWNN " 2916 "%#jx WWPN %#jx\n", fe->port_name, fe->port_type, 2917 fe->physical_port, fe->virtual_port, 2918 (uintmax_t)fe->wwnn, (uintmax_t)fe->wwpn); 2919 2920 /* 2921 * Frontends are not required to support the dump 2922 * routine. 
2923 */ 2924 if (fe->fe_dump == NULL) 2925 continue; 2926 2927 fe->fe_dump(); 2928 } 2929 printf("CTL Frontend information end\n"); 2930 break; 2931 } 2932 case CTL_LUN_REQ: { 2933 struct ctl_lun_req *lun_req; 2934 struct ctl_backend_driver *backend; 2935 2936 lun_req = (struct ctl_lun_req *)addr; 2937 2938 backend = ctl_backend_find(lun_req->backend); 2939 if (backend == NULL) { 2940 lun_req->status = CTL_LUN_ERROR; 2941 snprintf(lun_req->error_str, 2942 sizeof(lun_req->error_str), 2943 "Backend \"%s\" not found.", 2944 lun_req->backend); 2945 break; 2946 } 2947 if (lun_req->num_be_args > 0) { 2948 lun_req->kern_be_args = ctl_copyin_args( 2949 lun_req->num_be_args, 2950 lun_req->be_args, 2951 lun_req->error_str, 2952 sizeof(lun_req->error_str)); 2953 if (lun_req->kern_be_args == NULL) { 2954 lun_req->status = CTL_LUN_ERROR; 2955 break; 2956 } 2957 } 2958 2959 retval = backend->ioctl(dev, cmd, addr, flag, td); 2960 2961 if (lun_req->num_be_args > 0) { 2962 ctl_free_args(lun_req->num_be_args, 2963 lun_req->kern_be_args); 2964 } 2965 break; 2966 } 2967 case CTL_LUN_LIST: { 2968 struct sbuf *sb; 2969 struct ctl_lun *lun; 2970 struct ctl_lun_list *list; 2971 struct ctl_be_lun_option *opt; 2972 2973 list = (struct ctl_lun_list *)addr; 2974 2975 /* 2976 * Allocate a fixed length sbuf here, based on the length 2977 * of the user's buffer. We could allocate an auto-extending 2978 * buffer, and then tell the user how much larger our 2979 * amount of data is than his buffer, but that presents 2980 * some problems: 2981 * 2982 * 1. The sbuf(9) routines use a blocking malloc, and so 2983 * we can't hold a lock while calling them with an 2984 * auto-extending buffer. 2985 * 2986 * 2. There is not currently a LUN reference counting 2987 * mechanism, outside of outstanding transactions on 2988 * the LUN's OOA queue. So a LUN could go away on us 2989 * while we're getting the LUN number, backend-specific 2990 * information, etc. Thus, given the way things 2991 * currently work, we need to hold the CTL lock while 2992 * grabbing LUN information. 2993 * 2994 * So, from the user's standpoint, the best thing to do is 2995 * allocate what he thinks is a reasonable buffer length, 2996 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 2997 * double the buffer length and try again. (And repeat 2998 * that until he succeeds.) 2999 */ 3000 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3001 if (sb == NULL) { 3002 list->status = CTL_LUN_LIST_ERROR; 3003 snprintf(list->error_str, sizeof(list->error_str), 3004 "Unable to allocate %d bytes for LUN list", 3005 list->alloc_len); 3006 break; 3007 } 3008 3009 sbuf_printf(sb, "<ctllunlist>\n"); 3010 3011 mtx_lock(&softc->ctl_lock); 3012 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3013 mtx_lock(&lun->lun_lock); 3014 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 3015 (uintmax_t)lun->lun); 3016 3017 /* 3018 * Bail out as soon as we see that we've overfilled 3019 * the buffer. 3020 */ 3021 if (retval != 0) 3022 break; 3023 3024 retval = sbuf_printf(sb, "\t<backend_type>%s" 3025 "</backend_type>\n", 3026 (lun->backend == NULL) ? 
"none" : 3027 lun->backend->name); 3028 3029 if (retval != 0) 3030 break; 3031 3032 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 3033 lun->be_lun->lun_type); 3034 3035 if (retval != 0) 3036 break; 3037 3038 if (lun->backend == NULL) { 3039 retval = sbuf_printf(sb, "</lun>\n"); 3040 if (retval != 0) 3041 break; 3042 continue; 3043 } 3044 3045 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 3046 (lun->be_lun->maxlba > 0) ? 3047 lun->be_lun->maxlba + 1 : 0); 3048 3049 if (retval != 0) 3050 break; 3051 3052 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 3053 lun->be_lun->blocksize); 3054 3055 if (retval != 0) 3056 break; 3057 3058 retval = sbuf_printf(sb, "\t<serial_number>"); 3059 3060 if (retval != 0) 3061 break; 3062 3063 retval = ctl_sbuf_printf_esc(sb, 3064 lun->be_lun->serial_num); 3065 3066 if (retval != 0) 3067 break; 3068 3069 retval = sbuf_printf(sb, "</serial_number>\n"); 3070 3071 if (retval != 0) 3072 break; 3073 3074 retval = sbuf_printf(sb, "\t<device_id>"); 3075 3076 if (retval != 0) 3077 break; 3078 3079 retval = ctl_sbuf_printf_esc(sb,lun->be_lun->device_id); 3080 3081 if (retval != 0) 3082 break; 3083 3084 retval = sbuf_printf(sb, "</device_id>\n"); 3085 3086 if (retval != 0) 3087 break; 3088 3089 if (lun->backend->lun_info != NULL) { 3090 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb); 3091 if (retval != 0) 3092 break; 3093 } 3094 STAILQ_FOREACH(opt, &lun->be_lun->options, links) { 3095 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3096 opt->name, opt->value, opt->name); 3097 if (retval != 0) 3098 break; 3099 } 3100 3101 retval = sbuf_printf(sb, "</lun>\n"); 3102 3103 if (retval != 0) 3104 break; 3105 mtx_unlock(&lun->lun_lock); 3106 } 3107 if (lun != NULL) 3108 mtx_unlock(&lun->lun_lock); 3109 mtx_unlock(&softc->ctl_lock); 3110 3111 if ((retval != 0) 3112 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 3113 retval = 0; 3114 sbuf_delete(sb); 3115 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3116 snprintf(list->error_str, sizeof(list->error_str), 3117 "Out of space, %d bytes is too small", 3118 list->alloc_len); 3119 break; 3120 } 3121 3122 sbuf_finish(sb); 3123 3124 retval = copyout(sbuf_data(sb), list->lun_xml, 3125 sbuf_len(sb) + 1); 3126 3127 list->fill_len = sbuf_len(sb) + 1; 3128 list->status = CTL_LUN_LIST_OK; 3129 sbuf_delete(sb); 3130 break; 3131 } 3132 case CTL_ISCSI: { 3133 struct ctl_iscsi *ci; 3134 struct ctl_frontend *fe; 3135 3136 ci = (struct ctl_iscsi *)addr; 3137 3138 mtx_lock(&softc->ctl_lock); 3139 STAILQ_FOREACH(fe, &softc->fe_list, links) { 3140 if (strcmp(fe->port_name, "iscsi") == 0) 3141 break; 3142 } 3143 mtx_unlock(&softc->ctl_lock); 3144 3145 if (fe == NULL) { 3146 ci->status = CTL_ISCSI_ERROR; 3147 snprintf(ci->error_str, sizeof(ci->error_str), "Backend \"iscsi\" not found."); 3148 break; 3149 } 3150 3151 retval = fe->ioctl(dev, cmd, addr, flag, td); 3152 break; 3153 } 3154 default: { 3155 /* XXX KDM should we fix this? */ 3156#if 0 3157 struct ctl_backend_driver *backend; 3158 unsigned int type; 3159 int found; 3160 3161 found = 0; 3162 3163 /* 3164 * We encode the backend type as the ioctl type for backend 3165 * ioctls. So parse it out here, and then search for a 3166 * backend of this type. 
3167 */ 3168 type = _IOC_TYPE(cmd); 3169 3170 STAILQ_FOREACH(backend, &softc->be_list, links) { 3171 if (backend->type == type) { 3172 found = 1; 3173 break; 3174 } 3175 } 3176 if (found == 0) { 3177 printf("ctl: unknown ioctl command %#lx or backend " 3178 "%d\n", cmd, type); 3179 retval = EINVAL; 3180 break; 3181 } 3182 retval = backend->ioctl(dev, cmd, addr, flag, td); 3183#endif 3184 retval = ENOTTY; 3185 break; 3186 } 3187 } 3188 return (retval); 3189} 3190 3191uint32_t 3192ctl_get_initindex(struct ctl_nexus *nexus) 3193{ 3194 if (nexus->targ_port < CTL_MAX_PORTS) 3195 return (nexus->initid.id + 3196 (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3197 else 3198 return (nexus->initid.id + 3199 ((nexus->targ_port - CTL_MAX_PORTS) * 3200 CTL_MAX_INIT_PER_PORT)); 3201} 3202 3203uint32_t 3204ctl_get_resindex(struct ctl_nexus *nexus) 3205{ 3206 return (nexus->initid.id + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3207} 3208 3209uint32_t 3210ctl_port_idx(int port_num) 3211{ 3212 if (port_num < CTL_MAX_PORTS) 3213 return(port_num); 3214 else 3215 return(port_num - CTL_MAX_PORTS); 3216} 3217 3218/* 3219 * Note: This only works for bitmask sizes that are at least 32 bits, and 3220 * that are a power of 2. 3221 */ 3222int 3223ctl_ffz(uint32_t *mask, uint32_t size) 3224{ 3225 uint32_t num_chunks, num_pieces; 3226 int i, j; 3227 3228 num_chunks = (size >> 5); 3229 if (num_chunks == 0) 3230 num_chunks++; 3231 num_pieces = ctl_min((sizeof(uint32_t) * 8), size); 3232 3233 for (i = 0; i < num_chunks; i++) { 3234 for (j = 0; j < num_pieces; j++) { 3235 if ((mask[i] & (1 << j)) == 0) 3236 return ((i << 5) + j); 3237 } 3238 } 3239 3240 return (-1); 3241} 3242 3243int 3244ctl_set_mask(uint32_t *mask, uint32_t bit) 3245{ 3246 uint32_t chunk, piece; 3247 3248 chunk = bit >> 5; 3249 piece = bit % (sizeof(uint32_t) * 8); 3250 3251 if ((mask[chunk] & (1 << piece)) != 0) 3252 return (-1); 3253 else 3254 mask[chunk] |= (1 << piece); 3255 3256 return (0); 3257} 3258 3259int 3260ctl_clear_mask(uint32_t *mask, uint32_t bit) 3261{ 3262 uint32_t chunk, piece; 3263 3264 chunk = bit >> 5; 3265 piece = bit % (sizeof(uint32_t) * 8); 3266 3267 if ((mask[chunk] & (1 << piece)) == 0) 3268 return (-1); 3269 else 3270 mask[chunk] &= ~(1 << piece); 3271 3272 return (0); 3273} 3274 3275int 3276ctl_is_set(uint32_t *mask, uint32_t bit) 3277{ 3278 uint32_t chunk, piece; 3279 3280 chunk = bit >> 5; 3281 piece = bit % (sizeof(uint32_t) * 8); 3282 3283 if ((mask[chunk] & (1 << piece)) == 0) 3284 return (0); 3285 else 3286 return (1); 3287} 3288 3289#ifdef unused 3290/* 3291 * The bus, target and lun are optional, they can be filled in later. 3292 * can_wait is used to determine whether we can wait on the malloc or not. 3293 */ 3294union ctl_io* 3295ctl_malloc_io(ctl_io_type io_type, uint32_t targ_port, uint32_t targ_target, 3296 uint32_t targ_lun, int can_wait) 3297{ 3298 union ctl_io *io; 3299 3300 if (can_wait) 3301 io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_WAITOK); 3302 else 3303 io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_NOWAIT); 3304 3305 if (io != NULL) { 3306 io->io_hdr.io_type = io_type; 3307 io->io_hdr.targ_port = targ_port; 3308 /* 3309 * XXX KDM this needs to change/go away. We need to move 3310 * to a preallocated pool of ctl_scsiio structures. 
3311 */ 3312 io->io_hdr.nexus.targ_target.id = targ_target; 3313 io->io_hdr.nexus.targ_lun = targ_lun; 3314 } 3315 3316 return (io); 3317} 3318 3319void 3320ctl_kfree_io(union ctl_io *io) 3321{ 3322 free(io, M_CTL); 3323} 3324#endif /* unused */ 3325 3326/* 3327 * ctl_softc, pool_type, total_ctl_io are passed in. 3328 * npool is passed out. 3329 */ 3330int 3331ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type, 3332 uint32_t total_ctl_io, struct ctl_io_pool **npool) 3333{ 3334 uint32_t i; 3335 union ctl_io *cur_io, *next_io; 3336 struct ctl_io_pool *pool; 3337 int retval; 3338 3339 retval = 0; 3340 3341 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3342 M_NOWAIT | M_ZERO); 3343 if (pool == NULL) { 3344 retval = ENOMEM; 3345 goto bailout; 3346 } 3347 3348 pool->type = pool_type; 3349 pool->ctl_softc = ctl_softc; 3350 3351 mtx_lock(&ctl_softc->pool_lock); 3352 pool->id = ctl_softc->cur_pool_id++; 3353 mtx_unlock(&ctl_softc->pool_lock); 3354 3355 pool->flags = CTL_POOL_FLAG_NONE; 3356 pool->refcount = 1; /* Reference for validity. */ 3357 STAILQ_INIT(&pool->free_queue); 3358 3359 /* 3360 * XXX KDM other options here: 3361 * - allocate a page at a time 3362 * - allocate one big chunk of memory. 3363 * Page allocation might work well, but would take a little more 3364 * tracking. 3365 */ 3366 for (i = 0; i < total_ctl_io; i++) { 3367 cur_io = (union ctl_io *)malloc(sizeof(*cur_io), M_CTLIO, 3368 M_NOWAIT); 3369 if (cur_io == NULL) { 3370 retval = ENOMEM; 3371 break; 3372 } 3373 cur_io->io_hdr.pool = pool; 3374 STAILQ_INSERT_TAIL(&pool->free_queue, &cur_io->io_hdr, links); 3375 pool->total_ctl_io++; 3376 pool->free_ctl_io++; 3377 } 3378 3379 if (retval != 0) { 3380 for (cur_io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue); 3381 cur_io != NULL; cur_io = next_io) { 3382 next_io = (union ctl_io *)STAILQ_NEXT(&cur_io->io_hdr, 3383 links); 3384 STAILQ_REMOVE(&pool->free_queue, &cur_io->io_hdr, 3385 ctl_io_hdr, links); 3386 free(cur_io, M_CTLIO); 3387 } 3388 3389 free(pool, M_CTL); 3390 goto bailout; 3391 } 3392 mtx_lock(&ctl_softc->pool_lock); 3393 ctl_softc->num_pools++; 3394 STAILQ_INSERT_TAIL(&ctl_softc->io_pools, pool, links); 3395 /* 3396 * Increment our usage count if this is an external consumer, so we 3397 * can't get unloaded until the external consumer (most likely a 3398 * FETD) unloads and frees his pool. 3399 * 3400 * XXX KDM will this increment the caller's module use count, or 3401 * mine? 
3402 */ 3403#if 0 3404 if ((pool_type != CTL_POOL_EMERGENCY) 3405 && (pool_type != CTL_POOL_INTERNAL) 3406 && (pool_type != CTL_POOL_IOCTL) 3407 && (pool_type != CTL_POOL_4OTHERSC)) 3408 MOD_INC_USE_COUNT; 3409#endif 3410 3411 mtx_unlock(&ctl_softc->pool_lock); 3412 3413 *npool = pool; 3414 3415bailout: 3416 3417 return (retval); 3418} 3419 3420static int 3421ctl_pool_acquire(struct ctl_io_pool *pool) 3422{ 3423 3424 mtx_assert(&pool->ctl_softc->pool_lock, MA_OWNED); 3425 3426 if (pool->flags & CTL_POOL_FLAG_INVALID) 3427 return (EINVAL); 3428 3429 pool->refcount++; 3430 3431 return (0); 3432} 3433 3434static void 3435ctl_pool_release(struct ctl_io_pool *pool) 3436{ 3437 struct ctl_softc *ctl_softc = pool->ctl_softc; 3438 union ctl_io *io; 3439 3440 mtx_assert(&ctl_softc->pool_lock, MA_OWNED); 3441 3442 if (--pool->refcount != 0) 3443 return; 3444 3445 while ((io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue)) != NULL) { 3446 STAILQ_REMOVE(&pool->free_queue, &io->io_hdr, ctl_io_hdr, 3447 links); 3448 free(io, M_CTLIO); 3449 } 3450 3451 STAILQ_REMOVE(&ctl_softc->io_pools, pool, ctl_io_pool, links); 3452 ctl_softc->num_pools--; 3453 3454 /* 3455 * XXX KDM will this decrement the caller's usage count or mine? 3456 */ 3457#if 0 3458 if ((pool->type != CTL_POOL_EMERGENCY) 3459 && (pool->type != CTL_POOL_INTERNAL) 3460 && (pool->type != CTL_POOL_IOCTL)) 3461 MOD_DEC_USE_COUNT; 3462#endif 3463 3464 free(pool, M_CTL); 3465} 3466 3467void 3468ctl_pool_free(struct ctl_io_pool *pool) 3469{ 3470 struct ctl_softc *ctl_softc; 3471 3472 if (pool == NULL) 3473 return; 3474 3475 ctl_softc = pool->ctl_softc; 3476 mtx_lock(&ctl_softc->pool_lock); 3477 pool->flags |= CTL_POOL_FLAG_INVALID; 3478 ctl_pool_release(pool); 3479 mtx_unlock(&ctl_softc->pool_lock); 3480} 3481 3482/* 3483 * This routine does not block (except for spinlocks of course). 3484 * It tries to allocate a ctl_io union from the caller's pool as quickly as 3485 * possible. 3486 */ 3487union ctl_io * 3488ctl_alloc_io(void *pool_ref) 3489{ 3490 union ctl_io *io; 3491 struct ctl_softc *ctl_softc; 3492 struct ctl_io_pool *pool, *npool; 3493 struct ctl_io_pool *emergency_pool; 3494 3495 pool = (struct ctl_io_pool *)pool_ref; 3496 3497 if (pool == NULL) { 3498 printf("%s: pool is NULL\n", __func__); 3499 return (NULL); 3500 } 3501 3502 emergency_pool = NULL; 3503 3504 ctl_softc = pool->ctl_softc; 3505 3506 mtx_lock(&ctl_softc->pool_lock); 3507 /* 3508 * First, try to get the io structure from the user's pool. 3509 */ 3510 if (ctl_pool_acquire(pool) == 0) { 3511 io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue); 3512 if (io != NULL) { 3513 STAILQ_REMOVE_HEAD(&pool->free_queue, links); 3514 pool->total_allocated++; 3515 pool->free_ctl_io--; 3516 mtx_unlock(&ctl_softc->pool_lock); 3517 return (io); 3518 } else 3519 ctl_pool_release(pool); 3520 } 3521 /* 3522 * If he doesn't have any io structures left, search for an 3523 * emergency pool and grab one from there. 
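 * The allocation strategy is thus three-tiered: the caller's own pool
 * first, then any emergency pool, and finally (further down) a
 * last-ditch M_NOWAIT malloc, so this path never sleeps.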
3524 */ 3525 STAILQ_FOREACH(npool, &ctl_softc->io_pools, links) { 3526 if (npool->type != CTL_POOL_EMERGENCY) 3527 continue; 3528 3529 if (ctl_pool_acquire(npool) != 0) 3530 continue; 3531 3532 emergency_pool = npool; 3533 3534 io = (union ctl_io *)STAILQ_FIRST(&npool->free_queue); 3535 if (io != NULL) { 3536 STAILQ_REMOVE_HEAD(&npool->free_queue, links); 3537 npool->total_allocated++; 3538 npool->free_ctl_io--; 3539 mtx_unlock(&ctl_softc->pool_lock); 3540 return (io); 3541 } else 3542 ctl_pool_release(npool); 3543 } 3544 3545 /* Drop the spinlock before we malloc */ 3546 mtx_unlock(&ctl_softc->pool_lock); 3547 3548 /* 3549 * The emergency pool (if it exists) didn't have one, so try an 3550 * atomic (i.e. nonblocking) malloc and see if we get lucky. 3551 */ 3552 io = (union ctl_io *)malloc(sizeof(*io), M_CTLIO, M_NOWAIT); 3553 if (io != NULL) { 3554 /* 3555 * If the emergency pool exists but is empty, add this 3556 * ctl_io to its list when it gets freed. 3557 */ 3558 if (emergency_pool != NULL) { 3559 mtx_lock(&ctl_softc->pool_lock); 3560 if (ctl_pool_acquire(emergency_pool) == 0) { 3561 io->io_hdr.pool = emergency_pool; 3562 emergency_pool->total_ctl_io++; 3563 /* 3564 * Need to bump this, otherwise 3565 * total_allocated and total_freed won't 3566 * match when we no longer have anything 3567 * outstanding. 3568 */ 3569 emergency_pool->total_allocated++; 3570 } 3571 mtx_unlock(&ctl_softc->pool_lock); 3572 } else 3573 io->io_hdr.pool = NULL; 3574 } 3575 3576 return (io); 3577} 3578 3579void 3580ctl_free_io(union ctl_io *io) 3581{ 3582 if (io == NULL) 3583 return; 3584 3585 /* 3586 * If this ctl_io has a pool, return it to that pool. 3587 */ 3588 if (io->io_hdr.pool != NULL) { 3589 struct ctl_io_pool *pool; 3590 3591 pool = (struct ctl_io_pool *)io->io_hdr.pool; 3592 mtx_lock(&pool->ctl_softc->pool_lock); 3593 io->io_hdr.io_type = 0xff; 3594 STAILQ_INSERT_TAIL(&pool->free_queue, &io->io_hdr, links); 3595 pool->total_freed++; 3596 pool->free_ctl_io++; 3597 ctl_pool_release(pool); 3598 mtx_unlock(&pool->ctl_softc->pool_lock); 3599 } else { 3600 /* 3601 * Otherwise, just free it. We probably malloced it and 3602 * the emergency pool wasn't available. 3603 */ 3604 free(io, M_CTLIO); 3605 } 3606 3607} 3608 3609void 3610ctl_zero_io(union ctl_io *io) 3611{ 3612 void *pool_ref; 3613 3614 if (io == NULL) 3615 return; 3616 3617 /* 3618 * May need to preserve linked list pointers at some point too. 3619 */ 3620 pool_ref = io->io_hdr.pool; 3621 3622 memset(io, 0, sizeof(*io)); 3623 3624 io->io_hdr.pool = pool_ref; 3625} 3626 3627/* 3628 * This routine is currently used for internal copies of ctl_ios that need 3629 * to persist for some reason after we've already returned status to the 3630 * FETD. (Thus the flag set.) 3631 * 3632 * XXX XXX 3633 * Note that this makes a blind copy of all fields in the ctl_io, except 3634 * for the pool reference. This includes any memory that has been 3635 * allocated! That memory will no longer be valid after done has been 3636 * called, so this would be VERY DANGEROUS for command that actually does 3637 * any reads or writes. Right now (11/7/2005), this is only used for immediate 3638 * start and stop commands, which don't transfer any data, so this is not a 3639 * problem. If it is used for anything else, the caller would also need to 3640 * allocate data buffer space and this routine would need to be modified to 3641 * copy the data buffer(s) as well. 
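 * A hypothetical deep-copy variant would have to do something like
 *
 *	dest->scsiio.kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
 *	memcpy(dest->scsiio.kern_data_ptr, src->scsiio.kern_data_ptr, len);
 *
 * for every attached buffer, which is why it has not been done.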
3642 */ 3643void 3644ctl_copy_io(union ctl_io *src, union ctl_io *dest) 3645{ 3646 void *pool_ref; 3647 3648 if ((src == NULL) 3649 || (dest == NULL)) 3650 return; 3651 3652 /* 3653 * May need to preserve linked list pointers at some point too. 3654 */ 3655 pool_ref = dest->io_hdr.pool; 3656 3657 memcpy(dest, src, ctl_min(sizeof(*src), sizeof(*dest))); 3658 3659 dest->io_hdr.pool = pool_ref; 3660 /* 3661 * We need to know that this is an internal copy, and doesn't need 3662 * to get passed back to the FETD that allocated it. 3663 */ 3664 dest->io_hdr.flags |= CTL_FLAG_INT_COPY; 3665} 3666 3667#ifdef NEEDTOPORT 3668static void 3669ctl_update_power_subpage(struct copan_power_subpage *page) 3670{ 3671 int num_luns, num_partitions, config_type; 3672 struct ctl_softc *softc; 3673 cs_BOOL_t aor_present, shelf_50pct_power; 3674 cs_raidset_personality_t rs_type; 3675 int max_active_luns; 3676 3677 softc = control_softc; 3678 3679 /* subtract out the processor LUN */ 3680 num_luns = softc->num_luns - 1; 3681 /* 3682 * Default to 7 LUNs active, which was the only number we allowed 3683 * in the past. 3684 */ 3685 max_active_luns = 7; 3686 3687 num_partitions = config_GetRsPartitionInfo(); 3688 config_type = config_GetConfigType(); 3689 shelf_50pct_power = config_GetShelfPowerMode(); 3690 aor_present = config_IsAorRsPresent(); 3691 3692 rs_type = ddb_GetRsRaidType(1); 3693 if ((rs_type != CS_RAIDSET_PERSONALITY_RAID5) 3694 && (rs_type != CS_RAIDSET_PERSONALITY_RAID1)) { 3695 EPRINT(0, "Unsupported RS type %d!", rs_type); 3696 } 3697 3698 3699 page->total_luns = num_luns; 3700 3701 switch (config_type) { 3702 case 40: 3703 /* 3704 * In a 40 drive configuration, it doesn't matter what DC 3705 * cards we have, whether we have AOR enabled or not, 3706 * partitioning or not, or what type of RAIDset we have. 3707 * In that scenario, we can power up every LUN we present 3708 * to the user. 3709 */ 3710 max_active_luns = num_luns; 3711 3712 break; 3713 case 64: 3714 if (shelf_50pct_power == CS_FALSE) { 3715 /* 25% power */ 3716 if (aor_present == CS_TRUE) { 3717 if (rs_type == 3718 CS_RAIDSET_PERSONALITY_RAID5) { 3719 max_active_luns = 7; 3720 } else if (rs_type == 3721 CS_RAIDSET_PERSONALITY_RAID1){ 3722 max_active_luns = 14; 3723 } else { 3724 /* XXX KDM now what?? */ 3725 } 3726 } else { 3727 if (rs_type == 3728 CS_RAIDSET_PERSONALITY_RAID5) { 3729 max_active_luns = 8; 3730 } else if (rs_type == 3731 CS_RAIDSET_PERSONALITY_RAID1){ 3732 max_active_luns = 16; 3733 } else { 3734 /* XXX KDM now what?? */ 3735 } 3736 } 3737 } else { 3738 /* 50% power */ 3739 /* 3740 * With 50% power in a 64 drive configuration, we 3741 * can power all LUNs we present. 3742 */ 3743 max_active_luns = num_luns; 3744 } 3745 break; 3746 case 112: 3747 if (shelf_50pct_power == CS_FALSE) { 3748 /* 25% power */ 3749 if (aor_present == CS_TRUE) { 3750 if (rs_type == 3751 CS_RAIDSET_PERSONALITY_RAID5) { 3752 max_active_luns = 7; 3753 } else if (rs_type == 3754 CS_RAIDSET_PERSONALITY_RAID1){ 3755 max_active_luns = 14; 3756 } else { 3757 /* XXX KDM now what?? */ 3758 } 3759 } else { 3760 if (rs_type == 3761 CS_RAIDSET_PERSONALITY_RAID5) { 3762 max_active_luns = 8; 3763 } else if (rs_type == 3764 CS_RAIDSET_PERSONALITY_RAID1){ 3765 max_active_luns = 16; 3766 } else { 3767 /* XXX KDM now what?? 
*/ 3768 } 3769 } 3770 } else { 3771 /* 50% power */ 3772 if (aor_present == CS_TRUE) { 3773 if (rs_type == 3774 CS_RAIDSET_PERSONALITY_RAID5) { 3775 max_active_luns = 14; 3776 } else if (rs_type == 3777 CS_RAIDSET_PERSONALITY_RAID1){ 3778 /* 3779 * We're assuming here that disk 3780 * caching is enabled, and so we're 3781 * able to power up half of each 3782 * LUN, and cache all writes. 3783 */ 3784 max_active_luns = num_luns; 3785 } else { 3786 /* XXX KDM now what?? */ 3787 } 3788 } else { 3789 if (rs_type == 3790 CS_RAIDSET_PERSONALITY_RAID5) { 3791 max_active_luns = 15; 3792 } else if (rs_type == 3793 CS_RAIDSET_PERSONALITY_RAID1){ 3794 max_active_luns = 30; 3795 } else { 3796 /* XXX KDM now what?? */ 3797 } 3798 } 3799 } 3800 break; 3801 default: 3802 /* 3803 * In this case, we have an unknown configuration, so we 3804 * just use the default from above. 3805 */ 3806 break; 3807 } 3808 3809 page->max_active_luns = max_active_luns; 3810#if 0 3811 printk("%s: total_luns = %d, max_active_luns = %d\n", __func__, 3812 page->total_luns, page->max_active_luns); 3813#endif 3814} 3815#endif /* NEEDTOPORT */ 3816 3817/* 3818 * This routine could be used in the future to load default and/or saved 3819 * mode page parameters for a particular lun. 3820 */ 3821static int 3822ctl_init_page_index(struct ctl_lun *lun) 3823{ 3824 int i; 3825 struct ctl_page_index *page_index; 3826 struct ctl_softc *softc; 3827 3828 memcpy(&lun->mode_pages.index, page_index_template, 3829 sizeof(page_index_template)); 3830 3831 softc = lun->ctl_softc; 3832 3833 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 3834 3835 page_index = &lun->mode_pages.index[i]; 3836 /* 3837 * If this is a disk-only mode page, there's no point in 3838 * setting it up. For some pages, we have to have some 3839 * basic information about the disk in order to calculate the 3840 * mode page data. 3841 */ 3842 if ((lun->be_lun->lun_type != T_DIRECT) 3843 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) 3844 continue; 3845 3846 switch (page_index->page_code & SMPH_PC_MASK) { 3847 case SMS_FORMAT_DEVICE_PAGE: { 3848 struct scsi_format_page *format_page; 3849 3850 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3851 panic("subpage is incorrect!"); 3852 3853 /* 3854 * Sectors per track are set above. Bytes per 3855 * sector need to be set here on a per-LUN basis. 
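 * (scsi_ulto2b() stores the value big-endian, so a 512-byte sector
 * size, for instance, lands in the page as the two bytes 0x02 0x00.)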
3856 */ 3857 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], 3858 &format_page_default, 3859 sizeof(format_page_default)); 3860 memcpy(&lun->mode_pages.format_page[ 3861 CTL_PAGE_CHANGEABLE], &format_page_changeable, 3862 sizeof(format_page_changeable)); 3863 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], 3864 &format_page_default, 3865 sizeof(format_page_default)); 3866 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], 3867 &format_page_default, 3868 sizeof(format_page_default)); 3869 3870 format_page = &lun->mode_pages.format_page[ 3871 CTL_PAGE_CURRENT]; 3872 scsi_ulto2b(lun->be_lun->blocksize, 3873 format_page->bytes_per_sector); 3874 3875 format_page = &lun->mode_pages.format_page[ 3876 CTL_PAGE_DEFAULT]; 3877 scsi_ulto2b(lun->be_lun->blocksize, 3878 format_page->bytes_per_sector); 3879 3880 format_page = &lun->mode_pages.format_page[ 3881 CTL_PAGE_SAVED]; 3882 scsi_ulto2b(lun->be_lun->blocksize, 3883 format_page->bytes_per_sector); 3884 3885 page_index->page_data = 3886 (uint8_t *)lun->mode_pages.format_page; 3887 break; 3888 } 3889 case SMS_RIGID_DISK_PAGE: { 3890 struct scsi_rigid_disk_page *rigid_disk_page; 3891 uint32_t sectors_per_cylinder; 3892 uint64_t cylinders; 3893#ifndef __XSCALE__ 3894 int shift; 3895#endif /* !__XSCALE__ */ 3896 3897 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3898 panic("invalid subpage value %d", 3899 page_index->subpage); 3900 3901 /* 3902 * Rotation rate and sectors per track are set 3903 * above. We calculate the cylinders here based on 3904 * capacity. Due to the number of heads and 3905 * sectors per track we're using, smaller arrays 3906 * may turn out to have 0 cylinders. Linux and 3907 * FreeBSD don't pay attention to these mode pages 3908 * to figure out capacity, but Solaris does. It 3909 * seems to deal with 0 cylinders just fine, and 3910 * works out a fake geometry based on the capacity. 3911 */ 3912 memcpy(&lun->mode_pages.rigid_disk_page[ 3913 CTL_PAGE_CURRENT], &rigid_disk_page_default, 3914 sizeof(rigid_disk_page_default)); 3915 memcpy(&lun->mode_pages.rigid_disk_page[ 3916 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, 3917 sizeof(rigid_disk_page_changeable)); 3918 memcpy(&lun->mode_pages.rigid_disk_page[ 3919 CTL_PAGE_DEFAULT], &rigid_disk_page_default, 3920 sizeof(rigid_disk_page_default)); 3921 memcpy(&lun->mode_pages.rigid_disk_page[ 3922 CTL_PAGE_SAVED], &rigid_disk_page_default, 3923 sizeof(rigid_disk_page_default)); 3924 3925 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * 3926 CTL_DEFAULT_HEADS; 3927 3928 /* 3929 * The divide method here will be more accurate, 3930 * probably, but pulls the 64-bit software division 3931 * helper (__udivdi3(), an integer routine, not 3932 * floating point) into the kernel on i386. On the 3933 * XScale, too, __udivdi3() is implemented in 3934 * software. 3935 * 3936 * The shift method for cylinder calculation is 3937 * accurate if sectors_per_cylinder is a power of 3938 * 2. Otherwise it might be slightly off -- you 3939 * might have a bit of a truncation problem. 3940 */ 3941#ifdef __XSCALE__ 3942 cylinders = (lun->be_lun->maxlba + 1) / 3943 sectors_per_cylinder; 3944#else 3945 for (shift = 31; shift > 0; shift--) { 3946 if (sectors_per_cylinder & (1 << shift)) 3947 break; 3948 } 3949 cylinders = (lun->be_lun->maxlba + 1) >> shift; 3950#endif 3951 3952 /* 3953 * We've basically got 3 bytes, or 24 bits for the 3954 * cylinder size in the mode page. If we're over, 3955 * just clamp it at 2^24 - 1. 
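 * For scale: with sectors_per_cylinder equal to 2^15, a LUN of 2^32
 * blocks would report 2^32 >> 15 = 131072 cylinders, well under the
 * limit; the clamp only matters for very large LUNs or a small
 * sectors_per_cylinder.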
3955 */ 3956 if (cylinders > 0xffffff) 3957 cylinders = 0xffffff; 3958 3959 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 3960 CTL_PAGE_CURRENT]; 3961 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 3962 3963 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 3964 CTL_PAGE_DEFAULT]; 3965 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 3966 3967 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 3968 CTL_PAGE_SAVED]; 3969 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 3970 3971 page_index->page_data = 3972 (uint8_t *)lun->mode_pages.rigid_disk_page; 3973 break; 3974 } 3975 case SMS_CACHING_PAGE: { 3976 3977 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3978 panic("invalid subpage value %d", 3979 page_index->subpage); 3980 /* 3981 * Defaults should be okay here, no calculations 3982 * needed. 3983 */ 3984 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], 3985 &caching_page_default, 3986 sizeof(caching_page_default)); 3987 memcpy(&lun->mode_pages.caching_page[ 3988 CTL_PAGE_CHANGEABLE], &caching_page_changeable, 3989 sizeof(caching_page_changeable)); 3990 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], 3991 &caching_page_default, 3992 sizeof(caching_page_default)); 3993 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], 3994 &caching_page_default, 3995 sizeof(caching_page_default)); 3996 page_index->page_data = 3997 (uint8_t *)lun->mode_pages.caching_page; 3998 break; 3999 } 4000 case SMS_CONTROL_MODE_PAGE: { 4001 4002 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 4003 panic("invalid subpage value %d", 4004 page_index->subpage); 4005 4006 /* 4007 * Defaults should be okay here, no calculations 4008 * needed. 4009 */ 4010 memcpy(&lun->mode_pages.control_page[CTL_PAGE_CURRENT], 4011 &control_page_default, 4012 sizeof(control_page_default)); 4013 memcpy(&lun->mode_pages.control_page[ 4014 CTL_PAGE_CHANGEABLE], &control_page_changeable, 4015 sizeof(control_page_changeable)); 4016 memcpy(&lun->mode_pages.control_page[CTL_PAGE_DEFAULT], 4017 &control_page_default, 4018 sizeof(control_page_default)); 4019 memcpy(&lun->mode_pages.control_page[CTL_PAGE_SAVED], 4020 &control_page_default, 4021 sizeof(control_page_default)); 4022 page_index->page_data = 4023 (uint8_t *)lun->mode_pages.control_page; 4024 break; 4025 4026 } 4027 case SMS_VENDOR_SPECIFIC_PAGE:{ 4028 switch (page_index->subpage) { 4029 case PWR_SUBPAGE_CODE: { 4030 struct copan_power_subpage *current_page, 4031 *saved_page; 4032 4033 memcpy(&lun->mode_pages.power_subpage[ 4034 CTL_PAGE_CURRENT], 4035 &power_page_default, 4036 sizeof(power_page_default)); 4037 memcpy(&lun->mode_pages.power_subpage[ 4038 CTL_PAGE_CHANGEABLE], 4039 &power_page_changeable, 4040 sizeof(power_page_changeable)); 4041 memcpy(&lun->mode_pages.power_subpage[ 4042 CTL_PAGE_DEFAULT], 4043 &power_page_default, 4044 sizeof(power_page_default)); 4045 memcpy(&lun->mode_pages.power_subpage[ 4046 CTL_PAGE_SAVED], 4047 &power_page_default, 4048 sizeof(power_page_default)); 4049 page_index->page_data = 4050 (uint8_t *)lun->mode_pages.power_subpage; 4051 4052 current_page = (struct copan_power_subpage *) 4053 (page_index->page_data + 4054 (page_index->page_len * 4055 CTL_PAGE_CURRENT)); 4056 saved_page = (struct copan_power_subpage *) 4057 (page_index->page_data + 4058 (page_index->page_len * 4059 CTL_PAGE_SAVED)); 4060 break; 4061 } 4062 case APS_SUBPAGE_CODE: { 4063 struct copan_aps_subpage *current_page, 4064 *saved_page; 4065 4066 // This gets set multiple times but 4067 // it should always be the same. 
It's 4068 // only done during init so who cares. 4069 index_to_aps_page = i; 4070 4071 memcpy(&lun->mode_pages.aps_subpage[ 4072 CTL_PAGE_CURRENT], 4073 &aps_page_default, 4074 sizeof(aps_page_default)); 4075 memcpy(&lun->mode_pages.aps_subpage[ 4076 CTL_PAGE_CHANGEABLE], 4077 &aps_page_changeable, 4078 sizeof(aps_page_changeable)); 4079 memcpy(&lun->mode_pages.aps_subpage[ 4080 CTL_PAGE_DEFAULT], 4081 &aps_page_default, 4082 sizeof(aps_page_default)); 4083 memcpy(&lun->mode_pages.aps_subpage[ 4084 CTL_PAGE_SAVED], 4085 &aps_page_default, 4086 sizeof(aps_page_default)); 4087 page_index->page_data = 4088 (uint8_t *)lun->mode_pages.aps_subpage; 4089 4090 current_page = (struct copan_aps_subpage *) 4091 (page_index->page_data + 4092 (page_index->page_len * 4093 CTL_PAGE_CURRENT)); 4094 saved_page = (struct copan_aps_subpage *) 4095 (page_index->page_data + 4096 (page_index->page_len * 4097 CTL_PAGE_SAVED)); 4098 break; 4099 } 4100 case DBGCNF_SUBPAGE_CODE: { 4101 struct copan_debugconf_subpage *current_page, 4102 *saved_page; 4103 4104 memcpy(&lun->mode_pages.debugconf_subpage[ 4105 CTL_PAGE_CURRENT], 4106 &debugconf_page_default, 4107 sizeof(debugconf_page_default)); 4108 memcpy(&lun->mode_pages.debugconf_subpage[ 4109 CTL_PAGE_CHANGEABLE], 4110 &debugconf_page_changeable, 4111 sizeof(debugconf_page_changeable)); 4112 memcpy(&lun->mode_pages.debugconf_subpage[ 4113 CTL_PAGE_DEFAULT], 4114 &debugconf_page_default, 4115 sizeof(debugconf_page_default)); 4116 memcpy(&lun->mode_pages.debugconf_subpage[ 4117 CTL_PAGE_SAVED], 4118 &debugconf_page_default, 4119 sizeof(debugconf_page_default)); 4120 page_index->page_data = 4121 (uint8_t *)lun->mode_pages.debugconf_subpage; 4122 4123 current_page = (struct copan_debugconf_subpage *) 4124 (page_index->page_data + 4125 (page_index->page_len * 4126 CTL_PAGE_CURRENT)); 4127 saved_page = (struct copan_debugconf_subpage *) 4128 (page_index->page_data + 4129 (page_index->page_len * 4130 CTL_PAGE_SAVED)); 4131 break; 4132 } 4133 default: 4134 panic("invalid subpage value %d", 4135 page_index->subpage); 4136 break; 4137 } 4138 break; 4139 } 4140 default: 4141 panic("invalid page value %d", 4142 page_index->page_code & SMPH_PC_MASK); 4143 break; 4144 } 4145 } 4146 4147 return (CTL_RETVAL_COMPLETE); 4148} 4149 4150/* 4151 * LUN allocation. 4152 * 4153 * Requirements: 4154 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he 4155 * wants us to allocate the LUN and he can block. 4156 * - ctl_softc is always set 4157 * - be_lun is set if the LUN has a backend (needed for disk LUNs) 4158 * 4159 * Returns 0 for success, non-zero (errno) for failure. 4160 */ 4161static int 4162ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun, 4163 struct ctl_be_lun *const be_lun, struct ctl_id target_id) 4164{ 4165 struct ctl_lun *nlun, *lun; 4166 struct ctl_frontend *fe; 4167 int lun_number, i, lun_malloced; 4168 4169 if (be_lun == NULL) 4170 return (EINVAL); 4171 4172 /* 4173 * We currently only support Direct Access or Processor LUN types. 
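 * Anything else (tape, medium changer, ...) has CTL_LUN_CONFIG_FAILURE reported to its backend via lun_config_status() in the switch below. Note that the switch only reports the failure; it does not return, so setup still proceeds.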
4174 */ 4175 switch (be_lun->lun_type) { 4176 case T_DIRECT: 4177 break; 4178 case T_PROCESSOR: 4179 break; 4180 case T_SEQUENTIAL: 4181 case T_CHANGER: 4182 default: 4183 be_lun->lun_config_status(be_lun->be_lun, 4184 CTL_LUN_CONFIG_FAILURE); 4185 break; 4186 } 4187 if (ctl_lun == NULL) { 4188 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK); 4189 lun_malloced = 1; 4190 } else { 4191 lun_malloced = 0; 4192 lun = ctl_lun; 4193 } 4194 4195 memset(lun, 0, sizeof(*lun)); 4196 if (lun_malloced) 4197 lun->flags = CTL_LUN_MALLOCED; 4198 4199 mtx_lock(&ctl_softc->ctl_lock); 4200 /* 4201 * See if the caller requested a particular LUN number. If so, see 4202 * if it is available. Otherwise, allocate the first available LUN. 4203 */ 4204 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4205 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) 4206 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4207 mtx_unlock(&ctl_softc->ctl_lock); 4208 if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) { 4209 printf("ctl: requested LUN ID %d is higher " 4210 "than CTL_MAX_LUNS - 1 (%d)\n", 4211 be_lun->req_lun_id, CTL_MAX_LUNS - 1); 4212 } else { 4213 /* 4214 * XXX KDM return an error, or just assign 4215 * another LUN ID in this case?? 4216 */ 4217 printf("ctl: requested LUN ID %d is already " 4218 "in use\n", be_lun->req_lun_id); 4219 } 4220 if (lun->flags & CTL_LUN_MALLOCED) 4221 free(lun, M_CTL); 4222 be_lun->lun_config_status(be_lun->be_lun, 4223 CTL_LUN_CONFIG_FAILURE); 4224 return (ENOSPC); 4225 } 4226 lun_number = be_lun->req_lun_id; 4227 } else { 4228 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, CTL_MAX_LUNS); 4229 if (lun_number == -1) { 4230 mtx_unlock(&ctl_softc->ctl_lock); 4231 printf("ctl: can't allocate LUN on target %ju, out of " 4232 "LUNs\n", (uintmax_t)target_id.id); 4233 if (lun->flags & CTL_LUN_MALLOCED) 4234 free(lun, M_CTL); 4235 be_lun->lun_config_status(be_lun->be_lun, 4236 CTL_LUN_CONFIG_FAILURE); 4237 return (ENOSPC); 4238 } 4239 } 4240 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4241 4242 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4243 lun->target = target_id; 4244 lun->lun = lun_number; 4245 lun->be_lun = be_lun; 4246 /* 4247 * The processor LUN is always enabled. Disk LUNs come on line 4248 * disabled, and must be enabled by the backend. 4249 */ 4250 lun->flags |= CTL_LUN_DISABLED; 4251 lun->backend = be_lun->be; 4252 be_lun->ctl_lun = lun; 4253 be_lun->lun_id = lun_number; 4254 atomic_add_int(&be_lun->be->num_luns, 1); 4255 if (be_lun->flags & CTL_LUN_FLAG_POWERED_OFF) 4256 lun->flags |= CTL_LUN_STOPPED; 4257 4258 if (be_lun->flags & CTL_LUN_FLAG_INOPERABLE) 4259 lun->flags |= CTL_LUN_INOPERABLE; 4260 4261 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4262 lun->flags |= CTL_LUN_PRIMARY_SC; 4263 4264 lun->ctl_softc = ctl_softc; 4265 TAILQ_INIT(&lun->ooa_queue); 4266 TAILQ_INIT(&lun->blocked_queue); 4267 STAILQ_INIT(&lun->error_list); 4268 4269 /* 4270 * Initialize the mode page index. 4271 */ 4272 ctl_init_page_index(lun); 4273 4274 /* 4275 * Set the poweron UA for all initiators on this LUN only. 4276 */ 4277 for (i = 0; i < CTL_MAX_INITIATORS; i++) 4278 lun->pending_sense[i].ua_pending = CTL_UA_POWERON; 4279 4280 /* 4281 * Now, before we insert this lun on the lun list, set the lun 4282 * inventory changed UA for all other luns. 
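 * (Those initiators should then see a unit attention on their next command to the other LUNs, presumably surfaced as REPORTED LUNS DATA HAS CHANGED sense data when the UA is delivered.)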
4283 */ 4284 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4285 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 4286 nlun->pending_sense[i].ua_pending |= CTL_UA_LUN_CHANGE; 4287 } 4288 } 4289 4290 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4291 4292 ctl_softc->ctl_luns[lun_number] = lun; 4293 4294 ctl_softc->num_luns++; 4295 4296 /* Setup statistics gathering */ 4297 lun->stats.device_type = be_lun->lun_type; 4298 lun->stats.lun_number = lun_number; 4299 if (lun->stats.device_type == T_DIRECT) 4300 lun->stats.blocksize = be_lun->blocksize; 4301 else 4302 lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE; 4303 for (i = 0;i < CTL_MAX_PORTS;i++) 4304 lun->stats.ports[i].targ_port = i; 4305 4306 mtx_unlock(&ctl_softc->ctl_lock); 4307 4308 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); 4309 4310 /* 4311 * Run through each registered FETD and bring it online if it isn't 4312 * already. Enable the target ID if it hasn't been enabled, and 4313 * enable this particular LUN. 4314 */ 4315 STAILQ_FOREACH(fe, &ctl_softc->fe_list, links) { 4316 int retval; 4317 4318 /* 4319 * XXX KDM this only works for ONE TARGET ID. We'll need 4320 * to do things differently if we go to a multiple target 4321 * ID scheme. 4322 */ 4323 if ((fe->status & CTL_PORT_STATUS_TARG_ONLINE) == 0) { 4324 4325 retval = fe->targ_enable(fe->targ_lun_arg, target_id); 4326 if (retval != 0) { 4327 printf("ctl_alloc_lun: FETD %s port %d " 4328 "returned error %d for targ_enable on " 4329 "target %ju\n", fe->port_name, 4330 fe->targ_port, retval, 4331 (uintmax_t)target_id.id); 4332 } else 4333 fe->status |= CTL_PORT_STATUS_TARG_ONLINE; 4334 } 4335 4336 retval = fe->lun_enable(fe->targ_lun_arg, target_id,lun_number); 4337 if (retval != 0) { 4338 printf("ctl_alloc_lun: FETD %s port %d returned error " 4339 "%d for lun_enable on target %ju lun %d\n", 4340 fe->port_name, fe->targ_port, retval, 4341 (uintmax_t)target_id.id, lun_number); 4342 } else 4343 fe->status |= CTL_PORT_STATUS_LUN_ONLINE; 4344 } 4345 return (0); 4346} 4347 4348/* 4349 * Delete a LUN. 4350 * Assumptions: 4351 * - LUN has already been marked invalid and any pending I/O has been taken 4352 * care of. 4353 */ 4354static int 4355ctl_free_lun(struct ctl_lun *lun) 4356{ 4357 struct ctl_softc *softc; 4358#if 0 4359 struct ctl_frontend *fe; 4360#endif 4361 struct ctl_lun *nlun; 4362 int i; 4363 4364 softc = lun->ctl_softc; 4365 4366 mtx_assert(&softc->ctl_lock, MA_OWNED); 4367 4368 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4369 4370 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4371 4372 softc->ctl_luns[lun->lun] = NULL; 4373 4374 if (!TAILQ_EMPTY(&lun->ooa_queue)) 4375 panic("Freeing a LUN %p with outstanding I/O!!\n", lun); 4376 4377 softc->num_luns--; 4378 4379 /* 4380 * XXX KDM this scheme only works for a single target/multiple LUN 4381 * setup. It needs to be revamped for a multiple target scheme. 4382 * 4383 * XXX KDM this results in fe->lun_disable() getting called twice, 4384 * once when ctl_disable_lun() is called, and a second time here. 4385 * We really need to re-think the LUN disable semantics. There 4386 * should probably be several steps/levels to LUN removal: 4387 * - disable 4388 * - invalidate 4389 * - free 4390 * 4391 * Right now we only have a disable method when communicating to 4392 * the front end ports, at least for individual LUNs. 
4393 */ 4394#if 0 4395 STAILQ_FOREACH(fe, &softc->fe_list, links) { 4396 int retval; 4397 4398 retval = fe->lun_disable(fe->targ_lun_arg, lun->target, 4399 lun->lun); 4400 if (retval != 0) { 4401 printf("ctl_free_lun: FETD %s port %d returned error " 4402 "%d for lun_disable on target %ju lun %jd\n", 4403 fe->port_name, fe->targ_port, retval, 4404 (uintmax_t)lun->target.id, (intmax_t)lun->lun); 4405 } 4406 4407 if (STAILQ_FIRST(&softc->lun_list) == NULL) { 4408 fe->status &= ~CTL_PORT_STATUS_LUN_ONLINE; 4409 4410 retval = fe->targ_disable(fe->targ_lun_arg, lun->target); 4411 if (retval != 0) { 4412 printf("ctl_free_lun: FETD %s port %d " 4413 "returned error %d for targ_disable on " 4414 "target %ju\n", fe->port_name, 4415 fe->targ_port, retval, 4416 (uintmax_t)lun->target.id); 4417 } else 4418 fe->status &= ~CTL_PORT_STATUS_TARG_ONLINE; 4419 4420 if ((fe->status & CTL_PORT_STATUS_TARG_ONLINE) != 0) 4421 continue; 4422 4423#if 0 4424 fe->port_offline(fe->onoff_arg); 4425 fe->status &= ~CTL_PORT_STATUS_ONLINE; 4426#endif 4427 } 4428 } 4429#endif 4430 4431 /* 4432 * Tell the backend to free resources, if this LUN has a backend. 4433 */ 4434 atomic_subtract_int(&lun->be_lun->be->num_luns, 1); 4435 lun->be_lun->lun_shutdown(lun->be_lun->be_lun); 4436 4437 mtx_destroy(&lun->lun_lock); 4438 if (lun->flags & CTL_LUN_MALLOCED) 4439 free(lun, M_CTL); 4440 4441 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4442 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 4443 nlun->pending_sense[i].ua_pending |= CTL_UA_LUN_CHANGE; 4444 } 4445 } 4446 4447 return (0); 4448} 4449 4450static void 4451ctl_create_lun(struct ctl_be_lun *be_lun) 4452{ 4453 struct ctl_softc *ctl_softc; 4454 4455 ctl_softc = control_softc; 4456 4457 /* 4458 * ctl_alloc_lun() should handle all potential failure cases. 4459 */ 4460 ctl_alloc_lun(ctl_softc, NULL, be_lun, ctl_softc->target); 4461} 4462 4463int 4464ctl_add_lun(struct ctl_be_lun *be_lun) 4465{ 4466 struct ctl_softc *ctl_softc = control_softc; 4467 4468 mtx_lock(&ctl_softc->ctl_lock); 4469 STAILQ_INSERT_TAIL(&ctl_softc->pending_lun_queue, be_lun, links); 4470 mtx_unlock(&ctl_softc->ctl_lock); 4471 wakeup(&ctl_softc->pending_lun_queue); 4472 4473 return (0); 4474} 4475 4476int 4477ctl_enable_lun(struct ctl_be_lun *be_lun) 4478{ 4479 struct ctl_softc *ctl_softc; 4480 struct ctl_frontend *fe, *nfe; 4481 struct ctl_lun *lun; 4482 int retval; 4483 4484 ctl_softc = control_softc; 4485 4486 lun = (struct ctl_lun *)be_lun->ctl_lun; 4487 4488 mtx_lock(&ctl_softc->ctl_lock); 4489 mtx_lock(&lun->lun_lock); 4490 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4491 /* 4492 * eh? Why did we get called if the LUN is already 4493 * enabled? 4494 */ 4495 mtx_unlock(&lun->lun_lock); 4496 mtx_unlock(&ctl_softc->ctl_lock); 4497 return (0); 4498 } 4499 lun->flags &= ~CTL_LUN_DISABLED; 4500 mtx_unlock(&lun->lun_lock); 4501 4502 for (fe = STAILQ_FIRST(&ctl_softc->fe_list); fe != NULL; fe = nfe) { 4503 nfe = STAILQ_NEXT(fe, links); 4504 4505 /* 4506 * Drop the lock while we call the FETD's enable routine. 4507 * This can lead to a callback into CTL (at least in the 4508 * case of the internal initiator frontend).
4509 */ 4510 mtx_unlock(&ctl_softc->ctl_lock); 4511 retval = fe->lun_enable(fe->targ_lun_arg, lun->target, lun->lun); 4512 mtx_lock(&ctl_softc->ctl_lock); 4513 if (retval != 0) { 4514 printf("%s: FETD %s port %d returned error " 4515 "%d for lun_enable on target %ju lun %jd\n", 4516 __func__, fe->port_name, fe->targ_port, retval, 4517 (uintmax_t)lun->target.id, (intmax_t)lun->lun); 4518 } 4519#if 0 4520 else { 4521 /* NOTE: TODO: why does lun enable affect port status? */ 4522 fe->status |= CTL_PORT_STATUS_LUN_ONLINE; 4523 } 4524#endif 4525 } 4526 4527 mtx_unlock(&ctl_softc->ctl_lock); 4528 4529 return (0); 4530} 4531 4532int 4533ctl_disable_lun(struct ctl_be_lun *be_lun) 4534{ 4535 struct ctl_softc *ctl_softc; 4536 struct ctl_frontend *fe; 4537 struct ctl_lun *lun; 4538 int retval; 4539 4540 ctl_softc = control_softc; 4541 4542 lun = (struct ctl_lun *)be_lun->ctl_lun; 4543 4544 mtx_lock(&ctl_softc->ctl_lock); 4545 mtx_lock(&lun->lun_lock); 4546 if (lun->flags & CTL_LUN_DISABLED) { 4547 mtx_unlock(&lun->lun_lock); 4548 mtx_unlock(&ctl_softc->ctl_lock); 4549 return (0); 4550 } 4551 lun->flags |= CTL_LUN_DISABLED; 4552 mtx_unlock(&lun->lun_lock); 4553 4554 STAILQ_FOREACH(fe, &ctl_softc->fe_list, links) { 4555 mtx_unlock(&ctl_softc->ctl_lock); 4556 /* 4557 * Drop the lock before we call the frontend's disable 4558 * routine, to avoid lock order reversals. 4559 * 4560 * XXX KDM what happens if the frontend list changes while 4561 * we're traversing it? It's unlikely, but should be handled. 4562 */ 4563 retval = fe->lun_disable(fe->targ_lun_arg, lun->target, 4564 lun->lun); 4565 mtx_lock(&ctl_softc->ctl_lock); 4566 if (retval != 0) { 4567 printf("ctl_disable_lun: FETD %s port %d returned error " 4568 "%d for lun_disable on target %ju lun %jd\n", 4569 fe->port_name, fe->targ_port, retval, 4570 (uintmax_t)lun->target.id, (intmax_t)lun->lun); 4571 } 4572 } 4573 4574 mtx_unlock(&ctl_softc->ctl_lock); 4575 4576 return (0); 4577} 4578 4579int 4580ctl_start_lun(struct ctl_be_lun *be_lun) 4581{ 4582 struct ctl_softc *ctl_softc; 4583 struct ctl_lun *lun; 4584 4585 ctl_softc = control_softc; 4586 4587 lun = (struct ctl_lun *)be_lun->ctl_lun; 4588 4589 mtx_lock(&lun->lun_lock); 4590 lun->flags &= ~CTL_LUN_STOPPED; 4591 mtx_unlock(&lun->lun_lock); 4592 4593 return (0); 4594} 4595 4596int 4597ctl_stop_lun(struct ctl_be_lun *be_lun) 4598{ 4599 struct ctl_softc *ctl_softc; 4600 struct ctl_lun *lun; 4601 4602 ctl_softc = control_softc; 4603 4604 lun = (struct ctl_lun *)be_lun->ctl_lun; 4605 4606 mtx_lock(&lun->lun_lock); 4607 lun->flags |= CTL_LUN_STOPPED; 4608 mtx_unlock(&lun->lun_lock); 4609 4610 return (0); 4611} 4612 4613int 4614ctl_lun_offline(struct ctl_be_lun *be_lun) 4615{ 4616 struct ctl_softc *ctl_softc; 4617 struct ctl_lun *lun; 4618 4619 ctl_softc = control_softc; 4620 4621 lun = (struct ctl_lun *)be_lun->ctl_lun; 4622 4623 mtx_lock(&lun->lun_lock); 4624 lun->flags |= CTL_LUN_OFFLINE; 4625 mtx_unlock(&lun->lun_lock); 4626 4627 return (0); 4628} 4629 4630int 4631ctl_lun_online(struct ctl_be_lun *be_lun) 4632{ 4633 struct ctl_softc *ctl_softc; 4634 struct ctl_lun *lun; 4635 4636 ctl_softc = control_softc; 4637 4638 lun = (struct ctl_lun *)be_lun->ctl_lun; 4639 4640 mtx_lock(&lun->lun_lock); 4641 lun->flags &= ~CTL_LUN_OFFLINE; 4642 mtx_unlock(&lun->lun_lock); 4643 4644 return (0); 4645} 4646 4647int 4648ctl_invalidate_lun(struct ctl_be_lun *be_lun) 4649{ 4650 struct ctl_softc *ctl_softc; 4651 struct ctl_lun *lun; 4652 4653 ctl_softc = control_softc; 4654 4655 lun = (struct ctl_lun *)be_lun->ctl_lun;
4656 4657 mtx_lock(&lun->lun_lock); 4658 4659 /* 4660 * The LUN needs to be disabled before it can be marked invalid. 4661 */ 4662 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4663 mtx_unlock(&lun->lun_lock); 4664 return (-1); 4665 } 4666 /* 4667 * Mark the LUN invalid. 4668 */ 4669 lun->flags |= CTL_LUN_INVALID; 4670 4671 /* 4672 * If there is nothing in the OOA queue, go ahead and free the LUN. 4673 * If we have something in the OOA queue, we'll free it when the 4674 * last I/O completes. 4675 */ 4676 if (TAILQ_EMPTY(&lun->ooa_queue)) { 4677 mtx_unlock(&lun->lun_lock); 4678 mtx_lock(&ctl_softc->ctl_lock); 4679 ctl_free_lun(lun); 4680 mtx_unlock(&ctl_softc->ctl_lock); 4681 } else 4682 mtx_unlock(&lun->lun_lock); 4683 4684 return (0); 4685} 4686 4687int 4688ctl_lun_inoperable(struct ctl_be_lun *be_lun) 4689{ 4690 struct ctl_softc *ctl_softc; 4691 struct ctl_lun *lun; 4692 4693 ctl_softc = control_softc; 4694 lun = (struct ctl_lun *)be_lun->ctl_lun; 4695 4696 mtx_lock(&lun->lun_lock); 4697 lun->flags |= CTL_LUN_INOPERABLE; 4698 mtx_unlock(&lun->lun_lock); 4699 4700 return (0); 4701} 4702 4703int 4704ctl_lun_operable(struct ctl_be_lun *be_lun) 4705{ 4706 struct ctl_softc *ctl_softc; 4707 struct ctl_lun *lun; 4708 4709 ctl_softc = control_softc; 4710 lun = (struct ctl_lun *)be_lun->ctl_lun; 4711 4712 mtx_lock(&lun->lun_lock); 4713 lun->flags &= ~CTL_LUN_INOPERABLE; 4714 mtx_unlock(&lun->lun_lock); 4715 4716 return (0); 4717} 4718 4719int 4720ctl_lun_power_lock(struct ctl_be_lun *be_lun, struct ctl_nexus *nexus, 4721 int lock) 4722{ 4723 struct ctl_softc *softc; 4724 struct ctl_lun *lun; 4725 struct copan_aps_subpage *current_sp; 4726 struct ctl_page_index *page_index; 4727 int i; 4728 4729 softc = control_softc; 4730 4731 mtx_lock(&softc->ctl_lock); 4732 4733 lun = (struct ctl_lun *)be_lun->ctl_lun; 4734 mtx_lock(&lun->lun_lock); 4735 4736 page_index = NULL; 4737 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 4738 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) != 4739 APS_PAGE_CODE) 4740 continue; 4741 4742 if (lun->mode_pages.index[i].subpage != APS_SUBPAGE_CODE) 4743 continue; 4744 page_index = &lun->mode_pages.index[i]; 4745 } 4746 4747 if (page_index == NULL) { 4748 mtx_unlock(&lun->lun_lock); 4749 mtx_unlock(&softc->ctl_lock); 4750 printf("%s: APS subpage not found for lun %ju!\n", __func__, 4751 (uintmax_t)lun->lun); 4752 return (1); 4753 } 4754#if 0 4755 if ((softc->aps_locked_lun != 0) 4756 && (softc->aps_locked_lun != lun->lun)) { 4757 printf("%s: attempt to lock LUN %llu when %llu is already " 4758 "locked\n", __func__, (unsigned long long)lun->lun, (unsigned long long)softc->aps_locked_lun); 4759 mtx_unlock(&lun->lun_lock); 4760 mtx_unlock(&softc->ctl_lock); 4761 return (1); 4762 } 4763#endif 4764 4765 current_sp = (struct copan_aps_subpage *)(page_index->page_data + 4766 (page_index->page_len * CTL_PAGE_CURRENT)); 4767 4768 if (lock != 0) { 4769 current_sp->lock_active = APS_LOCK_ACTIVE; 4770 softc->aps_locked_lun = lun->lun; 4771 } else { 4772 current_sp->lock_active = 0; 4773 softc->aps_locked_lun = 0; 4774 } 4775 4776 4777 /* 4778 * If we're in HA mode, try to send the lock message to the other 4779 * side.
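 * (The nexus argument is only used here, to address the HA message; a backend caller would look something like ctl_lun_power_lock(be_lun, &ctsio->io_hdr.nexus, 1) -- an illustrative call, not a prescription.)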
4780 */ 4781 if (ctl_is_single == 0) { 4782 int isc_retval; 4783 union ctl_ha_msg lock_msg; 4784 4785 lock_msg.hdr.nexus = *nexus; 4786 lock_msg.hdr.msg_type = CTL_MSG_APS_LOCK; 4787 if (lock != 0) 4788 lock_msg.aps.lock_flag = 1; 4789 else 4790 lock_msg.aps.lock_flag = 0; 4791 isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &lock_msg, 4792 sizeof(lock_msg), 0); 4793 if (isc_retval > CTL_HA_STATUS_SUCCESS) { 4794 printf("%s: APS (lock=%d) error returned from " 4795 "ctl_ha_msg_send: %d\n", __func__, lock, isc_retval); 4796 mtx_unlock(&lun->lun_lock); 4797 mtx_unlock(&softc->ctl_lock); 4798 return (1); 4799 } 4800 } 4801 4802 mtx_unlock(&lun->lun_lock); 4803 mtx_unlock(&softc->ctl_lock); 4804 4805 return (0); 4806} 4807 4808void 4809ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 4810{ 4811 struct ctl_lun *lun; 4812 struct ctl_softc *softc; 4813 int i; 4814 4815 softc = control_softc; 4816 4817 lun = (struct ctl_lun *)be_lun->ctl_lun; 4818 4819 mtx_lock(&lun->lun_lock); 4820 4821 for (i = 0; i < CTL_MAX_INITIATORS; i++) 4822 lun->pending_sense[i].ua_pending |= CTL_UA_CAPACITY_CHANGED; 4823 4824 mtx_unlock(&lun->lun_lock); 4825} 4826 4827/* 4828 * Backend "memory move is complete" callback for requests that never 4829 * make it down to say RAIDCore's configuration code. 4830 */ 4831int 4832ctl_config_move_done(union ctl_io *io) 4833{ 4834 int retval; 4835 4836 retval = CTL_RETVAL_COMPLETE; 4837 4838 4839 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 4840 /* 4841 * XXX KDM this shouldn't happen, but what if it does? 4842 */ 4843 if (io->io_hdr.io_type != CTL_IO_SCSI) 4844 panic("I/O type isn't CTL_IO_SCSI!"); 4845 4846 if ((io->io_hdr.port_status == 0) 4847 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) 4848 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) 4849 io->io_hdr.status = CTL_SUCCESS; 4850 else if ((io->io_hdr.port_status != 0) 4851 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) 4852 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)){ 4853 /* 4854 * For hardware error sense keys, the sense key 4855 * specific value is defined to be a retry count, 4856 * but we use it to pass back an internal FETD 4857 * error code. XXX KDM Hopefully the FETD is only 4858 * using 16 bits for an error code, since that's 4859 * all the space we have in the sks field. 4860 */ 4861 ctl_set_internal_failure(&io->scsiio, 4862 /*sks_valid*/ 1, 4863 /*retry_count*/ 4864 io->io_hdr.port_status); 4865 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 4866 free(io->scsiio.kern_data_ptr, M_CTL); 4867 ctl_done(io); 4868 goto bailout; 4869 } 4870 4871 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) 4872 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) 4873 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 4874 /* 4875 * XXX KDM just assuming a single pointer here, and not a 4876 * S/G list. If we start using S/G lists for config data, 4877 * we'll need to know how to clean them up here as well. 4878 */ 4879 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 4880 free(io->scsiio.kern_data_ptr, M_CTL); 4881 /* Hopefully the user has already set the status... */ 4882 ctl_done(io); 4883 } else { 4884 /* 4885 * XXX KDM now we need to continue data movement. Some 4886 * options: 4887 * - call ctl_scsiio() again? We don't do this for data 4888 * writes, because for those at least we know ahead of 4889 * time where the write will go and how long it is. 
For 4890 * config writes, though, that information is largely 4891 * contained within the write itself, thus we need to 4892 * parse out the data again. 4893 * 4894 * - Call some other function once the data is in? 4895 */ 4896 4897 /* 4898 * XXX KDM call ctl_scsiio() again for now, and check flag 4899 * bits to see whether we're allocated or not. 4900 */ 4901 retval = ctl_scsiio(&io->scsiio); 4902 } 4903bailout: 4904 return (retval); 4905} 4906 4907/* 4908 * This gets called by a backend driver when it is done with a 4909 * data_submit method. 4910 */ 4911void 4912ctl_data_submit_done(union ctl_io *io) 4913{ 4914 /* 4915 * If the IO_CONT flag is set, we need to call the supplied 4916 * function to continue processing the I/O, instead of completing 4917 * the I/O just yet. 4918 * 4919 * If there is an error, though, we don't want to keep processing. 4920 * Instead, just send status back to the initiator. 4921 */ 4922 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 4923 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 4924 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 4925 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 4926 io->scsiio.io_cont(io); 4927 return; 4928 } 4929 ctl_done(io); 4930} 4931 4932/* 4933 * This gets called by a backend driver when it is done with a 4934 * configuration write. 4935 */ 4936void 4937ctl_config_write_done(union ctl_io *io) 4938{ 4939 /* 4940 * If the IO_CONT flag is set, we need to call the supplied 4941 * function to continue processing the I/O, instead of completing 4942 * the I/O just yet. 4943 * 4944 * If there is an error, though, we don't want to keep processing. 4945 * Instead, just send status back to the initiator. 4946 */ 4947 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) 4948 && (((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE) 4949 || ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS))) { 4950 io->scsiio.io_cont(io); 4951 return; 4952 } 4953 /* 4954 * Since a configuration write can be done for commands that actually 4955 * have data allocated, like write buffer, and commands that have 4956 * no data, like start/stop unit, we need to check here. 4957 */ 4958 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) 4959 free(io->scsiio.kern_data_ptr, M_CTL); 4960 ctl_done(io); 4961} 4962 4963/* 4964 * SCSI release command. 
4965 */ 4966int 4967ctl_scsi_release(struct ctl_scsiio *ctsio) 4968{ 4969 int length, longid, thirdparty_id, resv_id; 4970 struct ctl_softc *ctl_softc; 4971 struct ctl_lun *lun; 4972 4973 length = 0; 4974 resv_id = 0; 4975 4976 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 4977 4978 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 4979 ctl_softc = control_softc; 4980 4981 switch (ctsio->cdb[0]) { 4982 case RELEASE: { 4983 struct scsi_release *cdb; 4984 4985 cdb = (struct scsi_release *)ctsio->cdb; 4986 if ((cdb->byte2 & 0x1f) != 0) { 4987 ctl_set_invalid_field(ctsio, 4988 /*sks_valid*/ 1, 4989 /*command*/ 1, 4990 /*field*/ 1, 4991 /*bit_valid*/ 0, 4992 /*bit*/ 0); 4993 ctl_done((union ctl_io *)ctsio); 4994 return (CTL_RETVAL_COMPLETE); 4995 } 4996 break; 4997 } 4998 case RELEASE_10: { 4999 struct scsi_release_10 *cdb; 5000 5001 cdb = (struct scsi_release_10 *)ctsio->cdb; 5002 5003 if ((cdb->byte2 & SR10_EXTENT) != 0) { 5004 ctl_set_invalid_field(ctsio, 5005 /*sks_valid*/ 1, 5006 /*command*/ 1, 5007 /*field*/ 1, 5008 /*bit_valid*/ 1, 5009 /*bit*/ 0); 5010 ctl_done((union ctl_io *)ctsio); 5011 return (CTL_RETVAL_COMPLETE); 5012 5013 } 5014 5015 if ((cdb->byte2 & SR10_3RDPTY) != 0) { 5016 ctl_set_invalid_field(ctsio, 5017 /*sks_valid*/ 1, 5018 /*command*/ 1, 5019 /*field*/ 1, 5020 /*bit_valid*/ 1, 5021 /*bit*/ 4); 5022 ctl_done((union ctl_io *)ctsio); 5023 return (CTL_RETVAL_COMPLETE); 5024 } 5025 5026 if (cdb->byte2 & SR10_LONGID) 5027 longid = 1; 5028 else 5029 thirdparty_id = cdb->thirdparty_id; 5030 5031 resv_id = cdb->resv_id; 5032 length = scsi_2btoul(cdb->length); 5033 break; 5034 } 5035 } 5036 5037 5038 /* 5039 * XXX KDM right now, we only support LUN reservation. We don't 5040 * support 3rd party reservations, or extent reservations, which 5041 * might actually need the parameter list. If we've gotten this 5042 * far, we've got a LUN reservation. Anything else got kicked out 5043 * above. So, according to SPC, ignore the length. 5044 */ 5045 length = 0; 5046 5047 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5048 && (length > 0)) { 5049 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5050 ctsio->kern_data_len = length; 5051 ctsio->kern_total_len = length; 5052 ctsio->kern_data_resid = 0; 5053 ctsio->kern_rel_offset = 0; 5054 ctsio->kern_sg_entries = 0; 5055 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5056 ctsio->be_move_done = ctl_config_move_done; 5057 ctl_datamove((union ctl_io *)ctsio); 5058 5059 return (CTL_RETVAL_COMPLETE); 5060 } 5061 5062 if (length > 0) 5063 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 5064 5065 mtx_lock(&lun->lun_lock); 5066 5067 /* 5068 * According to SPC, it is not an error for an initiator to attempt 5069 * to release a reservation on a LUN that isn't reserved, or that 5070 * is reserved by another initiator. The reservation can only be 5071 * released, though, by the initiator who made it or by one of 5072 * several reset type events.
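 * (So, for example, a RELEASE from an initiator that never made the reservation simply gets GOOD status below and leaves the reservation in place.)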
5073 */ 5074 if (lun->flags & CTL_LUN_RESERVED) { 5075 if ((ctsio->io_hdr.nexus.initid.id == lun->rsv_nexus.initid.id) 5076 && (ctsio->io_hdr.nexus.targ_port == lun->rsv_nexus.targ_port) 5077 && (ctsio->io_hdr.nexus.targ_target.id == 5078 lun->rsv_nexus.targ_target.id)) { 5079 lun->flags &= ~CTL_LUN_RESERVED; 5080 } 5081 } 5082 5083 mtx_unlock(&lun->lun_lock); 5084 5085 ctsio->scsi_status = SCSI_STATUS_OK; 5086 ctsio->io_hdr.status = CTL_SUCCESS; 5087 5088 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5089 free(ctsio->kern_data_ptr, M_CTL); 5090 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5091 } 5092 5093 ctl_done((union ctl_io *)ctsio); 5094 return (CTL_RETVAL_COMPLETE); 5095} 5096 5097int 5098ctl_scsi_reserve(struct ctl_scsiio *ctsio) 5099{ 5100 int extent, thirdparty, longid; 5101 int resv_id, length; 5102 uint64_t thirdparty_id; 5103 struct ctl_softc *ctl_softc; 5104 struct ctl_lun *lun; 5105 5106 extent = 0; 5107 thirdparty = 0; 5108 longid = 0; 5109 resv_id = 0; 5110 length = 0; 5111 thirdparty_id = 0; 5112 5113 CTL_DEBUG_PRINT(("ctl_reserve\n")); 5114 5115 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5116 ctl_softc = control_softc; 5117 5118 switch (ctsio->cdb[0]) { 5119 case RESERVE: { 5120 struct scsi_reserve *cdb; 5121 5122 cdb = (struct scsi_reserve *)ctsio->cdb; 5123 if ((cdb->byte2 & 0x1f) != 0) { 5124 ctl_set_invalid_field(ctsio, 5125 /*sks_valid*/ 1, 5126 /*command*/ 1, 5127 /*field*/ 1, 5128 /*bit_valid*/ 0, 5129 /*bit*/ 0); 5130 ctl_done((union ctl_io *)ctsio); 5131 return (CTL_RETVAL_COMPLETE); 5132 } 5133 resv_id = cdb->resv_id; 5134 length = scsi_2btoul(cdb->length); 5135 break; 5136 } 5137 case RESERVE_10: { 5138 struct scsi_reserve_10 *cdb; 5139 5140 cdb = (struct scsi_reserve_10 *)ctsio->cdb; 5141 5142 if ((cdb->byte2 & SR10_EXTENT) != 0) { 5143 ctl_set_invalid_field(ctsio, 5144 /*sks_valid*/ 1, 5145 /*command*/ 1, 5146 /*field*/ 1, 5147 /*bit_valid*/ 1, 5148 /*bit*/ 0); 5149 ctl_done((union ctl_io *)ctsio); 5150 return (CTL_RETVAL_COMPLETE); 5151 } 5152 if ((cdb->byte2 & SR10_3RDPTY) != 0) { 5153 ctl_set_invalid_field(ctsio, 5154 /*sks_valid*/ 1, 5155 /*command*/ 1, 5156 /*field*/ 1, 5157 /*bit_valid*/ 1, 5158 /*bit*/ 4); 5159 ctl_done((union ctl_io *)ctsio); 5160 return (CTL_RETVAL_COMPLETE); 5161 } 5162 if (cdb->byte2 & SR10_LONGID) 5163 longid = 1; 5164 else 5165 thirdparty_id = cdb->thirdparty_id; 5166 5167 resv_id = cdb->resv_id; 5168 length = scsi_2btoul(cdb->length); 5169 break; 5170 } 5171 } 5172 5173 /* 5174 * XXX KDM right now, we only support LUN reservation. We don't 5175 * support 3rd party reservations, or extent reservations, which 5176 * might actually need the parameter list. If we've gotten this 5177 * far, we've got a LUN reservation. Anything else got kicked out 5178 * above. So, according to SPC, ignore the length. 
5179 */ 5180 length = 0; 5181 5182 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5183 && (length > 0)) { 5184 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5185 ctsio->kern_data_len = length; 5186 ctsio->kern_total_len = length; 5187 ctsio->kern_data_resid = 0; 5188 ctsio->kern_rel_offset = 0; 5189 ctsio->kern_sg_entries = 0; 5190 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5191 ctsio->be_move_done = ctl_config_move_done; 5192 ctl_datamove((union ctl_io *)ctsio); 5193 5194 return (CTL_RETVAL_COMPLETE); 5195 } 5196 5197 if (length > 0) 5198 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 5199 5200 mtx_lock(&lun->lun_lock); 5201 if (lun->flags & CTL_LUN_RESERVED) { 5202 if ((ctsio->io_hdr.nexus.initid.id != lun->rsv_nexus.initid.id) 5203 || (ctsio->io_hdr.nexus.targ_port != lun->rsv_nexus.targ_port) 5204 || (ctsio->io_hdr.nexus.targ_target.id != 5205 lun->rsv_nexus.targ_target.id)) { 5206 ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT; 5207 ctsio->io_hdr.status = CTL_SCSI_ERROR; 5208 goto bailout; 5209 } 5210 } 5211 5212 lun->flags |= CTL_LUN_RESERVED; 5213 lun->rsv_nexus = ctsio->io_hdr.nexus; 5214 5215 ctsio->scsi_status = SCSI_STATUS_OK; 5216 ctsio->io_hdr.status = CTL_SUCCESS; 5217 5218bailout: 5219 mtx_unlock(&lun->lun_lock); 5220 5221 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5222 free(ctsio->kern_data_ptr, M_CTL); 5223 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5224 } 5225 5226 ctl_done((union ctl_io *)ctsio); 5227 return (CTL_RETVAL_COMPLETE); 5228} 5229 5230int 5231ctl_start_stop(struct ctl_scsiio *ctsio) 5232{ 5233 struct scsi_start_stop_unit *cdb; 5234 struct ctl_lun *lun; 5235 struct ctl_softc *ctl_softc; 5236 int retval; 5237 5238 CTL_DEBUG_PRINT(("ctl_start_stop\n")); 5239 5240 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5241 ctl_softc = control_softc; 5242 retval = 0; 5243 5244 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; 5245 5246 /* 5247 * XXX KDM 5248 * We don't support the immediate bit on a stop unit. In order to 5249 * do that, we would need to code up a way to know that a stop is 5250 * pending, and hold off any new commands until it completes, one 5251 * way or another. Then we could accept or reject those commands 5252 * depending on its status. We would almost need to do the reverse 5253 * of what we do below for an immediate start -- return the copy of 5254 * the ctl_io to the FETD with status to send to the host (and to 5255 * free the copy!) and then free the original I/O once the stop 5256 * actually completes. That way, the OOA queue mechanism can work 5257 * to block commands that shouldn't proceed. Another alternative 5258 * would be to put the copy in the queue in place of the original, 5259 * and return the original back to the caller. That could be 5260 * slightly safer. 5261 */ 5262 if ((cdb->byte2 & SSS_IMMED) 5263 && ((cdb->how & SSS_START) == 0)) { 5264 ctl_set_invalid_field(ctsio, 5265 /*sks_valid*/ 1, 5266 /*command*/ 1, 5267 /*field*/ 1, 5268 /*bit_valid*/ 1, 5269 /*bit*/ 0); 5270 ctl_done((union ctl_io *)ctsio); 5271 return (CTL_RETVAL_COMPLETE); 5272 } 5273 5274 /* 5275 * We don't support the power conditions field. We need to check 5276 * this prior to checking the load/eject and start/stop bits.
5277 */ 5278 if ((cdb->how & SSS_PC_MASK) != SSS_PC_START_VALID) { 5279 ctl_set_invalid_field(ctsio, 5280 /*sks_valid*/ 1, 5281 /*command*/ 1, 5282 /*field*/ 4, 5283 /*bit_valid*/ 1, 5284 /*bit*/ 4); 5285 ctl_done((union ctl_io *)ctsio); 5286 return (CTL_RETVAL_COMPLETE); 5287 } 5288 5289 /* 5290 * Media isn't removable, so we can't load or eject it. 5291 */ 5292 if ((cdb->how & SSS_LOEJ) != 0) { 5293 ctl_set_invalid_field(ctsio, 5294 /*sks_valid*/ 1, 5295 /*command*/ 1, 5296 /*field*/ 4, 5297 /*bit_valid*/ 1, 5298 /*bit*/ 1); 5299 ctl_done((union ctl_io *)ctsio); 5300 return (CTL_RETVAL_COMPLETE); 5301 } 5302 5303 if ((lun->flags & CTL_LUN_PR_RESERVED) 5304 && ((cdb->how & SSS_START)==0)) { 5305 uint32_t residx; 5306 5307 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 5308 if (!lun->per_res[residx].registered 5309 || (lun->pr_res_idx!=residx && lun->res_type < 4)) { 5310 5311 ctl_set_reservation_conflict(ctsio); 5312 ctl_done((union ctl_io *)ctsio); 5313 return (CTL_RETVAL_COMPLETE); 5314 } 5315 } 5316 5317 /* 5318 * If there is no backend on this device, we can't start or stop 5319 * it. In theory we shouldn't get any start/stop commands in the 5320 * first place at this level if the LUN doesn't have a backend. 5321 * That should get stopped by the command decode code. 5322 */ 5323 if (lun->backend == NULL) { 5324 ctl_set_invalid_opcode(ctsio); 5325 ctl_done((union ctl_io *)ctsio); 5326 return (CTL_RETVAL_COMPLETE); 5327 } 5328 5329 /* 5330 * XXX KDM Copan-specific offline behavior. 5331 * Figure out a reasonable way to port this? 5332 */ 5333#ifdef NEEDTOPORT 5334 mtx_lock(&lun->lun_lock); 5335 5336 if (((cdb->byte2 & SSS_ONOFFLINE) == 0) 5337 && (lun->flags & CTL_LUN_OFFLINE)) { 5338 /* 5339 * If the LUN is offline, and the on/offline bit isn't set, 5340 * reject the start or stop. Otherwise, let it through. 5341 */ 5342 mtx_unlock(&lun->lun_lock); 5343 ctl_set_lun_not_ready(ctsio); 5344 ctl_done((union ctl_io *)ctsio); 5345 } else { 5346 mtx_unlock(&lun->lun_lock); 5347#endif /* NEEDTOPORT */ 5348 /* 5349 * This could be a start or a stop when we're online, 5350 * or a stop/offline or start/online. A start or stop when 5351 * we're offline is covered in the case above. 5352 */ 5353 /* 5354 * In the non-immediate case, we send the request to 5355 * the backend and return status to the user when 5356 * it is done. 5357 * 5358 * In the immediate case, we allocate a new ctl_io 5359 * to hold a copy of the request, and send that to 5360 * the backend. We then set good status on the 5361 * user's request and return it immediately. 5362 */ 5363 if (cdb->byte2 & SSS_IMMED) { 5364 union ctl_io *new_io; 5365 5366 new_io = ctl_alloc_io(ctsio->io_hdr.pool); 5367 if (new_io == NULL) { 5368 ctl_set_busy(ctsio); 5369 ctl_done((union ctl_io *)ctsio); 5370 } else { 5371 ctl_copy_io((union ctl_io *)ctsio, 5372 new_io); 5373 retval = lun->backend->config_write(new_io); 5374 ctl_set_success(ctsio); 5375 ctl_done((union ctl_io *)ctsio); 5376 } 5377 } else { 5378 retval = lun->backend->config_write( 5379 (union ctl_io *)ctsio); 5380 } 5381#ifdef NEEDTOPORT 5382 } 5383#endif 5384 return (retval); 5385} 5386 5387/* 5388 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 5389 * we don't really do anything with the LBA and length fields if the user 5390 * passes them in. Instead we'll just flush out the cache for the entire 5391 * LUN. 
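 * (e.g. a SYNCHRONIZE CACHE(10) naming LBA 0x1000 for 16 blocks still flushes every dirty block on the LUN, not just that range.)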
5392 */ 5393int 5394ctl_sync_cache(struct ctl_scsiio *ctsio) 5395{ 5396 struct ctl_lun *lun; 5397 struct ctl_softc *ctl_softc; 5398 uint64_t starting_lba; 5399 uint32_t block_count; 5400 int reladr, immed; 5401 int retval; 5402 5403 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 5404 5405 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5406 ctl_softc = control_softc; 5407 retval = 0; 5408 reladr = 0; 5409 immed = 0; 5410 5411 switch (ctsio->cdb[0]) { 5412 case SYNCHRONIZE_CACHE: { 5413 struct scsi_sync_cache *cdb; 5414 cdb = (struct scsi_sync_cache *)ctsio->cdb; 5415 5416 if (cdb->byte2 & SSC_RELADR) 5417 reladr = 1; 5418 5419 if (cdb->byte2 & SSC_IMMED) 5420 immed = 1; 5421 5422 starting_lba = scsi_4btoul(cdb->begin_lba); 5423 block_count = scsi_2btoul(cdb->lb_count); 5424 break; 5425 } 5426 case SYNCHRONIZE_CACHE_16: { 5427 struct scsi_sync_cache_16 *cdb; 5428 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5429 5430 if (cdb->byte2 & SSC_RELADR) 5431 reladr = 1; 5432 5433 if (cdb->byte2 & SSC_IMMED) 5434 immed = 1; 5435 5436 starting_lba = scsi_8btou64(cdb->begin_lba); 5437 block_count = scsi_4btoul(cdb->lb_count); 5438 break; 5439 } 5440 default: 5441 ctl_set_invalid_opcode(ctsio); 5442 ctl_done((union ctl_io *)ctsio); 5443 goto bailout; 5444 break; /* NOTREACHED */ 5445 } 5446 5447 if (immed) { 5448 /* 5449 * We don't support the immediate bit. Since it's in the 5450 * same place for the 10 and 16 byte SYNCHRONIZE CACHE 5451 * commands, we can just return the same error in either 5452 * case. 5453 */ 5454 ctl_set_invalid_field(ctsio, 5455 /*sks_valid*/ 1, 5456 /*command*/ 1, 5457 /*field*/ 1, 5458 /*bit_valid*/ 1, 5459 /*bit*/ 1); 5460 ctl_done((union ctl_io *)ctsio); 5461 goto bailout; 5462 } 5463 5464 if (reladr) { 5465 /* 5466 * We don't support the reladr bit either. It can only be 5467 * used with linked commands, and we don't support linked 5468 * commands. Since the bit is in the same place for the 5469 * 10 and 16 byte SYNCHRONIZE CACHE commands, we can 5470 * just return the same error in either case. 5471 */ 5472 ctl_set_invalid_field(ctsio, 5473 /*sks_valid*/ 1, 5474 /*command*/ 1, 5475 /*field*/ 1, 5476 /*bit_valid*/ 1, 5477 /*bit*/ 0); 5478 ctl_done((union ctl_io *)ctsio); 5479 goto bailout; 5480 } 5481 5482 /* 5483 * We check the LBA and length, but don't do anything with them. 5484 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5485 * get flushed. This check will just help satisfy anyone who wants 5486 * to see an error for an out of range LBA. 5487 */ 5488 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5489 ctl_set_lba_out_of_range(ctsio); 5490 ctl_done((union ctl_io *)ctsio); 5491 goto bailout; 5492 } 5493 5494 /* 5495 * If this LUN has no backend, we can't flush the cache anyway. 5496 */ 5497 if (lun->backend == NULL) { 5498 ctl_set_invalid_opcode(ctsio); 5499 ctl_done((union ctl_io *)ctsio); 5500 goto bailout; 5501 } 5502 5503 /* 5504 * Check to see whether we're configured to send the SYNCHRONIZE 5505 * CACHE command directly to the back end.
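 * (With CTL_FLAG_REAL_SYNC set, only every lun->sync_interval-th SYNCHRONIZE CACHE is passed through to the backend; the rest, and everything when the flag is clear, complete immediately with good status.)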
5506 */ 5507 mtx_lock(&lun->lun_lock); 5508 if ((ctl_softc->flags & CTL_FLAG_REAL_SYNC) 5509 && (++(lun->sync_count) >= lun->sync_interval)) { 5510 lun->sync_count = 0; 5511 mtx_unlock(&lun->lun_lock); 5512 retval = lun->backend->config_write((union ctl_io *)ctsio); 5513 } else { 5514 mtx_unlock(&lun->lun_lock); 5515 ctl_set_success(ctsio); 5516 ctl_done((union ctl_io *)ctsio); 5517 } 5518 5519bailout: 5520 5521 return (retval); 5522} 5523 5524int 5525ctl_format(struct ctl_scsiio *ctsio) 5526{ 5527 struct scsi_format *cdb; 5528 struct ctl_lun *lun; 5529 struct ctl_softc *ctl_softc; 5530 int length, defect_list_len; 5531 5532 CTL_DEBUG_PRINT(("ctl_format\n")); 5533 5534 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5535 ctl_softc = control_softc; 5536 5537 cdb = (struct scsi_format *)ctsio->cdb; 5538 5539 length = 0; 5540 if (cdb->byte2 & SF_FMTDATA) { 5541 if (cdb->byte2 & SF_LONGLIST) 5542 length = sizeof(struct scsi_format_header_long); 5543 else 5544 length = sizeof(struct scsi_format_header_short); 5545 } 5546 5547 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5548 && (length > 0)) { 5549 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5550 ctsio->kern_data_len = length; 5551 ctsio->kern_total_len = length; 5552 ctsio->kern_data_resid = 0; 5553 ctsio->kern_rel_offset = 0; 5554 ctsio->kern_sg_entries = 0; 5555 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5556 ctsio->be_move_done = ctl_config_move_done; 5557 ctl_datamove((union ctl_io *)ctsio); 5558 5559 return (CTL_RETVAL_COMPLETE); 5560 } 5561 5562 defect_list_len = 0; 5563 5564 if (cdb->byte2 & SF_FMTDATA) { 5565 if (cdb->byte2 & SF_LONGLIST) { 5566 struct scsi_format_header_long *header; 5567 5568 header = (struct scsi_format_header_long *) 5569 ctsio->kern_data_ptr; 5570 5571 defect_list_len = scsi_4btoul(header->defect_list_len); 5572 if (defect_list_len != 0) { 5573 ctl_set_invalid_field(ctsio, 5574 /*sks_valid*/ 1, 5575 /*command*/ 0, 5576 /*field*/ 2, 5577 /*bit_valid*/ 0, 5578 /*bit*/ 0); 5579 goto bailout; 5580 } 5581 } else { 5582 struct scsi_format_header_short *header; 5583 5584 header = (struct scsi_format_header_short *) 5585 ctsio->kern_data_ptr; 5586 5587 defect_list_len = scsi_2btoul(header->defect_list_len); 5588 if (defect_list_len != 0) { 5589 ctl_set_invalid_field(ctsio, 5590 /*sks_valid*/ 1, 5591 /*command*/ 0, 5592 /*field*/ 2, 5593 /*bit_valid*/ 0, 5594 /*bit*/ 0); 5595 goto bailout; 5596 } 5597 } 5598 } 5599 5600 /* 5601 * The format command will clear out the "Medium format corrupted" 5602 * status if set by the configuration code. That status is really 5603 * just a way to notify the host that we have lost the media, and 5604 * get them to issue a command that will basically make them think 5605 * they're blowing away the media. 
5606 */ 5607 mtx_lock(&lun->lun_lock); 5608 lun->flags &= ~CTL_LUN_INOPERABLE; 5609 mtx_unlock(&lun->lun_lock); 5610 5611 ctsio->scsi_status = SCSI_STATUS_OK; 5612 ctsio->io_hdr.status = CTL_SUCCESS; 5613bailout: 5614 5615 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5616 free(ctsio->kern_data_ptr, M_CTL); 5617 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5618 } 5619 5620 ctl_done((union ctl_io *)ctsio); 5621 return (CTL_RETVAL_COMPLETE); 5622} 5623 5624int 5625ctl_read_buffer(struct ctl_scsiio *ctsio) 5626{ 5627 struct scsi_read_buffer *cdb; 5628 struct ctl_lun *lun; 5629 int buffer_offset, len; 5630 static uint8_t descr[4]; 5631 static uint8_t echo_descr[4] = { 0 }; 5632 5633 CTL_DEBUG_PRINT(("ctl_read_buffer\n")); 5634 5635 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5636 cdb = (struct scsi_read_buffer *)ctsio->cdb; 5637 5638 if (lun->flags & CTL_LUN_PR_RESERVED) { 5639 uint32_t residx; 5640 5641 /* 5642 * XXX KDM need a lock here. 5643 */ 5644 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 5645 if ((lun->res_type == SPR_TYPE_EX_AC 5646 && residx != lun->pr_res_idx) 5647 || ((lun->res_type == SPR_TYPE_EX_AC_RO 5648 || lun->res_type == SPR_TYPE_EX_AC_AR) 5649 && !lun->per_res[residx].registered)) { 5650 ctl_set_reservation_conflict(ctsio); 5651 ctl_done((union ctl_io *)ctsio); 5652 return (CTL_RETVAL_COMPLETE); 5653 } 5654 } 5655 5656 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA && 5657 (cdb->byte2 & RWB_MODE) != RWB_MODE_ECHO_DESCR && 5658 (cdb->byte2 & RWB_MODE) != RWB_MODE_DESCR) { 5659 ctl_set_invalid_field(ctsio, 5660 /*sks_valid*/ 1, 5661 /*command*/ 1, 5662 /*field*/ 1, 5663 /*bit_valid*/ 1, 5664 /*bit*/ 4); 5665 ctl_done((union ctl_io *)ctsio); 5666 return (CTL_RETVAL_COMPLETE); 5667 } 5668 if (cdb->buffer_id != 0) { 5669 ctl_set_invalid_field(ctsio, 5670 /*sks_valid*/ 1, 5671 /*command*/ 1, 5672 /*field*/ 2, 5673 /*bit_valid*/ 0, 5674 /*bit*/ 0); 5675 ctl_done((union ctl_io *)ctsio); 5676 return (CTL_RETVAL_COMPLETE); 5677 } 5678 5679 len = scsi_3btoul(cdb->length); 5680 buffer_offset = scsi_3btoul(cdb->offset); 5681 5682 if (buffer_offset + len > sizeof(lun->write_buffer)) { 5683 ctl_set_invalid_field(ctsio, 5684 /*sks_valid*/ 1, 5685 /*command*/ 1, 5686 /*field*/ 6, 5687 /*bit_valid*/ 0, 5688 /*bit*/ 0); 5689 ctl_done((union ctl_io *)ctsio); 5690 return (CTL_RETVAL_COMPLETE); 5691 } 5692 5693 if ((cdb->byte2 & RWB_MODE) == RWB_MODE_DESCR) { 5694 descr[0] = 0; 5695 scsi_ulto3b(sizeof(lun->write_buffer), &descr[1]); 5696 ctsio->kern_data_ptr = descr; 5697 len = min(len, sizeof(descr)); 5698 } else if ((cdb->byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { 5699 ctsio->kern_data_ptr = echo_descr; 5700 len = min(len, sizeof(echo_descr)); 5701 } else 5702 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5703 ctsio->kern_data_len = len; 5704 ctsio->kern_total_len = len; 5705 ctsio->kern_data_resid = 0; 5706 ctsio->kern_rel_offset = 0; 5707 ctsio->kern_sg_entries = 0; 5708 ctsio->be_move_done = ctl_config_move_done; 5709 ctl_datamove((union ctl_io *)ctsio); 5710 5711 return (CTL_RETVAL_COMPLETE); 5712} 5713 5714int 5715ctl_write_buffer(struct ctl_scsiio *ctsio) 5716{ 5717 struct scsi_write_buffer *cdb; 5718 struct ctl_lun *lun; 5719 int buffer_offset, len; 5720 5721 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5722 5723 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5724 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5725 5726 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) { 5727 ctl_set_invalid_field(ctsio, 5728 /*sks_valid*/ 1, 5729 
/*command*/ 1, 5730 /*field*/ 1, 5731 /*bit_valid*/ 1, 5732 /*bit*/ 4); 5733 ctl_done((union ctl_io *)ctsio); 5734 return (CTL_RETVAL_COMPLETE); 5735 } 5736 if (cdb->buffer_id != 0) { 5737 ctl_set_invalid_field(ctsio, 5738 /*sks_valid*/ 1, 5739 /*command*/ 1, 5740 /*field*/ 2, 5741 /*bit_valid*/ 0, 5742 /*bit*/ 0); 5743 ctl_done((union ctl_io *)ctsio); 5744 return (CTL_RETVAL_COMPLETE); 5745 } 5746 5747 len = scsi_3btoul(cdb->length); 5748 buffer_offset = scsi_3btoul(cdb->offset); 5749 5750 if (buffer_offset + len > sizeof(lun->write_buffer)) { 5751 ctl_set_invalid_field(ctsio, 5752 /*sks_valid*/ 1, 5753 /*command*/ 1, 5754 /*field*/ 6, 5755 /*bit_valid*/ 0, 5756 /*bit*/ 0); 5757 ctl_done((union ctl_io *)ctsio); 5758 return (CTL_RETVAL_COMPLETE); 5759 } 5760 5761 /* 5762 * If we've got a kernel request that hasn't been malloced yet, 5763 * malloc it and tell the caller the data buffer is here. 5764 */ 5765 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5766 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5767 ctsio->kern_data_len = len; 5768 ctsio->kern_total_len = len; 5769 ctsio->kern_data_resid = 0; 5770 ctsio->kern_rel_offset = 0; 5771 ctsio->kern_sg_entries = 0; 5772 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5773 ctsio->be_move_done = ctl_config_move_done; 5774 ctl_datamove((union ctl_io *)ctsio); 5775 5776 return (CTL_RETVAL_COMPLETE); 5777 } 5778 5779 ctl_done((union ctl_io *)ctsio); 5780 5781 return (CTL_RETVAL_COMPLETE); 5782} 5783 5784int 5785ctl_write_same(struct ctl_scsiio *ctsio) 5786{ 5787 struct ctl_lun *lun; 5788 struct ctl_lba_len_flags *lbalen; 5789 uint64_t lba; 5790 uint32_t num_blocks; 5791 int len, retval; 5792 uint8_t byte2; 5793 5794 retval = CTL_RETVAL_COMPLETE; 5795 5796 CTL_DEBUG_PRINT(("ctl_write_same\n")); 5797 5798 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5799 5800 switch (ctsio->cdb[0]) { 5801 case WRITE_SAME_10: { 5802 struct scsi_write_same_10 *cdb; 5803 5804 cdb = (struct scsi_write_same_10 *)ctsio->cdb; 5805 5806 lba = scsi_4btoul(cdb->addr); 5807 num_blocks = scsi_2btoul(cdb->length); 5808 byte2 = cdb->byte2; 5809 break; 5810 } 5811 case WRITE_SAME_16: { 5812 struct scsi_write_same_16 *cdb; 5813 5814 cdb = (struct scsi_write_same_16 *)ctsio->cdb; 5815 5816 lba = scsi_8btou64(cdb->addr); 5817 num_blocks = scsi_4btoul(cdb->length); 5818 byte2 = cdb->byte2; 5819 break; 5820 } 5821 default: 5822 /* 5823 * We got a command we don't support. This shouldn't 5824 * happen, commands should be filtered out above us. 5825 */ 5826 ctl_set_invalid_opcode(ctsio); 5827 ctl_done((union ctl_io *)ctsio); 5828 5829 return (CTL_RETVAL_COMPLETE); 5830 break; /* NOTREACHED */ 5831 } 5832 5833 /* 5834 * The first check is to make sure we're in bounds, the second 5835 * check is to catch wrap-around problems. If the lba + num blocks 5836 * is less than the lba, then we've wrapped around and the block 5837 * range is invalid anyway. 
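 * (e.g. lba 0xfffffffffffffff0 with num_blocks 0x20 wraps: lba + num_blocks == 0x10, which is less than lba, so the range is rejected.)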
5838 */ 5839 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5840 || ((lba + num_blocks) < lba)) { 5841 ctl_set_lba_out_of_range(ctsio); 5842 ctl_done((union ctl_io *)ctsio); 5843 return (CTL_RETVAL_COMPLETE); 5844 } 5845 5846 /* Zero number of blocks means "to the last logical block" */ 5847 if (num_blocks == 0) { 5848 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { 5849 ctl_set_invalid_field(ctsio, 5850 /*sks_valid*/ 0, 5851 /*command*/ 1, 5852 /*field*/ 0, 5853 /*bit_valid*/ 0, 5854 /*bit*/ 0); 5855 ctl_done((union ctl_io *)ctsio); 5856 return (CTL_RETVAL_COMPLETE); 5857 } 5858 num_blocks = (lun->be_lun->maxlba + 1) - lba; 5859 } 5860 5861 len = lun->be_lun->blocksize; 5862 5863 /* 5864 * If we've got a kernel request that hasn't been malloced yet, 5865 * malloc it and tell the caller the data buffer is here. 5866 */ 5867 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5868 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5869 ctsio->kern_data_len = len; 5870 ctsio->kern_total_len = len; 5871 ctsio->kern_data_resid = 0; 5872 ctsio->kern_rel_offset = 0; 5873 ctsio->kern_sg_entries = 0; 5874 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5875 ctsio->be_move_done = ctl_config_move_done; 5876 ctl_datamove((union ctl_io *)ctsio); 5877 5878 return (CTL_RETVAL_COMPLETE); 5879 } 5880 5881 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5882 lbalen->lba = lba; 5883 lbalen->len = num_blocks; 5884 lbalen->flags = byte2; 5885 retval = lun->backend->config_write((union ctl_io *)ctsio); 5886 5887 return (retval); 5888} 5889 5890int 5891ctl_unmap(struct ctl_scsiio *ctsio) 5892{ 5893 struct ctl_lun *lun; 5894 struct scsi_unmap *cdb; 5895 struct ctl_ptr_len_flags *ptrlen; 5896 struct scsi_unmap_header *hdr; 5897 struct scsi_unmap_desc *buf, *end; 5898 uint64_t lba; 5899 uint32_t num_blocks; 5900 int len, retval; 5901 uint8_t byte2; 5902 5903 retval = CTL_RETVAL_COMPLETE; 5904 5905 CTL_DEBUG_PRINT(("ctl_unmap\n")); 5906 5907 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5908 cdb = (struct scsi_unmap *)ctsio->cdb; 5909 5910 len = scsi_2btoul(cdb->length); 5911 byte2 = cdb->byte2; 5912 5913 /* 5914 * If we've got a kernel request that hasn't been malloced yet, 5915 * malloc it and tell the caller the data buffer is here.
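 * (This is the usual two-pass CTL pattern: on first entry CTL_FLAG_ALLOCATED is clear, so we allocate the buffer, start the data move and return; once the data has arrived, ctl_config_move_done() re-dispatches the command and we fall past this check the second time through.)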
5916 */ 5917 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5918 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); 5919 ctsio->kern_data_len = len; 5920 ctsio->kern_total_len = len; 5921 ctsio->kern_data_resid = 0; 5922 ctsio->kern_rel_offset = 0; 5923 ctsio->kern_sg_entries = 0; 5924 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5925 ctsio->be_move_done = ctl_config_move_done; 5926 ctl_datamove((union ctl_io *)ctsio); 5927 5928 return (CTL_RETVAL_COMPLETE); 5929 } 5930 5931 len = ctsio->kern_total_len - ctsio->kern_data_resid; 5932 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; 5933 if (len < sizeof (*hdr) || 5934 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || 5935 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || 5936 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { 5937 ctl_set_invalid_field(ctsio, 5938 /*sks_valid*/ 0, 5939 /*command*/ 0, 5940 /*field*/ 0, 5941 /*bit_valid*/ 0, 5942 /*bit*/ 0); 5943 ctl_done((union ctl_io *)ctsio); 5944 return (CTL_RETVAL_COMPLETE); 5945 } 5946 len = scsi_2btoul(hdr->desc_length); 5947 buf = (struct scsi_unmap_desc *)(hdr + 1); 5948 end = buf + len / sizeof(*buf); 5949 5950 ptrlen = (struct ctl_ptr_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5951 ptrlen->ptr = (void *)buf; 5952 ptrlen->len = len; 5953 ptrlen->flags = byte2; 5954 5955 for (; buf < end; buf++) { 5956 lba = scsi_8btou64(buf->lba); 5957 num_blocks = scsi_4btoul(buf->length); 5958 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5959 || ((lba + num_blocks) < lba)) { 5960 ctl_set_lba_out_of_range(ctsio); 5961 ctl_done((union ctl_io *)ctsio); 5962 return (CTL_RETVAL_COMPLETE); 5963 } 5964 } 5965 5966 retval = lun->backend->config_write((union ctl_io *)ctsio); 5967 5968 return (retval); 5969} 5970 5971/* 5972 * Note that this function currently doesn't actually do anything inside 5973 * CTL to enforce things if the DQue bit is turned on. 5974 * 5975 * Also note that this function can't be used in the default case, because 5976 * the DQue bit isn't set in the changeable mask for the control mode page 5977 * anyway. This is just here as an example for how to implement a page 5978 * handler, and a placeholder in case we want to allow the user to turn 5979 * tagged queueing on and off. 5980 * 5981 * The D_SENSE bit handling is functional, however, and will turn 5982 * descriptor sense on and off for a given LUN. 5983 */ 5984int 5985ctl_control_page_handler(struct ctl_scsiio *ctsio, 5986 struct ctl_page_index *page_index, uint8_t *page_ptr) 5987{ 5988 struct scsi_control_page *current_cp, *saved_cp, *user_cp; 5989 struct ctl_lun *lun; 5990 struct ctl_softc *softc; 5991 int set_ua; 5992 uint32_t initidx; 5993 5994 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5995 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5996 set_ua = 0; 5997 5998 user_cp = (struct scsi_control_page *)page_ptr; 5999 current_cp = (struct scsi_control_page *) 6000 (page_index->page_data + (page_index->page_len * 6001 CTL_PAGE_CURRENT)); 6002 saved_cp = (struct scsi_control_page *) 6003 (page_index->page_data + (page_index->page_len * 6004 CTL_PAGE_SAVED)); 6005 6006 softc = control_softc; 6007 6008 mtx_lock(&lun->lun_lock); 6009 if (((current_cp->rlec & SCP_DSENSE) == 0) 6010 && ((user_cp->rlec & SCP_DSENSE) != 0)) { 6011 /* 6012 * Descriptor sense is currently turned off and the user 6013 * wants to turn it on.
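 * (SCP_DSENSE is the control mode page D_SENSE bit: when set we return descriptor-format sense data, when clear fixed-format sense.)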
6014 */ 6015 current_cp->rlec |= SCP_DSENSE; 6016 saved_cp->rlec |= SCP_DSENSE; 6017 lun->flags |= CTL_LUN_SENSE_DESC; 6018 set_ua = 1; 6019 } else if (((current_cp->rlec & SCP_DSENSE) != 0) 6020 && ((user_cp->rlec & SCP_DSENSE) == 0)) { 6021 /* 6022 * Descriptor sense is currently turned on, and the user 6023 * wants to turn it off. 6024 */ 6025 current_cp->rlec &= ~SCP_DSENSE; 6026 saved_cp->rlec &= ~SCP_DSENSE; 6027 lun->flags &= ~CTL_LUN_SENSE_DESC; 6028 set_ua = 1; 6029 } 6030 if (current_cp->queue_flags & SCP_QUEUE_DQUE) { 6031 if (user_cp->queue_flags & SCP_QUEUE_DQUE) { 6032#ifdef NEEDTOPORT 6033 csevent_log(CSC_CTL | CSC_SHELF_SW | 6034 CTL_UNTAG_TO_UNTAG, 6035 csevent_LogType_Trace, 6036 csevent_Severity_Information, 6037 csevent_AlertLevel_Green, 6038 csevent_FRU_Firmware, 6039 csevent_FRU_Unknown, 6040 "Received untagged to untagged transition"); 6041#endif /* NEEDTOPORT */ 6042 } else { 6043#ifdef NEEDTOPORT 6044 csevent_log(CSC_CTL | CSC_SHELF_SW | 6045 CTL_UNTAG_TO_TAG, 6046 csevent_LogType_ConfigChange, 6047 csevent_Severity_Information, 6048 csevent_AlertLevel_Green, 6049 csevent_FRU_Firmware, 6050 csevent_FRU_Unknown, 6051 "Received untagged to tagged " 6052 "queueing transition"); 6053#endif /* NEEDTOPORT */ 6054 6055 current_cp->queue_flags &= ~SCP_QUEUE_DQUE; 6056 saved_cp->queue_flags &= ~SCP_QUEUE_DQUE; 6057 set_ua = 1; 6058 } 6059 } else { 6060 if (user_cp->queue_flags & SCP_QUEUE_DQUE) { 6061#ifdef NEEDTOPORT 6062 csevent_log(CSC_CTL | CSC_SHELF_SW | 6063 CTL_TAG_TO_UNTAG, 6064 csevent_LogType_ConfigChange, 6065 csevent_Severity_Warning, 6066 csevent_AlertLevel_Yellow, 6067 csevent_FRU_Firmware, 6068 csevent_FRU_Unknown, 6069 "Received tagged queueing to untagged " 6070 "transition"); 6071#endif /* NEEDTOPORT */ 6072 6073 current_cp->queue_flags |= SCP_QUEUE_DQUE; 6074 saved_cp->queue_flags |= SCP_QUEUE_DQUE; 6075 set_ua = 1; 6076 } else { 6077#ifdef NEEDTOPORT 6078 csevent_log(CSC_CTL | CSC_SHELF_SW | 6079 CTL_TAG_TO_TAG, 6080 csevent_LogType_Trace, 6081 csevent_Severity_Information, 6082 csevent_AlertLevel_Green, 6083 csevent_FRU_Firmware, 6084 csevent_FRU_Unknown, 6085 "Received tagged queueing to tagged " 6086 "queueing transition"); 6087#endif /* NEEDTOPORT */ 6088 } 6089 } 6090 if (set_ua != 0) { 6091 int i; 6092 /* 6093 * Let other initiators know that the mode 6094 * parameters for this LUN have changed. 6095 */ 6096 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 6097 if (i == initidx) 6098 continue; 6099 6100 lun->pending_sense[i].ua_pending |= 6101 CTL_UA_MODE_CHANGE; 6102 } 6103 } 6104 mtx_unlock(&lun->lun_lock); 6105 6106 return (0); 6107} 6108 6109int 6110ctl_power_sp_handler(struct ctl_scsiio *ctsio, 6111 struct ctl_page_index *page_index, uint8_t *page_ptr) 6112{ 6113 return (0); 6114} 6115 6116int 6117ctl_power_sp_sense_handler(struct ctl_scsiio *ctsio, 6118 struct ctl_page_index *page_index, int pc) 6119{ 6120 struct copan_power_subpage *page; 6121 6122 page = (struct copan_power_subpage *)(page_index->page_data + 6123 (page_index->page_len * pc)); 6124 6125 switch (pc) { 6126 case SMS_PAGE_CTRL_CHANGEABLE >> 6: 6127 /* 6128 * We don't update the changeable bits for this page.
6129 */ 6130		break; 6131	case SMS_PAGE_CTRL_CURRENT >> 6: 6132	case SMS_PAGE_CTRL_DEFAULT >> 6: 6133	case SMS_PAGE_CTRL_SAVED >> 6: 6134#ifdef NEEDTOPORT 6135		ctl_update_power_subpage(page); 6136#endif 6137		break; 6138	default: 6139#ifdef NEEDTOPORT 6140		EPRINT(0, "Invalid PC %d!!", pc); 6141#endif 6142		break; 6143	} 6144	return (0); 6145} 6146 6147 6148int 6149ctl_aps_sp_handler(struct ctl_scsiio *ctsio, 6150		   struct ctl_page_index *page_index, uint8_t *page_ptr) 6151{ 6152	struct copan_aps_subpage *user_sp; 6153	struct copan_aps_subpage *current_sp; 6154	union ctl_modepage_info *modepage_info; 6155	struct ctl_softc *softc; 6156	struct ctl_lun *lun; 6157	int retval; 6158 6159	retval = CTL_RETVAL_COMPLETE; 6160	current_sp = (struct copan_aps_subpage *)(page_index->page_data + 6161		     (page_index->page_len * CTL_PAGE_CURRENT)); 6162	softc = control_softc; 6163	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6164 6165	user_sp = (struct copan_aps_subpage *)page_ptr; 6166 6167	modepage_info = (union ctl_modepage_info *) 6168		ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6169 6170	modepage_info->header.page_code = page_index->page_code & SMPH_PC_MASK; 6171	modepage_info->header.subpage = page_index->subpage; 6172	modepage_info->aps.lock_active = user_sp->lock_active; 6173 6174	mtx_lock(&softc->ctl_lock); 6175 6176	/* 6177	 * If there is a request to lock the LUN and another LUN is locked, 6178	 * this is an error.  If the requested LUN is already locked, ignore 6179	 * the request.  If no LUN is locked, attempt to lock it. 6180	 * If there is a request to unlock the LUN and the LUN is currently 6181	 * locked, attempt to unlock it.  Otherwise (i.e. if another LUN is 6182	 * locked, or no LUN is locked), ignore the request. 6183	 */ 6184	if (user_sp->lock_active & APS_LOCK_ACTIVE) { 6185		if (softc->aps_locked_lun == lun->lun) { 6186			/* 6187			 * This LUN is already locked, so we're done. 6188			 */ 6189			retval = CTL_RETVAL_COMPLETE; 6190		} else if (softc->aps_locked_lun == 0) { 6191			/* 6192			 * No one has the lock, pass the request to the 6193			 * backend. 6194			 */ 6195			retval = lun->backend->config_write( 6196				(union ctl_io *)ctsio); 6197		} else { 6198			/* 6199			 * Someone else has the lock, throw out the request. 6200			 */ 6201			ctl_set_already_locked(ctsio); 6202			free(ctsio->kern_data_ptr, M_CTL); 6203			ctl_done((union ctl_io *)ctsio); 6204 6205			/* 6206			 * Set the return value so that ctl_do_mode_select() 6207			 * won't try to complete the command.  We already 6208			 * completed it here. 6209			 */ 6210			retval = CTL_RETVAL_ERROR; 6211		} 6212	} else if (softc->aps_locked_lun == lun->lun) { 6213		/* 6214		 * This LUN is locked, so pass the unlock request to the 6215		 * backend. 
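		 * (An aps_locked_lun value of 0 is treated as "no LUN
		 * currently holds the APS lock"; the field is protected
		 * by the softc ctl_lock held here.)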
6216 */ 6217		retval = lun->backend->config_write((union ctl_io *)ctsio); 6218	} 6219	mtx_unlock(&softc->ctl_lock); 6220 6221	return (retval); 6222} 6223 6224int 6225ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio, 6226				struct ctl_page_index *page_index, 6227				uint8_t *page_ptr) 6228{ 6229	uint8_t *c; 6230	int i; 6231 6232	c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs; 6233	ctl_time_io_secs = 6234		(c[0] << 8) | 6235		(c[1] << 0); 6236 6237	CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs)); 6238	printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs); 6239	printf("page data:"); 6240	for (i=0; i<8; i++) 6241		printf(" %.2x",page_ptr[i]); 6242	printf("\n"); 6243	return (0); 6244} 6245 6246int 6247ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio, 6248			       struct ctl_page_index *page_index, 6249			       int pc) 6250{ 6251	struct copan_debugconf_subpage *page; 6252 6253	page = (struct copan_debugconf_subpage *)(page_index->page_data + 6254		(page_index->page_len * pc)); 6255 6256	switch (pc) { 6257	case SMS_PAGE_CTRL_CHANGEABLE >> 6: 6258	case SMS_PAGE_CTRL_DEFAULT >> 6: 6259	case SMS_PAGE_CTRL_SAVED >> 6: 6260		/* 6261		 * We don't update the changeable or default bits for this page. 6262		 */ 6263		break; 6264	case SMS_PAGE_CTRL_CURRENT >> 6: 6265		page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8; 6266		page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0; 6267		break; 6268	default: 6269#ifdef NEEDTOPORT 6270		EPRINT(0, "Invalid PC %d!!", pc); 6271#endif /* NEEDTOPORT */ 6272		break; 6273	} 6274	return (0); 6275} 6276 6277 6278static int 6279ctl_do_mode_select(union ctl_io *io) 6280{ 6281	struct scsi_mode_page_header *page_header; 6282	struct ctl_page_index *page_index; 6283	struct ctl_scsiio *ctsio; 6284	int control_dev, page_len; 6285	int page_len_offset, page_len_size; 6286	union ctl_modepage_info *modepage_info; 6287	struct ctl_lun *lun; 6288	int *len_left, *len_used; 6289	int retval, i; 6290 6291	ctsio = &io->scsiio; 6292	page_index = NULL; 6293	page_len = 0; 6294	retval = CTL_RETVAL_COMPLETE; 6295 6296	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6297 6298	if (lun->be_lun->lun_type != T_DIRECT) 6299		control_dev = 1; 6300	else 6301		control_dev = 0; 6302 6303	modepage_info = (union ctl_modepage_info *) 6304		ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6305	len_left = &modepage_info->header.len_left; 6306	len_used = &modepage_info->header.len_used; 6307 6308do_next_page: 6309 6310	page_header = (struct scsi_mode_page_header *) 6311		(ctsio->kern_data_ptr + *len_used); 6312 6313	if (*len_left == 0) { 6314		free(ctsio->kern_data_ptr, M_CTL); 6315		ctl_set_success(ctsio); 6316		ctl_done((union ctl_io *)ctsio); 6317		return (CTL_RETVAL_COMPLETE); 6318	} else if (*len_left < sizeof(struct scsi_mode_page_header)) { 6319 6320		free(ctsio->kern_data_ptr, M_CTL); 6321		ctl_set_param_len_error(ctsio); 6322		ctl_done((union ctl_io *)ctsio); 6323		return (CTL_RETVAL_COMPLETE); 6324 6325	} else if ((page_header->page_code & SMPH_SPF) 6326		&& (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 6327 6328		free(ctsio->kern_data_ptr, M_CTL); 6329		ctl_set_param_len_error(ctsio); 6330		ctl_done((union ctl_io *)ctsio); 6331		return (CTL_RETVAL_COMPLETE); 6332	} 6333 6334 6335	/* 6336	 * XXX KDM should we do something with the block descriptor? 
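	 * (ctl_mode_select() already accounted for the mode header and any
	 * block descriptor when it initialized len_used, so the loop below
	 * only ever walks mode page headers.)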
6337 */ 6338 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6339 6340 if ((control_dev != 0) 6341 && (lun->mode_pages.index[i].page_flags & 6342 CTL_PAGE_FLAG_DISK_ONLY)) 6343 continue; 6344 6345 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) != 6346 (page_header->page_code & SMPH_PC_MASK)) 6347 continue; 6348 6349 /* 6350 * If neither page has a subpage code, then we've got a 6351 * match. 6352 */ 6353 if (((lun->mode_pages.index[i].page_code & SMPH_SPF) == 0) 6354 && ((page_header->page_code & SMPH_SPF) == 0)) { 6355 page_index = &lun->mode_pages.index[i]; 6356 page_len = page_header->page_length; 6357 break; 6358 } 6359 6360 /* 6361 * If both pages have subpages, then the subpage numbers 6362 * have to match. 6363 */ 6364 if ((lun->mode_pages.index[i].page_code & SMPH_SPF) 6365 && (page_header->page_code & SMPH_SPF)) { 6366 struct scsi_mode_page_header_sp *sph; 6367 6368 sph = (struct scsi_mode_page_header_sp *)page_header; 6369 6370 if (lun->mode_pages.index[i].subpage == 6371 sph->subpage) { 6372 page_index = &lun->mode_pages.index[i]; 6373 page_len = scsi_2btoul(sph->page_length); 6374 break; 6375 } 6376 } 6377 } 6378 6379 /* 6380 * If we couldn't find the page, or if we don't have a mode select 6381 * handler for it, send back an error to the user. 6382 */ 6383 if ((page_index == NULL) 6384 || (page_index->select_handler == NULL)) { 6385 ctl_set_invalid_field(ctsio, 6386 /*sks_valid*/ 1, 6387 /*command*/ 0, 6388 /*field*/ *len_used, 6389 /*bit_valid*/ 0, 6390 /*bit*/ 0); 6391 free(ctsio->kern_data_ptr, M_CTL); 6392 ctl_done((union ctl_io *)ctsio); 6393 return (CTL_RETVAL_COMPLETE); 6394 } 6395 6396 if (page_index->page_code & SMPH_SPF) { 6397 page_len_offset = 2; 6398 page_len_size = 2; 6399 } else { 6400 page_len_size = 1; 6401 page_len_offset = 1; 6402 } 6403 6404 /* 6405 * If the length the initiator gives us isn't the one we specify in 6406 * the mode page header, or if they didn't specify enough data in 6407 * the CDB to avoid truncating this page, kick out the request. 6408 */ 6409 if ((page_len != (page_index->page_len - page_len_offset - 6410 page_len_size)) 6411 || (*len_left < page_index->page_len)) { 6412 6413 6414 ctl_set_invalid_field(ctsio, 6415 /*sks_valid*/ 1, 6416 /*command*/ 0, 6417 /*field*/ *len_used + page_len_offset, 6418 /*bit_valid*/ 0, 6419 /*bit*/ 0); 6420 free(ctsio->kern_data_ptr, M_CTL); 6421 ctl_done((union ctl_io *)ctsio); 6422 return (CTL_RETVAL_COMPLETE); 6423 } 6424 6425 /* 6426 * Run through the mode page, checking to make sure that the bits 6427 * the user changed are actually legal for him to change. 6428 */ 6429 for (i = 0; i < page_index->page_len; i++) { 6430 uint8_t *user_byte, *change_mask, *current_byte; 6431 int bad_bit; 6432 int j; 6433 6434 user_byte = (uint8_t *)page_header + i; 6435 change_mask = page_index->page_data + 6436 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; 6437 current_byte = page_index->page_data + 6438 (page_index->page_len * CTL_PAGE_CURRENT) + i; 6439 6440 /* 6441 * Check to see whether the user set any bits in this byte 6442 * that he is not allowed to set. 6443 */ 6444 if ((*user_byte & ~(*change_mask)) == 6445 (*current_byte & ~(*change_mask))) 6446 continue; 6447 6448 /* 6449 * Go through bit by bit to determine which one is illegal. 
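		 * We scan from the most significant bit down and report
		 * the first non-changeable bit that differs in the
		 * sense-key specific data below.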
6450 */ 6451		bad_bit = 0; 6452		for (j = 7; j >= 0; j--) { 6453			if ((((1 << j) & ~(*change_mask)) & *user_byte) != 6454			    (((1 << j) & ~(*change_mask)) & *current_byte)) { 6455				bad_bit = j; 6456				break; 6457			} 6458		} 6459		ctl_set_invalid_field(ctsio, 6460				      /*sks_valid*/ 1, 6461				      /*command*/ 0, 6462				      /*field*/ *len_used + i, 6463				      /*bit_valid*/ 1, 6464				      /*bit*/ bad_bit); 6465		free(ctsio->kern_data_ptr, M_CTL); 6466		ctl_done((union ctl_io *)ctsio); 6467		return (CTL_RETVAL_COMPLETE); 6468	} 6469 6470	/* 6471	 * Decrement these before we call the page handler, since we may 6472	 * end up getting called back one way or another before the handler 6473	 * returns to this context. 6474	 */ 6475	*len_left -= page_index->page_len; 6476	*len_used += page_index->page_len; 6477 6478	retval = page_index->select_handler(ctsio, page_index, 6479					    (uint8_t *)page_header); 6480 6481	/* 6482	 * If the page handler returns CTL_RETVAL_QUEUED, then we need to 6483	 * wait until this queued command completes to finish processing 6484	 * the mode page.  If it returns anything other than 6485	 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have 6486	 * already set the sense information, freed the data pointer, and 6487	 * completed the io for us. 6488	 */ 6489	if (retval != CTL_RETVAL_COMPLETE) 6490		goto bailout_no_done; 6491 6492	/* 6493	 * If the initiator sent us more than one page, parse the next one. 6494	 */ 6495	if (*len_left > 0) 6496		goto do_next_page; 6497 6498	ctl_set_success(ctsio); 6499	free(ctsio->kern_data_ptr, M_CTL); 6500	ctl_done((union ctl_io *)ctsio); 6501 6502bailout_no_done: 6503 6504	return (CTL_RETVAL_COMPLETE); 6505 6506} 6507 6508int 6509ctl_mode_select(struct ctl_scsiio *ctsio) 6510{ 6511	int param_len, pf, sp; 6512	int header_size, bd_len; 6513	int len_left, len_used; 6514	struct ctl_page_index *page_index; 6515	struct ctl_lun *lun; 6516	int control_dev, page_len; 6517	union ctl_modepage_info *modepage_info; 6518	int retval; 6519 6520	pf = 0; 6521	sp = 0; 6522	page_len = 0; 6523	len_used = 0; 6524	len_left = 0; 6525	retval = 0; 6526	bd_len = 0; 6527	page_index = NULL; 6528 6529	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6530 6531	if (lun->be_lun->lun_type != T_DIRECT) 6532		control_dev = 1; 6533	else 6534		control_dev = 0; 6535 6536	switch (ctsio->cdb[0]) { 6537	case MODE_SELECT_6: { 6538		struct scsi_mode_select_6 *cdb; 6539 6540		cdb = (struct scsi_mode_select_6 *)ctsio->cdb; 6541 6542		pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6543		sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6544 6545		param_len = cdb->length; 6546		header_size = sizeof(struct scsi_mode_header_6); 6547		break; 6548	} 6549	case MODE_SELECT_10: { 6550		struct scsi_mode_select_10 *cdb; 6551 6552		cdb = (struct scsi_mode_select_10 *)ctsio->cdb; 6553 6554		pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6555		sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6556 6557		param_len = scsi_2btoul(cdb->length); 6558		header_size = sizeof(struct scsi_mode_header_10); 6559		break; 6560	} 6561	default: 6562		ctl_set_invalid_opcode(ctsio); 6563		ctl_done((union ctl_io *)ctsio); 6564		return (CTL_RETVAL_COMPLETE); 6565		break; /* NOTREACHED */ 6566	} 6567 6568	/* 6569	 * From SPC-3: 6570	 * "A parameter list length of zero indicates that the Data-Out Buffer 6571	 * shall be empty. This condition shall not be considered as an error." 
6572 */ 6573 if (param_len == 0) { 6574 ctl_set_success(ctsio); 6575 ctl_done((union ctl_io *)ctsio); 6576 return (CTL_RETVAL_COMPLETE); 6577 } 6578 6579 /* 6580 * Since we'll hit this the first time through, prior to 6581 * allocation, we don't need to free a data buffer here. 6582 */ 6583 if (param_len < header_size) { 6584 ctl_set_param_len_error(ctsio); 6585 ctl_done((union ctl_io *)ctsio); 6586 return (CTL_RETVAL_COMPLETE); 6587 } 6588 6589 /* 6590 * Allocate the data buffer and grab the user's data. In theory, 6591 * we shouldn't have to sanity check the parameter list length here 6592 * because the maximum size is 64K. We should be able to malloc 6593 * that much without too many problems. 6594 */ 6595 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6596 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 6597 ctsio->kern_data_len = param_len; 6598 ctsio->kern_total_len = param_len; 6599 ctsio->kern_data_resid = 0; 6600 ctsio->kern_rel_offset = 0; 6601 ctsio->kern_sg_entries = 0; 6602 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6603 ctsio->be_move_done = ctl_config_move_done; 6604 ctl_datamove((union ctl_io *)ctsio); 6605 6606 return (CTL_RETVAL_COMPLETE); 6607 } 6608 6609 switch (ctsio->cdb[0]) { 6610 case MODE_SELECT_6: { 6611 struct scsi_mode_header_6 *mh6; 6612 6613 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; 6614 bd_len = mh6->blk_desc_len; 6615 break; 6616 } 6617 case MODE_SELECT_10: { 6618 struct scsi_mode_header_10 *mh10; 6619 6620 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; 6621 bd_len = scsi_2btoul(mh10->blk_desc_len); 6622 break; 6623 } 6624 default: 6625 panic("Invalid CDB type %#x", ctsio->cdb[0]); 6626 break; 6627 } 6628 6629 if (param_len < (header_size + bd_len)) { 6630 free(ctsio->kern_data_ptr, M_CTL); 6631 ctl_set_param_len_error(ctsio); 6632 ctl_done((union ctl_io *)ctsio); 6633 return (CTL_RETVAL_COMPLETE); 6634 } 6635 6636 /* 6637 * Set the IO_CONT flag, so that if this I/O gets passed to 6638 * ctl_config_write_done(), it'll get passed back to 6639 * ctl_do_mode_select() for further processing, or completion if 6640 * we're all done. 6641 */ 6642 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6643 ctsio->io_cont = ctl_do_mode_select; 6644 6645 modepage_info = (union ctl_modepage_info *) 6646 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6647 6648 memset(modepage_info, 0, sizeof(*modepage_info)); 6649 6650 len_left = param_len - header_size - bd_len; 6651 len_used = header_size + bd_len; 6652 6653 modepage_info->header.len_left = len_left; 6654 modepage_info->header.len_used = len_used; 6655 6656 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6657} 6658 6659int 6660ctl_mode_sense(struct ctl_scsiio *ctsio) 6661{ 6662 struct ctl_lun *lun; 6663 int pc, page_code, dbd, llba, subpage; 6664 int alloc_len, page_len, header_len, total_len; 6665 struct scsi_mode_block_descr *block_desc; 6666 struct ctl_page_index *page_index; 6667 int control_dev; 6668 6669 dbd = 0; 6670 llba = 0; 6671 block_desc = NULL; 6672 page_index = NULL; 6673 6674 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6675 6676 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6677 6678 if (lun->be_lun->lun_type != T_DIRECT) 6679 control_dev = 1; 6680 else 6681 control_dev = 0; 6682 6683 if (lun->flags & CTL_LUN_PR_RESERVED) { 6684 uint32_t residx; 6685 6686 /* 6687 * XXX KDM need a lock here. 
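	 * (Reading res_type and pr_res_idx without lun_lock is racy; a
	 * concurrent PERSISTENT RESERVE OUT could change the reservation
	 * underneath us.)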
6688 */ 6689 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 6690 if ((lun->res_type == SPR_TYPE_EX_AC 6691 && residx != lun->pr_res_idx) 6692 || ((lun->res_type == SPR_TYPE_EX_AC_RO 6693 || lun->res_type == SPR_TYPE_EX_AC_AR) 6694 && !lun->per_res[residx].registered)) { 6695 ctl_set_reservation_conflict(ctsio); 6696 ctl_done((union ctl_io *)ctsio); 6697 return (CTL_RETVAL_COMPLETE); 6698 } 6699 } 6700 6701 switch (ctsio->cdb[0]) { 6702 case MODE_SENSE_6: { 6703 struct scsi_mode_sense_6 *cdb; 6704 6705 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6706 6707 header_len = sizeof(struct scsi_mode_hdr_6); 6708 if (cdb->byte2 & SMS_DBD) 6709 dbd = 1; 6710 else 6711 header_len += sizeof(struct scsi_mode_block_descr); 6712 6713 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6714 page_code = cdb->page & SMS_PAGE_CODE; 6715 subpage = cdb->subpage; 6716 alloc_len = cdb->length; 6717 break; 6718 } 6719 case MODE_SENSE_10: { 6720 struct scsi_mode_sense_10 *cdb; 6721 6722 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6723 6724 header_len = sizeof(struct scsi_mode_hdr_10); 6725 6726 if (cdb->byte2 & SMS_DBD) 6727 dbd = 1; 6728 else 6729 header_len += sizeof(struct scsi_mode_block_descr); 6730 if (cdb->byte2 & SMS10_LLBAA) 6731 llba = 1; 6732 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6733 page_code = cdb->page & SMS_PAGE_CODE; 6734 subpage = cdb->subpage; 6735 alloc_len = scsi_2btoul(cdb->length); 6736 break; 6737 } 6738 default: 6739 ctl_set_invalid_opcode(ctsio); 6740 ctl_done((union ctl_io *)ctsio); 6741 return (CTL_RETVAL_COMPLETE); 6742 break; /* NOTREACHED */ 6743 } 6744 6745 /* 6746 * We have to make a first pass through to calculate the size of 6747 * the pages that match the user's query. Then we allocate enough 6748 * memory to hold it, and actually copy the data into the buffer. 6749 */ 6750 switch (page_code) { 6751 case SMS_ALL_PAGES_PAGE: { 6752 int i; 6753 6754 page_len = 0; 6755 6756 /* 6757 * At the moment, values other than 0 and 0xff here are 6758 * reserved according to SPC-3. 6759 */ 6760 if ((subpage != SMS_SUBPAGE_PAGE_0) 6761 && (subpage != SMS_SUBPAGE_ALL)) { 6762 ctl_set_invalid_field(ctsio, 6763 /*sks_valid*/ 1, 6764 /*command*/ 1, 6765 /*field*/ 3, 6766 /*bit_valid*/ 0, 6767 /*bit*/ 0); 6768 ctl_done((union ctl_io *)ctsio); 6769 return (CTL_RETVAL_COMPLETE); 6770 } 6771 6772 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6773 if ((control_dev != 0) 6774 && (lun->mode_pages.index[i].page_flags & 6775 CTL_PAGE_FLAG_DISK_ONLY)) 6776 continue; 6777 6778 /* 6779 * We don't use this subpage if the user didn't 6780 * request all subpages. 
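			 * (Per SPC-3, a subpage code of 0 selects only the
			 * page_0 format pages, while 0xff selects all
			 * subpages of the matching page code.)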
6781 */ 6782 if ((lun->mode_pages.index[i].subpage != 0) 6783 && (subpage == SMS_SUBPAGE_PAGE_0)) 6784 continue; 6785 6786#if 0 6787 printf("found page %#x len %d\n", 6788 lun->mode_pages.index[i].page_code & 6789 SMPH_PC_MASK, 6790 lun->mode_pages.index[i].page_len); 6791#endif 6792 page_len += lun->mode_pages.index[i].page_len; 6793 } 6794 break; 6795 } 6796 default: { 6797 int i; 6798 6799 page_len = 0; 6800 6801 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6802 /* Look for the right page code */ 6803 if ((lun->mode_pages.index[i].page_code & 6804 SMPH_PC_MASK) != page_code) 6805 continue; 6806 6807 /* Look for the right subpage or the subpage wildcard*/ 6808 if ((lun->mode_pages.index[i].subpage != subpage) 6809 && (subpage != SMS_SUBPAGE_ALL)) 6810 continue; 6811 6812 /* Make sure the page is supported for this dev type */ 6813 if ((control_dev != 0) 6814 && (lun->mode_pages.index[i].page_flags & 6815 CTL_PAGE_FLAG_DISK_ONLY)) 6816 continue; 6817 6818#if 0 6819 printf("found page %#x len %d\n", 6820 lun->mode_pages.index[i].page_code & 6821 SMPH_PC_MASK, 6822 lun->mode_pages.index[i].page_len); 6823#endif 6824 6825 page_len += lun->mode_pages.index[i].page_len; 6826 } 6827 6828 if (page_len == 0) { 6829 ctl_set_invalid_field(ctsio, 6830 /*sks_valid*/ 1, 6831 /*command*/ 1, 6832 /*field*/ 2, 6833 /*bit_valid*/ 1, 6834 /*bit*/ 5); 6835 ctl_done((union ctl_io *)ctsio); 6836 return (CTL_RETVAL_COMPLETE); 6837 } 6838 break; 6839 } 6840 } 6841 6842 total_len = header_len + page_len; 6843#if 0 6844 printf("header_len = %d, page_len = %d, total_len = %d\n", 6845 header_len, page_len, total_len); 6846#endif 6847 6848 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6849 ctsio->kern_sg_entries = 0; 6850 ctsio->kern_data_resid = 0; 6851 ctsio->kern_rel_offset = 0; 6852 if (total_len < alloc_len) { 6853 ctsio->residual = alloc_len - total_len; 6854 ctsio->kern_data_len = total_len; 6855 ctsio->kern_total_len = total_len; 6856 } else { 6857 ctsio->residual = 0; 6858 ctsio->kern_data_len = alloc_len; 6859 ctsio->kern_total_len = alloc_len; 6860 } 6861 6862 switch (ctsio->cdb[0]) { 6863 case MODE_SENSE_6: { 6864 struct scsi_mode_hdr_6 *header; 6865 6866 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6867 6868 header->datalen = ctl_min(total_len - 1, 254); 6869 6870 if (dbd) 6871 header->block_descr_len = 0; 6872 else 6873 header->block_descr_len = 6874 sizeof(struct scsi_mode_block_descr); 6875 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6876 break; 6877 } 6878 case MODE_SENSE_10: { 6879 struct scsi_mode_hdr_10 *header; 6880 int datalen; 6881 6882 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6883 6884 datalen = ctl_min(total_len - 2, 65533); 6885 scsi_ulto2b(datalen, header->datalen); 6886 if (dbd) 6887 scsi_ulto2b(0, header->block_descr_len); 6888 else 6889 scsi_ulto2b(sizeof(struct scsi_mode_block_descr), 6890 header->block_descr_len); 6891 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6892 break; 6893 } 6894 default: 6895 panic("invalid CDB type %#x", ctsio->cdb[0]); 6896 break; /* NOTREACHED */ 6897 } 6898 6899 /* 6900 * If we've got a disk, use its blocksize in the block 6901 * descriptor. Otherwise, just set it to 0. 
6902 */ 6903	if (dbd == 0) { 6904		if (control_dev == 0) 6905			scsi_ulto3b(lun->be_lun->blocksize, 6906				    block_desc->block_len); 6907		else 6908			scsi_ulto3b(0, block_desc->block_len); 6909	} 6910 6911	switch (page_code) { 6912	case SMS_ALL_PAGES_PAGE: { 6913		int i, data_used; 6914 6915		data_used = header_len; 6916		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6917			struct ctl_page_index *page_index; 6918 6919			page_index = &lun->mode_pages.index[i]; 6920 6921			if ((control_dev != 0) 6922			 && (page_index->page_flags & 6923			    CTL_PAGE_FLAG_DISK_ONLY)) 6924				continue; 6925 6926			/* 6927			 * We don't use this subpage if the user didn't 6928			 * request all subpages.  We already checked (above) 6929			 * to make sure the user only specified a subpage 6930			 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6931			 */ 6932			if ((page_index->subpage != 0) 6933			 && (subpage == SMS_SUBPAGE_PAGE_0)) 6934				continue; 6935 6936			/* 6937			 * Call the handler, if it exists, to update the 6938			 * page to the latest values. 6939			 */ 6940			if (page_index->sense_handler != NULL) 6941				page_index->sense_handler(ctsio, page_index,pc); 6942 6943			memcpy(ctsio->kern_data_ptr + data_used, 6944			       page_index->page_data + 6945			       (page_index->page_len * pc), 6946			       page_index->page_len); 6947			data_used += page_index->page_len; 6948		} 6949		break; 6950	} 6951	default: { 6952		int i, data_used; 6953 6954		data_used = header_len; 6955 6956		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6957			struct ctl_page_index *page_index; 6958 6959			page_index = &lun->mode_pages.index[i]; 6960 6961			/* Look for the right page code */ 6962			if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6963				continue; 6964 6965			/* Look for the right subpage or the subpage wildcard*/ 6966			if ((page_index->subpage != subpage) 6967			 && (subpage != SMS_SUBPAGE_ALL)) 6968				continue; 6969 6970			/* Make sure the page is supported for this dev type */ 6971			if ((control_dev != 0) 6972			 && (page_index->page_flags & 6973			     CTL_PAGE_FLAG_DISK_ONLY)) 6974				continue; 6975 6976			/* 6977			 * Call the handler, if it exists, to update the 6978			 * page to the latest values. 
6979 */ 6980			if (page_index->sense_handler != NULL) 6981				page_index->sense_handler(ctsio, page_index,pc); 6982 6983			memcpy(ctsio->kern_data_ptr + data_used, 6984			       page_index->page_data + 6985			       (page_index->page_len * pc), 6986			       page_index->page_len); 6987			data_used += page_index->page_len; 6988		} 6989		break; 6990	} 6991	} 6992 6993	ctsio->scsi_status = SCSI_STATUS_OK; 6994 6995	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6996	ctsio->be_move_done = ctl_config_move_done; 6997	ctl_datamove((union ctl_io *)ctsio); 6998 6999	return (CTL_RETVAL_COMPLETE); 7000} 7001 7002int 7003ctl_read_capacity(struct ctl_scsiio *ctsio) 7004{ 7005	struct scsi_read_capacity *cdb; 7006	struct scsi_read_capacity_data *data; 7007	struct ctl_lun *lun; 7008	uint32_t lba; 7009 7010	CTL_DEBUG_PRINT(("ctl_read_capacity\n")); 7011 7012	cdb = (struct scsi_read_capacity *)ctsio->cdb; 7013 7014	lba = scsi_4btoul(cdb->addr); 7015	if (((cdb->pmi & SRC_PMI) == 0) 7016	 && (lba != 0)) { 7017		ctl_set_invalid_field(/*ctsio*/ ctsio, 7018				      /*sks_valid*/ 1, 7019				      /*command*/ 1, 7020				      /*field*/ 2, 7021				      /*bit_valid*/ 0, 7022				      /*bit*/ 0); 7023		ctl_done((union ctl_io *)ctsio); 7024		return (CTL_RETVAL_COMPLETE); 7025	} 7026 7027	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7028 7029	ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 7030	data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; 7031	ctsio->residual = 0; 7032	ctsio->kern_data_len = sizeof(*data); 7033	ctsio->kern_total_len = sizeof(*data); 7034	ctsio->kern_data_resid = 0; 7035	ctsio->kern_rel_offset = 0; 7036	ctsio->kern_sg_entries = 0; 7037 7038	/* 7039	 * If the maximum LBA is greater than 0xfffffffe, the user must 7040	 * issue a SERVICE ACTION IN (16) command, with the read capacity 7041	 * service action set. 7042	 */ 7043	if (lun->be_lun->maxlba > 0xfffffffe) 7044		scsi_ulto4b(0xffffffff, data->addr); 7045	else 7046		scsi_ulto4b(lun->be_lun->maxlba, data->addr); 7047 7048	/* 7049	 * XXX KDM this may not be 512 bytes... 
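	 * The block length reported here comes straight from the backend's
	 * blocksize, so a backend with 4k sectors will report 4096.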
7050 */ 7051 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7052 7053 ctsio->scsi_status = SCSI_STATUS_OK; 7054 7055 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7056 ctsio->be_move_done = ctl_config_move_done; 7057 ctl_datamove((union ctl_io *)ctsio); 7058 7059 return (CTL_RETVAL_COMPLETE); 7060} 7061 7062static int 7063ctl_read_capacity_16(struct ctl_scsiio *ctsio) 7064{ 7065 struct scsi_read_capacity_16 *cdb; 7066 struct scsi_read_capacity_data_long *data; 7067 struct ctl_lun *lun; 7068 uint64_t lba; 7069 uint32_t alloc_len; 7070 7071 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 7072 7073 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 7074 7075 alloc_len = scsi_4btoul(cdb->alloc_len); 7076 lba = scsi_8btou64(cdb->addr); 7077 7078 if ((cdb->reladr & SRC16_PMI) 7079 && (lba != 0)) { 7080 ctl_set_invalid_field(/*ctsio*/ ctsio, 7081 /*sks_valid*/ 1, 7082 /*command*/ 1, 7083 /*field*/ 2, 7084 /*bit_valid*/ 0, 7085 /*bit*/ 0); 7086 ctl_done((union ctl_io *)ctsio); 7087 return (CTL_RETVAL_COMPLETE); 7088 } 7089 7090 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7091 7092 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 7093 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 7094 7095 if (sizeof(*data) < alloc_len) { 7096 ctsio->residual = alloc_len - sizeof(*data); 7097 ctsio->kern_data_len = sizeof(*data); 7098 ctsio->kern_total_len = sizeof(*data); 7099 } else { 7100 ctsio->residual = 0; 7101 ctsio->kern_data_len = alloc_len; 7102 ctsio->kern_total_len = alloc_len; 7103 } 7104 ctsio->kern_data_resid = 0; 7105 ctsio->kern_rel_offset = 0; 7106 ctsio->kern_sg_entries = 0; 7107 7108 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 7109 /* XXX KDM this may not be 512 bytes... */ 7110 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7111 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 7112 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); 7113 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 7114 data->lalba_lbp[0] |= SRC16_LBPME; 7115 7116 ctsio->scsi_status = SCSI_STATUS_OK; 7117 7118 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7119 ctsio->be_move_done = ctl_config_move_done; 7120 ctl_datamove((union ctl_io *)ctsio); 7121 7122 return (CTL_RETVAL_COMPLETE); 7123} 7124 7125int 7126ctl_service_action_in(struct ctl_scsiio *ctsio) 7127{ 7128 struct scsi_service_action_in *cdb; 7129 int retval; 7130 7131 CTL_DEBUG_PRINT(("ctl_service_action_in\n")); 7132 7133 cdb = (struct scsi_service_action_in *)ctsio->cdb; 7134 7135 retval = CTL_RETVAL_COMPLETE; 7136 7137 switch (cdb->service_action) { 7138 case SRC16_SERVICE_ACTION: 7139 retval = ctl_read_capacity_16(ctsio); 7140 break; 7141 default: 7142 ctl_set_invalid_field(/*ctsio*/ ctsio, 7143 /*sks_valid*/ 1, 7144 /*command*/ 1, 7145 /*field*/ 1, 7146 /*bit_valid*/ 1, 7147 /*bit*/ 4); 7148 ctl_done((union ctl_io *)ctsio); 7149 break; 7150 } 7151 7152 return (retval); 7153} 7154 7155int 7156ctl_maintenance_in(struct ctl_scsiio *ctsio) 7157{ 7158 struct scsi_maintenance_in *cdb; 7159 int retval; 7160 int alloc_len, total_len = 0; 7161 int num_target_port_groups, single; 7162 struct ctl_lun *lun; 7163 struct ctl_softc *softc; 7164 struct scsi_target_group_data *rtg_ptr; 7165 struct scsi_target_port_group_descriptor *tpg_desc_ptr1, *tpg_desc_ptr2; 7166 struct scsi_target_port_descriptor *tp_desc_ptr1_1, *tp_desc_ptr1_2, 7167 *tp_desc_ptr2_1, *tp_desc_ptr2_2; 7168 7169 CTL_DEBUG_PRINT(("ctl_maintenance_in\n")); 7170 7171 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 
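	/*
	 * The only service action we implement for MAINTENANCE IN is
	 * REPORT TARGET PORT GROUPS; anything else is rejected just below.
	 */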
7172 softc = control_softc; 7173 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7174 7175 retval = CTL_RETVAL_COMPLETE; 7176 7177 if ((cdb->byte2 & SERVICE_ACTION_MASK) != SA_RPRT_TRGT_GRP) { 7178 ctl_set_invalid_field(/*ctsio*/ ctsio, 7179 /*sks_valid*/ 1, 7180 /*command*/ 1, 7181 /*field*/ 1, 7182 /*bit_valid*/ 1, 7183 /*bit*/ 4); 7184 ctl_done((union ctl_io *)ctsio); 7185 return(retval); 7186 } 7187 7188 single = ctl_is_single; 7189 if (single) 7190 num_target_port_groups = NUM_TARGET_PORT_GROUPS - 1; 7191 else 7192 num_target_port_groups = NUM_TARGET_PORT_GROUPS; 7193 7194 total_len = sizeof(struct scsi_target_group_data) + 7195 sizeof(struct scsi_target_port_group_descriptor) * 7196 num_target_port_groups + 7197 sizeof(struct scsi_target_port_descriptor) * 7198 NUM_PORTS_PER_GRP * num_target_port_groups; 7199 7200 alloc_len = scsi_4btoul(cdb->length); 7201 7202 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7203 7204 ctsio->kern_sg_entries = 0; 7205 7206 if (total_len < alloc_len) { 7207 ctsio->residual = alloc_len - total_len; 7208 ctsio->kern_data_len = total_len; 7209 ctsio->kern_total_len = total_len; 7210 } else { 7211 ctsio->residual = 0; 7212 ctsio->kern_data_len = alloc_len; 7213 ctsio->kern_total_len = alloc_len; 7214 } 7215 ctsio->kern_data_resid = 0; 7216 ctsio->kern_rel_offset = 0; 7217 7218 rtg_ptr = (struct scsi_target_group_data *)ctsio->kern_data_ptr; 7219 7220 tpg_desc_ptr1 = &rtg_ptr->groups[0]; 7221 tp_desc_ptr1_1 = &tpg_desc_ptr1->descriptors[0]; 7222 tp_desc_ptr1_2 = (struct scsi_target_port_descriptor *) 7223 &tp_desc_ptr1_1->desc_list[0]; 7224 7225 if (single == 0) { 7226 tpg_desc_ptr2 = (struct scsi_target_port_group_descriptor *) 7227 &tp_desc_ptr1_2->desc_list[0]; 7228 tp_desc_ptr2_1 = &tpg_desc_ptr2->descriptors[0]; 7229 tp_desc_ptr2_2 = (struct scsi_target_port_descriptor *) 7230 &tp_desc_ptr2_1->desc_list[0]; 7231 } else { 7232 tpg_desc_ptr2 = NULL; 7233 tp_desc_ptr2_1 = NULL; 7234 tp_desc_ptr2_2 = NULL; 7235 } 7236 7237 scsi_ulto4b(total_len - 4, rtg_ptr->length); 7238 if (single == 0) { 7239 if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS) { 7240 if (lun->flags & CTL_LUN_PRIMARY_SC) { 7241 tpg_desc_ptr1->pref_state = TPG_PRIMARY; 7242 tpg_desc_ptr2->pref_state = 7243 TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7244 } else { 7245 tpg_desc_ptr1->pref_state = 7246 TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7247 tpg_desc_ptr2->pref_state = TPG_PRIMARY; 7248 } 7249 } else { 7250 if (lun->flags & CTL_LUN_PRIMARY_SC) { 7251 tpg_desc_ptr1->pref_state = 7252 TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7253 tpg_desc_ptr2->pref_state = TPG_PRIMARY; 7254 } else { 7255 tpg_desc_ptr1->pref_state = TPG_PRIMARY; 7256 tpg_desc_ptr2->pref_state = 7257 TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7258 } 7259 } 7260 } else { 7261 tpg_desc_ptr1->pref_state = TPG_PRIMARY; 7262 } 7263 tpg_desc_ptr1->support = 0; 7264 tpg_desc_ptr1->target_port_group[1] = 1; 7265 tpg_desc_ptr1->status = TPG_IMPLICIT; 7266 tpg_desc_ptr1->target_port_count= NUM_PORTS_PER_GRP; 7267 7268 if (single == 0) { 7269 tpg_desc_ptr2->support = 0; 7270 tpg_desc_ptr2->target_port_group[1] = 2; 7271 tpg_desc_ptr2->status = TPG_IMPLICIT; 7272 tpg_desc_ptr2->target_port_count = NUM_PORTS_PER_GRP; 7273 7274 tp_desc_ptr1_1->relative_target_port_identifier[1] = 1; 7275 tp_desc_ptr1_2->relative_target_port_identifier[1] = 2; 7276 7277 tp_desc_ptr2_1->relative_target_port_identifier[1] = 9; 7278 tp_desc_ptr2_2->relative_target_port_identifier[1] = 10; 7279 } else { 7280 if (ctsio->io_hdr.nexus.targ_port 
< CTL_MAX_PORTS) { 7281 tp_desc_ptr1_1->relative_target_port_identifier[1] = 1; 7282 tp_desc_ptr1_2->relative_target_port_identifier[1] = 2; 7283 } else { 7284 tp_desc_ptr1_1->relative_target_port_identifier[1] = 9; 7285 tp_desc_ptr1_2->relative_target_port_identifier[1] = 10; 7286 } 7287 } 7288 7289 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7290 ctsio->be_move_done = ctl_config_move_done; 7291 7292 CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n", 7293 ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1], 7294 ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3], 7295 ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5], 7296 ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7])); 7297 7298 ctl_datamove((union ctl_io *)ctsio); 7299 return(retval); 7300} 7301 7302int 7303ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) 7304{ 7305 struct scsi_per_res_in *cdb; 7306 int alloc_len, total_len = 0; 7307 /* struct scsi_per_res_in_rsrv in_data; */ 7308 struct ctl_lun *lun; 7309 struct ctl_softc *softc; 7310 7311 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); 7312 7313 softc = control_softc; 7314 7315 cdb = (struct scsi_per_res_in *)ctsio->cdb; 7316 7317 alloc_len = scsi_2btoul(cdb->length); 7318 7319 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7320 7321retry: 7322 mtx_lock(&lun->lun_lock); 7323 switch (cdb->action) { 7324 case SPRI_RK: /* read keys */ 7325 total_len = sizeof(struct scsi_per_res_in_keys) + 7326 lun->pr_key_count * 7327 sizeof(struct scsi_per_res_key); 7328 break; 7329 case SPRI_RR: /* read reservation */ 7330 if (lun->flags & CTL_LUN_PR_RESERVED) 7331 total_len = sizeof(struct scsi_per_res_in_rsrv); 7332 else 7333 total_len = sizeof(struct scsi_per_res_in_header); 7334 break; 7335 case SPRI_RC: /* report capabilities */ 7336 total_len = sizeof(struct scsi_per_res_cap); 7337 break; 7338 case SPRI_RS: /* read full status */ 7339 default: 7340 mtx_unlock(&lun->lun_lock); 7341 ctl_set_invalid_field(ctsio, 7342 /*sks_valid*/ 1, 7343 /*command*/ 1, 7344 /*field*/ 1, 7345 /*bit_valid*/ 1, 7346 /*bit*/ 0); 7347 ctl_done((union ctl_io *)ctsio); 7348 return (CTL_RETVAL_COMPLETE); 7349 break; /* NOTREACHED */ 7350 } 7351 mtx_unlock(&lun->lun_lock); 7352 7353 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7354 7355 if (total_len < alloc_len) { 7356 ctsio->residual = alloc_len - total_len; 7357 ctsio->kern_data_len = total_len; 7358 ctsio->kern_total_len = total_len; 7359 } else { 7360 ctsio->residual = 0; 7361 ctsio->kern_data_len = alloc_len; 7362 ctsio->kern_total_len = alloc_len; 7363 } 7364 7365 ctsio->kern_data_resid = 0; 7366 ctsio->kern_rel_offset = 0; 7367 ctsio->kern_sg_entries = 0; 7368 7369 mtx_lock(&lun->lun_lock); 7370 switch (cdb->action) { 7371 case SPRI_RK: { // read keys 7372 struct scsi_per_res_in_keys *res_keys; 7373 int i, key_count; 7374 7375 res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7376 7377 /* 7378 * We had to drop the lock to allocate our buffer, which 7379 * leaves time for someone to come in with another 7380 * persistent reservation. (That is unlikely, though, 7381 * since this should be the only persistent reservation 7382 * command active right now.) 
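		 * If the key count did change, free the buffer, recompute
		 * the length and retry the whole request.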
7383 */ 7384 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7385 (lun->pr_key_count * 7386 sizeof(struct scsi_per_res_key)))){ 7387 mtx_unlock(&lun->lun_lock); 7388 free(ctsio->kern_data_ptr, M_CTL); 7389 printf("%s: reservation length changed, retrying\n", 7390 __func__); 7391 goto retry; 7392 } 7393 7394 scsi_ulto4b(lun->PRGeneration, res_keys->header.generation); 7395 7396 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7397 lun->pr_key_count, res_keys->header.length); 7398 7399 for (i = 0, key_count = 0; i < 2*CTL_MAX_INITIATORS; i++) { 7400 if (!lun->per_res[i].registered) 7401 continue; 7402 7403 /* 7404 * We used lun->pr_key_count to calculate the 7405 * size to allocate. If it turns out the number of 7406 * initiators with the registered flag set is 7407 * larger than that (i.e. they haven't been kept in 7408 * sync), we've got a problem. 7409 */ 7410 if (key_count >= lun->pr_key_count) { 7411#ifdef NEEDTOPORT 7412 csevent_log(CSC_CTL | CSC_SHELF_SW | 7413 CTL_PR_ERROR, 7414 csevent_LogType_Fault, 7415 csevent_AlertLevel_Yellow, 7416 csevent_FRU_ShelfController, 7417 csevent_FRU_Firmware, 7418 csevent_FRU_Unknown, 7419 "registered keys %d >= key " 7420 "count %d", key_count, 7421 lun->pr_key_count); 7422#endif 7423 key_count++; 7424 continue; 7425 } 7426 memcpy(res_keys->keys[key_count].key, 7427 lun->per_res[i].res_key.key, 7428 ctl_min(sizeof(res_keys->keys[key_count].key), 7429 sizeof(lun->per_res[i].res_key))); 7430 key_count++; 7431 } 7432 break; 7433 } 7434 case SPRI_RR: { // read reservation 7435 struct scsi_per_res_in_rsrv *res; 7436 int tmp_len, header_only; 7437 7438 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7439 7440 scsi_ulto4b(lun->PRGeneration, res->header.generation); 7441 7442 if (lun->flags & CTL_LUN_PR_RESERVED) 7443 { 7444 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7445 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7446 res->header.length); 7447 header_only = 0; 7448 } else { 7449 tmp_len = sizeof(struct scsi_per_res_in_header); 7450 scsi_ulto4b(0, res->header.length); 7451 header_only = 1; 7452 } 7453 7454 /* 7455 * We had to drop the lock to allocate our buffer, which 7456 * leaves time for someone to come in with another 7457 * persistent reservation. (That is unlikely, though, 7458 * since this should be the only persistent reservation 7459 * command active right now.) 7460 */ 7461 if (tmp_len != total_len) { 7462 mtx_unlock(&lun->lun_lock); 7463 free(ctsio->kern_data_ptr, M_CTL); 7464 printf("%s: reservation status changed, retrying\n", 7465 __func__); 7466 goto retry; 7467 } 7468 7469 /* 7470 * No reservation held, so we're done. 7471 */ 7472 if (header_only != 0) 7473 break; 7474 7475 /* 7476 * If the registration is an All Registrants type, the key 7477 * is 0, since it doesn't really matter. 
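		 * The data buffer was allocated with M_ZERO, so the
		 * reservation key below is already zeroed; we only fill
		 * it in for a specific reservation holder.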
7478 */ 7479 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7480 memcpy(res->data.reservation, 7481 &lun->per_res[lun->pr_res_idx].res_key, 7482 sizeof(struct scsi_per_res_key)); 7483 } 7484 res->data.scopetype = lun->res_type; 7485 break; 7486 } 7487 case SPRI_RC: //report capabilities 7488 { 7489 struct scsi_per_res_cap *res_cap; 7490 uint16_t type_mask; 7491 7492 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7493 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7494 res_cap->flags2 |= SPRI_TMV | SPRI_ALLOW_3; 7495 type_mask = SPRI_TM_WR_EX_AR | 7496 SPRI_TM_EX_AC_RO | 7497 SPRI_TM_WR_EX_RO | 7498 SPRI_TM_EX_AC | 7499 SPRI_TM_WR_EX | 7500 SPRI_TM_EX_AC_AR; 7501 scsi_ulto2b(type_mask, res_cap->type_mask); 7502 break; 7503 } 7504 case SPRI_RS: //read full status 7505 default: 7506 /* 7507 * This is a bug, because we just checked for this above, 7508 * and should have returned an error. 7509 */ 7510 panic("Invalid PR type %x", cdb->action); 7511 break; /* NOTREACHED */ 7512 } 7513 mtx_unlock(&lun->lun_lock); 7514 7515 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7516 ctsio->be_move_done = ctl_config_move_done; 7517 7518 CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n", 7519 ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1], 7520 ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3], 7521 ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5], 7522 ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7])); 7523 7524 ctl_datamove((union ctl_io *)ctsio); 7525 7526 return (CTL_RETVAL_COMPLETE); 7527} 7528 7529/* 7530 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7531 * it should return. 7532 */ 7533static int 7534ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7535 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7536 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7537 struct scsi_per_res_out_parms* param) 7538{ 7539 union ctl_ha_msg persis_io; 7540 int retval, i; 7541 int isc_retval; 7542 7543 retval = 0; 7544 7545 mtx_lock(&lun->lun_lock); 7546 if (sa_res_key == 0) { 7547 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7548 /* validate scope and type */ 7549 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7550 SPR_LU_SCOPE) { 7551 mtx_unlock(&lun->lun_lock); 7552 ctl_set_invalid_field(/*ctsio*/ ctsio, 7553 /*sks_valid*/ 1, 7554 /*command*/ 1, 7555 /*field*/ 2, 7556 /*bit_valid*/ 1, 7557 /*bit*/ 4); 7558 ctl_done((union ctl_io *)ctsio); 7559 return (1); 7560 } 7561 7562 if (type>8 || type==2 || type==4 || type==0) { 7563 mtx_unlock(&lun->lun_lock); 7564 ctl_set_invalid_field(/*ctsio*/ ctsio, 7565 /*sks_valid*/ 1, 7566 /*command*/ 1, 7567 /*field*/ 2, 7568 /*bit_valid*/ 1, 7569 /*bit*/ 0); 7570 ctl_done((union ctl_io *)ctsio); 7571 return (1); 7572 } 7573 7574 /* temporarily unregister this nexus */ 7575 lun->per_res[residx].registered = 0; 7576 7577 /* 7578 * Unregister everybody else and build UA for 7579 * them 7580 */ 7581 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7582 if (lun->per_res[i].registered == 0) 7583 continue; 7584 7585 if (!persis_offset 7586 && i <CTL_MAX_INITIATORS) 7587 lun->pending_sense[i].ua_pending |= 7588 CTL_UA_REG_PREEMPT; 7589 else if (persis_offset 7590 && i >= persis_offset) 7591 lun->pending_sense[i-persis_offset 7592 ].ua_pending |= 7593 CTL_UA_REG_PREEMPT; 7594 lun->per_res[i].registered = 0; 7595 memset(&lun->per_res[i].res_key, 0, 7596 sizeof(struct scsi_per_res_key)); 7597 } 7598 lun->per_res[residx].registered = 1; 7599 lun->pr_key_count = 1; 7600 lun->res_type = type; 7601 if (lun->res_type != 
SPR_TYPE_WR_EX_AR 7602 && lun->res_type != SPR_TYPE_EX_AC_AR) 7603 lun->pr_res_idx = residx; 7604 7605 /* send msg to other side */ 7606 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7607 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7608 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7609 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7610 persis_io.pr.pr_info.res_type = type; 7611 memcpy(persis_io.pr.pr_info.sa_res_key, 7612 param->serv_act_res_key, 7613 sizeof(param->serv_act_res_key)); 7614 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7615 &persis_io, sizeof(persis_io), 0)) > 7616 CTL_HA_STATUS_SUCCESS) { 7617 printf("CTL:Persis Out error returned " 7618 "from ctl_ha_msg_send %d\n", 7619 isc_retval); 7620 } 7621 } else { 7622 /* not all registrants */ 7623 mtx_unlock(&lun->lun_lock); 7624 free(ctsio->kern_data_ptr, M_CTL); 7625 ctl_set_invalid_field(ctsio, 7626 /*sks_valid*/ 1, 7627 /*command*/ 0, 7628 /*field*/ 8, 7629 /*bit_valid*/ 0, 7630 /*bit*/ 0); 7631 ctl_done((union ctl_io *)ctsio); 7632 return (1); 7633 } 7634 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7635 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 7636 int found = 0; 7637 7638 if (res_key == sa_res_key) { 7639 /* special case */ 7640 /* 7641 * The spec implies this is not good but doesn't 7642 * say what to do. There are two choices either 7643 * generate a res conflict or check condition 7644 * with illegal field in parameter data. Since 7645 * that is what is done when the sa_res_key is 7646 * zero I'll take that approach since this has 7647 * to do with the sa_res_key. 7648 */ 7649 mtx_unlock(&lun->lun_lock); 7650 free(ctsio->kern_data_ptr, M_CTL); 7651 ctl_set_invalid_field(ctsio, 7652 /*sks_valid*/ 1, 7653 /*command*/ 0, 7654 /*field*/ 8, 7655 /*bit_valid*/ 0, 7656 /*bit*/ 0); 7657 ctl_done((union ctl_io *)ctsio); 7658 return (1); 7659 } 7660 7661 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7662 if (lun->per_res[i].registered 7663 && memcmp(param->serv_act_res_key, 7664 lun->per_res[i].res_key.key, 7665 sizeof(struct scsi_per_res_key)) != 0) 7666 continue; 7667 7668 found = 1; 7669 lun->per_res[i].registered = 0; 7670 memset(&lun->per_res[i].res_key, 0, 7671 sizeof(struct scsi_per_res_key)); 7672 lun->pr_key_count--; 7673 7674 if (!persis_offset 7675 && i < CTL_MAX_INITIATORS) 7676 lun->pending_sense[i].ua_pending |= 7677 CTL_UA_REG_PREEMPT; 7678 else if (persis_offset 7679 && i >= persis_offset) 7680 lun->pending_sense[i-persis_offset].ua_pending|= 7681 CTL_UA_REG_PREEMPT; 7682 } 7683 if (!found) { 7684 mtx_unlock(&lun->lun_lock); 7685 free(ctsio->kern_data_ptr, M_CTL); 7686 ctl_set_reservation_conflict(ctsio); 7687 ctl_done((union ctl_io *)ctsio); 7688 return (CTL_RETVAL_COMPLETE); 7689 } 7690 /* send msg to other side */ 7691 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7692 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7693 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7694 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7695 persis_io.pr.pr_info.res_type = type; 7696 memcpy(persis_io.pr.pr_info.sa_res_key, 7697 param->serv_act_res_key, 7698 sizeof(param->serv_act_res_key)); 7699 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7700 &persis_io, sizeof(persis_io), 0)) > 7701 CTL_HA_STATUS_SUCCESS) { 7702 printf("CTL:Persis Out error returned from " 7703 "ctl_ha_msg_send %d\n", isc_retval); 7704 } 7705 } else { 7706 /* Reserved but not all registrants */ 7707 /* sa_res_key is res holder */ 7708 if (memcmp(param->serv_act_res_key, 7709 lun->per_res[lun->pr_res_idx].res_key.key, 7710 sizeof(struct 
scsi_per_res_key)) == 0) { 7711 /* validate scope and type */ 7712 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7713 SPR_LU_SCOPE) { 7714 mtx_unlock(&lun->lun_lock); 7715 ctl_set_invalid_field(/*ctsio*/ ctsio, 7716 /*sks_valid*/ 1, 7717 /*command*/ 1, 7718 /*field*/ 2, 7719 /*bit_valid*/ 1, 7720 /*bit*/ 4); 7721 ctl_done((union ctl_io *)ctsio); 7722 return (1); 7723 } 7724 7725 if (type>8 || type==2 || type==4 || type==0) { 7726 mtx_unlock(&lun->lun_lock); 7727 ctl_set_invalid_field(/*ctsio*/ ctsio, 7728 /*sks_valid*/ 1, 7729 /*command*/ 1, 7730 /*field*/ 2, 7731 /*bit_valid*/ 1, 7732 /*bit*/ 0); 7733 ctl_done((union ctl_io *)ctsio); 7734 return (1); 7735 } 7736 7737 /* 7738 * Do the following: 7739 * if sa_res_key != res_key remove all 7740 * registrants w/sa_res_key and generate UA 7741 * for these registrants(Registrations 7742 * Preempted) if it wasn't an exclusive 7743 * reservation generate UA(Reservations 7744 * Preempted) for all other registered nexuses 7745 * if the type has changed. Establish the new 7746 * reservation and holder. If res_key and 7747 * sa_res_key are the same do the above 7748 * except don't unregister the res holder. 7749 */ 7750 7751 /* 7752 * Temporarily unregister so it won't get 7753 * removed or UA generated 7754 */ 7755 lun->per_res[residx].registered = 0; 7756 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7757 if (lun->per_res[i].registered == 0) 7758 continue; 7759 7760 if (memcmp(param->serv_act_res_key, 7761 lun->per_res[i].res_key.key, 7762 sizeof(struct scsi_per_res_key)) == 0) { 7763 lun->per_res[i].registered = 0; 7764 memset(&lun->per_res[i].res_key, 7765 0, 7766 sizeof(struct scsi_per_res_key)); 7767 lun->pr_key_count--; 7768 7769 if (!persis_offset 7770 && i < CTL_MAX_INITIATORS) 7771 lun->pending_sense[i 7772 ].ua_pending |= 7773 CTL_UA_REG_PREEMPT; 7774 else if (persis_offset 7775 && i >= persis_offset) 7776 lun->pending_sense[ 7777 i-persis_offset].ua_pending |= 7778 CTL_UA_REG_PREEMPT; 7779 } else if (type != lun->res_type 7780 && (lun->res_type == SPR_TYPE_WR_EX_RO 7781 || lun->res_type ==SPR_TYPE_EX_AC_RO)){ 7782 if (!persis_offset 7783 && i < CTL_MAX_INITIATORS) 7784 lun->pending_sense[i 7785 ].ua_pending |= 7786 CTL_UA_RES_RELEASE; 7787 else if (persis_offset 7788 && i >= persis_offset) 7789 lun->pending_sense[ 7790 i-persis_offset 7791 ].ua_pending |= 7792 CTL_UA_RES_RELEASE; 7793 } 7794 } 7795 lun->per_res[residx].registered = 1; 7796 lun->res_type = type; 7797 if (lun->res_type != SPR_TYPE_WR_EX_AR 7798 && lun->res_type != SPR_TYPE_EX_AC_AR) 7799 lun->pr_res_idx = residx; 7800 else 7801 lun->pr_res_idx = 7802 CTL_PR_ALL_REGISTRANTS; 7803 7804 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7805 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7806 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7807 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7808 persis_io.pr.pr_info.res_type = type; 7809 memcpy(persis_io.pr.pr_info.sa_res_key, 7810 param->serv_act_res_key, 7811 sizeof(param->serv_act_res_key)); 7812 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7813 &persis_io, sizeof(persis_io), 0)) > 7814 CTL_HA_STATUS_SUCCESS) { 7815 printf("CTL:Persis Out error returned " 7816 "from ctl_ha_msg_send %d\n", 7817 isc_retval); 7818 } 7819 } else { 7820 /* 7821 * sa_res_key is not the res holder just 7822 * remove registrants 7823 */ 7824 int found=0; 7825 7826 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7827 if (memcmp(param->serv_act_res_key, 7828 lun->per_res[i].res_key.key, 7829 sizeof(struct scsi_per_res_key)) != 0) 7830 continue; 7831 7832 found = 1; 7833 
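				/*
				 * Drop this registrant and queue a
				 * Registrations Preempted UA for it.
				 */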
lun->per_res[i].registered = 0; 7834				memset(&lun->per_res[i].res_key, 0, 7835				       sizeof(struct scsi_per_res_key)); 7836				lun->pr_key_count--; 7837 7838				if (!persis_offset 7839				 && i < CTL_MAX_INITIATORS) 7840					lun->pending_sense[i].ua_pending |= 7841						CTL_UA_REG_PREEMPT; 7842				else if (persis_offset 7843				      && i >= persis_offset) 7844					lun->pending_sense[ 7845						i-persis_offset].ua_pending |= 7846						CTL_UA_REG_PREEMPT; 7847			} 7848 7849			if (!found) { 7850				mtx_unlock(&lun->lun_lock); 7851				free(ctsio->kern_data_ptr, M_CTL); 7852				ctl_set_reservation_conflict(ctsio); 7853				ctl_done((union ctl_io *)ctsio); 7854				return (1); 7855			} 7856			persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7857			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7858			persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7859			persis_io.pr.pr_info.residx = lun->pr_res_idx; 7860			persis_io.pr.pr_info.res_type = type; 7861			memcpy(persis_io.pr.pr_info.sa_res_key, 7862			       param->serv_act_res_key, 7863			       sizeof(param->serv_act_res_key)); 7864			if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7865			     &persis_io, sizeof(persis_io), 0)) > 7866			     CTL_HA_STATUS_SUCCESS) { 7867				printf("CTL:Persis Out error returned " 7868				       "from ctl_ha_msg_send %d\n", 7869				       isc_retval); 7870			} 7871		} 7872	} 7873 7874	lun->PRGeneration++; 7875	mtx_unlock(&lun->lun_lock); 7876 7877	return (retval); 7878} 7879 7880static void 7881ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 7882{ 7883	int i; 7884 7885	if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7886	 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 7887	 || memcmp(&lun->per_res[lun->pr_res_idx].res_key, 7888	    msg->pr.pr_info.sa_res_key, 7889	    sizeof(struct scsi_per_res_key)) != 0) { 7890		uint64_t sa_res_key; 7891		sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 7892 7893		if (sa_res_key == 0) { 7894			/* temporarily unregister this nexus */ 7895			lun->per_res[msg->pr.pr_info.residx].registered = 0; 7896 7897			/* 7898			 * Unregister everybody else and build UA for 7899			 * them 7900			 */ 7901			for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7902				if (lun->per_res[i].registered == 0) 7903					continue; 7904 7905				if (!persis_offset 7906				 && i < CTL_MAX_INITIATORS) 7907					lun->pending_sense[i].ua_pending |= 7908						CTL_UA_REG_PREEMPT; 7909				else if (persis_offset && i >= persis_offset) 7910					lun->pending_sense[i - 7911						persis_offset].ua_pending |= 7912						CTL_UA_REG_PREEMPT; 7913				lun->per_res[i].registered = 0; 7914				memset(&lun->per_res[i].res_key, 0, 7915				       sizeof(struct scsi_per_res_key)); 7916			} 7917 7918			lun->per_res[msg->pr.pr_info.residx].registered = 1; 7919			lun->pr_key_count = 1; 7920			lun->res_type = msg->pr.pr_info.res_type; 7921			if (lun->res_type != SPR_TYPE_WR_EX_AR 7922			 && lun->res_type != SPR_TYPE_EX_AC_AR) 7923				lun->pr_res_idx = msg->pr.pr_info.residx; 7924		} else { 7925			for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7926				if (memcmp(msg->pr.pr_info.sa_res_key, 7927				    lun->per_res[i].res_key.key, 7928				    sizeof(struct scsi_per_res_key)) != 0) 7929					continue; 7930 7931				lun->per_res[i].registered = 0; 7932				memset(&lun->per_res[i].res_key, 0, 7933				       sizeof(struct scsi_per_res_key)); 7934				lun->pr_key_count--; 7935 7936				if (!persis_offset 7937				 && i < CTL_MAX_INITIATORS) 7938					lun->pending_sense[i].ua_pending |= 7939						CTL_UA_REG_PREEMPT; 7940				else if (persis_offset 7941				      && i >= persis_offset) 7942					lun->pending_sense[i - 7943						persis_offset].ua_pending |= 7944						CTL_UA_REG_PREEMPT; 7945			} 7946		} 7947	} else { 7948		/* 7949		 * Temporarily unregister so it won't get removed 7950		 * or UA generated 7951		 */ 7952		lun->per_res[msg->pr.pr_info.residx].registered = 0; 7953		for (i=0; i < 
2*CTL_MAX_INITIATORS; i++) { 7954			if (lun->per_res[i].registered == 0) 7955				continue; 7956 7957			if (memcmp(msg->pr.pr_info.sa_res_key, 7958			    lun->per_res[i].res_key.key, 7959			    sizeof(struct scsi_per_res_key)) == 0) { 7960				lun->per_res[i].registered = 0; 7961				memset(&lun->per_res[i].res_key, 0, 7962				       sizeof(struct scsi_per_res_key)); 7963				lun->pr_key_count--; 7964				if (!persis_offset 7965				 && i < CTL_MAX_INITIATORS) 7966					lun->pending_sense[i].ua_pending |= 7967						CTL_UA_REG_PREEMPT; 7968				else if (persis_offset 7969				      && i >= persis_offset) 7970					lun->pending_sense[i - 7971						persis_offset].ua_pending |= 7972						CTL_UA_REG_PREEMPT; 7973			} else if (msg->pr.pr_info.res_type != lun->res_type 7974				&& (lun->res_type == SPR_TYPE_WR_EX_RO 7975				 || lun->res_type == SPR_TYPE_EX_AC_RO)) { 7976				if (!persis_offset 7977				 && i < CTL_MAX_INITIATORS) 7978					lun->pending_sense[i 7979						].ua_pending |= 7980						CTL_UA_RES_RELEASE; 7981				else if (persis_offset 7982				      && i >= persis_offset) 7983					lun->pending_sense[i - 7984						persis_offset].ua_pending |= 7985						CTL_UA_RES_RELEASE; 7986			} 7987		} 7988		lun->per_res[msg->pr.pr_info.residx].registered = 1; 7989		lun->res_type = msg->pr.pr_info.res_type; 7990		if (lun->res_type != SPR_TYPE_WR_EX_AR 7991		 && lun->res_type != SPR_TYPE_EX_AC_AR) 7992			lun->pr_res_idx = msg->pr.pr_info.residx; 7993		else 7994			lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 7995	} 7996	lun->PRGeneration++; 7997 7998} 7999 8000 8001int 8002ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 8003{ 8004	int retval; 8005	int isc_retval; 8006	u_int32_t param_len; 8007	struct scsi_per_res_out *cdb; 8008	struct ctl_lun *lun; 8009	struct scsi_per_res_out_parms* param; 8010	struct ctl_softc *softc; 8011	uint32_t residx; 8012	uint64_t res_key, sa_res_key; 8013	uint8_t type; 8014	union ctl_ha_msg persis_io; 8015	int i; 8016 8017	CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 8018 8019	retval = CTL_RETVAL_COMPLETE; 8020 8021	softc = control_softc; 8022 8023	cdb = (struct scsi_per_res_out *)ctsio->cdb; 8024	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8025 8026	/* 8027	 * We only support whole-LUN scope.  The scope & type are ignored for 8028	 * register, register and ignore existing key and clear. 8029	 * We sometimes ignore scope and type on preempts too!! 8030	 * Verify reservation type here as well. 
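	 * Valid SPC-3 persistent reservation types are 1, 3, 5, 6, 7 and 8;
	 * 0, 2, 4 and anything above 8 are reserved or obsolete, which is
	 * what the type checks below reject.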
8031 */ 8032 type = cdb->scope_type & SPR_TYPE_MASK; 8033 if ((cdb->action == SPRO_RESERVE) 8034 || (cdb->action == SPRO_RELEASE)) { 8035 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 8036 ctl_set_invalid_field(/*ctsio*/ ctsio, 8037 /*sks_valid*/ 1, 8038 /*command*/ 1, 8039 /*field*/ 2, 8040 /*bit_valid*/ 1, 8041 /*bit*/ 4); 8042 ctl_done((union ctl_io *)ctsio); 8043 return (CTL_RETVAL_COMPLETE); 8044 } 8045 8046 if (type>8 || type==2 || type==4 || type==0) { 8047 ctl_set_invalid_field(/*ctsio*/ ctsio, 8048 /*sks_valid*/ 1, 8049 /*command*/ 1, 8050 /*field*/ 2, 8051 /*bit_valid*/ 1, 8052 /*bit*/ 0); 8053 ctl_done((union ctl_io *)ctsio); 8054 return (CTL_RETVAL_COMPLETE); 8055 } 8056 } 8057 8058 switch (cdb->action & SPRO_ACTION_MASK) { 8059 case SPRO_REGISTER: 8060 case SPRO_RESERVE: 8061 case SPRO_RELEASE: 8062 case SPRO_CLEAR: 8063 case SPRO_PREEMPT: 8064 case SPRO_REG_IGNO: 8065 break; 8066 case SPRO_REG_MOVE: 8067 case SPRO_PRE_ABO: 8068 default: 8069 ctl_set_invalid_field(/*ctsio*/ ctsio, 8070 /*sks_valid*/ 1, 8071 /*command*/ 1, 8072 /*field*/ 1, 8073 /*bit_valid*/ 1, 8074 /*bit*/ 0); 8075 ctl_done((union ctl_io *)ctsio); 8076 return (CTL_RETVAL_COMPLETE); 8077 break; /* NOTREACHED */ 8078 } 8079 8080 param_len = scsi_4btoul(cdb->length); 8081 8082 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 8083 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 8084 ctsio->kern_data_len = param_len; 8085 ctsio->kern_total_len = param_len; 8086 ctsio->kern_data_resid = 0; 8087 ctsio->kern_rel_offset = 0; 8088 ctsio->kern_sg_entries = 0; 8089 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 8090 ctsio->be_move_done = ctl_config_move_done; 8091 ctl_datamove((union ctl_io *)ctsio); 8092 8093 return (CTL_RETVAL_COMPLETE); 8094 } 8095 8096 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 8097 8098 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 8099 res_key = scsi_8btou64(param->res_key.key); 8100 sa_res_key = scsi_8btou64(param->serv_act_res_key); 8101 8102 /* 8103 * Validate the reservation key here except for SPRO_REG_IGNO 8104 * This must be done for all other service actions 8105 */ 8106 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 8107 mtx_lock(&lun->lun_lock); 8108 if (lun->per_res[residx].registered) { 8109 if (memcmp(param->res_key.key, 8110 lun->per_res[residx].res_key.key, 8111 ctl_min(sizeof(param->res_key), 8112 sizeof(lun->per_res[residx].res_key))) != 0) { 8113 /* 8114 * The current key passed in doesn't match 8115 * the one the initiator previously 8116 * registered. 8117 */ 8118 mtx_unlock(&lun->lun_lock); 8119 free(ctsio->kern_data_ptr, M_CTL); 8120 ctl_set_reservation_conflict(ctsio); 8121 ctl_done((union ctl_io *)ctsio); 8122 return (CTL_RETVAL_COMPLETE); 8123 } 8124 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 8125 /* 8126 * We are not registered 8127 */ 8128 mtx_unlock(&lun->lun_lock); 8129 free(ctsio->kern_data_ptr, M_CTL); 8130 ctl_set_reservation_conflict(ctsio); 8131 ctl_done((union ctl_io *)ctsio); 8132 return (CTL_RETVAL_COMPLETE); 8133 } else if (res_key != 0) { 8134 /* 8135 * We are not registered and trying to register but 8136 * the register key isn't zero. 
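			 * (SPC-3 requires an unregistered initiator to send
			 * a zero reservation key with REGISTER; anything
			 * else is a reservation conflict.)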
8137 */ 8138 mtx_unlock(&lun->lun_lock); 8139 free(ctsio->kern_data_ptr, M_CTL); 8140 ctl_set_reservation_conflict(ctsio); 8141 ctl_done((union ctl_io *)ctsio); 8142 return (CTL_RETVAL_COMPLETE); 8143 } 8144 mtx_unlock(&lun->lun_lock); 8145 } 8146 8147 switch (cdb->action & SPRO_ACTION_MASK) { 8148 case SPRO_REGISTER: 8149 case SPRO_REG_IGNO: { 8150 8151#if 0 8152 printf("Registration received\n"); 8153#endif 8154 8155 /* 8156 * We don't support any of these options, as we report in 8157 * the read capabilities request (see 8158 * ctl_persistent_reserve_in(), above). 8159 */ 8160 if ((param->flags & SPR_SPEC_I_PT) 8161 || (param->flags & SPR_ALL_TG_PT) 8162 || (param->flags & SPR_APTPL)) { 8163 int bit_ptr; 8164 8165 if (param->flags & SPR_APTPL) 8166 bit_ptr = 0; 8167 else if (param->flags & SPR_ALL_TG_PT) 8168 bit_ptr = 2; 8169 else /* SPR_SPEC_I_PT */ 8170 bit_ptr = 3; 8171 8172 free(ctsio->kern_data_ptr, M_CTL); 8173 ctl_set_invalid_field(ctsio, 8174 /*sks_valid*/ 1, 8175 /*command*/ 0, 8176 /*field*/ 20, 8177 /*bit_valid*/ 1, 8178 /*bit*/ bit_ptr); 8179 ctl_done((union ctl_io *)ctsio); 8180 return (CTL_RETVAL_COMPLETE); 8181 } 8182 8183 mtx_lock(&lun->lun_lock); 8184 8185 /* 8186 * The initiator wants to clear the 8187 * key/unregister. 8188 */ 8189 if (sa_res_key == 0) { 8190 if ((res_key == 0 8191 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 8192 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 8193 && !lun->per_res[residx].registered)) { 8194 mtx_unlock(&lun->lun_lock); 8195 goto done; 8196 } 8197 8198 lun->per_res[residx].registered = 0; 8199 memset(&lun->per_res[residx].res_key, 8200 0, sizeof(lun->per_res[residx].res_key)); 8201 lun->pr_key_count--; 8202 8203 if (residx == lun->pr_res_idx) { 8204 lun->flags &= ~CTL_LUN_PR_RESERVED; 8205 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8206 8207 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8208 || lun->res_type == SPR_TYPE_EX_AC_RO) 8209 && lun->pr_key_count) { 8210 /* 8211 * If the reservation is a registrants 8212 * only type we need to generate a UA 8213 * for other registered inits. The 8214 * sense code should be RESERVATIONS 8215 * RELEASED 8216 */ 8217 8218 for (i = 0; i < CTL_MAX_INITIATORS;i++){ 8219 if (lun->per_res[ 8220 i+persis_offset].registered 8221 == 0) 8222 continue; 8223 lun->pending_sense[i 8224 ].ua_pending |= 8225 CTL_UA_RES_RELEASE; 8226 } 8227 } 8228 lun->res_type = 0; 8229 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8230 if (lun->pr_key_count==0) { 8231 lun->flags &= ~CTL_LUN_PR_RESERVED; 8232 lun->res_type = 0; 8233 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8234 } 8235 } 8236 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8237 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8238 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8239 persis_io.pr.pr_info.residx = residx; 8240 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8241 &persis_io, sizeof(persis_io), 0 )) > 8242 CTL_HA_STATUS_SUCCESS) { 8243 printf("CTL:Persis Out error returned from " 8244 "ctl_ha_msg_send %d\n", isc_retval); 8245 } 8246 } else /* sa_res_key != 0 */ { 8247 8248 /* 8249 * If we aren't registered currently then increment 8250 * the key count and set the registered flag. 
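			 *
			 * (A typical key change under SPC-3: the initiator
			 * first registers with res_key 0 and sa_res_key K,
			 * and later replaces K by sending another REGISTER
			 * with res_key K and a new sa_res_key; the
			 * unregister path above is the same command with
			 * sa_res_key 0.)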
			 */
			if (!lun->per_res[residx].registered) {
				lun->pr_key_count++;
				lun->per_res[residx].registered = 1;
			}

			memcpy(&lun->per_res[residx].res_key,
			    param->serv_act_res_key,
			    ctl_min(sizeof(param->serv_act_res_key),
			    sizeof(lun->per_res[residx].res_key)));

			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
			persis_io.pr.pr_info.action = CTL_PR_REG_KEY;
			persis_io.pr.pr_info.residx = residx;
			memcpy(persis_io.pr.pr_info.sa_res_key,
			    param->serv_act_res_key,
			    sizeof(param->serv_act_res_key));
			if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL,
			    &persis_io, sizeof(persis_io), 0)) >
			    CTL_HA_STATUS_SUCCESS) {
				printf("CTL:Persis Out error returned from "
				    "ctl_ha_msg_send %d\n", isc_retval);
			}
		}
		lun->PRGeneration++;
		mtx_unlock(&lun->lun_lock);

		break;
	}
	case SPRO_RESERVE:
#if 0
		printf("Reserve executed type %d\n", type);
#endif
		mtx_lock(&lun->lun_lock);
		if (lun->flags & CTL_LUN_PR_RESERVED) {
			/*
			 * If this isn't the reservation holder and it's not
			 * an "all registrants" type, or if the type is
			 * different, we have a conflict.
			 */
			if ((lun->pr_res_idx != residx
			  && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS)
			 || lun->res_type != type) {
				mtx_unlock(&lun->lun_lock);
				free(ctsio->kern_data_ptr, M_CTL);
				ctl_set_reservation_conflict(ctsio);
				ctl_done((union ctl_io *)ctsio);
				return (CTL_RETVAL_COMPLETE);
			}
			mtx_unlock(&lun->lun_lock);
		} else /* create a reservation */ {
			/*
			 * If it's not an "all registrants" type, record
			 * the reservation holder.
			 */
			if (type != SPR_TYPE_WR_EX_AR
			 && type != SPR_TYPE_EX_AC_AR)
				lun->pr_res_idx = residx; /* Res holder */
			else
				lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;

			lun->flags |= CTL_LUN_PR_RESERVED;
			lun->res_type = type;

			mtx_unlock(&lun->lun_lock);

			/* send msg to other side */
			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
			persis_io.pr.pr_info.action = CTL_PR_RESERVE;
			persis_io.pr.pr_info.residx = lun->pr_res_idx;
			persis_io.pr.pr_info.res_type = type;
			if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL,
			    &persis_io, sizeof(persis_io), 0)) >
			    CTL_HA_STATUS_SUCCESS) {
				printf("CTL:Persis Out error returned from "
				    "ctl_ha_msg_send %d\n", isc_retval);
			}
		}
		break;

	case SPRO_RELEASE:
		mtx_lock(&lun->lun_lock);
		if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) {
			/* No reservation exists; return good status. */
			mtx_unlock(&lun->lun_lock);
			goto done;
		}
		/*
		 * Is this nexus a reservation holder?
		 */
		if (lun->pr_res_idx != residx
		 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
			/*
			 * Not a reservation holder; return good status
			 * but do nothing.
			 */
			mtx_unlock(&lun->lun_lock);
			goto done;
		}

		if (lun->res_type != type) {
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			ctl_set_illegal_pr_release(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}

		/* okay to release */
		lun->flags &= ~CTL_LUN_PR_RESERVED;
		lun->pr_res_idx = CTL_PR_NO_RESERVATION;
		lun->res_type = 0;

		/*
		 * If this isn't an exclusive access reservation, generate
		 * a UA for all other registrants.
		 */
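		/*
		 * (Per SPC-3, the RESERVATIONS RELEASED UA applies only to
		 * the shared, registrants-only and all-registrants types;
		 * for plain write exclusive and exclusive access the
		 * standard does not call for notifying other registrants.)
		 */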
		if (type != SPR_TYPE_EX_AC
		 && type != SPR_TYPE_WR_EX) {
			/*
			 * Temporarily unregister so we don't generate a UA
			 * for ourselves.
			 */
			lun->per_res[residx].registered = 0;

			for (i = 0; i < CTL_MAX_INITIATORS; i++) {
				if (lun->per_res[i+persis_offset].registered
				    == 0)
					continue;
				lun->pending_sense[i].ua_pending |=
				    CTL_UA_RES_RELEASE;
			}

			lun->per_res[residx].registered = 1;
		}
		mtx_unlock(&lun->lun_lock);
		/* Send msg to other side */
		persis_io.hdr.nexus = ctsio->io_hdr.nexus;
		persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
		persis_io.pr.pr_info.action = CTL_PR_RELEASE;
		if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
		    sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) {
			printf("CTL:Persis Out error returned from "
			    "ctl_ha_msg_send %d\n", isc_retval);
		}
		break;

	case SPRO_CLEAR:
		/* send msg to other side */

		mtx_lock(&lun->lun_lock);
		lun->flags &= ~CTL_LUN_PR_RESERVED;
		lun->res_type = 0;
		lun->pr_key_count = 0;
		lun->pr_res_idx = CTL_PR_NO_RESERVATION;

		memset(&lun->per_res[residx].res_key,
		    0, sizeof(lun->per_res[residx].res_key));
		lun->per_res[residx].registered = 0;

		for (i = 0; i < 2*CTL_MAX_INITIATORS; i++)
			if (lun->per_res[i].registered) {
				if (!persis_offset && i < CTL_MAX_INITIATORS)
					lun->pending_sense[i].ua_pending |=
					    CTL_UA_RES_PREEMPT;
				else if (persis_offset && i >= persis_offset)
					lun->pending_sense[i-persis_offset
					    ].ua_pending |= CTL_UA_RES_PREEMPT;

				memset(&lun->per_res[i].res_key,
				    0, sizeof(struct scsi_per_res_key));
				lun->per_res[i].registered = 0;
			}
		lun->PRGeneration++;
		mtx_unlock(&lun->lun_lock);
		persis_io.hdr.nexus = ctsio->io_hdr.nexus;
		persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
		persis_io.pr.pr_info.action = CTL_PR_CLEAR;
		if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
		    sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) {
			printf("CTL:Persis Out error returned from "
			    "ctl_ha_msg_send %d\n", isc_retval);
		}
		break;

	case SPRO_PREEMPT: {
		int nretval;

		nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key,
		    type, residx, ctsio, cdb, param);
		if (nretval != 0)
			return (CTL_RETVAL_COMPLETE);
		break;
	}
	case SPRO_REG_MOVE:
	case SPRO_PRE_ABO:
	default:
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_set_invalid_field(/*ctsio*/ ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 1,
				      /*bit_valid*/ 1,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
		break; /* NOTREACHED */
	}

done:
	free(ctsio->kern_data_ptr, M_CTL);
	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);

	return (retval);
}

/*
 * This routine handles a message from the other SC pertaining to
 * persistent reserve out.  All the error checking will have been done
 * already, so only the action itself needs to be performed here to keep
 * the two sides in sync.
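 * (The sending side of this exchange is the set of ctl_ha_msg_send()
 * calls in ctl_persistent_reserve_out() above.)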
8476 */ 8477static void 8478ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg) 8479{ 8480 struct ctl_lun *lun; 8481 struct ctl_softc *softc; 8482 int i; 8483 uint32_t targ_lun; 8484 8485 softc = control_softc; 8486 8487 targ_lun = msg->hdr.nexus.targ_mapped_lun; 8488 lun = softc->ctl_luns[targ_lun]; 8489 mtx_lock(&lun->lun_lock); 8490 switch(msg->pr.pr_info.action) { 8491 case CTL_PR_REG_KEY: 8492 if (!lun->per_res[msg->pr.pr_info.residx].registered) { 8493 lun->per_res[msg->pr.pr_info.residx].registered = 1; 8494 lun->pr_key_count++; 8495 } 8496 lun->PRGeneration++; 8497 memcpy(&lun->per_res[msg->pr.pr_info.residx].res_key, 8498 msg->pr.pr_info.sa_res_key, 8499 sizeof(struct scsi_per_res_key)); 8500 break; 8501 8502 case CTL_PR_UNREG_KEY: 8503 lun->per_res[msg->pr.pr_info.residx].registered = 0; 8504 memset(&lun->per_res[msg->pr.pr_info.residx].res_key, 8505 0, sizeof(struct scsi_per_res_key)); 8506 lun->pr_key_count--; 8507 8508 /* XXX Need to see if the reservation has been released */ 8509 /* if so do we need to generate UA? */ 8510 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8511 lun->flags &= ~CTL_LUN_PR_RESERVED; 8512 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8513 8514 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8515 || lun->res_type == SPR_TYPE_EX_AC_RO) 8516 && lun->pr_key_count) { 8517 /* 8518 * If the reservation is a registrants 8519 * only type we need to generate a UA 8520 * for other registered inits. The 8521 * sense code should be RESERVATIONS 8522 * RELEASED 8523 */ 8524 8525 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8526 if (lun->per_res[i+ 8527 persis_offset].registered == 0) 8528 continue; 8529 8530 lun->pending_sense[i 8531 ].ua_pending |= 8532 CTL_UA_RES_RELEASE; 8533 } 8534 } 8535 lun->res_type = 0; 8536 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8537 if (lun->pr_key_count==0) { 8538 lun->flags &= ~CTL_LUN_PR_RESERVED; 8539 lun->res_type = 0; 8540 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8541 } 8542 } 8543 lun->PRGeneration++; 8544 break; 8545 8546 case CTL_PR_RESERVE: 8547 lun->flags |= CTL_LUN_PR_RESERVED; 8548 lun->res_type = msg->pr.pr_info.res_type; 8549 lun->pr_res_idx = msg->pr.pr_info.residx; 8550 8551 break; 8552 8553 case CTL_PR_RELEASE: 8554 /* 8555 * if this isn't an exclusive access res generate UA for all 8556 * other registrants. 
8557 */ 8558 if (lun->res_type != SPR_TYPE_EX_AC 8559 && lun->res_type != SPR_TYPE_WR_EX) { 8560 for (i = 0; i < CTL_MAX_INITIATORS; i++) 8561 if (lun->per_res[i+persis_offset].registered) 8562 lun->pending_sense[i].ua_pending |= 8563 CTL_UA_RES_RELEASE; 8564 } 8565 8566 lun->flags &= ~CTL_LUN_PR_RESERVED; 8567 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8568 lun->res_type = 0; 8569 break; 8570 8571 case CTL_PR_PREEMPT: 8572 ctl_pro_preempt_other(lun, msg); 8573 break; 8574 case CTL_PR_CLEAR: 8575 lun->flags &= ~CTL_LUN_PR_RESERVED; 8576 lun->res_type = 0; 8577 lun->pr_key_count = 0; 8578 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8579 8580 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8581 if (lun->per_res[i].registered == 0) 8582 continue; 8583 if (!persis_offset 8584 && i < CTL_MAX_INITIATORS) 8585 lun->pending_sense[i].ua_pending |= 8586 CTL_UA_RES_PREEMPT; 8587 else if (persis_offset 8588 && i >= persis_offset) 8589 lun->pending_sense[i-persis_offset].ua_pending|= 8590 CTL_UA_RES_PREEMPT; 8591 memset(&lun->per_res[i].res_key, 0, 8592 sizeof(struct scsi_per_res_key)); 8593 lun->per_res[i].registered = 0; 8594 } 8595 lun->PRGeneration++; 8596 break; 8597 } 8598 8599 mtx_unlock(&lun->lun_lock); 8600} 8601 8602int 8603ctl_read_write(struct ctl_scsiio *ctsio) 8604{ 8605 struct ctl_lun *lun; 8606 struct ctl_lba_len_flags *lbalen; 8607 uint64_t lba; 8608 uint32_t num_blocks; 8609 int reladdr, fua, dpo, ebp; 8610 int retval; 8611 int isread; 8612 8613 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8614 8615 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); 8616 8617 reladdr = 0; 8618 fua = 0; 8619 dpo = 0; 8620 ebp = 0; 8621 8622 retval = CTL_RETVAL_COMPLETE; 8623 8624 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 8625 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; 8626 if (lun->flags & CTL_LUN_PR_RESERVED && isread) { 8627 uint32_t residx; 8628 8629 /* 8630 * XXX KDM need a lock here. 8631 */ 8632 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 8633 if ((lun->res_type == SPR_TYPE_EX_AC 8634 && residx != lun->pr_res_idx) 8635 || ((lun->res_type == SPR_TYPE_EX_AC_RO 8636 || lun->res_type == SPR_TYPE_EX_AC_AR) 8637 && !lun->per_res[residx].registered)) { 8638 ctl_set_reservation_conflict(ctsio); 8639 ctl_done((union ctl_io *)ctsio); 8640 return (CTL_RETVAL_COMPLETE); 8641 } 8642 } 8643 8644 switch (ctsio->cdb[0]) { 8645 case READ_6: 8646 case WRITE_6: { 8647 struct scsi_rw_6 *cdb; 8648 8649 cdb = (struct scsi_rw_6 *)ctsio->cdb; 8650 8651 lba = scsi_3btoul(cdb->addr); 8652 /* only 5 bits are valid in the most significant address byte */ 8653 lba &= 0x1fffff; 8654 num_blocks = cdb->length; 8655 /* 8656 * This is correct according to SBC-2. 8657 */ 8658 if (num_blocks == 0) 8659 num_blocks = 256; 8660 break; 8661 } 8662 case READ_10: 8663 case WRITE_10: { 8664 struct scsi_rw_10 *cdb; 8665 8666 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8667 8668 if (cdb->byte2 & SRW10_RELADDR) 8669 reladdr = 1; 8670 if (cdb->byte2 & SRW10_FUA) 8671 fua = 1; 8672 if (cdb->byte2 & SRW10_DPO) 8673 dpo = 1; 8674 8675 if ((cdb->opcode == WRITE_10) 8676 && (cdb->byte2 & SRW10_EBP)) 8677 ebp = 1; 8678 8679 lba = scsi_4btoul(cdb->addr); 8680 num_blocks = scsi_2btoul(cdb->length); 8681 break; 8682 } 8683 case WRITE_VERIFY_10: { 8684 struct scsi_write_verify_10 *cdb; 8685 8686 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 8687 8688 /* 8689 * XXX KDM we should do actual write verify support at some 8690 * point. 
This is obviously fake, we're just translating 8691 * things to a write. So we don't even bother checking the 8692 * BYTCHK field, since we don't do any verification. If 8693 * the user asks for it, we'll just pretend we did it. 8694 */ 8695 if (cdb->byte2 & SWV_DPO) 8696 dpo = 1; 8697 8698 lba = scsi_4btoul(cdb->addr); 8699 num_blocks = scsi_2btoul(cdb->length); 8700 break; 8701 } 8702 case READ_12: 8703 case WRITE_12: { 8704 struct scsi_rw_12 *cdb; 8705 8706 cdb = (struct scsi_rw_12 *)ctsio->cdb; 8707 8708 if (cdb->byte2 & SRW12_RELADDR) 8709 reladdr = 1; 8710 if (cdb->byte2 & SRW12_FUA) 8711 fua = 1; 8712 if (cdb->byte2 & SRW12_DPO) 8713 dpo = 1; 8714 lba = scsi_4btoul(cdb->addr); 8715 num_blocks = scsi_4btoul(cdb->length); 8716 break; 8717 } 8718 case WRITE_VERIFY_12: { 8719 struct scsi_write_verify_12 *cdb; 8720 8721 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 8722 8723 if (cdb->byte2 & SWV_DPO) 8724 dpo = 1; 8725 8726 lba = scsi_4btoul(cdb->addr); 8727 num_blocks = scsi_4btoul(cdb->length); 8728 8729 break; 8730 } 8731 case READ_16: 8732 case WRITE_16: { 8733 struct scsi_rw_16 *cdb; 8734 8735 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8736 8737 if (cdb->byte2 & SRW12_RELADDR) 8738 reladdr = 1; 8739 if (cdb->byte2 & SRW12_FUA) 8740 fua = 1; 8741 if (cdb->byte2 & SRW12_DPO) 8742 dpo = 1; 8743 8744 lba = scsi_8btou64(cdb->addr); 8745 num_blocks = scsi_4btoul(cdb->length); 8746 break; 8747 } 8748 case WRITE_VERIFY_16: { 8749 struct scsi_write_verify_16 *cdb; 8750 8751 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 8752 8753 if (cdb->byte2 & SWV_DPO) 8754 dpo = 1; 8755 8756 lba = scsi_8btou64(cdb->addr); 8757 num_blocks = scsi_4btoul(cdb->length); 8758 break; 8759 } 8760 default: 8761 /* 8762 * We got a command we don't support. This shouldn't 8763 * happen, commands should be filtered out above us. 8764 */ 8765 ctl_set_invalid_opcode(ctsio); 8766 ctl_done((union ctl_io *)ctsio); 8767 8768 return (CTL_RETVAL_COMPLETE); 8769 break; /* NOTREACHED */ 8770 } 8771 8772 /* 8773 * XXX KDM what do we do with the DPO and FUA bits? FUA might be 8774 * interesting for us, but if RAIDCore is in write-back mode, 8775 * getting it to do write-through for a particular transaction may 8776 * not be possible. 8777 */ 8778 /* 8779 * We don't support relative addressing. That also requires 8780 * supporting linked commands, which we don't do. 8781 */ 8782 if (reladdr != 0) { 8783 ctl_set_invalid_field(ctsio, 8784 /*sks_valid*/ 1, 8785 /*command*/ 1, 8786 /*field*/ 1, 8787 /*bit_valid*/ 1, 8788 /*bit*/ 0); 8789 ctl_done((union ctl_io *)ctsio); 8790 return (CTL_RETVAL_COMPLETE); 8791 } 8792 8793 /* 8794 * The first check is to make sure we're in bounds, the second 8795 * check is to catch wrap-around problems. If the lba + num blocks 8796 * is less than the lba, then we've wrapped around and the block 8797 * range is invalid anyway. 8798 */ 8799 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8800 || ((lba + num_blocks) < lba)) { 8801 ctl_set_lba_out_of_range(ctsio); 8802 ctl_done((union ctl_io *)ctsio); 8803 return (CTL_RETVAL_COMPLETE); 8804 } 8805 8806 /* 8807 * According to SBC-3, a transfer length of 0 is not an error. 8808 * Note that this cannot happen with WRITE(6) or READ(6), since 0 8809 * translates to 256 blocks for those commands. 
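	 *
	 * (Concretely: a READ(10) arriving here with a transfer length of
	 * 0 completes with good status and moves no data, while a READ(6)
	 * with a zero length byte was already converted to 256 blocks in
	 * the CDB decoding above.)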
8810 */ 8811 if (num_blocks == 0) { 8812 ctl_set_success(ctsio); 8813 ctl_done((union ctl_io *)ctsio); 8814 return (CTL_RETVAL_COMPLETE); 8815 } 8816 8817 lbalen = (struct ctl_lba_len_flags *) 8818 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8819 lbalen->lba = lba; 8820 lbalen->len = num_blocks; 8821 lbalen->flags = isread ? CTL_LLF_READ : CTL_LLF_WRITE; 8822 8823 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8824 ctsio->kern_rel_offset = 0; 8825 8826 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 8827 8828 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8829 8830 return (retval); 8831} 8832 8833static int 8834ctl_cnw_cont(union ctl_io *io) 8835{ 8836 struct ctl_scsiio *ctsio; 8837 struct ctl_lun *lun; 8838 struct ctl_lba_len_flags *lbalen; 8839 int retval; 8840 8841 ctsio = &io->scsiio; 8842 ctsio->io_hdr.status = CTL_STATUS_NONE; 8843 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 8844 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8845 lbalen = (struct ctl_lba_len_flags *) 8846 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8847 lbalen->flags = CTL_LLF_WRITE; 8848 8849 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); 8850 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8851 return (retval); 8852} 8853 8854int 8855ctl_cnw(struct ctl_scsiio *ctsio) 8856{ 8857 struct ctl_lun *lun; 8858 struct ctl_lba_len_flags *lbalen; 8859 uint64_t lba; 8860 uint32_t num_blocks; 8861 int fua, dpo; 8862 int retval; 8863 8864 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8865 8866 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); 8867 8868 fua = 0; 8869 dpo = 0; 8870 8871 retval = CTL_RETVAL_COMPLETE; 8872 8873 switch (ctsio->cdb[0]) { 8874 case COMPARE_AND_WRITE: { 8875 struct scsi_compare_and_write *cdb; 8876 8877 cdb = (struct scsi_compare_and_write *)ctsio->cdb; 8878 8879 if (cdb->byte2 & SRW10_FUA) 8880 fua = 1; 8881 if (cdb->byte2 & SRW10_DPO) 8882 dpo = 1; 8883 lba = scsi_8btou64(cdb->addr); 8884 num_blocks = cdb->length; 8885 break; 8886 } 8887 default: 8888 /* 8889 * We got a command we don't support. This shouldn't 8890 * happen, commands should be filtered out above us. 8891 */ 8892 ctl_set_invalid_opcode(ctsio); 8893 ctl_done((union ctl_io *)ctsio); 8894 8895 return (CTL_RETVAL_COMPLETE); 8896 break; /* NOTREACHED */ 8897 } 8898 8899 /* 8900 * XXX KDM what do we do with the DPO and FUA bits? FUA might be 8901 * interesting for us, but if RAIDCore is in write-back mode, 8902 * getting it to do write-through for a particular transaction may 8903 * not be possible. 8904 */ 8905 8906 /* 8907 * The first check is to make sure we're in bounds, the second 8908 * check is to catch wrap-around problems. If the lba + num blocks 8909 * is less than the lba, then we've wrapped around and the block 8910 * range is invalid anyway. 8911 */ 8912 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8913 || ((lba + num_blocks) < lba)) { 8914 ctl_set_lba_out_of_range(ctsio); 8915 ctl_done((union ctl_io *)ctsio); 8916 return (CTL_RETVAL_COMPLETE); 8917 } 8918 8919 /* 8920 * According to SBC-3, a transfer length of 0 is not an error. 
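	 *
	 * (The data-out buffer for COMPARE AND WRITE carries the verify
	 * data followed by the write data, which is why kern_total_len
	 * below is twice num_blocks worth of bytes; e.g. 8 blocks of 512
	 * bytes means an 8192-byte transfer.)
	 */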
	if (num_blocks == 0) {
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize;
	ctsio->kern_rel_offset = 0;

	/*
	 * Set the IO_CONT flag, so that if this I/O gets passed to
	 * ctl_data_submit_done(), it'll get passed back to
	 * ctl_cnw_cont() for further processing.
	 */
	ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
	ctsio->io_cont = ctl_cnw_cont;

	lbalen = (struct ctl_lba_len_flags *)
	    &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	lbalen->lba = lba;
	lbalen->len = num_blocks;
	lbalen->flags = CTL_LLF_COMPARE;

	CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n"));
	retval = lun->backend->data_submit((union ctl_io *)ctsio);
	return (retval);
}

int
ctl_verify(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct ctl_lba_len_flags *lbalen;
	uint64_t lba;
	uint32_t num_blocks;
	int bytchk, dpo;
	int retval;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0]));

	bytchk = 0;
	dpo = 0;
	retval = CTL_RETVAL_COMPLETE;

	switch (ctsio->cdb[0]) {
	case VERIFY_10: {
		struct scsi_verify_10 *cdb;

		cdb = (struct scsi_verify_10 *)ctsio->cdb;
		if (cdb->byte2 & SVFY_BYTCHK)
			bytchk = 1;
		if (cdb->byte2 & SVFY_DPO)
			dpo = 1;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_2btoul(cdb->length);
		break;
	}
	case VERIFY_12: {
		struct scsi_verify_12 *cdb;

		cdb = (struct scsi_verify_12 *)ctsio->cdb;
		if (cdb->byte2 & SVFY_BYTCHK)
			bytchk = 1;
		if (cdb->byte2 & SVFY_DPO)
			dpo = 1;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		break;
	}
	case VERIFY_16: {
		struct scsi_rw_16 *cdb;

		cdb = (struct scsi_rw_16 *)ctsio->cdb;
		if (cdb->byte2 & SVFY_BYTCHK)
			bytchk = 1;
		if (cdb->byte2 & SVFY_DPO)
			dpo = 1;
		lba = scsi_8btou64(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		break;
	}
	default:
		/*
		 * We got a command we don't support.  This shouldn't
		 * happen; commands should be filtered out above us.
		 */
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * The first check is to make sure we're in bounds, the second
	 * check is to catch wrap-around problems.  If the lba + num blocks
	 * is less than the lba, then we've wrapped around and the block
	 * range is invalid anyway.
	 */
	if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
	 || ((lba + num_blocks) < lba)) {
		ctl_set_lba_out_of_range(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * According to SBC-3, a transfer length of 0 is not an error.
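	 *
	 * (With BYTCHK set, VERIFY carries num_blocks worth of expected
	 * data and is handled as a compare below; with BYTCHK clear it is
	 * a medium verification only, and no data phase is needed.)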
9030 */ 9031 if (num_blocks == 0) { 9032 ctl_set_success(ctsio); 9033 ctl_done((union ctl_io *)ctsio); 9034 return (CTL_RETVAL_COMPLETE); 9035 } 9036 9037 lbalen = (struct ctl_lba_len_flags *) 9038 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9039 lbalen->lba = lba; 9040 lbalen->len = num_blocks; 9041 if (bytchk) { 9042 lbalen->flags = CTL_LLF_COMPARE; 9043 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 9044 } else { 9045 lbalen->flags = CTL_LLF_VERIFY; 9046 ctsio->kern_total_len = 0; 9047 } 9048 ctsio->kern_rel_offset = 0; 9049 9050 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); 9051 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9052 return (retval); 9053} 9054 9055int 9056ctl_report_luns(struct ctl_scsiio *ctsio) 9057{ 9058 struct scsi_report_luns *cdb; 9059 struct scsi_report_luns_data *lun_data; 9060 struct ctl_lun *lun, *request_lun; 9061 int num_luns, retval; 9062 uint32_t alloc_len, lun_datalen; 9063 int num_filled, well_known; 9064 uint32_t initidx, targ_lun_id, lun_id; 9065 9066 retval = CTL_RETVAL_COMPLETE; 9067 well_known = 0; 9068 9069 cdb = (struct scsi_report_luns *)ctsio->cdb; 9070 9071 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 9072 9073 mtx_lock(&control_softc->ctl_lock); 9074 num_luns = control_softc->num_luns; 9075 mtx_unlock(&control_softc->ctl_lock); 9076 9077 switch (cdb->select_report) { 9078 case RPL_REPORT_DEFAULT: 9079 case RPL_REPORT_ALL: 9080 break; 9081 case RPL_REPORT_WELLKNOWN: 9082 well_known = 1; 9083 num_luns = 0; 9084 break; 9085 default: 9086 ctl_set_invalid_field(ctsio, 9087 /*sks_valid*/ 1, 9088 /*command*/ 1, 9089 /*field*/ 2, 9090 /*bit_valid*/ 0, 9091 /*bit*/ 0); 9092 ctl_done((union ctl_io *)ctsio); 9093 return (retval); 9094 break; /* NOTREACHED */ 9095 } 9096 9097 alloc_len = scsi_4btoul(cdb->length); 9098 /* 9099 * The initiator has to allocate at least 16 bytes for this request, 9100 * so he can at least get the header and the first LUN. Otherwise 9101 * we reject the request (per SPC-3 rev 14, section 6.21). 9102 */ 9103 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 9104 sizeof(struct scsi_report_luns_lundata))) { 9105 ctl_set_invalid_field(ctsio, 9106 /*sks_valid*/ 1, 9107 /*command*/ 1, 9108 /*field*/ 6, 9109 /*bit_valid*/ 0, 9110 /*bit*/ 0); 9111 ctl_done((union ctl_io *)ctsio); 9112 return (retval); 9113 } 9114 9115 request_lun = (struct ctl_lun *) 9116 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9117 9118 lun_datalen = sizeof(*lun_data) + 9119 (num_luns * sizeof(struct scsi_report_luns_lundata)); 9120 9121 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 9122 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 9123 ctsio->kern_sg_entries = 0; 9124 9125 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9126 9127 mtx_lock(&control_softc->ctl_lock); 9128 for (targ_lun_id = 0, num_filled = 0; targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns; targ_lun_id++) { 9129 lun_id = targ_lun_id; 9130 if (ctsio->io_hdr.nexus.lun_map_fn != NULL) 9131 lun_id = ctsio->io_hdr.nexus.lun_map_fn(ctsio->io_hdr.nexus.lun_map_arg, lun_id); 9132 if (lun_id >= CTL_MAX_LUNS) 9133 continue; 9134 lun = control_softc->ctl_luns[lun_id]; 9135 if (lun == NULL) 9136 continue; 9137 9138 if (targ_lun_id <= 0xff) { 9139 /* 9140 * Peripheral addressing method, bus number 0. 
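			 *
			 * (For example, LUN 5 is returned as 00h 05h in the
			 * first two bytes of the 8-byte LUN entry, with the
			 * remaining bytes zero.)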
9141 */ 9142 lun_data->luns[num_filled].lundata[0] = 9143 RPL_LUNDATA_ATYP_PERIPH; 9144 lun_data->luns[num_filled].lundata[1] = targ_lun_id; 9145 num_filled++; 9146 } else if (targ_lun_id <= 0x3fff) { 9147 /* 9148 * Flat addressing method. 9149 */ 9150 lun_data->luns[num_filled].lundata[0] = 9151 RPL_LUNDATA_ATYP_FLAT | 9152 (targ_lun_id & RPL_LUNDATA_FLAT_LUN_MASK); 9153#ifdef OLDCTLHEADERS 9154 (SRLD_ADDR_FLAT << SRLD_ADDR_SHIFT) | 9155 (targ_lun_id & SRLD_BUS_LUN_MASK); 9156#endif 9157 lun_data->luns[num_filled].lundata[1] = 9158#ifdef OLDCTLHEADERS 9159 targ_lun_id >> SRLD_BUS_LUN_BITS; 9160#endif 9161 targ_lun_id >> RPL_LUNDATA_FLAT_LUN_BITS; 9162 num_filled++; 9163 } else { 9164 printf("ctl_report_luns: bogus LUN number %jd, " 9165 "skipping\n", (intmax_t)targ_lun_id); 9166 } 9167 /* 9168 * According to SPC-3, rev 14 section 6.21: 9169 * 9170 * "The execution of a REPORT LUNS command to any valid and 9171 * installed logical unit shall clear the REPORTED LUNS DATA 9172 * HAS CHANGED unit attention condition for all logical 9173 * units of that target with respect to the requesting 9174 * initiator. A valid and installed logical unit is one 9175 * having a PERIPHERAL QUALIFIER of 000b in the standard 9176 * INQUIRY data (see 6.4.2)." 9177 * 9178 * If request_lun is NULL, the LUN this report luns command 9179 * was issued to is either disabled or doesn't exist. In that 9180 * case, we shouldn't clear any pending lun change unit 9181 * attention. 9182 */ 9183 if (request_lun != NULL) { 9184 mtx_lock(&lun->lun_lock); 9185 lun->pending_sense[initidx].ua_pending &= 9186 ~CTL_UA_LUN_CHANGE; 9187 mtx_unlock(&lun->lun_lock); 9188 } 9189 } 9190 mtx_unlock(&control_softc->ctl_lock); 9191 9192 /* 9193 * It's quite possible that we've returned fewer LUNs than we allocated 9194 * space for. Trim it. 9195 */ 9196 lun_datalen = sizeof(*lun_data) + 9197 (num_filled * sizeof(struct scsi_report_luns_lundata)); 9198 9199 if (lun_datalen < alloc_len) { 9200 ctsio->residual = alloc_len - lun_datalen; 9201 ctsio->kern_data_len = lun_datalen; 9202 ctsio->kern_total_len = lun_datalen; 9203 } else { 9204 ctsio->residual = 0; 9205 ctsio->kern_data_len = alloc_len; 9206 ctsio->kern_total_len = alloc_len; 9207 } 9208 ctsio->kern_data_resid = 0; 9209 ctsio->kern_rel_offset = 0; 9210 ctsio->kern_sg_entries = 0; 9211 9212 /* 9213 * We set this to the actual data length, regardless of how much 9214 * space we actually have to return results. If the user looks at 9215 * this value, he'll know whether or not he allocated enough space 9216 * and reissue the command if necessary. We don't support well 9217 * known logical units, so if the user asks for that, return none. 9218 */ 9219 scsi_ulto4b(lun_datalen - 8, lun_data->length); 9220 9221 /* 9222 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 9223 * this request. 
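	 *
	 * (Worked example of the header math above: with three LUNs
	 * returned, lun_datalen is 8 + 3 * 8 = 32, and the LUN LIST LENGTH
	 * field is set to 24, counting only the list bytes, per SPC-3.)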
9224 */ 9225 ctsio->scsi_status = SCSI_STATUS_OK; 9226 9227 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9228 ctsio->be_move_done = ctl_config_move_done; 9229 ctl_datamove((union ctl_io *)ctsio); 9230 9231 return (retval); 9232} 9233 9234int 9235ctl_request_sense(struct ctl_scsiio *ctsio) 9236{ 9237 struct scsi_request_sense *cdb; 9238 struct scsi_sense_data *sense_ptr; 9239 struct ctl_lun *lun; 9240 uint32_t initidx; 9241 int have_error; 9242 scsi_sense_data_type sense_format; 9243 9244 cdb = (struct scsi_request_sense *)ctsio->cdb; 9245 9246 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9247 9248 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 9249 9250 /* 9251 * Determine which sense format the user wants. 9252 */ 9253 if (cdb->byte2 & SRS_DESC) 9254 sense_format = SSD_TYPE_DESC; 9255 else 9256 sense_format = SSD_TYPE_FIXED; 9257 9258 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 9259 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 9260 ctsio->kern_sg_entries = 0; 9261 9262 /* 9263 * struct scsi_sense_data, which is currently set to 256 bytes, is 9264 * larger than the largest allowed value for the length field in the 9265 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 9266 */ 9267 ctsio->residual = 0; 9268 ctsio->kern_data_len = cdb->length; 9269 ctsio->kern_total_len = cdb->length; 9270 9271 ctsio->kern_data_resid = 0; 9272 ctsio->kern_rel_offset = 0; 9273 ctsio->kern_sg_entries = 0; 9274 9275 /* 9276 * If we don't have a LUN, we don't have any pending sense. 9277 */ 9278 if (lun == NULL) 9279 goto no_sense; 9280 9281 have_error = 0; 9282 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9283 /* 9284 * Check for pending sense, and then for pending unit attentions. 9285 * Pending sense gets returned first, then pending unit attentions. 9286 */ 9287 mtx_lock(&lun->lun_lock); 9288 if (ctl_is_set(lun->have_ca, initidx)) { 9289 scsi_sense_data_type stored_format; 9290 9291 /* 9292 * Check to see which sense format was used for the stored 9293 * sense data. 9294 */ 9295 stored_format = scsi_sense_type( 9296 &lun->pending_sense[initidx].sense); 9297 9298 /* 9299 * If the user requested a different sense format than the 9300 * one we stored, then we need to convert it to the other 9301 * format. If we're going from descriptor to fixed format 9302 * sense data, we may lose things in translation, depending 9303 * on what options were used. 9304 * 9305 * If the stored format is SSD_TYPE_NONE (i.e. invalid), 9306 * for some reason we'll just copy it out as-is. 
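		 *
		 * (For reference: fixed format sense, response code 70h or
		 * 71h, keeps the sense key in byte 2 and the ASC/ASCQ in
		 * bytes 12 and 13, while descriptor format sense, response
		 * code 72h or 73h, keeps them in bytes 1 through 3; that is
		 * the translation the calls below perform.)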
9307 */ 9308 if ((stored_format == SSD_TYPE_FIXED) 9309 && (sense_format == SSD_TYPE_DESC)) 9310 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 9311 &lun->pending_sense[initidx].sense, 9312 (struct scsi_sense_data_desc *)sense_ptr); 9313 else if ((stored_format == SSD_TYPE_DESC) 9314 && (sense_format == SSD_TYPE_FIXED)) 9315 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 9316 &lun->pending_sense[initidx].sense, 9317 (struct scsi_sense_data_fixed *)sense_ptr); 9318 else 9319 memcpy(sense_ptr, &lun->pending_sense[initidx].sense, 9320 ctl_min(sizeof(*sense_ptr), 9321 sizeof(lun->pending_sense[initidx].sense))); 9322 9323 ctl_clear_mask(lun->have_ca, initidx); 9324 have_error = 1; 9325 } else if (lun->pending_sense[initidx].ua_pending != CTL_UA_NONE) { 9326 ctl_ua_type ua_type; 9327 9328 ua_type = ctl_build_ua(lun->pending_sense[initidx].ua_pending, 9329 sense_ptr, sense_format); 9330 if (ua_type != CTL_UA_NONE) { 9331 have_error = 1; 9332 /* We're reporting this UA, so clear it */ 9333 lun->pending_sense[initidx].ua_pending &= ~ua_type; 9334 } 9335 } 9336 mtx_unlock(&lun->lun_lock); 9337 9338 /* 9339 * We already have a pending error, return it. 9340 */ 9341 if (have_error != 0) { 9342 /* 9343 * We report the SCSI status as OK, since the status of the 9344 * request sense command itself is OK. 9345 */ 9346 ctsio->scsi_status = SCSI_STATUS_OK; 9347 9348 /* 9349 * We report 0 for the sense length, because we aren't doing 9350 * autosense in this case. We're reporting sense as 9351 * parameter data. 9352 */ 9353 ctsio->sense_len = 0; 9354 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9355 ctsio->be_move_done = ctl_config_move_done; 9356 ctl_datamove((union ctl_io *)ctsio); 9357 9358 return (CTL_RETVAL_COMPLETE); 9359 } 9360 9361no_sense: 9362 9363 /* 9364 * No sense information to report, so we report that everything is 9365 * okay. 9366 */ 9367 ctl_set_sense_data(sense_ptr, 9368 lun, 9369 sense_format, 9370 /*current_error*/ 1, 9371 /*sense_key*/ SSD_KEY_NO_SENSE, 9372 /*asc*/ 0x00, 9373 /*ascq*/ 0x00, 9374 SSD_ELEM_NONE); 9375 9376 ctsio->scsi_status = SCSI_STATUS_OK; 9377 9378 /* 9379 * We report 0 for the sense length, because we aren't doing 9380 * autosense in this case. We're reporting sense as parameter data. 
9381 */ 9382 ctsio->sense_len = 0; 9383 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9384 ctsio->be_move_done = ctl_config_move_done; 9385 ctl_datamove((union ctl_io *)ctsio); 9386 9387 return (CTL_RETVAL_COMPLETE); 9388} 9389 9390int 9391ctl_tur(struct ctl_scsiio *ctsio) 9392{ 9393 struct ctl_lun *lun; 9394 9395 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9396 9397 CTL_DEBUG_PRINT(("ctl_tur\n")); 9398 9399 if (lun == NULL) 9400 return (EINVAL); 9401 9402 ctsio->scsi_status = SCSI_STATUS_OK; 9403 ctsio->io_hdr.status = CTL_SUCCESS; 9404 9405 ctl_done((union ctl_io *)ctsio); 9406 9407 return (CTL_RETVAL_COMPLETE); 9408} 9409 9410#ifdef notyet 9411static int 9412ctl_cmddt_inquiry(struct ctl_scsiio *ctsio) 9413{ 9414 9415} 9416#endif 9417 9418static int 9419ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9420{ 9421 struct scsi_vpd_supported_pages *pages; 9422 int sup_page_size; 9423 struct ctl_lun *lun; 9424 9425 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9426 9427 sup_page_size = sizeof(struct scsi_vpd_supported_pages) * 9428 SCSI_EVPD_NUM_SUPPORTED_PAGES; 9429 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 9430 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 9431 ctsio->kern_sg_entries = 0; 9432 9433 if (sup_page_size < alloc_len) { 9434 ctsio->residual = alloc_len - sup_page_size; 9435 ctsio->kern_data_len = sup_page_size; 9436 ctsio->kern_total_len = sup_page_size; 9437 } else { 9438 ctsio->residual = 0; 9439 ctsio->kern_data_len = alloc_len; 9440 ctsio->kern_total_len = alloc_len; 9441 } 9442 ctsio->kern_data_resid = 0; 9443 ctsio->kern_rel_offset = 0; 9444 ctsio->kern_sg_entries = 0; 9445 9446 /* 9447 * The control device is always connected. The disk device, on the 9448 * other hand, may not be online all the time. Need to change this 9449 * to figure out whether the disk device is actually online or not. 
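	 *
	 * (An initiator reaches this function with an INQUIRY CDB of the
	 * form 12h 01h 00h followed by a two-byte allocation length and
	 * the control byte, i.e. the EVPD bit set and page code 0; the
	 * page list built below is the response.)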
9450 */ 9451 if (lun != NULL) 9452 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 9453 lun->be_lun->lun_type; 9454 else 9455 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9456 9457 pages->length = SCSI_EVPD_NUM_SUPPORTED_PAGES; 9458 /* Supported VPD pages */ 9459 pages->page_list[0] = SVPD_SUPPORTED_PAGES; 9460 /* Serial Number */ 9461 pages->page_list[1] = SVPD_UNIT_SERIAL_NUMBER; 9462 /* Device Identification */ 9463 pages->page_list[2] = SVPD_DEVICE_ID; 9464 /* Block limits */ 9465 pages->page_list[3] = SVPD_BLOCK_LIMITS; 9466 /* Logical Block Provisioning */ 9467 pages->page_list[4] = SVPD_LBP; 9468 9469 ctsio->scsi_status = SCSI_STATUS_OK; 9470 9471 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9472 ctsio->be_move_done = ctl_config_move_done; 9473 ctl_datamove((union ctl_io *)ctsio); 9474 9475 return (CTL_RETVAL_COMPLETE); 9476} 9477 9478static int 9479ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 9480{ 9481 struct scsi_vpd_unit_serial_number *sn_ptr; 9482 struct ctl_lun *lun; 9483#ifndef CTL_USE_BACKEND_SN 9484 char tmpstr[32]; 9485#endif 9486 9487 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9488 9489 ctsio->kern_data_ptr = malloc(sizeof(*sn_ptr), M_CTL, M_WAITOK | M_ZERO); 9490 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9491 ctsio->kern_sg_entries = 0; 9492 9493 if (sizeof(*sn_ptr) < alloc_len) { 9494 ctsio->residual = alloc_len - sizeof(*sn_ptr); 9495 ctsio->kern_data_len = sizeof(*sn_ptr); 9496 ctsio->kern_total_len = sizeof(*sn_ptr); 9497 } else { 9498 ctsio->residual = 0; 9499 ctsio->kern_data_len = alloc_len; 9500 ctsio->kern_total_len = alloc_len; 9501 } 9502 ctsio->kern_data_resid = 0; 9503 ctsio->kern_rel_offset = 0; 9504 ctsio->kern_sg_entries = 0; 9505 9506 /* 9507 * The control device is always connected. The disk device, on the 9508 * other hand, may not be online all the time. Need to change this 9509 * to figure out whether the disk device is actually online or not. 9510 */ 9511 if (lun != NULL) 9512 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9513 lun->be_lun->lun_type; 9514 else 9515 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9516 9517 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9518 sn_ptr->length = ctl_min(sizeof(*sn_ptr) - 4, CTL_SN_LEN); 9519#ifdef CTL_USE_BACKEND_SN 9520 /* 9521 * If we don't have a LUN, we just leave the serial number as 9522 * all spaces. 
9523 */ 9524 memset(sn_ptr->serial_num, 0x20, sizeof(sn_ptr->serial_num)); 9525 if (lun != NULL) { 9526 strncpy((char *)sn_ptr->serial_num, 9527 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9528 } 9529#else 9530 /* 9531 * Note that we're using a non-unique serial number here, 9532 */ 9533 snprintf(tmpstr, sizeof(tmpstr), "MYSERIALNUMIS000"); 9534 memset(sn_ptr->serial_num, 0x20, sizeof(sn_ptr->serial_num)); 9535 strncpy(sn_ptr->serial_num, tmpstr, ctl_min(CTL_SN_LEN, 9536 ctl_min(sizeof(tmpstr), sizeof(*sn_ptr) - 4))); 9537#endif 9538 ctsio->scsi_status = SCSI_STATUS_OK; 9539 9540 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9541 ctsio->be_move_done = ctl_config_move_done; 9542 ctl_datamove((union ctl_io *)ctsio); 9543 9544 return (CTL_RETVAL_COMPLETE); 9545} 9546 9547 9548static int 9549ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9550{ 9551 struct scsi_vpd_device_id *devid_ptr; 9552 struct scsi_vpd_id_descriptor *desc, *desc1; 9553 struct scsi_vpd_id_descriptor *desc2, *desc3; /* for types 4h and 5h */ 9554 struct scsi_vpd_id_t10 *t10id; 9555 struct ctl_softc *ctl_softc; 9556 struct ctl_lun *lun; 9557 struct ctl_frontend *fe; 9558 char *val; 9559#ifndef CTL_USE_BACKEND_SN 9560 char tmpstr[32]; 9561#endif /* CTL_USE_BACKEND_SN */ 9562 int devid_len; 9563 9564 ctl_softc = control_softc; 9565 9566 fe = ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]; 9567 9568 if (fe->devid != NULL) 9569 return ((fe->devid)(ctsio, alloc_len)); 9570 9571 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9572 9573 devid_len = sizeof(struct scsi_vpd_device_id) + 9574 sizeof(struct scsi_vpd_id_descriptor) + 9575 sizeof(struct scsi_vpd_id_t10) + CTL_DEVID_LEN + 9576 sizeof(struct scsi_vpd_id_descriptor) + CTL_WWPN_LEN + 9577 sizeof(struct scsi_vpd_id_descriptor) + 9578 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9579 sizeof(struct scsi_vpd_id_descriptor) + 9580 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9581 9582 ctsio->kern_data_ptr = malloc(devid_len, M_CTL, M_WAITOK | M_ZERO); 9583 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9584 ctsio->kern_sg_entries = 0; 9585 9586 if (devid_len < alloc_len) { 9587 ctsio->residual = alloc_len - devid_len; 9588 ctsio->kern_data_len = devid_len; 9589 ctsio->kern_total_len = devid_len; 9590 } else { 9591 ctsio->residual = 0; 9592 ctsio->kern_data_len = alloc_len; 9593 ctsio->kern_total_len = alloc_len; 9594 } 9595 ctsio->kern_data_resid = 0; 9596 ctsio->kern_rel_offset = 0; 9597 ctsio->kern_sg_entries = 0; 9598 9599 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9600 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 9601 desc1 = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9602 sizeof(struct scsi_vpd_id_t10) + CTL_DEVID_LEN); 9603 desc2 = (struct scsi_vpd_id_descriptor *)(&desc1->identifier[0] + 9604 CTL_WWPN_LEN); 9605 desc3 = (struct scsi_vpd_id_descriptor *)(&desc2->identifier[0] + 9606 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 9607 9608 /* 9609 * The control device is always connected. The disk device, on the 9610 * other hand, may not be online all the time. 
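	 */

	/*
	 * (For reference: each identification descriptor built below is a
	 * four-byte header -- protocol identifier and code set, then PIV,
	 * association and identifier type, a reserved byte, and the
	 * identifier length -- followed by the identifier itself, per the
	 * device identification VPD page format in SPC-3.)
	 */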
	if (lun != NULL)
		devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else
		devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;

	devid_ptr->page_code = SVPD_DEVICE_ID;

	scsi_ulto2b(devid_len - 4, devid_ptr->length);

	/*
	 * The protocol identifier in the descriptors depends on the port
	 * type: Fibre Channel ports report SCSI_PROTO_FC, everything else
	 * is reported as parallel SCSI.
	 */
	if (fe->port_type == CTL_PORT_FC) {
		desc->proto_codeset = (SCSI_PROTO_FC << 4) |
		    SVPD_ID_CODESET_ASCII;
		desc1->proto_codeset = (SCSI_PROTO_FC << 4) |
		    SVPD_ID_CODESET_BINARY;
	} else {
		desc->proto_codeset = (SCSI_PROTO_SPI << 4) |
		    SVPD_ID_CODESET_ASCII;
		desc1->proto_codeset = (SCSI_PROTO_SPI << 4) |
		    SVPD_ID_CODESET_BINARY;
	}
	desc2->proto_codeset = desc3->proto_codeset = desc1->proto_codeset;

	/*
	 * We're using a LUN association here, i.e. this device ID is a
	 * per-LUN identifier.
	 */
	desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10;
	desc->length = sizeof(*t10id) + CTL_DEVID_LEN;
	if (lun == NULL || (val = ctl_get_opt(lun->be_lun, "vendor")) == NULL) {
		strncpy((char *)t10id->vendor, CTL_VENDOR,
		    sizeof(t10id->vendor));
	} else {
		memset(t10id->vendor, ' ', sizeof(t10id->vendor));
		strncpy(t10id->vendor, val,
		    min(sizeof(t10id->vendor), strlen(val)));
	}

	/*
	 * desc1 is for the WWPN, which is a port association.
	 */
	desc1->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_NAA;
	desc1->length = CTL_WWPN_LEN;
	/* XXX Call Reggie's get_WWNN func here then add port # to the end */
	/* For testing just create the WWPN */
#if 0
	ddb_GetWWNN((char *)desc1->identifier);

	/* NOTE: if the port is 0 or 8 we don't want to subtract 1 */
	/* This is so Copancontrol will return something sane */
	if (ctsio->io_hdr.nexus.targ_port != 0 &&
	    ctsio->io_hdr.nexus.targ_port != 8)
		desc1->identifier[7] += ctsio->io_hdr.nexus.targ_port - 1;
	else
		desc1->identifier[7] += ctsio->io_hdr.nexus.targ_port;
#endif

	be64enc(desc1->identifier, fe->wwpn);

	/*
	 * desc2 is for the Relative Target Port (type 4h) identifier.
	 */
	desc2->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT
	    | SVPD_ID_TYPE_RELTARG;
	desc2->length = 4;
	/* NOTE: if the port is 0 or 8 we don't want to subtract 1 */
	/* This is so Copancontrol will return something sane */
	if (ctsio->io_hdr.nexus.targ_port != 0 &&
	    ctsio->io_hdr.nexus.targ_port != 8)
		desc2->identifier[3] = ctsio->io_hdr.nexus.targ_port - 1;
	else
		desc2->identifier[3] = ctsio->io_hdr.nexus.targ_port;

	/*
	 * desc3 is for the Target Port Group (type 5h) identifier.
	 */
	desc3->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT
	    | SVPD_ID_TYPE_TPORTGRP;
	desc3->length = 4;
	if (ctsio->io_hdr.nexus.targ_port < CTL_MAX_PORTS || ctl_is_single)
		desc3->identifier[3] = 1;
	else
		desc3->identifier[3] = 2;

#ifdef CTL_USE_BACKEND_SN
	/*
	 * If we've actually got a backend, copy the device ID from the
	 * per-LUN data.  Otherwise, set it to all spaces.
	 */
	if (lun != NULL) {
		/*
		 * Copy the backend's LUN ID.
		 */
		strncpy((char *)t10id->vendor_spec_id,
		    (char *)lun->be_lun->device_id, CTL_DEVID_LEN);
	} else {
		/*
		 * No backend; set this to spaces.
9717 */ 9718 memset(t10id->vendor_spec_id, 0x20, CTL_DEVID_LEN); 9719 } 9720#else 9721 snprintf(tmpstr, sizeof(tmpstr), "MYDEVICEIDIS%4d", 9722 (lun != NULL) ? (int)lun->lun : 0); 9723 strncpy(t10id->vendor_spec_id, tmpstr, ctl_min(CTL_DEVID_LEN, 9724 sizeof(tmpstr))); 9725#endif 9726 9727 ctsio->scsi_status = SCSI_STATUS_OK; 9728 9729 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9730 ctsio->be_move_done = ctl_config_move_done; 9731 ctl_datamove((union ctl_io *)ctsio); 9732 9733 return (CTL_RETVAL_COMPLETE); 9734} 9735 9736static int 9737ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) 9738{ 9739 struct scsi_vpd_block_limits *bl_ptr; 9740 struct ctl_lun *lun; 9741 int bs; 9742 9743 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9744 bs = lun->be_lun->blocksize; 9745 9746 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); 9747 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; 9748 ctsio->kern_sg_entries = 0; 9749 9750 if (sizeof(*bl_ptr) < alloc_len) { 9751 ctsio->residual = alloc_len - sizeof(*bl_ptr); 9752 ctsio->kern_data_len = sizeof(*bl_ptr); 9753 ctsio->kern_total_len = sizeof(*bl_ptr); 9754 } else { 9755 ctsio->residual = 0; 9756 ctsio->kern_data_len = alloc_len; 9757 ctsio->kern_total_len = alloc_len; 9758 } 9759 ctsio->kern_data_resid = 0; 9760 ctsio->kern_rel_offset = 0; 9761 ctsio->kern_sg_entries = 0; 9762 9763 /* 9764 * The control device is always connected. The disk device, on the 9765 * other hand, may not be online all the time. Need to change this 9766 * to figure out whether the disk device is actually online or not. 9767 */ 9768 if (lun != NULL) 9769 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9770 lun->be_lun->lun_type; 9771 else 9772 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9773 9774 bl_ptr->page_code = SVPD_BLOCK_LIMITS; 9775 scsi_ulto2b(sizeof(*bl_ptr), bl_ptr->page_length); 9776 bl_ptr->max_cmp_write_len = 0xff; 9777 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); 9778 scsi_ulto4b(MAXPHYS / bs, bl_ptr->opt_txfer_len); 9779 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9780 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_lba_cnt); 9781 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_blk_cnt); 9782 } 9783 scsi_u64to8b(UINT64_MAX, bl_ptr->max_write_same_length); 9784 9785 ctsio->scsi_status = SCSI_STATUS_OK; 9786 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9787 ctsio->be_move_done = ctl_config_move_done; 9788 ctl_datamove((union ctl_io *)ctsio); 9789 9790 return (CTL_RETVAL_COMPLETE); 9791} 9792 9793static int 9794ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) 9795{ 9796 struct scsi_vpd_logical_block_prov *lbp_ptr; 9797 struct ctl_lun *lun; 9798 int bs; 9799 9800 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9801 bs = lun->be_lun->blocksize; 9802 9803 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); 9804 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; 9805 ctsio->kern_sg_entries = 0; 9806 9807 if (sizeof(*lbp_ptr) < alloc_len) { 9808 ctsio->residual = alloc_len - sizeof(*lbp_ptr); 9809 ctsio->kern_data_len = sizeof(*lbp_ptr); 9810 ctsio->kern_total_len = sizeof(*lbp_ptr); 9811 } else { 9812 ctsio->residual = 0; 9813 ctsio->kern_data_len = alloc_len; 9814 ctsio->kern_total_len = alloc_len; 9815 } 9816 ctsio->kern_data_resid = 0; 9817 ctsio->kern_rel_offset = 0; 9818 ctsio->kern_sg_entries = 0; 9819 9820 /* 9821 * The control device is always connected. 
The disk device, on the 9822 * other hand, may not be online all the time. Need to change this 9823 * to figure out whether the disk device is actually online or not. 9824 */ 9825 if (lun != NULL) 9826 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9827 lun->be_lun->lun_type; 9828 else 9829 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9830 9831 lbp_ptr->page_code = SVPD_LBP; 9832 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 9833 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | SVPD_LBP_WS10; 9834 9835 ctsio->scsi_status = SCSI_STATUS_OK; 9836 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9837 ctsio->be_move_done = ctl_config_move_done; 9838 ctl_datamove((union ctl_io *)ctsio); 9839 9840 return (CTL_RETVAL_COMPLETE); 9841} 9842 9843static int 9844ctl_inquiry_evpd(struct ctl_scsiio *ctsio) 9845{ 9846 struct scsi_inquiry *cdb; 9847 struct ctl_lun *lun; 9848 int alloc_len, retval; 9849 9850 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9851 cdb = (struct scsi_inquiry *)ctsio->cdb; 9852 9853 retval = CTL_RETVAL_COMPLETE; 9854 9855 alloc_len = scsi_2btoul(cdb->length); 9856 9857 switch (cdb->page_code) { 9858 case SVPD_SUPPORTED_PAGES: 9859 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); 9860 break; 9861 case SVPD_UNIT_SERIAL_NUMBER: 9862 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); 9863 break; 9864 case SVPD_DEVICE_ID: 9865 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); 9866 break; 9867 case SVPD_BLOCK_LIMITS: 9868 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); 9869 break; 9870 case SVPD_LBP: 9871 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); 9872 break; 9873 default: 9874 ctl_set_invalid_field(ctsio, 9875 /*sks_valid*/ 1, 9876 /*command*/ 1, 9877 /*field*/ 2, 9878 /*bit_valid*/ 0, 9879 /*bit*/ 0); 9880 ctl_done((union ctl_io *)ctsio); 9881 retval = CTL_RETVAL_COMPLETE; 9882 break; 9883 } 9884 9885 return (retval); 9886} 9887 9888static int 9889ctl_inquiry_std(struct ctl_scsiio *ctsio) 9890{ 9891 struct scsi_inquiry_data *inq_ptr; 9892 struct scsi_inquiry *cdb; 9893 struct ctl_softc *ctl_softc; 9894 struct ctl_lun *lun; 9895 char *val; 9896 uint32_t alloc_len; 9897 int is_fc; 9898 9899 ctl_softc = control_softc; 9900 9901 /* 9902 * Figure out whether we're talking to a Fibre Channel port or not. 9903 * We treat the ioctl front end, and any SCSI adapters, as packetized 9904 * SCSI front ends. 9905 */ 9906 if (ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type != 9907 CTL_PORT_FC) 9908 is_fc = 0; 9909 else 9910 is_fc = 1; 9911 9912 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9913 cdb = (struct scsi_inquiry *)ctsio->cdb; 9914 alloc_len = scsi_2btoul(cdb->length); 9915 9916 /* 9917 * We malloc the full inquiry data size here and fill it 9918 * in. If the user only asks for less, we'll give him 9919 * that much. 9920 */ 9921 ctsio->kern_data_ptr = malloc(sizeof(*inq_ptr), M_CTL, M_WAITOK | M_ZERO); 9922 inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; 9923 ctsio->kern_sg_entries = 0; 9924 ctsio->kern_data_resid = 0; 9925 ctsio->kern_rel_offset = 0; 9926 9927 if (sizeof(*inq_ptr) < alloc_len) { 9928 ctsio->residual = alloc_len - sizeof(*inq_ptr); 9929 ctsio->kern_data_len = sizeof(*inq_ptr); 9930 ctsio->kern_total_len = sizeof(*inq_ptr); 9931 } else { 9932 ctsio->residual = 0; 9933 ctsio->kern_data_len = alloc_len; 9934 ctsio->kern_total_len = alloc_len; 9935 } 9936 9937 /* 9938 * If we have a LUN configured, report it as connected. 
	 * Otherwise, report that it is offline or that no device is
	 * supported, depending on the value of inquiry_pq_no_lun.
	 *
	 * According to the spec (SPC-4 r34), the peripheral qualifier
	 * SID_QUAL_LU_OFFLINE (001b) is used in the following scenario:
	 *
	 * "A peripheral device having the specified peripheral device type
	 * is not connected to this logical unit. However, the device
	 * server is capable of supporting the specified peripheral device
	 * type on this logical unit."
	 *
	 * According to the same spec, the peripheral qualifier
	 * SID_QUAL_BAD_LU (011b) is used in this scenario:
	 *
	 * "The device server is not capable of supporting a peripheral
	 * device on this logical unit. For this peripheral qualifier the
	 * peripheral device type shall be set to 1Fh. All other peripheral
	 * device type values are reserved for this peripheral qualifier."
	 *
	 * Given the text, it would seem that we probably want to report
	 * that the LUN is offline here.  There is no LUN connected, but
	 * we can support a LUN at the given LUN number.
	 *
	 * In the real world, though, it sounds like things are a little
	 * different:
	 *
	 * - Linux, when presented with a LUN with the offline peripheral
	 *   qualifier, will create an sg driver instance for it.  So when
	 *   you attach it to CTL, you wind up with a ton of sg driver
	 *   instances.  (One for every LUN that Linux bothered to probe.)
	 *   Linux does this despite the fact that it issues a REPORT LUNS
	 *   to LUN 0 to get the inventory of supported LUNs.
	 *
	 * - There is other anecdotal evidence (from Emulex folks) about
	 *   arrays that use the offline peripheral qualifier for LUNs that
	 *   are on the "passive" path in an active/passive array.
	 *
	 * So the solution is to provide a hopefully reasonable default
	 * (return bad/no LUN) and allow the user to change the behavior
	 * with a tunable/sysctl variable.
	 */
	if (lun != NULL)
		inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else if (ctl_softc->inquiry_pq_no_lun == 0)
		inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	else
		inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE;

	/* RMB in byte 2 is 0 */
	inq_ptr->version = SCSI_REV_SPC3;

	/*
	 * According to SAM-3, even if a device only supports a single
	 * level of LUN addressing, it should still set the HISUP bit:
	 *
	 * 4.9.1 Logical unit numbers overview
	 *
	 * All logical unit number formats described in this standard are
	 * hierarchical in structure even when only a single level in that
	 * hierarchy is used.  The HISUP bit shall be set to one in the
	 * standard INQUIRY data (see SPC-2) when any logical unit number
	 * format described in this standard is used.  Non-hierarchical
	 * formats are outside the scope of this standard.
	 *
	 * Therefore we set the HiSup bit here.
	 *
	 * The response format is 2, per SPC-3.
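	 *
	 * (Concretely, the byte set below works out to SID_HiSup | 2 = 12h:
	 * bit 4 for hierarchical LUN support plus response data format 2.)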
10007 */ 10008 inq_ptr->response_format = SID_HiSup | 2; 10009 10010 inq_ptr->additional_length = sizeof(*inq_ptr) - 4; 10011 CTL_DEBUG_PRINT(("additional_length = %d\n", 10012 inq_ptr->additional_length)); 10013 10014 inq_ptr->spc3_flags = SPC3_SID_TPGS_IMPLICIT; 10015 /* 16 bit addressing */ 10016 if (is_fc == 0) 10017 inq_ptr->spc2_flags = SPC2_SID_ADDR16; 10018 /* XXX set the SID_MultiP bit here if we're actually going to 10019 respond on multiple ports */ 10020 inq_ptr->spc2_flags |= SPC2_SID_MultiP; 10021 10022 /* 16 bit data bus, synchronous transfers */ 10023 /* XXX these flags don't apply for FC */ 10024 if (is_fc == 0) 10025 inq_ptr->flags = SID_WBus16 | SID_Sync; 10026 /* 10027 * XXX KDM do we want to support tagged queueing on the control 10028 * device at all? 10029 */ 10030 if ((lun == NULL) 10031 || (lun->be_lun->lun_type != T_PROCESSOR)) 10032 inq_ptr->flags |= SID_CmdQue; 10033 /* 10034 * Per SPC-3, unused bytes in ASCII strings are filled with spaces. 10035 * We have 8 bytes for the vendor name, and 16 bytes for the device 10036 * name and 4 bytes for the revision. 10037 */ 10038 if (lun == NULL || (val = ctl_get_opt(lun->be_lun, "vendor")) == NULL) { 10039 strcpy(inq_ptr->vendor, CTL_VENDOR); 10040 } else { 10041 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); 10042 strncpy(inq_ptr->vendor, val, 10043 min(sizeof(inq_ptr->vendor), strlen(val))); 10044 } 10045 if (lun == NULL) { 10046 strcpy(inq_ptr->product, CTL_DIRECT_PRODUCT); 10047 } else if ((val = ctl_get_opt(lun->be_lun, "product")) == NULL) { 10048 switch (lun->be_lun->lun_type) { 10049 case T_DIRECT: 10050 strcpy(inq_ptr->product, CTL_DIRECT_PRODUCT); 10051 break; 10052 case T_PROCESSOR: 10053 strcpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT); 10054 break; 10055 default: 10056 strcpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT); 10057 break; 10058 } 10059 } else { 10060 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); 10061 strncpy(inq_ptr->product, val, 10062 min(sizeof(inq_ptr->product), strlen(val))); 10063 } 10064 10065 /* 10066 * XXX make this a macro somewhere so it automatically gets 10067 * incremented when we make changes. 10068 */ 10069 if (lun == NULL || (val = ctl_get_opt(lun->be_lun, "revision")) == NULL) { 10070 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); 10071 } else { 10072 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); 10073 strncpy(inq_ptr->revision, val, 10074 min(sizeof(inq_ptr->revision), strlen(val))); 10075 } 10076 10077 /* 10078 * For parallel SCSI, we support double transition and single 10079 * transition clocking. We also support QAS (Quick Arbitration 10080 * and Selection) and Information Unit transfers on both the 10081 * control and array devices. 10082 */ 10083 if (is_fc == 0) 10084 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | 10085 SID_SPI_IUS; 10086 10087 /* SAM-3 */ 10088 scsi_ulto2b(0x0060, inq_ptr->version1); 10089 /* SPC-3 (no version claimed) XXX should we claim a version? */ 10090 scsi_ulto2b(0x0300, inq_ptr->version2); 10091 if (is_fc) { 10092 /* FCP-2 ANSI INCITS.350:2003 */ 10093 scsi_ulto2b(0x0917, inq_ptr->version3); 10094 } else { 10095 /* SPI-4 ANSI INCITS.362:200x */ 10096 scsi_ulto2b(0x0B56, inq_ptr->version3); 10097 } 10098 10099 if (lun == NULL) { 10100 /* SBC-2 (no version claimed) XXX should we claim a version? */ 10101 scsi_ulto2b(0x0320, inq_ptr->version4); 10102 } else { 10103 switch (lun->be_lun->lun_type) { 10104 case T_DIRECT: 10105 /* 10106 * SBC-2 (no version claimed) XXX should we claim a 10107 * version? 
10108 */ 10109 scsi_ulto2b(0x0320, inq_ptr->version4); 10110 break; 10111 case T_PROCESSOR: 10112 default: 10113 break; 10114 } 10115 } 10116 10117 ctsio->scsi_status = SCSI_STATUS_OK; 10118 if (ctsio->kern_data_len > 0) { 10119 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10120 ctsio->be_move_done = ctl_config_move_done; 10121 ctl_datamove((union ctl_io *)ctsio); 10122 } else { 10123 ctsio->io_hdr.status = CTL_SUCCESS; 10124 ctl_done((union ctl_io *)ctsio); 10125 } 10126 10127 return (CTL_RETVAL_COMPLETE); 10128} 10129 10130int 10131ctl_inquiry(struct ctl_scsiio *ctsio) 10132{ 10133 struct scsi_inquiry *cdb; 10134 int retval; 10135 10136 cdb = (struct scsi_inquiry *)ctsio->cdb; 10137 10138 retval = 0; 10139 10140 CTL_DEBUG_PRINT(("ctl_inquiry\n")); 10141 10142 /* 10143 * Right now, we don't support the CmdDt inquiry information. 10144 * This would be nice to support in the future. When we do 10145 * support it, we should change this test so that it checks to make 10146 * sure SI_EVPD and SI_CMDDT aren't both set at the same time. 10147 */ 10148#ifdef notyet 10149 if (((cdb->byte2 & SI_EVPD) 10150 && (cdb->byte2 & SI_CMDDT))) 10151#endif 10152 if (cdb->byte2 & SI_CMDDT) { 10153 /* 10154 * Point to the SI_CMDDT bit. We might change this 10155 * when we support SI_CMDDT, but since both bits would be 10156 * "wrong", this should probably just stay as-is then. 10157 */ 10158 ctl_set_invalid_field(ctsio, 10159 /*sks_valid*/ 1, 10160 /*command*/ 1, 10161 /*field*/ 1, 10162 /*bit_valid*/ 1, 10163 /*bit*/ 1); 10164 ctl_done((union ctl_io *)ctsio); 10165 return (CTL_RETVAL_COMPLETE); 10166 } 10167 if (cdb->byte2 & SI_EVPD) 10168 retval = ctl_inquiry_evpd(ctsio); 10169#ifdef notyet 10170 else if (cdb->byte2 & SI_CMDDT) 10171 retval = ctl_inquiry_cmddt(ctsio); 10172#endif 10173 else 10174 retval = ctl_inquiry_std(ctsio); 10175 10176 return (retval); 10177} 10178 10179/* 10180 * For known CDB types, parse the LBA and length. 
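 *
 * For example, a hypothetical READ(10) CDB of 28 00 00 00 10 00 00 00
 * 08 00 would parse to *lba = 0x1000 and *len = 8: bytes 2-5 hold the
 * big-endian LBA and bytes 7-8 the big-endian transfer length.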
10181 */ 10182static int 10183ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint32_t *len) 10184{ 10185 if (io->io_hdr.io_type != CTL_IO_SCSI) 10186 return (1); 10187 10188 switch (io->scsiio.cdb[0]) { 10189 case COMPARE_AND_WRITE: { 10190 struct scsi_compare_and_write *cdb; 10191 10192 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; 10193 10194 *lba = scsi_8btou64(cdb->addr); 10195 *len = cdb->length; 10196 break; 10197 } 10198 case READ_6: 10199 case WRITE_6: { 10200 struct scsi_rw_6 *cdb; 10201 10202 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; 10203 10204 *lba = scsi_3btoul(cdb->addr); 10205 /* only 5 bits are valid in the most significant address byte */ 10206 *lba &= 0x1fffff; 10207 *len = cdb->length; 10208 break; 10209 } 10210 case READ_10: 10211 case WRITE_10: { 10212 struct scsi_rw_10 *cdb; 10213 10214 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; 10215 10216 *lba = scsi_4btoul(cdb->addr); 10217 *len = scsi_2btoul(cdb->length); 10218 break; 10219 } 10220 case WRITE_VERIFY_10: { 10221 struct scsi_write_verify_10 *cdb; 10222 10223 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; 10224 10225 *lba = scsi_4btoul(cdb->addr); 10226 *len = scsi_2btoul(cdb->length); 10227 break; 10228 } 10229 case READ_12: 10230 case WRITE_12: { 10231 struct scsi_rw_12 *cdb; 10232 10233 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; 10234 10235 *lba = scsi_4btoul(cdb->addr); 10236 *len = scsi_4btoul(cdb->length); 10237 break; 10238 } 10239 case WRITE_VERIFY_12: { 10240 struct scsi_write_verify_12 *cdb; 10241 10242 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; 10243 10244 *lba = scsi_4btoul(cdb->addr); 10245 *len = scsi_4btoul(cdb->length); 10246 break; 10247 } 10248 case READ_16: 10249 case WRITE_16: { 10250 struct scsi_rw_16 *cdb; 10251 10252 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; 10253 10254 *lba = scsi_8btou64(cdb->addr); 10255 *len = scsi_4btoul(cdb->length); 10256 break; 10257 } 10258 case WRITE_VERIFY_16: { 10259 struct scsi_write_verify_16 *cdb; 10260 10261 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; 10262 10263 10264 *lba = scsi_8btou64(cdb->addr); 10265 *len = scsi_4btoul(cdb->length); 10266 break; 10267 } 10268 case WRITE_SAME_10: { 10269 struct scsi_write_same_10 *cdb; 10270 10271 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb; 10272 10273 *lba = scsi_4btoul(cdb->addr); 10274 *len = scsi_2btoul(cdb->length); 10275 break; 10276 } 10277 case WRITE_SAME_16: { 10278 struct scsi_write_same_16 *cdb; 10279 10280 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; 10281 10282 *lba = scsi_8btou64(cdb->addr); 10283 *len = scsi_4btoul(cdb->length); 10284 break; 10285 } 10286 case VERIFY_10: { 10287 struct scsi_verify_10 *cdb; 10288 10289 cdb = (struct scsi_verify_10 *)io->scsiio.cdb; 10290 10291 *lba = scsi_4btoul(cdb->addr); 10292 *len = scsi_2btoul(cdb->length); 10293 break; 10294 } 10295 case VERIFY_12: { 10296 struct scsi_verify_12 *cdb; 10297 10298 cdb = (struct scsi_verify_12 *)io->scsiio.cdb; 10299 10300 *lba = scsi_4btoul(cdb->addr); 10301 *len = scsi_4btoul(cdb->length); 10302 break; 10303 } 10304 case VERIFY_16: { 10305 struct scsi_verify_16 *cdb; 10306 10307 cdb = (struct scsi_verify_16 *)io->scsiio.cdb; 10308 10309 *lba = scsi_8btou64(cdb->addr); 10310 *len = scsi_4btoul(cdb->length); 10311 break; 10312 } 10313 default: 10314 return (1); 10315 break; /* NOTREACHED */ 10316 } 10317 10318 return (0); 10319} 10320 10321static ctl_action 10322ctl_extent_check_lba(uint64_t lba1, uint32_t len1, uint64_t lba2, uint32_t len2) 10323{ 10324 uint64_t endlba1, endlba2; 10325 
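	/*
	 * Two extents conflict unless one ends before the other begins.
	 * For example, lba1 = 0, len1 = 8 (blocks 0-7) passes against
	 * lba2 = 8, len2 = 8 (blocks 8-15), but would block against
	 * lba2 = 7, since block 7 would be in both ranges.
	 */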
10326 endlba1 = lba1 + len1 - 1; 10327 endlba2 = lba2 + len2 - 1; 10328 10329 if ((endlba1 < lba2) 10330 || (endlba2 < lba1)) 10331 return (CTL_ACTION_PASS); 10332 else 10333 return (CTL_ACTION_BLOCK); 10334} 10335 10336static ctl_action 10337ctl_extent_check(union ctl_io *io1, union ctl_io *io2) 10338{ 10339 uint64_t lba1, lba2; 10340 uint32_t len1, len2; 10341 int retval; 10342 10343 retval = ctl_get_lba_len(io1, &lba1, &len1); 10344 if (retval != 0) 10345 return (CTL_ACTION_ERROR); 10346 10347 retval = ctl_get_lba_len(io2, &lba2, &len2); 10348 if (retval != 0) 10349 return (CTL_ACTION_ERROR); 10350 10351 return (ctl_extent_check_lba(lba1, len1, lba2, len2)); 10352} 10353 10354static ctl_action 10355ctl_check_for_blockage(union ctl_io *pending_io, union ctl_io *ooa_io) 10356{ 10357 struct ctl_cmd_entry *pending_entry, *ooa_entry; 10358 ctl_serialize_action *serialize_row; 10359 10360 /* 10361 * The initiator attempted multiple untagged commands at the same 10362 * time. Can't do that. 10363 */ 10364 if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10365 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10366 && ((pending_io->io_hdr.nexus.targ_port == 10367 ooa_io->io_hdr.nexus.targ_port) 10368 && (pending_io->io_hdr.nexus.initid.id == 10369 ooa_io->io_hdr.nexus.initid.id)) 10370 && ((ooa_io->io_hdr.flags & CTL_FLAG_ABORT) == 0)) 10371 return (CTL_ACTION_OVERLAP); 10372 10373 /* 10374 * The initiator attempted to send multiple tagged commands with 10375 * the same ID. (It's fine if different initiators have the same 10376 * tag ID.) 10377 * 10378 * Even if all of those conditions are true, we don't kill the I/O 10379 * if the command ahead of us has been aborted. We won't end up 10380 * sending it to the FETD, and it's perfectly legal to resend a 10381 * command with the same tag number as long as the previous 10382 * instance of this tag number has been aborted somehow. 10383 */ 10384 if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10385 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10386 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) 10387 && ((pending_io->io_hdr.nexus.targ_port == 10388 ooa_io->io_hdr.nexus.targ_port) 10389 && (pending_io->io_hdr.nexus.initid.id == 10390 ooa_io->io_hdr.nexus.initid.id)) 10391 && ((ooa_io->io_hdr.flags & CTL_FLAG_ABORT) == 0)) 10392 return (CTL_ACTION_OVERLAP_TAG); 10393 10394 /* 10395 * If we get a head of queue tag, SAM-3 says that we should 10396 * immediately execute it. 10397 * 10398 * What happens if this command would normally block for some other 10399 * reason? e.g. a request sense with a head of queue tag 10400 * immediately after a write. Normally that would block, but this 10401 * will result in its getting executed immediately... 10402 * 10403 * We currently return "pass" instead of "skip", so we'll end up 10404 * going through the rest of the queue to check for overlapped tags. 10405 * 10406 * XXX KDM check for other types of blockage first?? 10407 */ 10408 if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10409 return (CTL_ACTION_PASS); 10410 10411 /* 10412 * Ordered tags have to block until all items ahead of them 10413 * have completed. If we get called with an ordered tag, we always 10414 * block, if something else is ahead of us in the queue. 10415 */ 10416 if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED) 10417 return (CTL_ACTION_BLOCK); 10418 10419 /* 10420 * Simple tags get blocked until all head of queue and ordered tags 10421 * ahead of them have completed. 
I'm lumping untagged commands in 10422 * with simple tags here. XXX KDM is that the right thing to do? 10423 */ 10424 if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10425 || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE)) 10426 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10427 || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED))) 10428 return (CTL_ACTION_BLOCK); 10429 10430 pending_entry = &ctl_cmd_table[pending_io->scsiio.cdb[0]]; 10431 ooa_entry = &ctl_cmd_table[ooa_io->scsiio.cdb[0]]; 10432 10433 serialize_row = ctl_serialize_table[ooa_entry->seridx]; 10434 10435 switch (serialize_row[pending_entry->seridx]) { 10436 case CTL_SER_BLOCK: 10437 return (CTL_ACTION_BLOCK); 10438 break; /* NOTREACHED */ 10439 case CTL_SER_EXTENT: 10440 return (ctl_extent_check(pending_io, ooa_io)); 10441 break; /* NOTREACHED */ 10442 case CTL_SER_PASS: 10443 return (CTL_ACTION_PASS); 10444 break; /* NOTREACHED */ 10445 case CTL_SER_SKIP: 10446 return (CTL_ACTION_SKIP); 10447 break; 10448 default: 10449 panic("invalid serialization value %d", 10450 serialize_row[pending_entry->seridx]); 10451 break; /* NOTREACHED */ 10452 } 10453 10454 return (CTL_ACTION_ERROR); 10455} 10456 10457/* 10458 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. 10459 * Assumptions: 10460 * - pending_io is generally either incoming, or on the blocked queue 10461 * - starting I/O is the I/O we want to start the check with. 10462 */ 10463static ctl_action 10464ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 10465 union ctl_io *starting_io) 10466{ 10467 union ctl_io *ooa_io; 10468 ctl_action action; 10469 10470 mtx_assert(&lun->lun_lock, MA_OWNED); 10471 10472 /* 10473 * Run back along the OOA queue, starting with the current 10474 * blocked I/O and going through every I/O before it on the 10475 * queue. If starting_io is NULL, we'll just end up returning 10476 * CTL_ACTION_PASS. 10477 */ 10478 for (ooa_io = starting_io; ooa_io != NULL; 10479 ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq, 10480 ooa_links)){ 10481 10482 /* 10483 * This routine just checks to see whether 10484 * cur_blocked is blocked by ooa_io, which is ahead 10485 * of it in the queue. It doesn't queue/dequeue 10486 * cur_blocked. 10487 */ 10488 action = ctl_check_for_blockage(pending_io, ooa_io); 10489 switch (action) { 10490 case CTL_ACTION_BLOCK: 10491 case CTL_ACTION_OVERLAP: 10492 case CTL_ACTION_OVERLAP_TAG: 10493 case CTL_ACTION_SKIP: 10494 case CTL_ACTION_ERROR: 10495 return (action); 10496 break; /* NOTREACHED */ 10497 case CTL_ACTION_PASS: 10498 break; 10499 default: 10500 panic("invalid action %d", action); 10501 break; /* NOTREACHED */ 10502 } 10503 } 10504 10505 return (CTL_ACTION_PASS); 10506} 10507 10508/* 10509 * Assumptions: 10510 * - An I/O has just completed, and has been removed from the per-LUN OOA 10511 * queue, so some items on the blocked queue may now be unblocked. 10512 */ 10513static int 10514ctl_check_blocked(struct ctl_lun *lun) 10515{ 10516 union ctl_io *cur_blocked, *next_blocked; 10517 10518 mtx_assert(&lun->lun_lock, MA_OWNED); 10519 10520 /* 10521 * Run forward from the head of the blocked queue, checking each 10522 * entry against the I/Os prior to it on the OOA queue to see if 10523 * there is still any blockage. 10524 * 10525 * We cannot use the TAILQ_FOREACH() macro, because it can't deal 10526 * with our removing a variable on it while it is traversing the 10527 * list. 
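	 *
	 * (The open-coded loop below is the moral equivalent of queue(3)'s
	 * TAILQ_FOREACH_SAFE(): next_blocked is saved before the body runs,
	 * so removing cur_blocked from the queue mid-iteration is safe.)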
10528 */ 10529 for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); 10530 cur_blocked != NULL; cur_blocked = next_blocked) { 10531 union ctl_io *prev_ooa; 10532 ctl_action action; 10533 10534 next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr, 10535 blocked_links); 10536 10537 prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr, 10538 ctl_ooaq, ooa_links); 10539 10540 /* 10541 * If cur_blocked happens to be the first item in the OOA 10542 * queue now, prev_ooa will be NULL, and the action 10543 * returned will just be CTL_ACTION_PASS. 10544 */ 10545 action = ctl_check_ooa(lun, cur_blocked, prev_ooa); 10546 10547 switch (action) { 10548 case CTL_ACTION_BLOCK: 10549 /* Nothing to do here, still blocked */ 10550 break; 10551 case CTL_ACTION_OVERLAP: 10552 case CTL_ACTION_OVERLAP_TAG: 10553 /* 10554 * This shouldn't happen! In theory we've already 10555 * checked this command for overlap... 10556 */ 10557 break; 10558 case CTL_ACTION_PASS: 10559 case CTL_ACTION_SKIP: { 10560 struct ctl_softc *softc; 10561 struct ctl_cmd_entry *entry; 10562 uint32_t initidx; 10563 uint8_t opcode; 10564 int isc_retval; 10565 10566 /* 10567 * The skip case shouldn't happen, this transaction 10568 * should have never made it onto the blocked queue. 10569 */ 10570 /* 10571 * This I/O is no longer blocked, we can remove it 10572 * from the blocked queue. Since this is a TAILQ 10573 * (doubly linked list), we can do O(1) removals 10574 * from any place on the list. 10575 */ 10576 TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr, 10577 blocked_links); 10578 cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 10579 10580 if (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC){ 10581 /* 10582 * Need to send IO back to original side to 10583 * run 10584 */ 10585 union ctl_ha_msg msg_info; 10586 10587 msg_info.hdr.original_sc = 10588 cur_blocked->io_hdr.original_sc; 10589 msg_info.hdr.serializing_sc = cur_blocked; 10590 msg_info.hdr.msg_type = CTL_MSG_R2R; 10591 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 10592 &msg_info, sizeof(msg_info), 0)) > 10593 CTL_HA_STATUS_SUCCESS) { 10594 printf("CTL:Check Blocked error from " 10595 "ctl_ha_msg_send %d\n", 10596 isc_retval); 10597 } 10598 break; 10599 } 10600 opcode = cur_blocked->scsiio.cdb[0]; 10601 entry = &ctl_cmd_table[opcode]; 10602 softc = control_softc; 10603 10604 initidx = ctl_get_initindex(&cur_blocked->io_hdr.nexus); 10605 10606 /* 10607 * Check this I/O for LUN state changes that may 10608 * have happened while this command was blocked. 10609 * The LUN state may have been changed by a command 10610 * ahead of us in the queue, so we need to re-check 10611 * for any states that can be caused by SCSI 10612 * commands. 10613 */ 10614 if (ctl_scsiio_lun_check(softc, lun, entry, 10615 &cur_blocked->scsiio) == 0) { 10616 cur_blocked->io_hdr.flags |= 10617 CTL_FLAG_IS_WAS_ON_RTR; 10618 ctl_enqueue_rtr(cur_blocked); 10619 } else 10620 ctl_done(cur_blocked); 10621 break; 10622 } 10623 default: 10624 /* 10625 * This probably shouldn't happen -- we shouldn't 10626 * get CTL_ACTION_ERROR, or anything else. 10627 */ 10628 break; 10629 } 10630 } 10631 10632 return (CTL_RETVAL_COMPLETE); 10633} 10634 10635/* 10636 * This routine (with one exception) checks LUN flags that can be set by 10637 * commands ahead of us in the OOA queue. These flags have to be checked 10638 * when a command initially comes in, and when we pull a command off the 10639 * blocked queue and are preparing to execute it. 
The reason we have to 10640 * check these flags for commands on the blocked queue is that the LUN 10641 * state may have been changed by a command ahead of us while we're on the 10642 * blocked queue. 10643 * 10644 * Ordering is somewhat important with these checks, so please pay 10645 * careful attention to the placement of any new checks. 10646 */ 10647static int 10648ctl_scsiio_lun_check(struct ctl_softc *ctl_softc, struct ctl_lun *lun, 10649 struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) 10650{ 10651 int retval; 10652 10653 retval = 0; 10654 10655 mtx_assert(&lun->lun_lock, MA_OWNED); 10656 10657 /* 10658 * If this shelf is a secondary shelf controller, we have to reject 10659 * any media access commands. 10660 */ 10661#if 0 10662 /* No longer needed for HA */ 10663 if (((ctl_softc->flags & CTL_FLAG_MASTER_SHELF) == 0) 10664 && ((entry->flags & CTL_CMD_FLAG_OK_ON_SECONDARY) == 0)) { 10665 ctl_set_lun_standby(ctsio); 10666 retval = 1; 10667 goto bailout; 10668 } 10669#endif 10670 10671 /* 10672 * Check for a reservation conflict. If this command isn't allowed 10673 * even on reserved LUNs, and if this initiator isn't the one who 10674 * reserved us, reject the command with a reservation conflict. 10675 */ 10676 if ((lun->flags & CTL_LUN_RESERVED) 10677 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 10678 if ((ctsio->io_hdr.nexus.initid.id != lun->rsv_nexus.initid.id) 10679 || (ctsio->io_hdr.nexus.targ_port != lun->rsv_nexus.targ_port) 10680 || (ctsio->io_hdr.nexus.targ_target.id != 10681 lun->rsv_nexus.targ_target.id)) { 10682 ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT; 10683 ctsio->io_hdr.status = CTL_SCSI_ERROR; 10684 retval = 1; 10685 goto bailout; 10686 } 10687 } 10688 10689 if ( (lun->flags & CTL_LUN_PR_RESERVED) 10690 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV) == 0)) { 10691 uint32_t residx; 10692 10693 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 10694 /* 10695 * if we aren't registered or it's a res holder type 10696 * reservation and this isn't the res holder then set a 10697 * conflict. 10698 * NOTE: Commands which might be allowed on write exclusive 10699 * type reservations are checked in the particular command 10700 * for a conflict. Read and SSU are the only ones. 10701 */ 10702 if (!lun->per_res[residx].registered 10703 || (residx != lun->pr_res_idx && lun->res_type < 4)) { 10704 ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT; 10705 ctsio->io_hdr.status = CTL_SCSI_ERROR; 10706 retval = 1; 10707 goto bailout; 10708 } 10709 10710 } 10711 10712 if ((lun->flags & CTL_LUN_OFFLINE) 10713 && ((entry->flags & CTL_CMD_FLAG_OK_ON_OFFLINE) == 0)) { 10714 ctl_set_lun_not_ready(ctsio); 10715 retval = 1; 10716 goto bailout; 10717 } 10718 10719 /* 10720 * If the LUN is stopped, see if this particular command is allowed 10721 * for a stopped lun. Otherwise, reject it with 0x04,0x02. 10722 */ 10723 if ((lun->flags & CTL_LUN_STOPPED) 10724 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) { 10725 /* "Logical unit not ready, initializing cmd. 
required" */ 10726 ctl_set_lun_stopped(ctsio); 10727 retval = 1; 10728 goto bailout; 10729 } 10730 10731 if ((lun->flags & CTL_LUN_INOPERABLE) 10732 && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) { 10733 /* "Medium format corrupted" */ 10734 ctl_set_medium_format_corrupted(ctsio); 10735 retval = 1; 10736 goto bailout; 10737 } 10738 10739bailout: 10740 return (retval); 10741 10742} 10743 10744static void 10745ctl_failover_io(union ctl_io *io, int have_lock) 10746{ 10747 ctl_set_busy(&io->scsiio); 10748 ctl_done(io); 10749} 10750 10751static void 10752ctl_failover(void) 10753{ 10754 struct ctl_lun *lun; 10755 struct ctl_softc *ctl_softc; 10756 union ctl_io *next_io, *pending_io; 10757 union ctl_io *io; 10758 int lun_idx; 10759 int i; 10760 10761 ctl_softc = control_softc; 10762 10763 mtx_lock(&ctl_softc->ctl_lock); 10764 /* 10765 * Remove any cmds from the other SC from the rtr queue. These 10766 * will obviously only be for LUNs for which we're the primary. 10767 * We can't send status or get/send data for these commands. 10768 * Since they haven't been executed yet, we can just remove them. 10769 * We'll either abort them or delete them below, depending on 10770 * which HA mode we're in. 10771 */ 10772#ifdef notyet 10773 mtx_lock(&ctl_softc->queue_lock); 10774 for (io = (union ctl_io *)STAILQ_FIRST(&ctl_softc->rtr_queue); 10775 io != NULL; io = next_io) { 10776 next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links); 10777 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 10778 STAILQ_REMOVE(&ctl_softc->rtr_queue, &io->io_hdr, 10779 ctl_io_hdr, links); 10780 } 10781 mtx_unlock(&ctl_softc->queue_lock); 10782#endif 10783 10784 for (lun_idx=0; lun_idx < ctl_softc->num_luns; lun_idx++) { 10785 lun = ctl_softc->ctl_luns[lun_idx]; 10786 if (lun==NULL) 10787 continue; 10788 10789 /* 10790 * Processor LUNs are primary on both sides. 10791 * XXX will this always be true? 10792 */ 10793 if (lun->be_lun->lun_type == T_PROCESSOR) 10794 continue; 10795 10796 if ((lun->flags & CTL_LUN_PRIMARY_SC) 10797 && (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)) { 10798 printf("FAILOVER: primary lun %d\n", lun_idx); 10799 /* 10800 * Remove all commands from the other SC. First from the 10801 * blocked queue then from the ooa queue. Once we have 10802 * removed them. Call ctl_check_blocked to see if there 10803 * is anything that can run. 10804 */ 10805 for (io = (union ctl_io *)TAILQ_FIRST( 10806 &lun->blocked_queue); io != NULL; io = next_io) { 10807 10808 next_io = (union ctl_io *)TAILQ_NEXT( 10809 &io->io_hdr, blocked_links); 10810 10811 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) { 10812 TAILQ_REMOVE(&lun->blocked_queue, 10813 &io->io_hdr,blocked_links); 10814 io->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 10815 TAILQ_REMOVE(&lun->ooa_queue, 10816 &io->io_hdr, ooa_links); 10817 10818 ctl_free_io(io); 10819 } 10820 } 10821 10822 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 10823 io != NULL; io = next_io) { 10824 10825 next_io = (union ctl_io *)TAILQ_NEXT( 10826 &io->io_hdr, ooa_links); 10827 10828 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) { 10829 10830 TAILQ_REMOVE(&lun->ooa_queue, 10831 &io->io_hdr, 10832 ooa_links); 10833 10834 ctl_free_io(io); 10835 } 10836 } 10837 ctl_check_blocked(lun); 10838 } else if ((lun->flags & CTL_LUN_PRIMARY_SC) 10839 && (ctl_softc->ha_mode == CTL_HA_MODE_XFER)) { 10840 10841 printf("FAILOVER: primary lun %d\n", lun_idx); 10842 /* 10843 * Abort all commands from the other SC. We can't 10844 * send status back for them now. 
These should get 10845 * cleaned up when they are completed or come out 10846 * for a datamove operation. 10847 */ 10848 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 10849 io != NULL; io = next_io) { 10850 next_io = (union ctl_io *)TAILQ_NEXT( 10851 &io->io_hdr, ooa_links); 10852 10853 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 10854 io->io_hdr.flags |= CTL_FLAG_ABORT; 10855 } 10856 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0) 10857 && (ctl_softc->ha_mode == CTL_HA_MODE_XFER)) { 10858 10859 printf("FAILOVER: secondary lun %d\n", lun_idx); 10860 10861 lun->flags |= CTL_LUN_PRIMARY_SC; 10862 10863 /* 10864 * We send all I/O that was sent to this controller 10865 * and redirected to the other side back with 10866 * busy status, and have the initiator retry it. 10867 * Figuring out how much data has been transferred, 10868 * etc. and picking up where we left off would be 10869 * very tricky. 10870 * 10871 * XXX KDM need to remove I/O from the blocked 10872 * queue as well! 10873 */ 10874 for (pending_io = (union ctl_io *)TAILQ_FIRST( 10875 &lun->ooa_queue); pending_io != NULL; 10876 pending_io = next_io) { 10877 10878 next_io = (union ctl_io *)TAILQ_NEXT( 10879 &pending_io->io_hdr, ooa_links); 10880 10881 pending_io->io_hdr.flags &= 10882 ~CTL_FLAG_SENT_2OTHER_SC; 10883 10884 if (pending_io->io_hdr.flags & 10885 CTL_FLAG_IO_ACTIVE) { 10886 pending_io->io_hdr.flags |= 10887 CTL_FLAG_FAILOVER; 10888 } else { 10889 ctl_set_busy(&pending_io->scsiio); 10890 ctl_done(pending_io); 10891 } 10892 } 10893 10894 /* 10895 * Build Unit Attention 10896 */ 10897 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 10898 lun->pending_sense[i].ua_pending |= 10899 CTL_UA_ASYM_ACC_CHANGE; 10900 } 10901 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0) 10902 && (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)) { 10903 printf("FAILOVER: secondary lun %d\n", lun_idx); 10904 /* 10905 * if the first io on the OOA is not on the RtR queue 10906 * add it. 
10907 */ 10908 lun->flags |= CTL_LUN_PRIMARY_SC; 10909 10910 pending_io = (union ctl_io *)TAILQ_FIRST( 10911 &lun->ooa_queue); 10912 if (pending_io==NULL) { 10913 printf("Nothing on OOA queue\n"); 10914 continue; 10915 } 10916 10917 pending_io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 10918 if ((pending_io->io_hdr.flags & 10919 CTL_FLAG_IS_WAS_ON_RTR) == 0) { 10920 pending_io->io_hdr.flags |= 10921 CTL_FLAG_IS_WAS_ON_RTR; 10922 ctl_enqueue_rtr(pending_io); 10923 } 10924#if 0 10925 else 10926 { 10927 printf("Tag 0x%04x is running\n", 10928 pending_io->scsiio.tag_num); 10929 } 10930#endif 10931 10932 next_io = (union ctl_io *)TAILQ_NEXT( 10933 &pending_io->io_hdr, ooa_links); 10934 for (pending_io=next_io; pending_io != NULL; 10935 pending_io = next_io) { 10936 pending_io->io_hdr.flags &= 10937 ~CTL_FLAG_SENT_2OTHER_SC; 10938 next_io = (union ctl_io *)TAILQ_NEXT( 10939 &pending_io->io_hdr, ooa_links); 10940 if (pending_io->io_hdr.flags & 10941 CTL_FLAG_IS_WAS_ON_RTR) { 10942#if 0 10943 printf("Tag 0x%04x is running\n", 10944 pending_io->scsiio.tag_num); 10945#endif 10946 continue; 10947 } 10948 10949 switch (ctl_check_ooa(lun, pending_io, 10950 (union ctl_io *)TAILQ_PREV( 10951 &pending_io->io_hdr, ctl_ooaq, 10952 ooa_links))) { 10953 10954 case CTL_ACTION_BLOCK: 10955 TAILQ_INSERT_TAIL(&lun->blocked_queue, 10956 &pending_io->io_hdr, 10957 blocked_links); 10958 pending_io->io_hdr.flags |= 10959 CTL_FLAG_BLOCKED; 10960 break; 10961 case CTL_ACTION_PASS: 10962 case CTL_ACTION_SKIP: 10963 pending_io->io_hdr.flags |= 10964 CTL_FLAG_IS_WAS_ON_RTR; 10965 ctl_enqueue_rtr(pending_io); 10966 break; 10967 case CTL_ACTION_OVERLAP: 10968 ctl_set_overlapped_cmd( 10969 (struct ctl_scsiio *)pending_io); 10970 ctl_done(pending_io); 10971 break; 10972 case CTL_ACTION_OVERLAP_TAG: 10973 ctl_set_overlapped_tag( 10974 (struct ctl_scsiio *)pending_io, 10975 pending_io->scsiio.tag_num & 0xff); 10976 ctl_done(pending_io); 10977 break; 10978 case CTL_ACTION_ERROR: 10979 default: 10980 ctl_set_internal_failure( 10981 (struct ctl_scsiio *)pending_io, 10982 0, // sks_valid 10983 0); //retry count 10984 ctl_done(pending_io); 10985 break; 10986 } 10987 } 10988 10989 /* 10990 * Build Unit Attention 10991 */ 10992 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 10993 lun->pending_sense[i].ua_pending |= 10994 CTL_UA_ASYM_ACC_CHANGE; 10995 } 10996 } else { 10997 panic("Unhandled HA mode failover, LUN flags = %#x, " 10998 "ha_mode = #%x", lun->flags, ctl_softc->ha_mode); 10999 } 11000 } 11001 ctl_pause_rtr = 0; 11002 mtx_unlock(&ctl_softc->ctl_lock); 11003} 11004 11005static int 11006ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio) 11007{ 11008 struct ctl_lun *lun; 11009 struct ctl_cmd_entry *entry; 11010 uint8_t opcode; 11011 uint32_t initidx, targ_lun; 11012 int retval; 11013 11014 retval = 0; 11015 11016 lun = NULL; 11017 11018 opcode = ctsio->cdb[0]; 11019 11020 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 11021 if ((targ_lun < CTL_MAX_LUNS) 11022 && (ctl_softc->ctl_luns[targ_lun] != NULL)) { 11023 lun = ctl_softc->ctl_luns[targ_lun]; 11024 /* 11025 * If the LUN is invalid, pretend that it doesn't exist. 11026 * It will go away as soon as all pending I/O has been 11027 * completed. 
11028 */ 11029 if (lun->flags & CTL_LUN_DISABLED) { 11030 lun = NULL; 11031 } else { 11032 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun; 11033 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = 11034 lun->be_lun; 11035 if (lun->be_lun->lun_type == T_PROCESSOR) { 11036 ctsio->io_hdr.flags |= CTL_FLAG_CONTROL_DEV; 11037 } 11038 } 11039 } else { 11040 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL; 11041 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL; 11042 } 11043 11044 entry = &ctl_cmd_table[opcode]; 11045 11046 ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 11047 ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; 11048 11049 /* 11050 * Check to see whether we can send this command to LUNs that don't 11051 * exist. This should pretty much only be the case for inquiry 11052 * and request sense. Further checks, below, really require having 11053 * a LUN, so we can't really check the command anymore. Just put 11054 * it on the rtr queue. 11055 */ 11056 if (lun == NULL) { 11057 if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) { 11058 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11059 ctl_enqueue_rtr((union ctl_io *)ctsio); 11060 return (retval); 11061 } 11062 11063 ctl_set_unsupported_lun(ctsio); 11064 ctl_done((union ctl_io *)ctsio); 11065 CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n")); 11066 return (retval); 11067 } else { 11068 mtx_lock(&lun->lun_lock); 11069 11070 /* 11071 * Every I/O goes into the OOA queue for a particular LUN, and 11072 * stays there until completion. 11073 */ 11074 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 11075 11076 /* 11077 * Make sure we support this particular command on this LUN. 11078 * e.g., we don't support writes to the control LUN. 11079 */ 11080 switch (lun->be_lun->lun_type) { 11081 case T_PROCESSOR: 11082 if (((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) 11083 && ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) 11084 == 0)) { 11085 mtx_unlock(&lun->lun_lock); 11086 ctl_set_invalid_opcode(ctsio); 11087 ctl_done((union ctl_io *)ctsio); 11088 return (retval); 11089 } 11090 break; 11091 case T_DIRECT: 11092 if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0) 11093 && ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) 11094 == 0)){ 11095 mtx_unlock(&lun->lun_lock); 11096 ctl_set_invalid_opcode(ctsio); 11097 ctl_done((union ctl_io *)ctsio); 11098 return (retval); 11099 } 11100 break; 11101 default: 11102 mtx_unlock(&lun->lun_lock); 11103 panic("Unsupported CTL LUN type %d\n", 11104 lun->be_lun->lun_type); 11105 } 11106 } 11107 11108 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11109 11110 /* 11111 * If we've got a request sense, it'll clear the contingent 11112 * allegiance condition. Otherwise, if we have a CA condition for 11113 * this initiator, clear it, because it sent down a command other 11114 * than request sense. 11115 */ 11116 if ((opcode != REQUEST_SENSE) 11117 && (ctl_is_set(lun->have_ca, initidx))) 11118 ctl_clear_mask(lun->have_ca, initidx); 11119 11120 /* 11121 * If the command has this flag set, it handles its own unit 11122 * attention reporting, we shouldn't do anything. Otherwise we 11123 * check for any pending unit attentions, and send them back to the 11124 * initiator. We only do this when a command initially comes in, 11125 * not when we pull it off the blocked queue. 
11126 * 11127 * According to SAM-3, section 5.3.2, the order that things get 11128 * presented back to the host is basically unit attentions caused 11129 * by some sort of reset event, busy status, reservation conflicts 11130 * or task set full, and finally any other status. 11131 * 11132 * One issue here is that some of the unit attentions we report 11133 * don't fall into the "reset" category (e.g. "reported luns data 11134 * has changed"). So reporting it here, before the reservation 11135 * check, may be technically wrong. I guess the only thing to do 11136 * would be to check for and report the reset events here, and then 11137 * check for the other unit attention types after we check for a 11138 * reservation conflict. 11139 * 11140 * XXX KDM need to fix this 11141 */ 11142 if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { 11143 ctl_ua_type ua_type; 11144 11145 ua_type = lun->pending_sense[initidx].ua_pending; 11146 if (ua_type != CTL_UA_NONE) { 11147 scsi_sense_data_type sense_format; 11148 11149 if (lun != NULL) 11150 sense_format = (lun->flags & 11151 CTL_LUN_SENSE_DESC) ? SSD_TYPE_DESC : 11152 SSD_TYPE_FIXED; 11153 else 11154 sense_format = SSD_TYPE_FIXED; 11155 11156 ua_type = ctl_build_ua(ua_type, &ctsio->sense_data, 11157 sense_format); 11158 if (ua_type != CTL_UA_NONE) { 11159 ctsio->scsi_status = SCSI_STATUS_CHECK_COND; 11160 ctsio->io_hdr.status = CTL_SCSI_ERROR | 11161 CTL_AUTOSENSE; 11162 ctsio->sense_len = SSD_FULL_SIZE; 11163 lun->pending_sense[initidx].ua_pending &= 11164 ~ua_type; 11165 mtx_unlock(&lun->lun_lock); 11166 ctl_done((union ctl_io *)ctsio); 11167 return (retval); 11168 } 11169 } 11170 } 11171 11172 11173 if (ctl_scsiio_lun_check(ctl_softc, lun, entry, ctsio) != 0) { 11174 mtx_unlock(&lun->lun_lock); 11175 ctl_done((union ctl_io *)ctsio); 11176 return (retval); 11177 } 11178 11179 /* 11180 * XXX CHD this is where we want to send IO to other side if 11181 * this LUN is secondary on this SC. We will need to make a copy 11182 * of the IO and flag the IO on this side as SENT_2OTHER and the flag 11183 * the copy we send as FROM_OTHER. 11184 * We also need to stuff the address of the original IO so we can 11185 * find it easily. Something similar will need be done on the other 11186 * side so when we are done we can find the copy. 11187 */ 11188 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { 11189 union ctl_ha_msg msg_info; 11190 int isc_retval; 11191 11192 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11193 11194 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 11195 msg_info.hdr.original_sc = (union ctl_io *)ctsio; 11196#if 0 11197 printf("1. ctsio %p\n", ctsio); 11198#endif 11199 msg_info.hdr.serializing_sc = NULL; 11200 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 11201 msg_info.scsi.tag_num = ctsio->tag_num; 11202 msg_info.scsi.tag_type = ctsio->tag_type; 11203 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 11204 11205 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11206 11207 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11208 (void *)&msg_info, sizeof(msg_info), 0)) > 11209 CTL_HA_STATUS_SUCCESS) { 11210 printf("CTL:precheck, ctl_ha_msg_send returned %d\n", 11211 isc_retval); 11212 printf("CTL:opcode is %x\n",opcode); 11213 } else { 11214#if 0 11215 printf("CTL:Precheck sent msg, opcode is %x\n",opcode); 11216#endif 11217 } 11218 11219 /* 11220 * XXX KDM this I/O is off the incoming queue, but hasn't 11221 * been inserted on any other queue. 
We may need to come 11222 * up with a holding queue while we wait for serialization 11223 * so that we have an idea of what we're waiting for from 11224 * the other side. 11225 */ 11226 mtx_unlock(&lun->lun_lock); 11227 return (retval); 11228 } 11229 11230 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 11231 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, 11232 ctl_ooaq, ooa_links))) { 11233 case CTL_ACTION_BLOCK: 11234 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 11235 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 11236 blocked_links); 11237 mtx_unlock(&lun->lun_lock); 11238 return (retval); 11239 case CTL_ACTION_PASS: 11240 case CTL_ACTION_SKIP: 11241 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11242 mtx_unlock(&lun->lun_lock); 11243 ctl_enqueue_rtr((union ctl_io *)ctsio); 11244 break; 11245 case CTL_ACTION_OVERLAP: 11246 mtx_unlock(&lun->lun_lock); 11247 ctl_set_overlapped_cmd(ctsio); 11248 ctl_done((union ctl_io *)ctsio); 11249 break; 11250 case CTL_ACTION_OVERLAP_TAG: 11251 mtx_unlock(&lun->lun_lock); 11252 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 11253 ctl_done((union ctl_io *)ctsio); 11254 break; 11255 case CTL_ACTION_ERROR: 11256 default: 11257 mtx_unlock(&lun->lun_lock); 11258 ctl_set_internal_failure(ctsio, 11259 /*sks_valid*/ 0, 11260 /*retry_count*/ 0); 11261 ctl_done((union ctl_io *)ctsio); 11262 break; 11263 } 11264 return (retval); 11265} 11266 11267static int 11268ctl_scsiio(struct ctl_scsiio *ctsio) 11269{ 11270 int retval; 11271 struct ctl_cmd_entry *entry; 11272 11273 retval = CTL_RETVAL_COMPLETE; 11274 11275 CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); 11276 11277 entry = &ctl_cmd_table[ctsio->cdb[0]]; 11278 11279 /* 11280 * If this I/O has been aborted, just send it straight to 11281 * ctl_done() without executing it. 11282 */ 11283 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { 11284 ctl_done((union ctl_io *)ctsio); 11285 goto bailout; 11286 } 11287 11288 /* 11289 * All the checks should have been handled by ctl_scsiio_precheck(). 11290 * We should be clear now to just execute the I/O. 11291 */ 11292 retval = entry->execute(ctsio); 11293 11294bailout: 11295 return (retval); 11296} 11297 11298/* 11299 * Since we only implement one target right now, a bus reset simply resets 11300 * our single target. 11301 */ 11302static int 11303ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io) 11304{ 11305 return(ctl_target_reset(ctl_softc, io, CTL_UA_BUS_RESET)); 11306} 11307 11308static int 11309ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io, 11310 ctl_ua_type ua_type) 11311{ 11312 struct ctl_lun *lun; 11313 int retval; 11314 11315 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11316 union ctl_ha_msg msg_info; 11317 11318 io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11319 msg_info.hdr.nexus = io->io_hdr.nexus; 11320 if (ua_type==CTL_UA_TARG_RESET) 11321 msg_info.task.task_action = CTL_TASK_TARGET_RESET; 11322 else 11323 msg_info.task.task_action = CTL_TASK_BUS_RESET; 11324 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11325 msg_info.hdr.original_sc = NULL; 11326 msg_info.hdr.serializing_sc = NULL; 11327 if (CTL_HA_STATUS_SUCCESS != ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11328 (void *)&msg_info, sizeof(msg_info), 0)) { 11329 } 11330 } 11331 retval = 0; 11332 11333 mtx_lock(&ctl_softc->ctl_lock); 11334 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) 11335 retval += ctl_lun_reset(lun, io, ua_type); 11336 mtx_unlock(&ctl_softc->ctl_lock); 11337 11338 return (retval); 11339} 11340 11341/* 11342 * The LUN should always be set. 
The I/O is optional, and is used to
11343 * distinguish between I/Os sent by this initiator, and by other
11344 * initiators.  We set unit attention for initiators other than this one.
11345 * SAM-3 is vague on this point.  It does say that a unit attention should
11346 * be established for other initiators when a LUN is reset (see section
11347 * 5.7.3), but it doesn't specifically say that the unit attention should
11348 * be established for this particular initiator when a LUN is reset.  Here
11349 * is the relevant text, from SAM-3 rev 8:
11350 *
11351 * 5.7.2 When a SCSI initiator port aborts its own tasks
11352 *
11353 * When a SCSI initiator port causes its own task(s) to be aborted, no
11354 * notification that the task(s) have been aborted shall be returned to
11355 * the SCSI initiator port other than the completion response for the
11356 * command or task management function action that caused the task(s) to
11357 * be aborted and notification(s) associated with related effects of the
11358 * action (e.g., a reset unit attention condition).
11359 *
11360 * XXX KDM for now, we're setting unit attention for all initiators.
11361 */
11362static int
11363ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
11364{
11365	union ctl_io *xio;
11366#if 0
11367	uint32_t initindex;
11368#endif
11369	int i;
11370
11371	mtx_lock(&lun->lun_lock);
11372	/*
11373	 * Run through the OOA queue and abort each I/O.
11374	 */
11375#if 0
11376	TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) {
11377#endif
11378	for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
11379	     xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
11380		xio->io_hdr.flags |= CTL_FLAG_ABORT;
11381	}
11382
11383	/*
11384	 * This version sets unit attention for every initiator except the one that issued the reset.
11385	 */
11386#if 0
11387	initindex = ctl_get_initindex(&io->io_hdr.nexus);
11388	for (i = 0; i < CTL_MAX_INITIATORS; i++) {
11389		if (initindex == i)
11390			continue;
11391		lun->pending_sense[i].ua_pending |= ua_type;
11392	}
11393#endif
11394
11395	/*
11396	 * A reset (any kind, really) clears reservations established with
11397	 * RESERVE/RELEASE.  It does not clear reservations established
11398	 * with PERSISTENT RESERVE OUT, but we don't support that at the
11399	 * moment anyway.  See SPC-2, section 5.6.  SPC-3 doesn't address
11400	 * reservations made with the RESERVE/RELEASE commands, because
11401	 * those commands are obsolete in SPC-3.
11402	 */
11403	lun->flags &= ~CTL_LUN_RESERVED;
11404
11405	for (i = 0; i < CTL_MAX_INITIATORS; i++) {
11406		ctl_clear_mask(lun->have_ca, i);
11407		lun->pending_sense[i].ua_pending |= ua_type;
11408	}
11409	mtx_unlock(&lun->lun_lock);
11410
11411	return (0);
11412}
11413
11414static int
11415ctl_abort_task(union ctl_io *io)
11416{
11417	union ctl_io *xio;
11418	struct ctl_lun *lun;
11419	struct ctl_softc *ctl_softc;
11420#if 0
11421	struct sbuf sb;
11422	char printbuf[128];
11423#endif
11424	int found;
11425	uint32_t targ_lun;
11426
11427	ctl_softc = control_softc;
11428	found = 0;
11429
11430	/*
11431	 * Look up the LUN.
11432 */ 11433 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11434 mtx_lock(&ctl_softc->ctl_lock); 11435 if ((targ_lun < CTL_MAX_LUNS) 11436 && (ctl_softc->ctl_luns[targ_lun] != NULL)) 11437 lun = ctl_softc->ctl_luns[targ_lun]; 11438 else { 11439 mtx_unlock(&ctl_softc->ctl_lock); 11440 goto bailout; 11441 } 11442 11443#if 0 11444 printf("ctl_abort_task: called for lun %lld, tag %d type %d\n", 11445 lun->lun, io->taskio.tag_num, io->taskio.tag_type); 11446#endif 11447 11448 mtx_lock(&lun->lun_lock); 11449 mtx_unlock(&ctl_softc->ctl_lock); 11450 /* 11451 * Run through the OOA queue and attempt to find the given I/O. 11452 * The target port, initiator ID, tag type and tag number have to 11453 * match the values that we got from the initiator. If we have an 11454 * untagged command to abort, simply abort the first untagged command 11455 * we come to. We only allow one untagged command at a time of course. 11456 */ 11457#if 0 11458 TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) { 11459#endif 11460 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11461 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11462#if 0 11463 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN); 11464 11465 sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ", 11466 lun->lun, xio->scsiio.tag_num, 11467 xio->scsiio.tag_type, 11468 (xio->io_hdr.blocked_links.tqe_prev 11469 == NULL) ? "" : " BLOCKED", 11470 (xio->io_hdr.flags & 11471 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 11472 (xio->io_hdr.flags & 11473 CTL_FLAG_ABORT) ? " ABORT" : "", 11474 (xio->io_hdr.flags & 11475 CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : "")); 11476 ctl_scsi_command_string(&xio->scsiio, NULL, &sb); 11477 sbuf_finish(&sb); 11478 printf("%s\n", sbuf_data(&sb)); 11479#endif 11480 11481 if ((xio->io_hdr.nexus.targ_port == io->io_hdr.nexus.targ_port) 11482 && (xio->io_hdr.nexus.initid.id == 11483 io->io_hdr.nexus.initid.id)) { 11484 /* 11485 * If the abort says that the task is untagged, the 11486 * task in the queue must be untagged. Otherwise, 11487 * we just check to see whether the tag numbers 11488 * match. This is because the QLogic firmware 11489 * doesn't pass back the tag type in an abort 11490 * request. 11491 */ 11492#if 0 11493 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) 11494 && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) 11495 || (xio->scsiio.tag_num == io->taskio.tag_num)) { 11496#endif 11497 /* 11498 * XXX KDM we've got problems with FC, because it 11499 * doesn't send down a tag type with aborts. So we 11500 * can only really go by the tag number... 11501 * This may cause problems with parallel SCSI. 11502 * Need to figure that out!! 
11503			 */
11504			if (xio->scsiio.tag_num == io->taskio.tag_num) {
11505				xio->io_hdr.flags |= CTL_FLAG_ABORT;
11506				found = 1;
11507				if ((io->io_hdr.flags &
11508				     CTL_FLAG_FROM_OTHER_SC) == 0 &&
11509				    !(lun->flags & CTL_LUN_PRIMARY_SC)) {
11510					union ctl_ha_msg msg_info;
11511
11512					io->io_hdr.flags |=
11513					    CTL_FLAG_SENT_2OTHER_SC;
11514					msg_info.hdr.nexus = io->io_hdr.nexus;
11515					msg_info.task.task_action =
11516					    CTL_TASK_ABORT_TASK;
11517					msg_info.task.tag_num =
11518					    io->taskio.tag_num;
11519					msg_info.task.tag_type =
11520					    io->taskio.tag_type;
11521					msg_info.hdr.msg_type =
11522					    CTL_MSG_MANAGE_TASKS;
11523					msg_info.hdr.original_sc = NULL;
11524					msg_info.hdr.serializing_sc = NULL;
11525#if 0
11526					printf("Sent Abort to other side\n");
11527#endif
11528					if (CTL_HA_STATUS_SUCCESS !=
11529					    ctl_ha_msg_send(CTL_HA_CHAN_CTL,
11530					    (void *)&msg_info,
11531					    sizeof(msg_info), 0)) {
11532					}
11533				}
11534#if 0
11535				printf("ctl_abort_task: found I/O to abort\n");
11536#endif
11537				break;
11538			}
11539		}
11540	}
11541	mtx_unlock(&lun->lun_lock);
11542
11543bailout:
11544
11545	if (found == 0) {
11546		/*
11547		 * This isn't really an error.  It's entirely possible for
11548		 * the abort and command completion to cross on the wire.
11549		 * This is more of an informative/diagnostic message.
11550		 */
11551#if 0
11552		printf("ctl_abort_task: ABORT sent for nonexistent I/O: "
11553		       "%d:%d:%d:%d tag %d type %d\n",
11554		       io->io_hdr.nexus.initid.id,
11555		       io->io_hdr.nexus.targ_port,
11556		       io->io_hdr.nexus.targ_target.id,
11557		       io->io_hdr.nexus.targ_lun, io->taskio.tag_num,
11558		       io->taskio.tag_type);
11559#endif
11560		return (1);
11561	} else
11562		return (0);
11563}
11564
11565static void
11566ctl_run_task(union ctl_io *io)
11567{
11568	struct ctl_softc *ctl_softc;
11569	int retval;
11570	const char *task_desc;
11571
11572	CTL_DEBUG_PRINT(("ctl_run_task\n"));
11573
11574	ctl_softc = control_softc;
11575	retval = 0;
11576
11577	KASSERT(io->io_hdr.io_type == CTL_IO_TASK,
11578	    ("ctl_run_task: Unexpected io_type %d\n",
11579	     io->io_hdr.io_type));
11580
11581	task_desc = ctl_scsi_task_string(&io->taskio);
11582	if (task_desc != NULL) {
11583#ifdef NEEDTOPORT
11584		csevent_log(CSC_CTL | CSC_SHELF_SW |
11585			    CTL_TASK_REPORT,
11586			    csevent_LogType_Trace,
11587			    csevent_Severity_Information,
11588			    csevent_AlertLevel_Green,
11589			    csevent_FRU_Firmware,
11590			    csevent_FRU_Unknown,
11591			    "CTL: received task: %s", task_desc);
11592#endif
11593	} else {
11594#ifdef NEEDTOPORT
11595		csevent_log(CSC_CTL | CSC_SHELF_SW |
11596			    CTL_TASK_REPORT,
11597			    csevent_LogType_Trace,
11598			    csevent_Severity_Information,
11599			    csevent_AlertLevel_Green,
11600			    csevent_FRU_Firmware,
11601			    csevent_FRU_Unknown,
11602			    "CTL: received unknown task "
11603			    "type: %d (%#x)",
11604			    io->taskio.task_action,
11605			    io->taskio.task_action);
11606#endif
11607	}
11608	switch (io->taskio.task_action) {
11609	case CTL_TASK_ABORT_TASK:
11610		retval = ctl_abort_task(io);
11611		break;
11612	case CTL_TASK_ABORT_TASK_SET:
11613		break;
11614	case CTL_TASK_CLEAR_ACA:
11615		break;
11616	case CTL_TASK_CLEAR_TASK_SET:
11617		break;
11618	case CTL_TASK_LUN_RESET: {
11619		struct ctl_lun *lun;
11620		uint32_t targ_lun;
11621
11622
11623		targ_lun = io->io_hdr.nexus.targ_mapped_lun;
11624		mtx_lock(&ctl_softc->ctl_lock);
11625		if ((targ_lun < CTL_MAX_LUNS)
11626		    && (ctl_softc->ctl_luns[targ_lun] != NULL))
11627			lun = ctl_softc->ctl_luns[targ_lun];
11628		else {
11629			mtx_unlock(&ctl_softc->ctl_lock);
11630			retval = 1;
11631			break;
11632		}
11633
11634		if (!(io->io_hdr.flags
& 11635 CTL_FLAG_FROM_OTHER_SC)) { 11636 union ctl_ha_msg msg_info; 11637 11638 io->io_hdr.flags |= 11639 CTL_FLAG_SENT_2OTHER_SC; 11640 msg_info.hdr.msg_type = 11641 CTL_MSG_MANAGE_TASKS; 11642 msg_info.hdr.nexus = io->io_hdr.nexus; 11643 msg_info.task.task_action = 11644 CTL_TASK_LUN_RESET; 11645 msg_info.hdr.original_sc = NULL; 11646 msg_info.hdr.serializing_sc = NULL; 11647 if (CTL_HA_STATUS_SUCCESS != 11648 ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11649 (void *)&msg_info, 11650 sizeof(msg_info), 0)) { 11651 } 11652 } 11653 11654 retval = ctl_lun_reset(lun, io, 11655 CTL_UA_LUN_RESET); 11656 mtx_unlock(&ctl_softc->ctl_lock); 11657 break; 11658 } 11659 case CTL_TASK_TARGET_RESET: 11660 retval = ctl_target_reset(ctl_softc, io, CTL_UA_TARG_RESET); 11661 break; 11662 case CTL_TASK_BUS_RESET: 11663 retval = ctl_bus_reset(ctl_softc, io); 11664 break; 11665 case CTL_TASK_PORT_LOGIN: 11666 break; 11667 case CTL_TASK_PORT_LOGOUT: 11668 break; 11669 default: 11670 printf("ctl_run_task: got unknown task management event %d\n", 11671 io->taskio.task_action); 11672 break; 11673 } 11674 if (retval == 0) 11675 io->io_hdr.status = CTL_SUCCESS; 11676 else 11677 io->io_hdr.status = CTL_ERROR; 11678 11679 /* 11680 * This will queue this I/O to the done queue, but the 11681 * work thread won't be able to process it until we 11682 * return and the lock is released. 11683 */ 11684 ctl_done(io); 11685} 11686 11687/* 11688 * For HA operation. Handle commands that come in from the other 11689 * controller. 11690 */ 11691static void 11692ctl_handle_isc(union ctl_io *io) 11693{ 11694 int free_io; 11695 struct ctl_lun *lun; 11696 struct ctl_softc *ctl_softc; 11697 uint32_t targ_lun; 11698 11699 ctl_softc = control_softc; 11700 11701 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11702 lun = ctl_softc->ctl_luns[targ_lun]; 11703 11704 switch (io->io_hdr.msg_type) { 11705 case CTL_MSG_SERIALIZE: 11706 free_io = ctl_serialize_other_sc_cmd(&io->scsiio); 11707 break; 11708 case CTL_MSG_R2R: { 11709 uint8_t opcode; 11710 struct ctl_cmd_entry *entry; 11711 11712 /* 11713 * This is only used in SER_ONLY mode. 
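		 *
		 * In SER_ONLY mode the command was shadowed to the peer SC
		 * with CTL_MSG_SERIALIZE; R2R ("ready to run") appears to be
		 * the peer telling us that shadow copy has cleared
		 * serialization there, so we re-check LUN state and queue
		 * our copy onto the local run queue.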
11714 */ 11715 free_io = 0; 11716 opcode = io->scsiio.cdb[0]; 11717 entry = &ctl_cmd_table[opcode]; 11718 mtx_lock(&lun->lun_lock); 11719 if (ctl_scsiio_lun_check(ctl_softc, lun, 11720 entry, (struct ctl_scsiio *)io) != 0) { 11721 mtx_unlock(&lun->lun_lock); 11722 ctl_done(io); 11723 break; 11724 } 11725 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11726 mtx_unlock(&lun->lun_lock); 11727 ctl_enqueue_rtr(io); 11728 break; 11729 } 11730 case CTL_MSG_FINISH_IO: 11731 if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 11732 free_io = 0; 11733 ctl_done(io); 11734 } else { 11735 free_io = 1; 11736 mtx_lock(&lun->lun_lock); 11737 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, 11738 ooa_links); 11739 ctl_check_blocked(lun); 11740 mtx_unlock(&lun->lun_lock); 11741 } 11742 break; 11743 case CTL_MSG_PERS_ACTION: 11744 ctl_hndl_per_res_out_on_other_sc( 11745 (union ctl_ha_msg *)&io->presio.pr_msg); 11746 free_io = 1; 11747 break; 11748 case CTL_MSG_BAD_JUJU: 11749 free_io = 0; 11750 ctl_done(io); 11751 break; 11752 case CTL_MSG_DATAMOVE: 11753 /* Only used in XFER mode */ 11754 free_io = 0; 11755 ctl_datamove_remote(io); 11756 break; 11757 case CTL_MSG_DATAMOVE_DONE: 11758 /* Only used in XFER mode */ 11759 free_io = 0; 11760 io->scsiio.be_move_done(io); 11761 break; 11762 default: 11763 free_io = 1; 11764 printf("%s: Invalid message type %d\n", 11765 __func__, io->io_hdr.msg_type); 11766 break; 11767 } 11768 if (free_io) 11769 ctl_free_io(io); 11770 11771} 11772 11773 11774/* 11775 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 11776 * there is no match. 11777 */ 11778static ctl_lun_error_pattern 11779ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) 11780{ 11781 struct ctl_cmd_entry *entry; 11782 ctl_lun_error_pattern filtered_pattern, pattern; 11783 uint8_t opcode; 11784 11785 pattern = desc->error_pattern; 11786 11787 /* 11788 * XXX KDM we need more data passed into this function to match a 11789 * custom pattern, and we actually need to implement custom pattern 11790 * matching. 11791 */ 11792 if (pattern & CTL_LUN_PAT_CMD) 11793 return (CTL_LUN_PAT_CMD); 11794 11795 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) 11796 return (CTL_LUN_PAT_ANY); 11797 11798 opcode = ctsio->cdb[0]; 11799 entry = &ctl_cmd_table[opcode]; 11800 11801 filtered_pattern = entry->pattern & pattern; 11802 11803 /* 11804 * If the user requested specific flags in the pattern (e.g. 11805 * CTL_LUN_PAT_RANGE), make sure the command supports all of those 11806 * flags. 11807 * 11808 * If the user did not specify any flags, it doesn't matter whether 11809 * or not the command supports the flags. 11810 */ 11811 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != 11812 (pattern & ~CTL_LUN_PAT_MASK)) 11813 return (CTL_LUN_PAT_NONE); 11814 11815 /* 11816 * If the user asked for a range check, see if the requested LBA 11817 * range overlaps with this command's LBA range. 11818 */ 11819 if (filtered_pattern & CTL_LUN_PAT_RANGE) { 11820 uint64_t lba1; 11821 uint32_t len1; 11822 ctl_action action; 11823 int retval; 11824 11825 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); 11826 if (retval != 0) 11827 return (CTL_LUN_PAT_NONE); 11828 11829 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, 11830 desc->lba_range.len); 11831 /* 11832 * A "pass" means that the LBA ranges don't overlap, so 11833 * this doesn't match the user's range criteria. 
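		 *
		 * For example, a descriptor covering LBAs 0x1000-0x10ff
		 * matches a READ at LBA 0x10f0 for 0x20 blocks (the extents
		 * overlap), but not one at LBA 0x1100 for the same length.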
11834 */ 11835 if (action == CTL_ACTION_PASS) 11836 return (CTL_LUN_PAT_NONE); 11837 } 11838 11839 return (filtered_pattern); 11840} 11841 11842static void 11843ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) 11844{ 11845 struct ctl_error_desc *desc, *desc2; 11846 11847 mtx_assert(&lun->lun_lock, MA_OWNED); 11848 11849 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 11850 ctl_lun_error_pattern pattern; 11851 /* 11852 * Check to see whether this particular command matches 11853 * the pattern in the descriptor. 11854 */ 11855 pattern = ctl_cmd_pattern_match(&io->scsiio, desc); 11856 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) 11857 continue; 11858 11859 switch (desc->lun_error & CTL_LUN_INJ_TYPE) { 11860 case CTL_LUN_INJ_ABORTED: 11861 ctl_set_aborted(&io->scsiio); 11862 break; 11863 case CTL_LUN_INJ_MEDIUM_ERR: 11864 ctl_set_medium_error(&io->scsiio); 11865 break; 11866 case CTL_LUN_INJ_UA: 11867 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET 11868 * OCCURRED */ 11869 ctl_set_ua(&io->scsiio, 0x29, 0x00); 11870 break; 11871 case CTL_LUN_INJ_CUSTOM: 11872 /* 11873 * We're assuming the user knows what he is doing. 11874 * Just copy the sense information without doing 11875 * checks. 11876 */ 11877 bcopy(&desc->custom_sense, &io->scsiio.sense_data, 11878 ctl_min(sizeof(desc->custom_sense), 11879 sizeof(io->scsiio.sense_data))); 11880 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; 11881 io->scsiio.sense_len = SSD_FULL_SIZE; 11882 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 11883 break; 11884 case CTL_LUN_INJ_NONE: 11885 default: 11886 /* 11887 * If this is an error injection type we don't know 11888 * about, clear the continuous flag (if it is set) 11889 * so it will get deleted below. 11890 */ 11891 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 11892 break; 11893 } 11894 /* 11895 * By default, each error injection action is a one-shot 11896 */ 11897 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 11898 continue; 11899 11900 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 11901 11902 free(desc, M_CTL); 11903 } 11904} 11905 11906#ifdef CTL_IO_DELAY 11907static void 11908ctl_datamove_timer_wakeup(void *arg) 11909{ 11910 union ctl_io *io; 11911 11912 io = (union ctl_io *)arg; 11913 11914 ctl_datamove(io); 11915} 11916#endif /* CTL_IO_DELAY */ 11917 11918void 11919ctl_datamove(union ctl_io *io) 11920{ 11921 void (*fe_datamove)(union ctl_io *io); 11922 11923 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED); 11924 11925 CTL_DEBUG_PRINT(("ctl_datamove\n")); 11926 11927#ifdef CTL_TIME_IO 11928 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 11929 char str[256]; 11930 char path_str[64]; 11931 struct sbuf sb; 11932 11933 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 11934 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 11935 11936 sbuf_cat(&sb, path_str); 11937 switch (io->io_hdr.io_type) { 11938 case CTL_IO_SCSI: 11939 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 11940 sbuf_printf(&sb, "\n"); 11941 sbuf_cat(&sb, path_str); 11942 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 11943 io->scsiio.tag_num, io->scsiio.tag_type); 11944 break; 11945 case CTL_IO_TASK: 11946 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 11947 "Tag Type: %d\n", io->taskio.task_action, 11948 io->taskio.tag_num, io->taskio.tag_type); 11949 break; 11950 default: 11951 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 11952 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 11953 break; 11954 } 11955 sbuf_cat(&sb, path_str); 11956 
sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", 11957 (intmax_t)time_uptime - io->io_hdr.start_time); 11958 sbuf_finish(&sb); 11959 printf("%s", sbuf_data(&sb)); 11960 } 11961#endif /* CTL_TIME_IO */ 11962 11963#ifdef CTL_IO_DELAY 11964 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 11965 struct ctl_lun *lun; 11966 11967 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 11968 11969 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 11970 } else { 11971 struct ctl_lun *lun; 11972 11973 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 11974 if ((lun != NULL) 11975 && (lun->delay_info.datamove_delay > 0)) { 11976 struct callout *callout; 11977 11978 callout = (struct callout *)&io->io_hdr.timer_bytes; 11979 callout_init(callout, /*mpsafe*/ 1); 11980 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 11981 callout_reset(callout, 11982 lun->delay_info.datamove_delay * hz, 11983 ctl_datamove_timer_wakeup, io); 11984 if (lun->delay_info.datamove_type == 11985 CTL_DELAY_TYPE_ONESHOT) 11986 lun->delay_info.datamove_delay = 0; 11987 return; 11988 } 11989 } 11990#endif 11991 11992 /* 11993 * This command has been aborted. Set the port status, so we fail 11994 * the data move. 11995 */ 11996 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 11997 printf("ctl_datamove: tag 0x%04x on (%ju:%d:%ju:%d) aborted\n", 11998 io->scsiio.tag_num,(uintmax_t)io->io_hdr.nexus.initid.id, 11999 io->io_hdr.nexus.targ_port, 12000 (uintmax_t)io->io_hdr.nexus.targ_target.id, 12001 io->io_hdr.nexus.targ_lun); 12002 io->io_hdr.status = CTL_CMD_ABORTED; 12003 io->io_hdr.port_status = 31337; 12004 /* 12005 * Note that the backend, in this case, will get the 12006 * callback in its context. In other cases it may get 12007 * called in the frontend's interrupt thread context. 12008 */ 12009 io->scsiio.be_move_done(io); 12010 return; 12011 } 12012 12013 /* 12014 * If we're in XFER mode and this I/O is from the other shelf 12015 * controller, we need to send the DMA to the other side to 12016 * actually transfer the data to/from the host. In serialize only 12017 * mode the transfer happens below CTL and ctl_datamove() is only 12018 * called on the machine that originally received the I/O. 12019 */ 12020 if ((control_softc->ha_mode == CTL_HA_MODE_XFER) 12021 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 12022 union ctl_ha_msg msg; 12023 uint32_t sg_entries_sent; 12024 int do_sg_copy; 12025 int i; 12026 12027 memset(&msg, 0, sizeof(msg)); 12028 msg.hdr.msg_type = CTL_MSG_DATAMOVE; 12029 msg.hdr.original_sc = io->io_hdr.original_sc; 12030 msg.hdr.serializing_sc = io; 12031 msg.hdr.nexus = io->io_hdr.nexus; 12032 msg.dt.flags = io->io_hdr.flags; 12033 /* 12034 * We convert everything into a S/G list here. We can't 12035 * pass by reference, only by value between controllers. 12036 * So we can't pass a pointer to the S/G list, only as many 12037 * S/G entries as we can fit in here. If it's possible for 12038 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, 12039 * then we need to break this up into multiple transfers. 12040 */ 12041 if (io->scsiio.kern_sg_entries == 0) { 12042 msg.dt.kern_sg_entries = 1; 12043 /* 12044 * If this is in cached memory, flush the cache 12045 * before we send the DMA request to the other 12046 * controller. We want to do this in either the 12047 * read or the write case. The read case is 12048 * straightforward. In the write case, we want to 12049 * make sure nothing is in the local cache that 12050 * could overwrite the DMAed data. 
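			 * (The flush itself is still a stub; note the
			 * bus_dmamap_sync() XXX just below.)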
12051			 */ 12052			if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 12053				/* 12054				 * XXX KDM use bus_dmamap_sync() here. 12055				 */ 12056			} 12057 12058			/* 12059			 * Convert to a physical address if this is a 12060			 * virtual address. 12061			 */ 12062			if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 12063				msg.dt.sg_list[0].addr = 12064					io->scsiio.kern_data_ptr; 12065			} else { 12066				/* 12067				 * XXX KDM use busdma here! 12068				 */ 12069#if 0 12070				msg.dt.sg_list[0].addr = (void *) 12071					vtophys(io->scsiio.kern_data_ptr); 12072#endif 12073			} 12074 12075			msg.dt.sg_list[0].len = io->scsiio.kern_data_len; 12076			do_sg_copy = 0; 12077		} else { 12078			struct ctl_sg_entry *sgl; 12079 12080			do_sg_copy = 1; 12081			msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; 12082			sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; 12083			if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 12084				/* 12085				 * XXX KDM use bus_dmamap_sync() here. 12086				 */ 12087			} 12088		} 12089 12090		msg.dt.kern_data_len = io->scsiio.kern_data_len; 12091		msg.dt.kern_total_len = io->scsiio.kern_total_len; 12092		msg.dt.kern_data_resid = io->scsiio.kern_data_resid; 12093		msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; 12094		msg.dt.sg_sequence = 0; 12095 12096		/* 12097		 * Loop until we've sent all of the S/G entries. On the 12098		 * other end, we'll recompose these S/G entries into one 12099		 * contiguous list before processing it. 12100		 */ 12101		for (sg_entries_sent = 0; sg_entries_sent < 12102		     msg.dt.kern_sg_entries; msg.dt.sg_sequence++) { 12103			msg.dt.cur_sg_entries = ctl_min((sizeof(msg.dt.sg_list)/ 12104				sizeof(msg.dt.sg_list[0])), 12105				msg.dt.kern_sg_entries - sg_entries_sent); 12106 12107			if (do_sg_copy != 0) { 12108				struct ctl_sg_entry *sgl; 12109				int j; 12110 12111				sgl = (struct ctl_sg_entry *) 12112					io->scsiio.kern_data_ptr; 12113				/* 12114				 * If this is in cached memory, flush the cache 12115				 * before we send the DMA request to the other 12116				 * controller. We want to do this in either 12117				 * the read or the write case. The read 12118				 * case is straightforward. In the write 12119				 * case, we want to make sure nothing is 12120				 * in the local cache that could overwrite 12121				 * the DMAed data. 12122				 */ 12123 12124				for (i = sg_entries_sent, j = 0; 12125				     i < msg.dt.cur_sg_entries; i++, j++) { 12126					if ((io->io_hdr.flags & 12127					     CTL_FLAG_NO_DATASYNC) == 0) { 12128						/* 12129						 * XXX KDM use bus_dmamap_sync() 12130						 */ 12131					} 12132					if ((io->io_hdr.flags & 12133					     CTL_FLAG_BUS_ADDR) == 0) { 12134						/* 12135						 * XXX KDM use busdma. 12136						 */ 12137#if 0 12138						msg.dt.sg_list[j].addr =(void *) 12139						   vtophys(sgl[i].addr); 12140#endif 12141					} else { 12142						msg.dt.sg_list[j].addr = 12143							sgl[i].addr; 12144					} 12145					msg.dt.sg_list[j].len = sgl[i].len; 12146				} 12147			} 12148 12149			sg_entries_sent += msg.dt.cur_sg_entries; 12150			if (sg_entries_sent >= msg.dt.kern_sg_entries) 12151				msg.dt.sg_last = 1; 12152			else 12153				msg.dt.sg_last = 0; 12154 12155			/* 12156			 * XXX KDM drop and reacquire the lock here? 12157			 */ 12158			if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12159			    sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) { 12160				/* 12161				 * XXX do something here. 12162				 */ 12163			} 12164 12165			msg.dt.sent_sg_entries = sg_entries_sent; 12166		} 12167		io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12168		if (io->io_hdr.flags & CTL_FLAG_FAILOVER) 12169			ctl_failover_io(io, /*have_lock*/ 0); 12170 12171	} else { 12172 12173		/* 12174		 * Lookup the fe_datamove() function for this particular 12175		 * front end.
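		 * Each frontend port supplies its own handler via the
		 * fe_datamove member of its ctl_port structure.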
12176		 */ 12177		fe_datamove = 12178		    control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 12179 12180		fe_datamove(io); 12181	} 12182} 12183 12184static void 12185ctl_send_datamove_done(union ctl_io *io, int have_lock) 12186{ 12187	union ctl_ha_msg msg; 12188	int isc_status; 12189 12190	memset(&msg, 0, sizeof(msg)); 12191 12192	msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 12193	msg.hdr.original_sc = io; 12194	msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 12195	msg.hdr.nexus = io->io_hdr.nexus; 12196	msg.hdr.status = io->io_hdr.status; 12197	msg.scsi.tag_num = io->scsiio.tag_num; 12198	msg.scsi.tag_type = io->scsiio.tag_type; 12199	msg.scsi.scsi_status = io->scsiio.scsi_status; 12200	memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 12201	       sizeof(io->scsiio.sense_data)); 12202	msg.scsi.sense_len = io->scsiio.sense_len; 12203	msg.scsi.sense_residual = io->scsiio.sense_residual; 12204	msg.scsi.fetd_status = io->io_hdr.port_status; 12205	msg.scsi.residual = io->scsiio.residual; 12206	io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12207 12208	if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12209		ctl_failover_io(io, /*have_lock*/ have_lock); 12210		return; 12211	} 12212 12213	isc_status = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0); 12214	if (isc_status > CTL_HA_STATUS_SUCCESS) { 12215		/* XXX do something if this fails */ 12216	} 12217 12218} 12219 12220/* 12221 * The DMA to the remote side is done, now we need to tell the other side 12222 * we're done so it can continue with its data movement. 12223 */ 12224static void 12225ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) 12226{ 12227	union ctl_io *io; 12228 12229	io = rq->context; 12230 12231	if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12232		printf("%s: ISC DMA write failed with error %d", __func__, 12233		       rq->ret); 12234		ctl_set_internal_failure(&io->scsiio, 12235					 /*sks_valid*/ 1, 12236					 /*retry_count*/ rq->ret); 12237	} 12238 12239	ctl_dt_req_free(rq); 12240 12241	/* 12242	 * In this case, we had to malloc the memory locally. Free it. 12243	 */ 12244	if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) { 12245		int i; 12246		for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12247			free(io->io_hdr.local_sglist[i].addr, M_CTL); 12248	} 12249	/* 12250	 * The data is in local and remote memory, so now we need to send 12251	 * status (good or bad) back to the other side. 12252	 */ 12253	ctl_send_datamove_done(io, /*have_lock*/ 0); 12254} 12255 12256/* 12257 * We've moved the data from the host/controller into local memory. Now we 12258 * need to push it over to the remote controller's memory. 12259 */ 12260static int 12261ctl_datamove_remote_dm_write_cb(union ctl_io *io) 12262{ 12263	int retval; 12264 12265	retval = 0; 12266 12267	retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, 12268					  ctl_datamove_remote_write_cb); 12269 12270	return (retval); 12271} 12272 12273static void 12274ctl_datamove_remote_write(union ctl_io *io) 12275{ 12276	int retval; 12277	void (*fe_datamove)(union ctl_io *io); 12278 12279	/* 12280	 * - Get the data from the host/HBA into local memory. 12281	 * - DMA memory from the local controller to the remote controller. 12282	 * - Send status back to the remote controller.
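	 * The first step reuses the normal frontend datamove path; the
	 * last two happen in the ctl_datamove_remote_dm_write_cb() and
	 * ctl_datamove_remote_write_cb() callbacks.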
12283	 */ 12284 12285	retval = ctl_datamove_remote_sgl_setup(io); 12286	if (retval != 0) 12287		return; 12288 12289	/* Switch the pointer over so the FETD knows what to do */ 12290	io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12291 12292	/* 12293	 * Use a custom move done callback, since we need to send completion 12294	 * back to the other controller, not to the backend on this side. 12295	 */ 12296	io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 12297 12298	fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 12299 12300	fe_datamove(io); 12301 12302	return; 12303 12304} 12305 12306static int 12307ctl_datamove_remote_dm_read_cb(union ctl_io *io) 12308{ 12309#if 0 12310	char str[256]; 12311	char path_str[64]; 12312	struct sbuf sb; 12313#endif 12314 12315	/* 12316	 * In this case, we had to malloc the memory locally. Free it. 12317	 */ 12318	if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) { 12319		int i; 12320		for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12321			free(io->io_hdr.local_sglist[i].addr, M_CTL); 12322	} 12323 12324#if 0 12325	ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12326	sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12327	sbuf_cat(&sb, path_str); 12328	ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12329	sbuf_printf(&sb, "\n"); 12330	sbuf_cat(&sb, path_str); 12331	sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12332		    io->scsiio.tag_num, io->scsiio.tag_type); 12333	sbuf_cat(&sb, path_str); 12334	sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__, 12335		    io->io_hdr.flags, io->io_hdr.status); 12336	sbuf_finish(&sb); 12337	printf("%s", sbuf_data(&sb)); 12338#endif 12339 12340 12341	/* 12342	 * The read is done, now we need to send status (good or bad) back 12343	 * to the other side. 12344	 */ 12345	ctl_send_datamove_done(io, /*have_lock*/ 0); 12346 12347	return (0); 12348} 12349 12350static void 12351ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 12352{ 12353	union ctl_io *io; 12354	void (*fe_datamove)(union ctl_io *io); 12355 12356	io = rq->context; 12357 12358	if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12359		printf("%s: ISC DMA read failed with error %d", __func__, 12360		       rq->ret); 12361		ctl_set_internal_failure(&io->scsiio, 12362					 /*sks_valid*/ 1, 12363					 /*retry_count*/ rq->ret); 12364	} 12365 12366	ctl_dt_req_free(rq); 12367 12368	/* Switch the pointer over so the FETD knows what to do */ 12369	io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12370 12371	/* 12372	 * Use a custom move done callback, since we need to send completion 12373	 * back to the other controller, not to the backend on this side. 12374	 */ 12375	io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 12376 12377	/* XXX KDM add checks like the ones in ctl_datamove?
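	 * (e.g. failing the data move early when CTL_FLAG_ABORT is set)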
*/ 12378 12379 fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 12380 12381 fe_datamove(io); 12382} 12383 12384static int 12385ctl_datamove_remote_sgl_setup(union ctl_io *io) 12386{ 12387 struct ctl_sg_entry *local_sglist, *remote_sglist; 12388 struct ctl_sg_entry *local_dma_sglist, *remote_dma_sglist; 12389 struct ctl_softc *softc; 12390 int retval; 12391 int i; 12392 12393 retval = 0; 12394 softc = control_softc; 12395 12396 local_sglist = io->io_hdr.local_sglist; 12397 local_dma_sglist = io->io_hdr.local_dma_sglist; 12398 remote_sglist = io->io_hdr.remote_sglist; 12399 remote_dma_sglist = io->io_hdr.remote_dma_sglist; 12400 12401 if (io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) { 12402 for (i = 0; i < io->scsiio.kern_sg_entries; i++) { 12403 local_sglist[i].len = remote_sglist[i].len; 12404 12405 /* 12406 * XXX Detect the situation where the RS-level I/O 12407 * redirector on the other side has already read the 12408 * data off of the AOR RS on this side, and 12409 * transferred it to remote (mirror) memory on the 12410 * other side. Since we already have the data in 12411 * memory here, we just need to use it. 12412 * 12413 * XXX KDM this can probably be removed once we 12414 * get the cache device code in and take the 12415 * current AOR implementation out. 12416 */ 12417#ifdef NEEDTOPORT 12418 if ((remote_sglist[i].addr >= 12419 (void *)vtophys(softc->mirr->addr)) 12420 && (remote_sglist[i].addr < 12421 ((void *)vtophys(softc->mirr->addr) + 12422 CacheMirrorOffset))) { 12423 local_sglist[i].addr = remote_sglist[i].addr - 12424 CacheMirrorOffset; 12425 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 12426 CTL_FLAG_DATA_IN) 12427 io->io_hdr.flags |= CTL_FLAG_REDIR_DONE; 12428 } else { 12429 local_sglist[i].addr = remote_sglist[i].addr + 12430 CacheMirrorOffset; 12431 } 12432#endif 12433#if 0 12434 printf("%s: local %p, remote %p, len %d\n", 12435 __func__, local_sglist[i].addr, 12436 remote_sglist[i].addr, local_sglist[i].len); 12437#endif 12438 } 12439 } else { 12440 uint32_t len_to_go; 12441 12442 /* 12443 * In this case, we don't have automatically allocated 12444 * memory for this I/O on this controller. This typically 12445 * happens with internal CTL I/O -- e.g. inquiry, mode 12446 * sense, etc. Anything coming from RAIDCore will have 12447 * a mirror area available. 12448 */ 12449 len_to_go = io->scsiio.kern_data_len; 12450 12451 /* 12452 * Clear the no datasync flag, we have to use malloced 12453 * buffers. 12454 */ 12455 io->io_hdr.flags &= ~CTL_FLAG_NO_DATASYNC; 12456 12457 /* 12458 * The difficult thing here is that the size of the various 12459 * S/G segments may be different than the size from the 12460 * remote controller. That'll make it harder when DMAing 12461 * the data back to the other side. 
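		 * ctl_datamove_remote_xfer() copes with this by walking both
		 * lists in parallel and transferring the overlap of the
		 * current local and remote segments.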
12462		 */ 12463		for (i = 0; (i < sizeof(io->io_hdr.remote_sglist) / 12464		     sizeof(io->io_hdr.remote_sglist[0])) && 12465		     (len_to_go > 0); i++) { 12466			local_sglist[i].len = ctl_min(len_to_go, 131072); 12467			CTL_SIZE_8B(local_dma_sglist[i].len, 12468				    local_sglist[i].len); 12469			local_sglist[i].addr = 12470				malloc(local_dma_sglist[i].len, M_CTL,M_WAITOK); 12471 12472			local_dma_sglist[i].addr = local_sglist[i].addr; 12473 12474			if (local_sglist[i].addr == NULL) { 12475				int j; 12476 12477				printf("malloc failed for %zd bytes!", 12478				       local_dma_sglist[i].len); 12479				for (j = 0; j < i; j++) { 12480					free(local_sglist[j].addr, M_CTL); 12481				} 12482				ctl_set_internal_failure(&io->scsiio, 12483							 /*sks_valid*/ 1, 12484							 /*retry_count*/ 4857); 12485				retval = 1; 12486				goto bailout_error; 12487 12488			} 12489			/* XXX KDM do we need a sync here? */ 12490 12491			len_to_go -= local_sglist[i].len; 12492		} 12493		/* 12494		 * Reset the number of S/G entries accordingly. The 12495		 * original number of S/G entries is available in 12496		 * rem_sg_entries. 12497		 */ 12498		io->scsiio.kern_sg_entries = i; 12499 12500#if 0 12501		printf("%s: kern_sg_entries = %d\n", __func__, 12502		       io->scsiio.kern_sg_entries); 12503		for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12504			printf("%s: sg[%d] = %p, %d (DMA: %d)\n", __func__, i, 12505			       local_sglist[i].addr, local_sglist[i].len, 12506			       local_dma_sglist[i].len); 12507#endif 12508	} 12509 12510 12511	return (retval); 12512 12513bailout_error: 12514 12515	ctl_send_datamove_done(io, /*have_lock*/ 0); 12516 12517	return (retval); 12518} 12519 12520static int 12521ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 12522			 ctl_ha_dt_cb callback) 12523{ 12524	struct ctl_ha_dt_req *rq; 12525	struct ctl_sg_entry *remote_sglist, *local_sglist; 12526	struct ctl_sg_entry *remote_dma_sglist, *local_dma_sglist; 12527	uint32_t local_used, remote_used, total_used; 12528	int retval; 12529	int i, j; 12530 12531	retval = 0; 12532 12533	rq = ctl_dt_req_alloc(); 12534 12535	/* 12536	 * If we failed to allocate the request, and if the DMA didn't fail 12537	 * anyway, set busy status. This is just a resource allocation 12538	 * failure. 12539	 */ 12540	if ((rq == NULL) 12541	 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) 12542		ctl_set_busy(&io->scsiio); 12543 12544	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) { 12545 12546		if (rq != NULL) 12547			ctl_dt_req_free(rq); 12548 12549		/* 12550		 * The data move failed. We need to return status back 12551		 * to the other controller. No point in trying to DMA 12552		 * data to the remote controller. 12553		 */ 12554 12555		ctl_send_datamove_done(io, /*have_lock*/ 0); 12556 12557		retval = 1; 12558 12559		goto bailout; 12560	} 12561 12562	local_sglist = io->io_hdr.local_sglist; 12563	local_dma_sglist = io->io_hdr.local_dma_sglist; 12564	remote_sglist = io->io_hdr.remote_sglist; 12565	remote_dma_sglist = io->io_hdr.remote_dma_sglist; 12566	local_used = 0; 12567	remote_used = 0; 12568	total_used = 0; 12569 12570	if (io->io_hdr.flags & CTL_FLAG_REDIR_DONE) { 12571		rq->ret = CTL_HA_STATUS_SUCCESS; 12572		rq->context = io; 12573		callback(rq); 12574		goto bailout; 12575	} 12576 12577	/* 12578	 * Pull/push the data over the wire from/to the other controller. 12579	 * This takes into account the possibility that the local and 12580	 * remote sglists may not be identical in terms of the size of 12581	 * the elements and the number of elements. 12582	 * 12583	 * One fundamental assumption here is that the length allocated for 12584	 * both the local and remote sglists is identical. Otherwise, we've 12585	 * essentially got a coding error of some sort. 12586	 */ 12587	for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { 12588		int isc_ret; 12589		uint32_t cur_len, dma_length; 12590		uint8_t *tmp_ptr; 12591 12592		rq->id = CTL_HA_DATA_CTL; 12593		rq->command = command; 12594		rq->context = io; 12595 12596		/* 12597		 * Both pointers should be aligned. But it is possible 12598		 * that the allocation length is not. They should both 12599		 * also have enough slack left over at the end, though, 12600		 * to round up to the next 8 byte boundary. 12601		 */ 12602		cur_len = ctl_min(local_sglist[i].len - local_used, 12603				  remote_sglist[j].len - remote_used); 12604 12605		/* 12606		 * In this case, we have a size issue and need to decrease 12607		 * the size, except in the case where we actually have less 12608		 * than 8 bytes left. In that case, we need to increase 12609		 * the DMA length to get the last bit. 12610		 */ 12611		if ((cur_len & 0x7) != 0) { 12612			if (cur_len > 0x7) { 12613				cur_len = cur_len - (cur_len & 0x7); 12614				dma_length = cur_len; 12615			} else { 12616				CTL_SIZE_8B(dma_length, cur_len); 12617			} 12618 12619		} else 12620			dma_length = cur_len; 12621 12622		/* 12623		 * If we had to allocate memory for this I/O, instead of using 12624		 * the non-cached mirror memory, we'll need to flush the cache 12625		 * before trying to DMA to the other controller. 12626		 * 12627		 * We could end up doing this multiple times for the same 12628		 * segment if we have a larger local segment than remote 12629		 * segment. That shouldn't be an issue. 12630		 */ 12631		if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 12632			/* 12633			 * XXX KDM use bus_dmamap_sync() here. 12634			 */ 12635		} 12636 12637		rq->size = dma_length; 12638 12639		tmp_ptr = (uint8_t *)local_sglist[i].addr; 12640		tmp_ptr += local_used; 12641 12642		/* Use physical addresses when talking to ISC hardware */ 12643		if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 12644			/* XXX KDM use busdma */ 12645#if 0 12646			rq->local = vtophys(tmp_ptr); 12647#endif 12648		} else 12649			rq->local = tmp_ptr; 12650 12651		tmp_ptr = (uint8_t *)remote_sglist[j].addr; 12652		tmp_ptr += remote_used; 12653		rq->remote = tmp_ptr; 12654 12655		rq->callback = NULL; 12656 12657		local_used += cur_len; 12658		if (local_used >= local_sglist[i].len) { 12659			i++; 12660			local_used = 0; 12661		} 12662 12663		remote_used += cur_len; 12664		if (remote_used >= remote_sglist[j].len) { 12665			j++; 12666			remote_used = 0; 12667		} 12668		total_used += cur_len; 12669 12670		if (total_used >= io->scsiio.kern_data_len) 12671			rq->callback = callback; 12672 12673		if ((rq->size & 0x7) != 0) { 12674			printf("%s: warning: size %d is not on 8b boundary\n", 12675			       __func__, rq->size); 12676		} 12677		if (((uintptr_t)rq->local & 0x7) != 0) { 12678			printf("%s: warning: local %p not on 8b boundary\n", 12679			       __func__, rq->local); 12680		} 12681		if (((uintptr_t)rq->remote & 0x7) != 0) { 12682			printf("%s: warning: remote %p not on 8b boundary\n", 12683			       __func__, rq->remote); 12684		} 12685#if 0 12686		printf("%s: %s: local %#x remote %#x size %d\n", __func__, 12687		       (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ", 12688		       rq->local, rq->remote, rq->size); 12689#endif 12690 12691		isc_ret = ctl_dt_single(rq); 12692		if (isc_ret == CTL_HA_STATUS_WAIT) 12693			continue; 12694 12695		if (isc_ret == CTL_HA_STATUS_DISCONNECT) { 12696			rq->ret = CTL_HA_STATUS_SUCCESS; 12697		} else { 12698			rq->ret = isc_ret; 12699		} 12700		callback(rq); 12701		goto bailout; 12702	} 12703 12704bailout: 12705	return (retval); 12706 12707} 12708 12709static void 12710ctl_datamove_remote_read(union ctl_io *io) 12711{ 12712	int retval; 12713	int i; 12714 12715	/* 12716	 * This will send an error to the other controller in the case of a 12717	 * failure. 12718	 */ 12719	retval = ctl_datamove_remote_sgl_setup(io); 12720	if (retval != 0) 12721		return; 12722 12723	retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 12724					  ctl_datamove_remote_read_cb); 12725	if ((retval != 0) 12726	 && ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0)) { 12727		/* 12728		 * Make sure we free memory if there was an error. The 12729		 * ctl_datamove_remote_xfer() function will send the 12730		 * datamove done message, or call the callback with an 12731		 * error if there is a problem. 12732		 */ 12733		for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12734			free(io->io_hdr.local_sglist[i].addr, M_CTL); 12735	} 12736 12737	return; 12738} 12739 12740/* 12741 * Process a datamove request from the other controller. This is used for 12742 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 12743 * first. Once that is complete, the data gets DMAed into the remote 12744 * controller's memory. For reads, we DMA from the remote controller's 12745 * memory into our memory first, and then move it out to the FETD. 12746 */ 12747static void 12748ctl_datamove_remote(union ctl_io *io) 12749{ 12750	struct ctl_softc *softc; 12751 12752	softc = control_softc; 12753 12754	mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 12755 12756	/* 12757	 * Note that we look for an aborted I/O here, but don't do some of 12758	 * the other checks that ctl_datamove() normally does. We don't 12759	 * need to run the task queue, because this I/O is on the ISC 12760	 * queue, which is executed by the work thread after the task queue. 12761	 * We don't need to run the datamove delay code, since that should 12762	 * have been done if need be on the other controller. 12763
12763 */ 12764 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12765 12766 printf("%s: tag 0x%04x on (%d:%d:%d:%d) aborted\n", __func__, 12767 io->scsiio.tag_num, io->io_hdr.nexus.initid.id, 12768 io->io_hdr.nexus.targ_port, 12769 io->io_hdr.nexus.targ_target.id, 12770 io->io_hdr.nexus.targ_lun); 12771 io->io_hdr.status = CTL_CMD_ABORTED; 12772 io->io_hdr.port_status = 31338; 12773 12774 ctl_send_datamove_done(io, /*have_lock*/ 0); 12775 12776 return; 12777 } 12778 12779 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) { 12780 ctl_datamove_remote_write(io); 12781 } else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN){ 12782 ctl_datamove_remote_read(io); 12783 } else { 12784 union ctl_ha_msg msg; 12785 struct scsi_sense_data *sense; 12786 uint8_t sks[3]; 12787 int retry_count; 12788 12789 memset(&msg, 0, sizeof(msg)); 12790 12791 msg.hdr.msg_type = CTL_MSG_BAD_JUJU; 12792 msg.hdr.status = CTL_SCSI_ERROR; 12793 msg.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 12794 12795 retry_count = 4243; 12796 12797 sense = &msg.scsi.sense_data; 12798 sks[0] = SSD_SCS_VALID; 12799 sks[1] = (retry_count >> 8) & 0xff; 12800 sks[2] = retry_count & 0xff; 12801 12802 /* "Internal target failure" */ 12803 scsi_set_sense_data(sense, 12804 /*sense_format*/ SSD_TYPE_NONE, 12805 /*current_error*/ 1, 12806 /*sense_key*/ SSD_KEY_HARDWARE_ERROR, 12807 /*asc*/ 0x44, 12808 /*ascq*/ 0x00, 12809 /*type*/ SSD_ELEM_SKS, 12810 /*size*/ sizeof(sks), 12811 /*data*/ sks, 12812 SSD_ELEM_NONE); 12813 12814 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12815 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12816 ctl_failover_io(io, /*have_lock*/ 1); 12817 return; 12818 } 12819 12820 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0) > 12821 CTL_HA_STATUS_SUCCESS) { 12822 /* XXX KDM what to do if this fails? 
*/ 12823 } 12824 return; 12825 } 12826 12827} 12828 12829static int 12830ctl_process_done(union ctl_io *io) 12831{ 12832 struct ctl_lun *lun; 12833 struct ctl_softc *ctl_softc; 12834 void (*fe_done)(union ctl_io *io); 12835 uint32_t targ_port = ctl_port_idx(io->io_hdr.nexus.targ_port); 12836 12837 CTL_DEBUG_PRINT(("ctl_process_done\n")); 12838 12839 fe_done = 12840 control_softc->ctl_ports[targ_port]->fe_done; 12841 12842#ifdef CTL_TIME_IO 12843 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12844 char str[256]; 12845 char path_str[64]; 12846 struct sbuf sb; 12847 12848 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12849 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12850 12851 sbuf_cat(&sb, path_str); 12852 switch (io->io_hdr.io_type) { 12853 case CTL_IO_SCSI: 12854 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12855 sbuf_printf(&sb, "\n"); 12856 sbuf_cat(&sb, path_str); 12857 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12858 io->scsiio.tag_num, io->scsiio.tag_type); 12859 break; 12860 case CTL_IO_TASK: 12861 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12862 "Tag Type: %d\n", io->taskio.task_action, 12863 io->taskio.tag_num, io->taskio.tag_type); 12864 break; 12865 default: 12866 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12867 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12868 break; 12869 } 12870 sbuf_cat(&sb, path_str); 12871 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", 12872 (intmax_t)time_uptime - io->io_hdr.start_time); 12873 sbuf_finish(&sb); 12874 printf("%s", sbuf_data(&sb)); 12875 } 12876#endif /* CTL_TIME_IO */ 12877 12878 switch (io->io_hdr.io_type) { 12879 case CTL_IO_SCSI: 12880 break; 12881 case CTL_IO_TASK: 12882 if (bootverbose || verbose > 0) 12883 ctl_io_error_print(io, NULL); 12884 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 12885 ctl_free_io(io); 12886 else 12887 fe_done(io); 12888 return (CTL_RETVAL_COMPLETE); 12889 break; 12890 default: 12891 printf("ctl_process_done: invalid io type %d\n", 12892 io->io_hdr.io_type); 12893 panic("ctl_process_done: invalid io type %d\n", 12894 io->io_hdr.io_type); 12895 break; /* NOTREACHED */ 12896 } 12897 12898 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12899 if (lun == NULL) { 12900 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", 12901 io->io_hdr.nexus.targ_mapped_lun)); 12902 fe_done(io); 12903 goto bailout; 12904 } 12905 ctl_softc = lun->ctl_softc; 12906 12907 mtx_lock(&lun->lun_lock); 12908 12909 /* 12910 * Check to see if we have any errors to inject here. We only 12911 * inject errors for commands that don't already have errors set. 12912 */ 12913 if ((STAILQ_FIRST(&lun->error_list) != NULL) 12914 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) 12915 ctl_inject_error(lun, io); 12916 12917 /* 12918 * XXX KDM how do we treat commands that aren't completed 12919 * successfully? 12920 * 12921 * XXX KDM should we also track I/O latency? 
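	 * (Right now only successfully completed SCSI commands are counted;
	 * note the CTL_SUCCESS check below.)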
12922 */ 12923 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS && 12924 io->io_hdr.io_type == CTL_IO_SCSI) { 12925#ifdef CTL_TIME_IO 12926 struct bintime cur_bt; 12927#endif 12928 int type; 12929 12930 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 12931 CTL_FLAG_DATA_IN) 12932 type = CTL_STATS_READ; 12933 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 12934 CTL_FLAG_DATA_OUT) 12935 type = CTL_STATS_WRITE; 12936 else 12937 type = CTL_STATS_NO_IO; 12938 12939 lun->stats.ports[targ_port].bytes[type] += 12940 io->scsiio.kern_total_len; 12941 lun->stats.ports[targ_port].operations[type]++; 12942#ifdef CTL_TIME_IO 12943 bintime_add(&lun->stats.ports[targ_port].dma_time[type], 12944 &io->io_hdr.dma_bt); 12945 lun->stats.ports[targ_port].num_dmas[type] += 12946 io->io_hdr.num_dmas; 12947 getbintime(&cur_bt); 12948 bintime_sub(&cur_bt, &io->io_hdr.start_bt); 12949 bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt); 12950#endif 12951 } 12952 12953 /* 12954 * Remove this from the OOA queue. 12955 */ 12956 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 12957 12958 /* 12959 * Run through the blocked queue on this LUN and see if anything 12960 * has become unblocked, now that this transaction is done. 12961 */ 12962 ctl_check_blocked(lun); 12963 12964 /* 12965 * If the LUN has been invalidated, free it if there is nothing 12966 * left on its OOA queue. 12967 */ 12968 if ((lun->flags & CTL_LUN_INVALID) 12969 && TAILQ_EMPTY(&lun->ooa_queue)) { 12970 mtx_unlock(&lun->lun_lock); 12971 mtx_lock(&ctl_softc->ctl_lock); 12972 ctl_free_lun(lun); 12973 mtx_unlock(&ctl_softc->ctl_lock); 12974 } else 12975 mtx_unlock(&lun->lun_lock); 12976 12977 /* 12978 * If this command has been aborted, make sure we set the status 12979 * properly. The FETD is responsible for freeing the I/O and doing 12980 * whatever it needs to do to clean up its state. 12981 */ 12982 if (io->io_hdr.flags & CTL_FLAG_ABORT) 12983 io->io_hdr.status = CTL_CMD_ABORTED; 12984 12985 /* 12986 * We print out status for every task management command. For SCSI 12987 * commands, we filter out any unit attention errors; they happen 12988 * on every boot, and would clutter up the log. Note: task 12989 * management commands aren't printed here, they are printed above, 12990 * since they should never even make it down here. 12991 */ 12992 switch (io->io_hdr.io_type) { 12993 case CTL_IO_SCSI: { 12994 int error_code, sense_key, asc, ascq; 12995 12996 sense_key = 0; 12997 12998 if (((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR) 12999 && (io->scsiio.scsi_status == SCSI_STATUS_CHECK_COND)) { 13000 /* 13001 * Since this is just for printing, no need to 13002 * show errors here. 
13003 */ 13004 scsi_extract_sense_len(&io->scsiio.sense_data, 13005 io->scsiio.sense_len, 13006 &error_code, 13007 &sense_key, 13008 &asc, 13009 &ascq, 13010 /*show_errors*/ 0); 13011 } 13012 13013 if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) 13014 && (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SCSI_ERROR) 13015 || (io->scsiio.scsi_status != SCSI_STATUS_CHECK_COND) 13016 || (sense_key != SSD_KEY_UNIT_ATTENTION))) { 13017 13018 if ((time_uptime - ctl_softc->last_print_jiffies) <= 0){ 13019 ctl_softc->skipped_prints++; 13020 } else { 13021 uint32_t skipped_prints; 13022 13023 skipped_prints = ctl_softc->skipped_prints; 13024 13025 ctl_softc->skipped_prints = 0; 13026 ctl_softc->last_print_jiffies = time_uptime; 13027 13028 if (skipped_prints > 0) { 13029#ifdef NEEDTOPORT 13030 csevent_log(CSC_CTL | CSC_SHELF_SW | 13031 CTL_ERROR_REPORT, 13032 csevent_LogType_Trace, 13033 csevent_Severity_Information, 13034 csevent_AlertLevel_Green, 13035 csevent_FRU_Firmware, 13036 csevent_FRU_Unknown, 13037 "High CTL error volume, %d prints " 13038 "skipped", skipped_prints); 13039#endif 13040 } 13041 if (bootverbose || verbose > 0) 13042 ctl_io_error_print(io, NULL); 13043 } 13044 } 13045 break; 13046 } 13047 case CTL_IO_TASK: 13048 if (bootverbose || verbose > 0) 13049 ctl_io_error_print(io, NULL); 13050 break; 13051 default: 13052 break; 13053 } 13054 13055 /* 13056 * Tell the FETD or the other shelf controller we're done with this 13057 * command. Note that only SCSI commands get to this point. Task 13058 * management commands are completed above. 13059 * 13060 * We only send status to the other controller if we're in XFER 13061 * mode. In SER_ONLY mode, the I/O is done on the controller that 13062 * received the I/O (from CTL's perspective), and so the status is 13063 * generated there. 13064 * 13065 * XXX KDM if we hold the lock here, we could cause a deadlock 13066 * if the frontend comes back in in this context to queue 13067 * something. 13068 */ 13069 if ((ctl_softc->ha_mode == CTL_HA_MODE_XFER) 13070 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 13071 union ctl_ha_msg msg; 13072 13073 memset(&msg, 0, sizeof(msg)); 13074 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 13075 msg.hdr.original_sc = io->io_hdr.original_sc; 13076 msg.hdr.nexus = io->io_hdr.nexus; 13077 msg.hdr.status = io->io_hdr.status; 13078 msg.scsi.scsi_status = io->scsiio.scsi_status; 13079 msg.scsi.tag_num = io->scsiio.tag_num; 13080 msg.scsi.tag_type = io->scsiio.tag_type; 13081 msg.scsi.sense_len = io->scsiio.sense_len; 13082 msg.scsi.sense_residual = io->scsiio.sense_residual; 13083 msg.scsi.residual = io->scsiio.residual; 13084 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 13085 sizeof(io->scsiio.sense_data)); 13086 /* 13087 * We copy this whether or not this is an I/O-related 13088 * command. Otherwise, we'd have to go and check to see 13089 * whether it's a read/write command, and it really isn't 13090 * worth it. 13091 */ 13092 memcpy(&msg.scsi.lbalen, 13093 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 13094 sizeof(msg.scsi.lbalen)); 13095 13096 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 13097 sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) { 13098 /* XXX do something here */ 13099 } 13100 13101 ctl_free_io(io); 13102 } else 13103 fe_done(io); 13104 13105bailout: 13106 13107 return (CTL_RETVAL_COMPLETE); 13108} 13109 13110/* 13111 * Front end should call this if it doesn't do autosense. When the request 13112 * sense comes back in from the initiator, we'll dequeue this and send it. 
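 * The sense data is stashed in the LUN's pending_sense array, indexed by
 * initiator, and the matching have_ca bit is set.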
13113 */ 13114int 13115ctl_queue_sense(union ctl_io *io) 13116{ 13117 struct ctl_lun *lun; 13118 struct ctl_softc *ctl_softc; 13119 uint32_t initidx, targ_lun; 13120 13121 ctl_softc = control_softc; 13122 13123 CTL_DEBUG_PRINT(("ctl_queue_sense\n")); 13124 13125 /* 13126 * LUN lookup will likely move to the ctl_work_thread() once we 13127 * have our new queueing infrastructure (that doesn't put things on 13128 * a per-LUN queue initially). That is so that we can handle 13129 * things like an INQUIRY to a LUN that we don't have enabled. We 13130 * can't deal with that right now. 13131 */ 13132 mtx_lock(&ctl_softc->ctl_lock); 13133 13134 /* 13135 * If we don't have a LUN for this, just toss the sense 13136 * information. 13137 */ 13138 targ_lun = io->io_hdr.nexus.targ_lun; 13139 if (io->io_hdr.nexus.lun_map_fn != NULL) 13140 targ_lun = io->io_hdr.nexus.lun_map_fn(io->io_hdr.nexus.lun_map_arg, targ_lun); 13141 if ((targ_lun < CTL_MAX_LUNS) 13142 && (ctl_softc->ctl_luns[targ_lun] != NULL)) 13143 lun = ctl_softc->ctl_luns[targ_lun]; 13144 else 13145 goto bailout; 13146 13147 initidx = ctl_get_initindex(&io->io_hdr.nexus); 13148 13149 mtx_lock(&lun->lun_lock); 13150 /* 13151 * Already have CA set for this LUN...toss the sense information. 13152 */ 13153 if (ctl_is_set(lun->have_ca, initidx)) { 13154 mtx_unlock(&lun->lun_lock); 13155 goto bailout; 13156 } 13157 13158 memcpy(&lun->pending_sense[initidx].sense, &io->scsiio.sense_data, 13159 ctl_min(sizeof(lun->pending_sense[initidx].sense), 13160 sizeof(io->scsiio.sense_data))); 13161 ctl_set_mask(lun->have_ca, initidx); 13162 mtx_unlock(&lun->lun_lock); 13163 13164bailout: 13165 mtx_unlock(&ctl_softc->ctl_lock); 13166 13167 ctl_free_io(io); 13168 13169 return (CTL_RETVAL_COMPLETE); 13170} 13171 13172/* 13173 * Primary command inlet from frontend ports. All SCSI and task I/O 13174 * requests must go through this function. 13175 */ 13176int 13177ctl_queue(union ctl_io *io) 13178{ 13179 struct ctl_softc *ctl_softc; 13180 13181 CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0])); 13182 13183 ctl_softc = control_softc; 13184 13185#ifdef CTL_TIME_IO 13186 io->io_hdr.start_time = time_uptime; 13187 getbintime(&io->io_hdr.start_bt); 13188#endif /* CTL_TIME_IO */ 13189 13190 /* Map FE-specific LUN ID into global one. */ 13191 if (io->io_hdr.nexus.lun_map_fn != NULL) 13192 io->io_hdr.nexus.targ_mapped_lun = io->io_hdr.nexus.lun_map_fn( 13193 io->io_hdr.nexus.lun_map_arg, io->io_hdr.nexus.targ_lun); 13194 else 13195 io->io_hdr.nexus.targ_mapped_lun = io->io_hdr.nexus.targ_lun; 13196 13197 switch (io->io_hdr.io_type) { 13198 case CTL_IO_SCSI: 13199 case CTL_IO_TASK: 13200 ctl_enqueue_incoming(io); 13201 break; 13202 default: 13203 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type); 13204 return (EINVAL); 13205 } 13206 13207 return (CTL_RETVAL_COMPLETE); 13208} 13209 13210#ifdef CTL_IO_DELAY 13211static void 13212ctl_done_timer_wakeup(void *arg) 13213{ 13214 union ctl_io *io; 13215 13216 io = (union ctl_io *)arg; 13217 ctl_done(io); 13218} 13219#endif /* CTL_IO_DELAY */ 13220 13221void 13222ctl_done(union ctl_io *io) 13223{ 13224 struct ctl_softc *ctl_softc; 13225 13226 ctl_softc = control_softc; 13227 13228 /* 13229 * Enable this to catch duplicate completion issues. 
13230 */ 13231#if 0 13232 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) { 13233 printf("%s: type %d msg %d cdb %x iptl: " 13234 "%d:%d:%d:%d tag 0x%04x " 13235 "flag %#x status %x\n", 13236 __func__, 13237 io->io_hdr.io_type, 13238 io->io_hdr.msg_type, 13239 io->scsiio.cdb[0], 13240 io->io_hdr.nexus.initid.id, 13241 io->io_hdr.nexus.targ_port, 13242 io->io_hdr.nexus.targ_target.id, 13243 io->io_hdr.nexus.targ_lun, 13244 (io->io_hdr.io_type == 13245 CTL_IO_TASK) ? 13246 io->taskio.tag_num : 13247 io->scsiio.tag_num, 13248 io->io_hdr.flags, 13249 io->io_hdr.status); 13250 } else 13251 io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE; 13252#endif 13253 13254 /* 13255 * This is an internal copy of an I/O, and should not go through 13256 * the normal done processing logic. 13257 */ 13258 if (io->io_hdr.flags & CTL_FLAG_INT_COPY) 13259 return; 13260 13261 /* 13262 * We need to send a msg to the serializing shelf to finish the IO 13263 * as well. We don't send a finish message to the other shelf if 13264 * this is a task management command. Task management commands 13265 * aren't serialized in the OOA queue, but rather just executed on 13266 * both shelf controllers for commands that originated on that 13267 * controller. 13268 */ 13269 if ((io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC) 13270 && (io->io_hdr.io_type != CTL_IO_TASK)) { 13271 union ctl_ha_msg msg_io; 13272 13273 msg_io.hdr.msg_type = CTL_MSG_FINISH_IO; 13274 msg_io.hdr.serializing_sc = io->io_hdr.serializing_sc; 13275 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_io, 13276 sizeof(msg_io), 0 ) != CTL_HA_STATUS_SUCCESS) { 13277 } 13278 /* continue on to finish IO */ 13279 } 13280#ifdef CTL_IO_DELAY 13281 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 13282 struct ctl_lun *lun; 13283 13284 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13285 13286 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 13287 } else { 13288 struct ctl_lun *lun; 13289 13290 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13291 13292 if ((lun != NULL) 13293 && (lun->delay_info.done_delay > 0)) { 13294 struct callout *callout; 13295 13296 callout = (struct callout *)&io->io_hdr.timer_bytes; 13297 callout_init(callout, /*mpsafe*/ 1); 13298 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 13299 callout_reset(callout, 13300 lun->delay_info.done_delay * hz, 13301 ctl_done_timer_wakeup, io); 13302 if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT) 13303 lun->delay_info.done_delay = 0; 13304 return; 13305 } 13306 } 13307#endif /* CTL_IO_DELAY */ 13308 13309 ctl_enqueue_done(io); 13310} 13311 13312int 13313ctl_isc(struct ctl_scsiio *ctsio) 13314{ 13315 struct ctl_lun *lun; 13316 int retval; 13317 13318 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13319 13320 CTL_DEBUG_PRINT(("ctl_isc: command: %02x\n", ctsio->cdb[0])); 13321 13322 CTL_DEBUG_PRINT(("ctl_isc: calling data_submit()\n")); 13323 13324 retval = lun->backend->data_submit((union ctl_io *)ctsio); 13325 13326 return (retval); 13327} 13328 13329 13330static void 13331ctl_work_thread(void *arg) 13332{ 13333 struct ctl_thread *thr = (struct ctl_thread *)arg; 13334 struct ctl_softc *softc = thr->ctl_softc; 13335 union ctl_io *io; 13336 int retval; 13337 13338 CTL_DEBUG_PRINT(("ctl_work_thread starting\n")); 13339 13340 for (;;) { 13341 retval = 0; 13342 13343 /* 13344 * We handle the queues in this order: 13345 * - ISC 13346 * - done queue (to free up resources, unblock other commands) 13347 * - RtR queue 13348 * - incoming queue 13349 * 13350 * If those queues are empty, we break 
out of the loop and 13351 * go to sleep. 13352 */ 13353 mtx_lock(&thr->queue_lock); 13354 io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue); 13355 if (io != NULL) { 13356 STAILQ_REMOVE_HEAD(&thr->isc_queue, links); 13357 mtx_unlock(&thr->queue_lock); 13358 ctl_handle_isc(io); 13359 continue; 13360 } 13361 io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue); 13362 if (io != NULL) { 13363 STAILQ_REMOVE_HEAD(&thr->done_queue, links); 13364 /* clear any blocked commands, call fe_done */ 13365 mtx_unlock(&thr->queue_lock); 13366 retval = ctl_process_done(io); 13367 continue; 13368 } 13369 io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue); 13370 if (io != NULL) { 13371 STAILQ_REMOVE_HEAD(&thr->incoming_queue, links); 13372 mtx_unlock(&thr->queue_lock); 13373 if (io->io_hdr.io_type == CTL_IO_TASK) 13374 ctl_run_task(io); 13375 else 13376 ctl_scsiio_precheck(softc, &io->scsiio); 13377 continue; 13378 } 13379 if (!ctl_pause_rtr) { 13380 io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue); 13381 if (io != NULL) { 13382 STAILQ_REMOVE_HEAD(&thr->rtr_queue, links); 13383 mtx_unlock(&thr->queue_lock); 13384 retval = ctl_scsiio(&io->scsiio); 13385 if (retval != CTL_RETVAL_COMPLETE) 13386 CTL_DEBUG_PRINT(("ctl_scsiio failed\n")); 13387 continue; 13388 } 13389 } 13390 13391 /* Sleep until we have something to do. */ 13392 mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0); 13393 } 13394} 13395 13396static void 13397ctl_lun_thread(void *arg) 13398{ 13399 struct ctl_softc *softc = (struct ctl_softc *)arg; 13400 struct ctl_be_lun *be_lun; 13401 int retval; 13402 13403 CTL_DEBUG_PRINT(("ctl_lun_thread starting\n")); 13404 13405 for (;;) { 13406 retval = 0; 13407 mtx_lock(&softc->ctl_lock); 13408 be_lun = STAILQ_FIRST(&softc->pending_lun_queue); 13409 if (be_lun != NULL) { 13410 STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links); 13411 mtx_unlock(&softc->ctl_lock); 13412 ctl_create_lun(be_lun); 13413 continue; 13414 } 13415 13416 /* Sleep until we have something to do. 
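		 * (presumably woken by a wakeup on the pending LUN queue
		 * address when a backend queues a new LUN)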
 */ 13417		mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock, 13418		    PDROP | PRIBIO, "-", 0); 13419	} 13420} 13421 13422static void 13423ctl_enqueue_incoming(union ctl_io *io) 13424{ 13425	struct ctl_softc *softc = control_softc; 13426	struct ctl_thread *thr; 13427	u_int idx; 13428 13429	idx = (io->io_hdr.nexus.targ_port * 127 + 13430	       io->io_hdr.nexus.initid.id) % worker_threads; 13431	thr = &softc->threads[idx]; 13432	mtx_lock(&thr->queue_lock); 13433	STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links); 13434	mtx_unlock(&thr->queue_lock); 13435	wakeup(thr); 13436} 13437 13438static void 13439ctl_enqueue_rtr(union ctl_io *io) 13440{ 13441	struct ctl_softc *softc = control_softc; 13442	struct ctl_thread *thr; 13443 13444	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 13445	mtx_lock(&thr->queue_lock); 13446	STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links); 13447	mtx_unlock(&thr->queue_lock); 13448	wakeup(thr); 13449} 13450 13451static void 13452ctl_enqueue_done(union ctl_io *io) 13453{ 13454	struct ctl_softc *softc = control_softc; 13455	struct ctl_thread *thr; 13456 13457	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 13458	mtx_lock(&thr->queue_lock); 13459	STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links); 13460	mtx_unlock(&thr->queue_lock); 13461	wakeup(thr); 13462} 13463 13464static void 13465ctl_enqueue_isc(union ctl_io *io) 13466{ 13467	struct ctl_softc *softc = control_softc; 13468	struct ctl_thread *thr; 13469 13470	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 13471	mtx_lock(&thr->queue_lock); 13472	STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links); 13473	mtx_unlock(&thr->queue_lock); 13474	wakeup(thr); 13475} 13476 13477/* Initialization and failover */ 13478 13479void 13480ctl_init_isc_msg(void) 13481{ 13482	printf("CTL: Still calling this thing\n"); 13483} 13484 13485/* 13486 * Init component 13487 * 	Initializes component into configuration defined by bootMode 13488 *	(see hasc-sv.c) 13489 * 	returns hasc_Status: 13490 * 		OK 13491 *		ERROR - fatal error 13492 */ 13493static ctl_ha_comp_status 13494ctl_isc_init(struct ctl_ha_component *c) 13495{ 13496	ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK; 13497 13498	c->status = ret; 13499	return ret; 13500} 13501 13502/* Start component 13503 * 	Starts component in state requested. If component starts successfully, 13504 *	it must set its own state to the requested state. 13505 *	When requested state is HASC_STATE_HA, the component may refine it 13506 * 	by adding _SLAVE or _MASTER flags. 13507 *	Currently allowed state transitions are: 13508 *	UNKNOWN->HA		- initial startup 13509 *	UNKNOWN->SINGLE - initial startup when no partner detected 13510 *	HA->SINGLE		- failover 13511 * returns ctl_ha_comp_status: 13512 * 		OK	- component successfully started in requested state 13513 *		FAILED  - could not start the requested state, failover may 13514 * 			  be possible 13515 * 		ERROR	- fatal error detected, no future startup possible 13516 */ 13517static ctl_ha_comp_status 13518ctl_isc_start(struct ctl_ha_component *c, ctl_ha_state state) 13519{ 13520	ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK; 13521 13522	printf("%s: go\n", __func__); 13523 13524	// UNKNOWN->HA or UNKNOWN->SINGLE (bootstrap) 13525	if (c->state == CTL_HA_STATE_UNKNOWN ) { 13526		ctl_is_single = 0; 13527		if (ctl_ha_msg_create(CTL_HA_CHAN_CTL, ctl_isc_event_handler) 13528		    != CTL_HA_STATUS_SUCCESS) { 13529			printf("ctl_isc_start: ctl_ha_msg_create failed.\n"); 13530			ret = CTL_HA_COMP_STATUS_ERROR; 13531		} 13532	} else if (CTL_HA_STATE_IS_HA(c->state) 13533		&& CTL_HA_STATE_IS_SINGLE(state)){ 13534		// HA->SINGLE transition 13535		ctl_failover(); 13536		ctl_is_single = 1; 13537	} else { 13538		printf("ctl_isc_start: Invalid state transition %X->%X\n", 13539		       c->state, state); 13540		ret = CTL_HA_COMP_STATUS_ERROR; 13541	} 13542	if (CTL_HA_STATE_IS_SINGLE(state)) 13543		ctl_is_single = 1; 13544 13545	c->state = state; 13546	c->status = ret; 13547	return ret; 13548} 13549 13550/* 13551 * Quiesce component 13552 * The component must clear any error conditions (set status to OK) and 13553 * prepare itself for another Start call 13554 * returns ctl_ha_comp_status: 13555 * 	OK 13556 *	ERROR 13557 */ 13558static ctl_ha_comp_status 13559ctl_isc_quiesce(struct ctl_ha_component *c) 13560{ 13561	int ret = CTL_HA_COMP_STATUS_OK; 13562 13563	ctl_pause_rtr = 1; 13564	c->status = ret; 13565	return ret; 13566} 13567 13568struct ctl_ha_component ctl_ha_component_ctlisc = 13569{ 13570	.name = "CTL ISC", 13571	.state = CTL_HA_STATE_UNKNOWN, 13572	.init = ctl_isc_init, 13573	.start = ctl_isc_start, 13574	.quiesce = ctl_isc_quiesce 13575}; 13576 13577/* 13578 * vim: ts=8 13579 */ 13580