ctl.c revision 268363
/*-
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl.c#8 $
 */
/*
 * CAM Target Layer, a SCSI device emulation subsystem.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#define _CTL_C

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/cam/ctl/ctl.c 268363 2014-07-07 11:05:04Z mav $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/endian.h>
#include <sys/sysctl.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

struct ctl_softc *control_softc = NULL;

/*
 * Size and alignment macros needed for Copan-specific HA hardware.  These
 * can go away when the HA code is re-written, and uses busdma for any
 * hardware.
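 *
 * A worked example, derived directly from the macro definitions below:
 * with source == 0x1003, CTL_ALIGN_8B() stores 0x1008 (0x1003 & 0x7 is 3,
 * so 0x8 - 3 = 5 is added), while an already-aligned 0x1000 is stored
 * unchanged.  CTL_SIZE_8B() likewise rounds a size of 13 up to 16.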
 */
#define	CTL_ALIGN_8B(target, source, type)				\
	if (((uint32_t)source & 0x7) != 0)				\
		target = (type)(source + (0x8 - ((uint32_t)source & 0x7)));\
	else								\
		target = (type)source;

#define	CTL_SIZE_8B(target, size)					\
	if ((size & 0x7) != 0)						\
		target = size + (0x8 - (size & 0x7));			\
	else								\
		target = size;

#define CTL_ALIGN_8B_MARGIN	16

/*
 * Template mode pages.
 */

/*
 * Note that these are default values only.  The actual values will be
 * filled in when the user does a mode sense.
 */
static struct copan_power_subpage power_page_default = {
	/*page_code*/ PWR_PAGE_CODE | SMPH_SPF,
	/*subpage*/ PWR_SUBPAGE_CODE,
	/*page_length*/ {(sizeof(struct copan_power_subpage) - 4) & 0xff00,
			 (sizeof(struct copan_power_subpage) - 4) & 0x00ff},
	/*page_version*/ PWR_VERSION,
	/* total_luns */ 26,
	/* max_active_luns*/ PWR_DFLT_MAX_LUNS,
	/*reserved*/ {0, 0, 0, 0, 0, 0, 0, 0, 0,
		      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		      0, 0, 0, 0, 0, 0}
};

static struct copan_power_subpage power_page_changeable = {
	/*page_code*/ PWR_PAGE_CODE | SMPH_SPF,
	/*subpage*/ PWR_SUBPAGE_CODE,
	/*page_length*/ {(sizeof(struct copan_power_subpage) - 4) & 0xff00,
			 (sizeof(struct copan_power_subpage) - 4) & 0x00ff},
	/*page_version*/ 0,
	/* total_luns */ 0,
	/* max_active_luns*/ 0,
	/*reserved*/ {0, 0, 0, 0, 0, 0, 0, 0, 0,
		      0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		      0, 0, 0, 0, 0, 0}
};

static struct copan_aps_subpage aps_page_default = {
	APS_PAGE_CODE | SMPH_SPF, //page_code
	APS_SUBPAGE_CODE, //subpage
	{(sizeof(struct copan_aps_subpage) - 4) & 0xff00,
	 (sizeof(struct copan_aps_subpage) - 4) & 0x00ff}, //page_length
	APS_VERSION, //page_version
	0, //lock_active
	{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0} //reserved
};

static struct copan_aps_subpage aps_page_changeable = {
	APS_PAGE_CODE | SMPH_SPF, //page_code
	APS_SUBPAGE_CODE, //subpage
	{(sizeof(struct copan_aps_subpage) - 4) & 0xff00,
	 (sizeof(struct copan_aps_subpage) - 4) & 0x00ff}, //page_length
	0, //page_version
	0, //lock_active
	{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	 0, 0, 0, 0, 0} //reserved
};

static struct copan_debugconf_subpage debugconf_page_default = {
	DBGCNF_PAGE_CODE | SMPH_SPF,	/* page_code */
	DBGCNF_SUBPAGE_CODE,		/* subpage */
	{(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
	 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */
	DBGCNF_VERSION,			/* page_version */
	{CTL_TIME_IO_DEFAULT_SECS>>8,
	 CTL_TIME_IO_DEFAULT_SECS>>0},	/* ctl_time_io_secs */
};

static struct copan_debugconf_subpage debugconf_page_changeable = {
	DBGCNF_PAGE_CODE | SMPH_SPF,	/* page_code */
	DBGCNF_SUBPAGE_CODE,		/* subpage */
	{(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
	 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */
	0,				/* page_version */
	{0xff,0xff},			/* ctl_time_io_secs */
};

static struct scsi_format_page format_page_default = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff,
			       CTL_DEFAULT_SECTORS_PER_TRACK & 0xff},
	/*bytes_per_sector*/ {0, 0},
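	/*
	 * (Two-byte fields in these templates are big-endian, per SCSI
	 * convention; hence the explicit >> 8 / & 0xff splits above and
	 * below.)
	 */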
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ SFP_HSEC,
	/*reserved*/ {0, 0, 0}
};

static struct scsi_format_page format_page_changeable = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {0, 0},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ 0,
	/*reserved*/ {0, 0, 0}
};

static struct scsi_rigid_disk_page rigid_disk_page_default = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ CTL_DEFAULT_HEADS,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ SRDP_RPL_DISABLED,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff,
			   CTL_DEFAULT_ROTATION_RATE & 0xff},
	/*reserved2*/ {0, 0}
};

static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ 0,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ 0,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {0, 0},
	/*reserved2*/ {0, 0}
};

static struct scsi_caching_page caching_page_default = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_DISC | SCP_WCE,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0xff, 0xff},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0xff, 0xff},
	/*max_pf_ceiling*/ {0xff, 0xff},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

static struct scsi_caching_page caching_page_changeable = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ 0,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0, 0},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0, 0},
	/*max_pf_ceiling*/ {0, 0},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

static struct scsi_control_page control_page_default = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/0,
	/*queue_flags*/0,
	/*eca_and_aen*/0,
	/*reserved*/0,
	/*aen_holdoff_period*/{0, 0}
};

static struct scsi_control_page control_page_changeable = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/SCP_DSENSE,
	/*queue_flags*/0,
	/*eca_and_aen*/0,
	/*reserved*/0,
	/*aen_holdoff_period*/{0, 0}
};


/*
 * XXX KDM move these into the softc.
 */
static int rcv_sync_msg;
static int persis_offset;
static uint8_t ctl_pause_rtr;
static int ctl_is_single = 1;
static int index_to_aps_page;

SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
static int worker_threads = -1;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
    &worker_threads, 1, "Number of worker threads");
static int verbose = 0;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, verbose, CTLFLAG_RWTUN,
    &verbose, 0, "Show SCSI errors returned to initiator");

/*
 * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
 * SCSI Ports (0x88), Block limits (0xB0) and
 * Logical Block Provisioning (0xB2)
 */
#define SCSI_EVPD_NUM_SUPPORTED_PAGES	6

static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event,
				  int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
static int ctl_init(void);
void ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static void ctl_ioctl_online(void *arg);
static void ctl_ioctl_offline(void *arg);
static int ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id);
static int ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id);
static int ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio);
static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static int ctl_ioctl_submit_wait(union ctl_io *io);
static void ctl_ioctl_datamove(union ctl_io *io);
static void ctl_ioctl_done(union ctl_io *io);
static void ctl_ioctl_hard_startstop_callback(void *arg,
					      struct cfi_metatask *metatask);
static void ctl_ioctl_bbrread_callback(void *arg,struct cfi_metatask *metatask);
static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
			      struct ctl_ooa *ooa_hdr,
			      struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
		     struct thread *td);
uint32_t ctl_get_resindex(struct ctl_nexus *nexus);
uint32_t ctl_port_idx(int port_num);
static uint32_t ctl_map_lun(int port_num, uint32_t lun);
static uint32_t ctl_map_lun_back(int port_num, uint32_t lun);
#ifdef unused
static union ctl_io *ctl_malloc_io(ctl_io_type io_type, uint32_t targ_port,
				   uint32_t targ_target, uint32_t targ_lun,
				   int can_wait);
static void ctl_kfree_io(union ctl_io *io);
#endif /* unused */
static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
			 struct ctl_be_lun *be_lun, struct ctl_id target_id);
static int ctl_free_lun(struct ctl_lun *lun);
static void ctl_create_lun(struct ctl_be_lun *be_lun);
/**
static void ctl_failover_change_pages(struct ctl_softc *softc,
				      struct ctl_scsiio *ctsio, int master);
**/

static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
			   uint64_t res_key, uint64_t sa_res_key,
			   uint8_t type, uint32_t residx,
			   struct ctl_scsiio *ctsio,
			   struct scsi_per_res_out *cdb,
			   struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
				  union ctl_ha_msg *msg);
static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
				       int alloc_len);
static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
					 int alloc_len);
static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint32_t *len);
static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(union ctl_io *pending_io,
					 union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
				union ctl_io *starting_io);
static int ctl_check_blocked(struct ctl_lun *lun);
static int ctl_scsiio_lun_check(struct ctl_softc *ctl_softc,
				struct ctl_lun *lun,
				const struct ctl_cmd_entry *entry,
				struct ctl_scsiio *ctsio);
//static int ctl_check_rtr(union ctl_io *pending_io, struct ctl_softc *softc);
static void ctl_failover(void);
static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
			       struct ctl_scsiio *ctsio);
static int ctl_scsiio(struct ctl_scsiio *ctsio);

static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
			    ctl_ua_type ua_type);
static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io,
			 ctl_ua_type ua_type);
static int ctl_abort_task(union ctl_io *io);
static int ctl_abort_task_set(union ctl_io *io);
static int ctl_i_t_nexus_reset(union ctl_io *io);
static void ctl_run_task(union ctl_io *io);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
static void ctl_done_timer_wakeup(void *arg);
#endif /* CTL_IO_DELAY */

static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_dm_write_cb(union ctl_io *io);
static void ctl_datamove_remote_write(union ctl_io *io);
static int ctl_datamove_remote_dm_read_cb(union ctl_io *io);
static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
				    ctl_ha_dt_cb callback);
static void ctl_datamove_remote_read(union ctl_io *io);
static void ctl_datamove_remote(union ctl_io *io);
static int ctl_process_done(union ctl_io *io);
static void ctl_lun_thread(void *arg);
static void ctl_work_thread(void *arg);
static void ctl_enqueue_incoming(union ctl_io *io);
static void ctl_enqueue_rtr(union ctl_io *io);
static void ctl_enqueue_done(union ctl_io *io);
static void ctl_enqueue_isc(union ctl_io *io);
static const struct ctl_cmd_entry *
    ctl_get_cmd_entry(struct ctl_scsiio *ctsio);
static const struct ctl_cmd_entry *
    ctl_validate_command(struct ctl_scsiio *ctsio);
static int ctl_cmd_applicable(uint8_t lun_type,
    const struct ctl_cmd_entry *entry);

/*
 * Load the serialization table.  This isn't very pretty, but is probably
 * the easiest way to do it.
 */
#include "ctl_ser_table.c"

/*
 * We only need to define open, close and ioctl routines for this driver.
 */
static struct cdevsw ctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	ctl_open,
	.d_close =	ctl_close,
	.d_ioctl =	ctl_ioctl,
	.d_name =	"ctl",
};


MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");
MALLOC_DEFINE(M_CTLIO, "ctlio", "Memory used for CTL requests");

static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t ctl_moduledata = {
	"ctl",
	ctl_module_event_handler,
	NULL
};

DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);

static struct ctl_frontend ioctl_frontend =
{
	.name = "ioctl",
};

static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
			    union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.original_sc == NULL) {
		printf("%s: original_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.original_sc->scsiio;
	ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctsio->io_hdr.status = msg_info->hdr.status;
	ctsio->scsi_status = msg_info->scsi.scsi_status;
	ctsio->sense_len = msg_info->scsi.sense_len;
	ctsio->sense_residual = msg_info->scsi.sense_residual;
	ctsio->residual = msg_info->scsi.residual;
	memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
	       sizeof(ctsio->sense_data));
	memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
	       &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen));
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

static void
ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
				union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.serializing_sc == NULL) {
		printf("%s: serializing_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.serializing_sc->scsiio;
#if 0
	/*
	 * Attempt to catch the situation where an I/O has
	 * been freed, and we're using it again.
	 */
	if (ctsio->io_hdr.io_type == 0xff) {
		union ctl_io *tmp_io;
		tmp_io = (union ctl_io *)ctsio;
		printf("%s: %p use after free!\n", __func__,
		       ctsio);
		printf("%s: type %d msg %d cdb %x iptl: "
		       "%d:%d:%d:%d tag 0x%04x "
		       "flag %#x status %x\n",
			__func__,
			tmp_io->io_hdr.io_type,
			tmp_io->io_hdr.msg_type,
			tmp_io->scsiio.cdb[0],
			tmp_io->io_hdr.nexus.initid.id,
			tmp_io->io_hdr.nexus.targ_port,
			tmp_io->io_hdr.nexus.targ_target.id,
			tmp_io->io_hdr.nexus.targ_lun,
			(tmp_io->io_hdr.io_type ==
			CTL_IO_TASK) ?
			tmp_io->taskio.tag_num :
			tmp_io->scsiio.tag_num,
			tmp_io->io_hdr.flags,
			tmp_io->io_hdr.status);
	}
#endif
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

/*
 * ISC (Inter Shelf Communication) event handler.  Events from the HA
 * subsystem come in here.
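 *
 * A sketch of the dispatch below: CTL_MSG_SERIALIZE, DATAMOVE,
 * DATAMOVE_DONE, R2R, FINISH_IO, BAD_JUJU and PERS_ACTION messages are
 * turned into (or matched back to) a ctl_io and handed to the worker
 * threads via ctl_enqueue_isc(); CTL_MSG_MANAGE_TASKS runs inline via
 * ctl_run_task(), while SYNC_FE and APS_LOCK are handled on the spot.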
 */
static void
ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
{
	struct ctl_softc *ctl_softc;
	union ctl_io *io;
	struct ctl_prio *presio;
	ctl_ha_status isc_status;

	ctl_softc = control_softc;
	io = NULL;


#if 0
	printf("CTL: Isc Msg event %d\n", event);
#endif
	if (event == CTL_HA_EVT_MSG_RECV) {
		union ctl_ha_msg msg_info;

		isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
					     sizeof(msg_info), /*wait*/ 0);
#if 0
		printf("CTL: msg_type %d\n", msg_info.msg_type);
#endif
		if (isc_status != 0) {
			printf("Error receiving message, status = %d\n",
			       isc_status);
			return;
		}

		switch (msg_info.hdr.msg_type) {
		case CTL_MSG_SERIALIZE:
#if 0
			printf("Serialize\n");
#endif
			io = ctl_alloc_io((void *)ctl_softc->othersc_pool);
			if (io == NULL) {
				printf("ctl_isc_event_handler: can't allocate "
				       "ctl_io!\n");
				/* Bad Juju */
				/* Need to set busy and send msg back */
				msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
				msg_info.hdr.status = CTL_SCSI_ERROR;
				msg_info.scsi.scsi_status = SCSI_STATUS_BUSY;
				msg_info.scsi.sense_len = 0;
				if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
				    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS){
				}
				goto bailout;
			}
			ctl_zero_io(io);
			// populate ctsio from msg_info
			io->io_hdr.io_type = CTL_IO_SCSI;
			io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
			io->io_hdr.original_sc = msg_info.hdr.original_sc;
#if 0
			printf("pOrig %x\n", (int)msg_info.original_sc);
#endif
			io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
					    CTL_FLAG_IO_ACTIVE;
			/*
			 * If we're in serialization-only mode, we don't
			 * want to go through full done processing.  Thus
			 * the COPY flag.
			 *
			 * XXX KDM add another flag that is more specific.
			 */
			if (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)
				io->io_hdr.flags |= CTL_FLAG_INT_COPY;
			io->io_hdr.nexus = msg_info.hdr.nexus;
#if 0
			printf("targ %d, port %d, iid %d, lun %d\n",
			       io->io_hdr.nexus.targ_target.id,
			       io->io_hdr.nexus.targ_port,
			       io->io_hdr.nexus.initid.id,
			       io->io_hdr.nexus.targ_lun);
#endif
			io->scsiio.tag_num = msg_info.scsi.tag_num;
			io->scsiio.tag_type = msg_info.scsi.tag_type;
			memcpy(io->scsiio.cdb, msg_info.scsi.cdb,
			       CTL_MAX_CDBLEN);
			if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
				const struct ctl_cmd_entry *entry;

				entry = ctl_get_cmd_entry(&io->scsiio);
				io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
				io->io_hdr.flags |=
					entry->flags & CTL_FLAG_DATA_MASK;
			}
			ctl_enqueue_isc(io);
			break;

		/* Performed on the Originating SC, XFER mode only */
		case CTL_MSG_DATAMOVE: {
			struct ctl_sg_entry *sgl;
			int i, j;

			io = msg_info.hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n", __func__);
				/* XXX KDM do something here */
				break;
			}
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			/*
			 * Keep track of this, we need to send it back over
			 * when the datamove is complete.
			 */
			io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;

			if (msg_info.dt.sg_sequence == 0) {
				/*
				 * XXX KDM we use the preallocated S/G list
				 * here, but we'll need to change this to
				 * dynamic allocation if we need larger S/G
				 * lists.
				 */
				if (msg_info.dt.kern_sg_entries >
				    sizeof(io->io_hdr.remote_sglist) /
				    sizeof(io->io_hdr.remote_sglist[0])) {
					printf("%s: number of S/G entries "
					    "needed %u > allocated num %zd\n",
					    __func__,
					    msg_info.dt.kern_sg_entries,
					    sizeof(io->io_hdr.remote_sglist)/
					    sizeof(io->io_hdr.remote_sglist[0]));

					/*
					 * XXX KDM send a message back to
					 * the other side to shut down the
					 * DMA.  The error will come back
					 * through via the normal channel.
					 */
					break;
				}
				sgl = io->io_hdr.remote_sglist;
				memset(sgl, 0,
				       sizeof(io->io_hdr.remote_sglist));

				io->scsiio.kern_data_ptr = (uint8_t *)sgl;

				io->scsiio.kern_sg_entries =
					msg_info.dt.kern_sg_entries;
				io->scsiio.rem_sg_entries =
					msg_info.dt.kern_sg_entries;
				io->scsiio.kern_data_len =
					msg_info.dt.kern_data_len;
				io->scsiio.kern_total_len =
					msg_info.dt.kern_total_len;
				io->scsiio.kern_data_resid =
					msg_info.dt.kern_data_resid;
				io->scsiio.kern_rel_offset =
					msg_info.dt.kern_rel_offset;
				/*
				 * Clear out per-DMA flags.
				 */
				io->io_hdr.flags &= ~CTL_FLAG_RDMA_MASK;
				/*
				 * Add per-DMA flags that are set for this
				 * particular DMA request.
				 */
				io->io_hdr.flags |= msg_info.dt.flags &
						    CTL_FLAG_RDMA_MASK;
			} else
				sgl = (struct ctl_sg_entry *)
					io->scsiio.kern_data_ptr;

			for (i = msg_info.dt.sent_sg_entries, j = 0;
			     i < (msg_info.dt.sent_sg_entries +
			     msg_info.dt.cur_sg_entries); i++, j++) {
				sgl[i].addr = msg_info.dt.sg_list[j].addr;
				sgl[i].len = msg_info.dt.sg_list[j].len;

#if 0
				printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n",
				       __func__,
				       msg_info.dt.sg_list[j].addr,
				       msg_info.dt.sg_list[j].len,
				       sgl[i].addr, sgl[i].len, j, i);
#endif
			}
#if 0
			memcpy(&sgl[msg_info.dt.sent_sg_entries],
			       msg_info.dt.sg_list,
			       sizeof(*sgl) * msg_info.dt.cur_sg_entries);
#endif

			/*
			 * If this is the last piece of the I/O, we've got
			 * the full S/G list.  Queue processing in the thread.
			 * Otherwise wait for the next piece.
			 */
			if (msg_info.dt.sg_last != 0)
				ctl_enqueue_isc(io);
			break;
		}
		/* Performed on the Serializing (primary) SC, XFER mode only */
		case CTL_MSG_DATAMOVE_DONE: {
			if (msg_info.hdr.serializing_sc == NULL) {
				printf("%s: serializing_sc == NULL!\n",
				       __func__);
				/* XXX KDM now what? */
				break;
			}
			/*
			 * We grab the sense information here in case
			 * there was a failure, so we can return status
			 * back to the initiator.
			 */
			io = msg_info.hdr.serializing_sc;
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
			io->io_hdr.status = msg_info.hdr.status;
			io->scsiio.scsi_status = msg_info.scsi.scsi_status;
			io->scsiio.sense_len = msg_info.scsi.sense_len;
			io->scsiio.sense_residual = msg_info.scsi.sense_residual;
			io->io_hdr.port_status = msg_info.scsi.fetd_status;
			io->scsiio.residual = msg_info.scsi.residual;
			memcpy(&io->scsiio.sense_data, &msg_info.scsi.sense_data,
			       sizeof(io->scsiio.sense_data));
			ctl_enqueue_isc(io);
			break;
		}

		/* Performed on Originating SC, SER_ONLY mode */
		case CTL_MSG_R2R:
			io = msg_info.hdr.original_sc;
			if (io == NULL) {
				printf("%s: Major Bummer\n", __func__);
				return;
			} else {
#if 0
				printf("pOrig %x\n",(int) ctsio);
#endif
			}
			io->io_hdr.msg_type = CTL_MSG_R2R;
			io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;
			ctl_enqueue_isc(io);
			break;

		/*
		 * Performed on the Serializing (i.e. primary) SC in SER_ONLY
		 * mode.
		 * Performed on the Originating (i.e. secondary) SC in XFER
		 * mode.
		 */
		case CTL_MSG_FINISH_IO:
			if (ctl_softc->ha_mode == CTL_HA_MODE_XFER)
				ctl_isc_handler_finish_xfer(ctl_softc,
							    &msg_info);
			else
				ctl_isc_handler_finish_ser_only(ctl_softc,
								&msg_info);
			break;

		/* Performed on Originating SC */
		case CTL_MSG_BAD_JUJU:
			io = msg_info.hdr.original_sc;
			if (io == NULL) {
				printf("%s: Bad JUJU!, original_sc is NULL!\n",
				       __func__);
				break;
			}
			ctl_copy_sense_data(&msg_info, io);
			/*
			 * IO should have already been cleaned up on other
			 * SC so clear this flag so we won't send a message
			 * back to finish the IO there.
			 */
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;

			/* io = msg_info.hdr.serializing_sc; */
			io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
			ctl_enqueue_isc(io);
			break;

		/* Handle resets sent from the other side */
		case CTL_MSG_MANAGE_TASKS: {
			struct ctl_taskio *taskio;
			taskio = (struct ctl_taskio *)ctl_alloc_io(
				(void *)ctl_softc->othersc_pool);
			if (taskio == NULL) {
				printf("ctl_isc_event_handler: can't allocate "
				       "ctl_io!\n");
				/* Bad Juju */
				/* should I just call the proper reset func
				   here??? */
				goto bailout;
			}
			ctl_zero_io((union ctl_io *)taskio);
			taskio->io_hdr.io_type = CTL_IO_TASK;
			taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
			taskio->io_hdr.nexus = msg_info.hdr.nexus;
			taskio->task_action = msg_info.task.task_action;
			taskio->tag_num = msg_info.task.tag_num;
			taskio->tag_type = msg_info.task.tag_type;
#ifdef CTL_TIME_IO
			taskio->io_hdr.start_time = time_uptime;
			getbintime(&taskio->io_hdr.start_bt);
#if 0
			cs_prof_gettime(&taskio->io_hdr.start_ticks);
#endif
#endif /* CTL_TIME_IO */
			ctl_run_task((union ctl_io *)taskio);
			break;
		}
		/* Persistent Reserve action which needs attention */
		case CTL_MSG_PERS_ACTION:
			presio = (struct ctl_prio *)ctl_alloc_io(
				(void *)ctl_softc->othersc_pool);
			if (presio == NULL) {
				printf("ctl_isc_event_handler: can't allocate "
				       "ctl_io!\n");
				/* Bad Juju */
				/* Need to set busy and send msg back */
				goto bailout;
			}
			ctl_zero_io((union ctl_io *)presio);
			presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
			presio->pr_msg = msg_info.pr;
			ctl_enqueue_isc((union ctl_io *)presio);
			break;
		case CTL_MSG_SYNC_FE:
			rcv_sync_msg = 1;
			break;
		case CTL_MSG_APS_LOCK: {
			// It's quicker to execute this than to
			// queue it.
			struct ctl_lun *lun;
			struct ctl_page_index *page_index;
			struct copan_aps_subpage *current_sp;
			uint32_t targ_lun;

			targ_lun = msg_info.hdr.nexus.targ_mapped_lun;
			lun = ctl_softc->ctl_luns[targ_lun];
			mtx_lock(&lun->lun_lock);
			page_index = &lun->mode_pages.index[index_to_aps_page];
			current_sp = (struct copan_aps_subpage *)
				(page_index->page_data +
				(page_index->page_len * CTL_PAGE_CURRENT));

			current_sp->lock_active = msg_info.aps.lock_flag;
			mtx_unlock(&lun->lun_lock);
			break;
		}
		default:
			printf("How did I get here?\n");
		}
	} else if (event == CTL_HA_EVT_MSG_SENT) {
		if (param != CTL_HA_STATUS_SUCCESS) {
			printf("Bad status from ctl_ha_msg_send status %d\n",
			       param);
		}
		return;
	} else if (event == CTL_HA_EVT_DISCONNECT) {
		printf("CTL: Got a disconnect from Isc\n");
		return;
	} else {
		printf("ctl_isc_event_handler: Unknown event %d\n", event);
		return;
	}

bailout:
	return;
}

static void
ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
{
	struct scsi_sense_data *sense;

	sense = &dest->scsiio.sense_data;
	bcopy(&src->scsi.sense_data, sense, sizeof(*sense));
	dest->scsiio.scsi_status = src->scsi.scsi_status;
	dest->scsiio.sense_len = src->scsi.sense_len;
	dest->io_hdr.status = src->hdr.status;
}

static int
ctl_init(void)
{
	struct ctl_softc *softc;
	struct ctl_io_pool *internal_pool, *emergency_pool, *other_pool;
	struct ctl_port *port;
	uint8_t sc_id = 0;
	int i, error, retval;
	//int isc_retval;

	retval = 0;
	ctl_pause_rtr = 0;
	rcv_sync_msg = 0;

	control_softc = malloc(sizeof(*control_softc), M_DEVBUF,
			       M_WAITOK | M_ZERO);
	softc = control_softc;

	softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600,
			      "cam/ctl");

	softc->dev->si_drv1 = softc;

	/*
	 * By default, return a "bad LUN" peripheral qualifier for unknown
	 * LUNs.  The user can override this default using the tunable or
	 * sysctl.  See the comment in ctl_inquiry_std() for more details.
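	 *
	 * For example (a hypothetical setting, not a recommendation),
	 * kern.cam.ctl.inquiry_pq_no_lun=0 in loader.conf, or the
	 * read-write sysctl of the same name at runtime, turns that
	 * behavior off.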
	 */
	softc->inquiry_pq_no_lun = 1;
	TUNABLE_INT_FETCH("kern.cam.ctl.inquiry_pq_no_lun",
			  &softc->inquiry_pq_no_lun);
	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl",
		CTLFLAG_RD, 0, "CAM Target Layer");

	if (softc->sysctl_tree == NULL) {
		printf("%s: unable to allocate sysctl tree\n", __func__);
		destroy_dev(softc->dev);
		free(control_softc, M_DEVBUF);
		control_softc = NULL;
		return (ENOMEM);
	}

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		       "inquiry_pq_no_lun", CTLFLAG_RW,
		       &softc->inquiry_pq_no_lun, 0,
		       "Report no lun possible for invalid LUNs");

	mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
	mtx_init(&softc->pool_lock, "CTL pool mutex", NULL, MTX_DEF);
	softc->open_count = 0;

	/*
	 * Default to actually sending a SYNCHRONIZE CACHE command down to
	 * the drive.
	 */
	softc->flags = CTL_FLAG_REAL_SYNC;

	/*
	 * In Copan's HA scheme, the "master" and "slave" roles are
	 * figured out through the slot the controller is in.  Although it
	 * is an active/active system, someone has to be in charge.
	 */
#ifdef NEEDTOPORT
	scmicro_rw(SCMICRO_GET_SHELF_ID, &sc_id);
#endif

	if (sc_id == 0) {
		softc->flags |= CTL_FLAG_MASTER_SHELF;
		persis_offset = 0;
	} else
		persis_offset = CTL_MAX_INITIATORS;

	/*
	 * XXX KDM need to figure out where we want to get our target ID
	 * and WWID.  Is it different on each port?
	 */
	softc->target.id = 0;
	softc->target.wwid[0] = 0x12345678;
	softc->target.wwid[1] = 0x87654321;
	STAILQ_INIT(&softc->lun_list);
	STAILQ_INIT(&softc->pending_lun_queue);
	STAILQ_INIT(&softc->fe_list);
	STAILQ_INIT(&softc->port_list);
	STAILQ_INIT(&softc->be_list);
	STAILQ_INIT(&softc->io_pools);

	if (ctl_pool_create(softc, CTL_POOL_INTERNAL, CTL_POOL_ENTRIES_INTERNAL,
			    &internal_pool) != 0) {
		printf("ctl: can't allocate %d entry internal pool, "
		       "exiting\n", CTL_POOL_ENTRIES_INTERNAL);
		return (ENOMEM);
	}

	if (ctl_pool_create(softc, CTL_POOL_EMERGENCY,
			    CTL_POOL_ENTRIES_EMERGENCY, &emergency_pool) != 0) {
		printf("ctl: can't allocate %d entry emergency pool, "
		       "exiting\n", CTL_POOL_ENTRIES_EMERGENCY);
		ctl_pool_free(internal_pool);
		return (ENOMEM);
	}

	if (ctl_pool_create(softc, CTL_POOL_4OTHERSC, CTL_POOL_ENTRIES_OTHER_SC,
			    &other_pool) != 0)
	{
		printf("ctl: can't allocate %d entry other SC pool, "
		       "exiting\n", CTL_POOL_ENTRIES_OTHER_SC);
		ctl_pool_free(internal_pool);
		ctl_pool_free(emergency_pool);
		return (ENOMEM);
	}

	softc->internal_pool = internal_pool;
	softc->emergency_pool = emergency_pool;
	softc->othersc_pool = other_pool;

	if (worker_threads <= 0)
		worker_threads = max(1, mp_ncpus / 4);
	if (worker_threads > CTL_MAX_THREADS)
		worker_threads = CTL_MAX_THREADS;

	for (i = 0; i < worker_threads; i++) {
		struct ctl_thread *thr = &softc->threads[i];

		mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF);
		thr->ctl_softc = softc;
		STAILQ_INIT(&thr->incoming_queue);
		STAILQ_INIT(&thr->rtr_queue);
		STAILQ_INIT(&thr->done_queue);
		STAILQ_INIT(&thr->isc_queue);

		error = kproc_kthread_add(ctl_work_thread, thr,
		    &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i);
		if (error != 0) {
			printf("error creating CTL work thread!\n");
			ctl_pool_free(internal_pool);
			ctl_pool_free(emergency_pool);
			ctl_pool_free(other_pool);
			return (error);
		}
	}
	error = kproc_kthread_add(ctl_lun_thread, softc,
	    &softc->ctl_proc, NULL, 0, 0, "ctl", "lun");
	if (error != 0) {
		printf("error creating CTL lun thread!\n");
		ctl_pool_free(internal_pool);
		ctl_pool_free(emergency_pool);
		ctl_pool_free(other_pool);
		return (error);
	}
	if (bootverbose)
		printf("ctl: CAM Target Layer loaded\n");

	/*
	 * Initialize the ioctl front end.
	 */
	ctl_frontend_register(&ioctl_frontend);
	port = &softc->ioctl_info.port;
	port->frontend = &ioctl_frontend;
	sprintf(softc->ioctl_info.port_name, "ioctl");
	port->port_type = CTL_PORT_IOCTL;
	port->num_requested_ctl_io = 100;
	port->port_name = softc->ioctl_info.port_name;
	port->port_online = ctl_ioctl_online;
	port->port_offline = ctl_ioctl_offline;
	port->onoff_arg = &softc->ioctl_info;
	port->lun_enable = ctl_ioctl_lun_enable;
	port->lun_disable = ctl_ioctl_lun_disable;
	port->targ_lun_arg = &softc->ioctl_info;
	port->fe_datamove = ctl_ioctl_datamove;
	port->fe_done = ctl_ioctl_done;
	port->max_targets = 15;
	port->max_target_id = 15;

	if (ctl_port_register(&softc->ioctl_info.port,
	                      (softc->flags & CTL_FLAG_MASTER_SHELF)) != 0) {
		printf("ctl: ioctl front end registration failed, will "
		       "continue anyway\n");
	}

#ifdef CTL_IO_DELAY
	if (sizeof(struct callout) > CTL_TIMER_BYTES) {
		printf("sizeof(struct callout) %zd > CTL_TIMER_BYTES %zd\n",
		       sizeof(struct callout), CTL_TIMER_BYTES);
		return (EINVAL);
	}
#endif /* CTL_IO_DELAY */

	return (0);
}

void
ctl_shutdown(void)
{
	struct ctl_softc *softc;
	struct ctl_lun *lun, *next_lun;
	struct ctl_io_pool *pool;

	softc = (struct ctl_softc *)control_softc;

	if (ctl_port_deregister(&softc->ioctl_info.port) != 0)
		printf("ctl: ioctl front end deregistration failed\n");

	mtx_lock(&softc->ctl_lock);

	/*
	 * Free up each LUN.
	 */
	for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){
		next_lun = STAILQ_NEXT(lun, links);
		ctl_free_lun(lun);
	}

	mtx_unlock(&softc->ctl_lock);

	ctl_frontend_deregister(&ioctl_frontend);

	/*
	 * This will rip the rug out from under any FETDs or anyone else
	 * that has a pool allocated.  Since we increment our module
	 * refcount any time someone outside the main CTL module allocates
	 * a pool, we shouldn't have any problems here.  The user won't be
	 * able to unload the CTL module until client modules have
	 * successfully unloaded.
	 */
	while ((pool = STAILQ_FIRST(&softc->io_pools)) != NULL)
		ctl_pool_free(pool);

#if 0
	ctl_shutdown_thread(softc->work_thread);
	mtx_destroy(&softc->queue_lock);
#endif

	mtx_destroy(&softc->pool_lock);
	mtx_destroy(&softc->ctl_lock);

	destroy_dev(softc->dev);

	sysctl_ctx_free(&softc->sysctl_ctx);

	free(control_softc, M_DEVBUF);
	control_softc = NULL;

	if (bootverbose)
		printf("ctl: CAM Target Layer unloaded\n");
}

static int
ctl_module_event_handler(module_t mod, int what, void *arg)
{

	switch (what) {
	case MOD_LOAD:
		return (ctl_init());
	case MOD_UNLOAD:
		return (EBUSY);
	default:
		return (EOPNOTSUPP);
	}
}

/*
 * XXX KDM should we do some access checks here?  Bump a reference count to
 * prevent a CTL module from being unloaded while someone has it open?
 */
static int
ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}

static int
ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}

int
ctl_port_enable(ctl_port_type port_type)
{
	struct ctl_softc *softc;
	struct ctl_port *port;

	if (ctl_is_single == 0) {
		union ctl_ha_msg msg_info;
		int isc_retval;

#if 0
		printf("%s: HA mode, synchronizing frontend enable\n",
		        __func__);
#endif
		msg_info.hdr.msg_type = CTL_MSG_SYNC_FE;
		if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
			sizeof(msg_info), 1 )) > CTL_HA_STATUS_SUCCESS) {
			printf("Sync msg send error retval %d\n", isc_retval);
		}
		if (!rcv_sync_msg) {
			isc_retval=ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
			        sizeof(msg_info), 1);
		}
#if 0
        	printf("CTL:Frontend Enable\n");
	} else {
		printf("%s: single mode, skipping frontend synchronization\n",
		        __func__);
#endif
	}

	softc = control_softc;

	STAILQ_FOREACH(port, &softc->port_list, links) {
		if (port_type & port->port_type)
		{
#if 0
			printf("port %d\n", port->targ_port);
#endif
			ctl_port_online(port);
		}
	}

	return (0);
}

int
ctl_port_disable(ctl_port_type port_type)
{
	struct ctl_softc *softc;
	struct ctl_port *port;

	softc = control_softc;

	STAILQ_FOREACH(port, &softc->port_list, links) {
		if (port_type & port->port_type)
			ctl_port_offline(port);
	}

	return (0);
}

/*
 * Returns 0 for success, 1 for failure.
 * Currently the only failure mode is if there aren't enough entries
 * allocated.  So, in case of a failure, look at num_entries_dropped,
 * reallocate and try again.
 */
int
ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced,
	      int *num_entries_filled, int *num_entries_dropped,
	      ctl_port_type port_type, int no_virtual)
{
	struct ctl_softc *softc;
	struct ctl_port *port;
	int entries_dropped, entries_filled;
	int retval;
	int i;

	softc = control_softc;

	retval = 0;
	entries_filled = 0;
	entries_dropped = 0;

	i = 0;
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(port, &softc->port_list, links) {
		struct ctl_port_entry *entry;

		if ((port->port_type & port_type) == 0)
			continue;

		if ((no_virtual != 0)
		 && (port->virtual_port != 0))
			continue;

		if (entries_filled >= num_entries_alloced) {
			entries_dropped++;
			continue;
		}
		entry = &entries[i];

		entry->port_type = port->port_type;
		strlcpy(entry->port_name, port->port_name,
			sizeof(entry->port_name));
		entry->physical_port = port->physical_port;
		entry->virtual_port = port->virtual_port;
		entry->wwnn = port->wwnn;
		entry->wwpn = port->wwpn;

		i++;
		entries_filled++;
	}

	mtx_unlock(&softc->ctl_lock);

	if (entries_dropped > 0)
		retval = 1;

	*num_entries_dropped = entries_dropped;
	*num_entries_filled = entries_filled;

	return (retval);
}

static void
ctl_ioctl_online(void *arg)
{
	struct ctl_ioctl_info *ioctl_info;

	ioctl_info = (struct ctl_ioctl_info *)arg;

	ioctl_info->flags |= CTL_IOCTL_FLAG_ENABLED;
}

static void
ctl_ioctl_offline(void *arg)
{
	struct ctl_ioctl_info *ioctl_info;

	ioctl_info = (struct ctl_ioctl_info *)arg;

	ioctl_info->flags &= ~CTL_IOCTL_FLAG_ENABLED;
}

/*
 * Remove an initiator by port number and initiator ID.
 * Returns 0 for success, -1 for failure.
 */
int
ctl_remove_initiator(struct ctl_port *port, int iid)
{
	struct ctl_softc *softc = control_softc;

	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);

	if (iid >= CTL_MAX_INIT_PER_PORT) {
		printf("%s: initiator ID %u >= maximum %u!\n",
		       __func__, iid, CTL_MAX_INIT_PER_PORT);
		return (-1);
	}

	mtx_lock(&softc->ctl_lock);
	port->wwpn_iid[iid].in_use--;
	port->wwpn_iid[iid].last_use = time_uptime;
	mtx_unlock(&softc->ctl_lock);

	return (0);
}

/*
 * Add an initiator to the initiator map.
 * Returns iid for success, < 0 for failure.
 */
int
ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name)
{
	struct ctl_softc *softc = control_softc;
	time_t best_time;
	int i, best;

	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);

	if (iid >= CTL_MAX_INIT_PER_PORT) {
		printf("%s: WWPN %#jx initiator ID %u >= maximum %u!\n",
		       __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT);
		free(name, M_CTL);
		return (-1);
	}

	mtx_lock(&softc->ctl_lock);

	if (iid < 0 && (wwpn != 0 || name != NULL)) {
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) {
				iid = i;
				break;
			}
			if (name != NULL && port->wwpn_iid[i].name != NULL &&
			    strcmp(name, port->wwpn_iid[i].name) == 0) {
				iid = i;
				break;
			}
		}
	}

	if (iid < 0) {
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (port->wwpn_iid[i].in_use == 0 &&
			    port->wwpn_iid[i].wwpn == 0 &&
			    port->wwpn_iid[i].name == NULL) {
				iid = i;
				break;
			}
		}
	}

	if (iid < 0) {
		best = -1;
		best_time = INT32_MAX;
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (port->wwpn_iid[i].in_use == 0) {
				if (port->wwpn_iid[i].last_use < best_time) {
					best = i;
					best_time = port->wwpn_iid[i].last_use;
				}
			}
		}
		iid = best;
	}

	if (iid < 0) {
		mtx_unlock(&softc->ctl_lock);
		free(name, M_CTL);
		return (-2);
	}

	if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) {
		/*
		 * This is not an error yet.
		 */
		if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) {
#if 0
			printf("%s: port %d iid %u WWPN %#jx arrived"
			    " again\n", __func__, port->targ_port,
			    iid, (uintmax_t)wwpn);
#endif
			goto take;
		}
		if (name != NULL && port->wwpn_iid[iid].name != NULL &&
		    strcmp(name, port->wwpn_iid[iid].name) == 0) {
#if 0
			printf("%s: port %d iid %u name '%s' arrived"
			    " again\n", __func__, port->targ_port,
			    iid, name);
#endif
			goto take;
		}

		/*
		 * This is an error, but what do we do about it?  The
		 * driver is telling us we have a new WWPN for this
		 * initiator ID, so we pretty much need to use it.
		 */
		printf("%s: port %d iid %u WWPN %#jx '%s' arrived,"
		    " but WWPN %#jx '%s' is still at that address\n",
		    __func__, port->targ_port, iid, wwpn, name,
		    (uintmax_t)port->wwpn_iid[iid].wwpn,
		    port->wwpn_iid[iid].name);

		/*
		 * XXX KDM clear have_ca and ua_pending on each LUN for
		 * this initiator.
		 */
	}
take:
	free(port->wwpn_iid[iid].name, M_CTL);
	port->wwpn_iid[iid].name = name;
	port->wwpn_iid[iid].wwpn = wwpn;
	port->wwpn_iid[iid].in_use++;
	mtx_unlock(&softc->ctl_lock);

	return (iid);
}

static int
ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf)
{
	int len;

	switch (port->port_type) {
	case CTL_PORT_FC:
	{
		struct scsi_transportid_fcp *id =
		    (struct scsi_transportid_fcp *)buf;
		if (port->wwpn_iid[iid].wwpn == 0)
			return (0);
		memset(id, 0, sizeof(*id));
		id->format_protocol = SCSI_PROTO_FC;
		scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name);
		return (sizeof(*id));
	}
	case CTL_PORT_ISCSI:
	{
		struct scsi_transportid_iscsi_port *id =
		    (struct scsi_transportid_iscsi_port *)buf;
		if (port->wwpn_iid[iid].name == NULL)
			return (0);
		memset(id, 0, 256);
		id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT |
		    SCSI_PROTO_ISCSI;
		len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1;
		len = roundup2(min(len, 252), 4);
		scsi_ulto2b(len, id->additional_length);
		return (sizeof(*id) + len);
	}
	case CTL_PORT_SAS:
	{
		struct scsi_transportid_sas *id =
		    (struct scsi_transportid_sas *)buf;
		if (port->wwpn_iid[iid].wwpn == 0)
			return (0);
		memset(id, 0, sizeof(*id));
		id->format_protocol = SCSI_PROTO_SAS;
		scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address);
		return (sizeof(*id));
	}
	default:
	{
		struct scsi_transportid_spi *id =
		    (struct scsi_transportid_spi *)buf;
		memset(id, 0, sizeof(*id));
		id->format_protocol = SCSI_PROTO_SPI;
		scsi_ulto2b(iid, id->scsi_addr);
		scsi_ulto2b(port->targ_port, id->rel_trgt_port_id);
		return (sizeof(*id));
	}
	}
}

static int
ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id)
{
	return (0);
}

static int
ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id)
{
	return (0);
}

/*
 * Data movement routine for the CTL ioctl frontend port.
 */
static int
ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
{
	struct ctl_sg_entry *ext_sglist, *kern_sglist;
	struct ctl_sg_entry ext_entry, kern_entry;
	int ext_sglen, ext_sg_entries, kern_sg_entries;
	int ext_sg_start, ext_offset;
	int len_to_copy, len_copied;
	int kern_watermark, ext_watermark;
	int ext_sglist_malloced;
	int i, j;

	ext_sglist_malloced = 0;
	ext_sg_start = 0;
	ext_offset = 0;

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n"));

	/*
	 * If this flag is set, fake the data transfer.
	 */
	if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) {
		ctsio->ext_data_filled = ctsio->ext_data_len;
		goto bailout;
	}

	/*
	 * To simplify things here, if we have a single buffer, stick it in
	 * a S/G entry and just make it a single entry S/G list.
	 */
	if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) {
		int len_seen;

		ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);

		ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL,
							   M_WAITOK);
		ext_sglist_malloced = 1;
		if (copyin(ctsio->ext_data_ptr, ext_sglist,
			   ext_sglen) != 0) {
			ctl_set_internal_failure(ctsio,
						 /*sks_valid*/ 0,
						 /*retry_count*/ 0);
			goto bailout;
		}
		ext_sg_entries = ctsio->ext_sg_entries;
		len_seen = 0;
		for (i = 0; i < ext_sg_entries; i++) {
			if ((len_seen + ext_sglist[i].len) >=
			     ctsio->ext_data_filled) {
				ext_sg_start = i;
				ext_offset = ctsio->ext_data_filled - len_seen;
				break;
			}
			len_seen += ext_sglist[i].len;
		}
	} else {
		ext_sglist = &ext_entry;
		ext_sglist->addr = ctsio->ext_data_ptr;
		ext_sglist->len = ctsio->ext_data_len;
		ext_sg_entries = 1;
		ext_sg_start = 0;
		ext_offset = ctsio->ext_data_filled;
	}

	if (ctsio->kern_sg_entries > 0) {
		kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
		kern_sg_entries = ctsio->kern_sg_entries;
	} else {
		kern_sglist = &kern_entry;
		kern_sglist->addr = ctsio->kern_data_ptr;
		kern_sglist->len = ctsio->kern_data_len;
		kern_sg_entries = 1;
	}


	kern_watermark = 0;
	ext_watermark = ext_offset;
	len_copied = 0;
	for (i = ext_sg_start, j = 0;
	     i < ext_sg_entries && j < kern_sg_entries;) {
		uint8_t *ext_ptr, *kern_ptr;

		len_to_copy = ctl_min(ext_sglist[i].len - ext_watermark,
				      kern_sglist[j].len - kern_watermark);

		ext_ptr = (uint8_t *)ext_sglist[i].addr;
		ext_ptr = ext_ptr + ext_watermark;
		if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
			/*
			 * XXX KDM fix this!
			 */
			panic("need to implement bus address support");
#if 0
			kern_ptr = bus_to_virt(kern_sglist[j].addr);
#endif
		} else
			kern_ptr = (uint8_t *)kern_sglist[j].addr;
		kern_ptr = kern_ptr + kern_watermark;

		kern_watermark += len_to_copy;
		ext_watermark += len_to_copy;

		if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		     CTL_FLAG_DATA_IN) {
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
					 "bytes to user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
					 "to %p\n", kern_ptr, ext_ptr));
			if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) {
				ctl_set_internal_failure(ctsio,
							 /*sks_valid*/ 0,
							 /*retry_count*/ 0);
				goto bailout;
			}
		} else {
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
					 "bytes from user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
					 "to %p\n", ext_ptr, kern_ptr));
			if (copyin(ext_ptr, kern_ptr, len_to_copy) != 0) {
				ctl_set_internal_failure(ctsio,
							 /*sks_valid*/ 0,
							 /*retry_count*/ 0);
				goto bailout;
			}
		}

		len_copied += len_to_copy;

		if (ext_sglist[i].len == ext_watermark) {
			i++;
			ext_watermark = 0;
		}

		if (kern_sglist[j].len == kern_watermark) {
			j++;
			kern_watermark = 0;
		}
	}

	ctsio->ext_data_filled += len_copied;

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, "
			 "kern_sg_entries: %d\n", ext_sg_entries,
			 kern_sg_entries));
	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, "
			 "kern_data_len = %d\n", ctsio->ext_data_len,
			 ctsio->kern_data_len));


	/* XXX KDM set residual?? */
bailout:

	if (ext_sglist_malloced != 0)
		free(ext_sglist, M_CTL);

	return (CTL_RETVAL_COMPLETE);
}

/*
 * Serialize a command that went down the "wrong" side, and so was sent to
 * this controller for execution.  The logic is a little different than the
 * standard case in ctl_scsiio_precheck().  Errors in this case need to get
 * sent back to the other side, but in the success case, we execute the
 * command on this side (XFER mode) or tell the other side to execute it
 * (SER_ONLY mode).
 */
static int
ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *ctl_softc;
	union ctl_ha_msg msg_info;
	struct ctl_lun *lun;
	int retval = 0;
	uint32_t targ_lun;

	ctl_softc = control_softc;

	targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
	lun = ctl_softc->ctl_luns[targ_lun];
	if (lun == NULL)
	{
		/*
		 * Why isn't LUN defined? The other side wouldn't
		 * send a cmd if the LUN is undefined.
		 */
		printf("%s: Bad JUJU!, LUN is NULL!\n", __func__);

		/* "Logical unit not supported" */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
				   /*asc*/ 0x25,
				   /*ascq*/ 0x00,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
				sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
		}
		return (1);

	}

	mtx_lock(&lun->lun_lock);
    	TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);

	switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
		(union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq,
		 ooa_links))) {
	case CTL_ACTION_BLOCK:
		ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
		TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
				  blocked_links);
		break;
	case CTL_ACTION_PASS:
	case CTL_ACTION_SKIP:
		if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
			ctl_enqueue_rtr((union ctl_io *)ctsio);
		} else {

			/* send msg back to other side */
			msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
			msg_info.hdr.serializing_sc = (union ctl_io *)ctsio;
			msg_info.hdr.msg_type = CTL_MSG_R2R;
#if 0
			printf("2. pOrig %x\n", (int)msg_info.hdr.original_sc);
#endif
			if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
			    sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
			}
		}
		break;
	case CTL_ACTION_OVERLAP:
		/* OVERLAPPED COMMANDS ATTEMPTED */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
				   /*asc*/ 0x4E,
				   /*ascq*/ 0x00,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
#if 0
		printf("BAD JUJU:Major Bummer Overlap\n");
#endif
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		retval = 1;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
		}
		break;
	case CTL_ACTION_OVERLAP_TAG:
		/* TAGGED OVERLAPPED COMMANDS (NN = QUEUE TAG) */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
				   /*asc*/ 0x4D,
				   /*ascq*/ ctsio->tag_num & 0xff,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
#if 0
		printf("BAD JUJU:Major Bummer Overlap Tag\n");
#endif
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		retval = 1;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
(ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1862 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1863 } 1864 break; 1865 case CTL_ACTION_ERROR: 1866 default: 1867 /* "Internal target failure" */ 1868 ctl_set_sense_data(&msg_info.scsi.sense_data, 1869 lun, 1870 /*sense_format*/SSD_TYPE_NONE, 1871 /*current_error*/ 1, 1872 /*sense_key*/ SSD_KEY_HARDWARE_ERROR, 1873 /*asc*/ 0x44, 1874 /*ascq*/ 0x00, 1875 SSD_ELEM_NONE); 1876 1877 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1878 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1879 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 1880 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1881 msg_info.hdr.serializing_sc = NULL; 1882 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1883#if 0 1884 printf("BAD JUJU:Major Bummer HW Error\n"); 1885#endif 1886 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1887 retval = 1; 1888 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1889 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1890 } 1891 break; 1892 } 1893 mtx_unlock(&lun->lun_lock); 1894 return (retval); 1895} 1896 1897static int 1898ctl_ioctl_submit_wait(union ctl_io *io) 1899{ 1900 struct ctl_fe_ioctl_params params; 1901 ctl_fe_ioctl_state last_state; 1902 int done, retval; 1903 1904 retval = 0; 1905 1906 bzero(&params, sizeof(params)); 1907 1908 mtx_init(&params.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF); 1909 cv_init(&params.sem, "ctlioccv"); 1910 params.state = CTL_IOCTL_INPROG; 1911 last_state = params.state; 1912 1913 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = &params; 1914 1915 CTL_DEBUG_PRINT(("ctl_ioctl_submit_wait\n")); 1916 1917 /* This shouldn't happen */ 1918 if ((retval = ctl_queue(io)) != CTL_RETVAL_COMPLETE) 1919 return (retval); 1920 1921 done = 0; 1922 1923 do { 1924 mtx_lock(&params.ioctl_mtx); 1925 /* 1926 * Check the state here, and don't sleep if the state has 1927 * already changed (i.e. wakeup has already occurred, but we 1928 * weren't waiting yet). 1929 */ 1930 if (params.state == last_state) { 1931 /* XXX KDM cv_wait_sig instead? */ 1932 cv_wait(&params.sem, &params.ioctl_mtx); 1933 } 1934 last_state = params.state; 1935 1936 switch (params.state) { 1937 case CTL_IOCTL_INPROG: 1938 /* Why did we wake up? */ 1939 /* XXX KDM error here? */ 1940 mtx_unlock(&params.ioctl_mtx); 1941 break; 1942 case CTL_IOCTL_DATAMOVE: 1943 CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n")); 1944 1945 /* 1946 * change last_state back to INPROG to avoid 1947 * deadlock on subsequent data moves. 1948 */ 1949 params.state = last_state = CTL_IOCTL_INPROG; 1950 1951 mtx_unlock(&params.ioctl_mtx); 1952 ctl_ioctl_do_datamove(&io->scsiio); 1953 /* 1954 * Note that in some cases, most notably writes, 1955 * this will queue the I/O and call us back later. 1956 * In other cases, generally reads, this routine 1957 * will immediately call back and wake us up, 1958 * probably using our own context. 1959 */ 1960 io->scsiio.be_move_done(io); 1961 break; 1962 case CTL_IOCTL_DONE: 1963 mtx_unlock(&params.ioctl_mtx); 1964 CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n")); 1965 done = 1; 1966 break; 1967 default: 1968 mtx_unlock(&params.ioctl_mtx); 1969 /* XXX KDM error here?
*/ 1970 break; 1971 } 1972 } while (done == 0); 1973 1974 mtx_destroy(&params.ioctl_mtx); 1975 cv_destroy(&params.sem); 1976 1977 return (CTL_RETVAL_COMPLETE); 1978} 1979 1980static void 1981ctl_ioctl_datamove(union ctl_io *io) 1982{ 1983 struct ctl_fe_ioctl_params *params; 1984 1985 params = (struct ctl_fe_ioctl_params *) 1986 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; 1987 1988 mtx_lock(&params->ioctl_mtx); 1989 params->state = CTL_IOCTL_DATAMOVE; 1990 cv_broadcast(&params->sem); 1991 mtx_unlock(&params->ioctl_mtx); 1992} 1993 1994static void 1995ctl_ioctl_done(union ctl_io *io) 1996{ 1997 struct ctl_fe_ioctl_params *params; 1998 1999 params = (struct ctl_fe_ioctl_params *) 2000 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; 2001 2002 mtx_lock(&params->ioctl_mtx); 2003 params->state = CTL_IOCTL_DONE; 2004 cv_broadcast(&params->sem); 2005 mtx_unlock(&params->ioctl_mtx); 2006} 2007 2008static void 2009ctl_ioctl_hard_startstop_callback(void *arg, struct cfi_metatask *metatask) 2010{ 2011 struct ctl_fe_ioctl_startstop_info *sd_info; 2012 2013 sd_info = (struct ctl_fe_ioctl_startstop_info *)arg; 2014 2015 sd_info->hs_info.status = metatask->status; 2016 sd_info->hs_info.total_luns = metatask->taskinfo.startstop.total_luns; 2017 sd_info->hs_info.luns_complete = 2018 metatask->taskinfo.startstop.luns_complete; 2019 sd_info->hs_info.luns_failed = metatask->taskinfo.startstop.luns_failed; 2020 2021 cv_broadcast(&sd_info->sem); 2022} 2023 2024static void 2025ctl_ioctl_bbrread_callback(void *arg, struct cfi_metatask *metatask) 2026{ 2027 struct ctl_fe_ioctl_bbrread_info *fe_bbr_info; 2028 2029 fe_bbr_info = (struct ctl_fe_ioctl_bbrread_info *)arg; 2030 2031 mtx_lock(fe_bbr_info->lock); 2032 fe_bbr_info->bbr_info->status = metatask->status; 2033 fe_bbr_info->bbr_info->bbr_status = metatask->taskinfo.bbrread.status; 2034 fe_bbr_info->wakeup_done = 1; 2035 mtx_unlock(fe_bbr_info->lock); 2036 2037 cv_broadcast(&fe_bbr_info->sem); 2038} 2039 2040/* 2041 * Returns 0 for success, errno for failure. 2042 */ 2043static int 2044ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 2045 struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries) 2046{ 2047 union ctl_io *io; 2048 int retval; 2049 2050 retval = 0; 2051 2052 mtx_lock(&lun->lun_lock); 2053 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL); 2054 (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2055 ooa_links)) { 2056 struct ctl_ooa_entry *entry; 2057 2058 /* 2059 * If we've got more than we can fit, just count the 2060 * remaining entries.
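 * The loop keeps running so that *cur_fill_num still ends up
 * as the total count of queued I/Os; the caller reports the
 * amount beyond alloc_num as dropped entries.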
2061 */ 2062 if (*cur_fill_num >= ooa_hdr->alloc_num) 2063 continue; 2064 2065 entry = &kern_entries[*cur_fill_num]; 2066 2067 entry->tag_num = io->scsiio.tag_num; 2068 entry->lun_num = lun->lun; 2069#ifdef CTL_TIME_IO 2070 entry->start_bt = io->io_hdr.start_bt; 2071#endif 2072 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); 2073 entry->cdb_len = io->scsiio.cdb_len; 2074 if (io->io_hdr.flags & CTL_FLAG_BLOCKED) 2075 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; 2076 2077 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) 2078 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; 2079 2080 if (io->io_hdr.flags & CTL_FLAG_ABORT) 2081 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; 2082 2083 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) 2084 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; 2085 2086 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 2087 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; 2088 } 2089 mtx_unlock(&lun->lun_lock); 2090 2091 return (retval); 2092} 2093 2094static void * 2095ctl_copyin_alloc(void *user_addr, int len, char *error_str, 2096 size_t error_str_len) 2097{ 2098 void *kptr; 2099 2100 kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO); 2101 2102 if (copyin(user_addr, kptr, len) != 0) { 2103 snprintf(error_str, error_str_len, "Error copying %d bytes " 2104 "from user address %p to kernel address %p", len, 2105 user_addr, kptr); 2106 free(kptr, M_CTL); 2107 return (NULL); 2108 } 2109 2110 return (kptr); 2111} 2112 2113static void 2114ctl_free_args(int num_args, struct ctl_be_arg *args) 2115{ 2116 int i; 2117 2118 if (args == NULL) 2119 return; 2120 2121 for (i = 0; i < num_args; i++) { 2122 free(args[i].kname, M_CTL); 2123 free(args[i].kvalue, M_CTL); 2124 } 2125 2126 free(args, M_CTL); 2127} 2128 2129static struct ctl_be_arg * 2130ctl_copyin_args(int num_args, struct ctl_be_arg *uargs, 2131 char *error_str, size_t error_str_len) 2132{ 2133 struct ctl_be_arg *args; 2134 int i; 2135 2136 args = ctl_copyin_alloc(uargs, num_args * sizeof(*args), 2137 error_str, error_str_len); 2138 2139 if (args == NULL) 2140 goto bailout; 2141 2142 for (i = 0; i < num_args; i++) { 2143 args[i].kname = NULL; 2144 args[i].kvalue = NULL; 2145 } 2146 2147 for (i = 0; i < num_args; i++) { 2148 uint8_t *tmpptr; 2149 2150 args[i].kname = ctl_copyin_alloc(args[i].name, 2151 args[i].namelen, error_str, error_str_len); 2152 if (args[i].kname == NULL) 2153 goto bailout; 2154 2155 if (args[i].kname[args[i].namelen - 1] != '\0') { 2156 snprintf(error_str, error_str_len, "Argument %d " 2157 "name is not NUL-terminated", i); 2158 goto bailout; 2159 } 2160 2161 if (args[i].flags & CTL_BEARG_RD) { 2162 tmpptr = ctl_copyin_alloc(args[i].value, 2163 args[i].vallen, error_str, error_str_len); 2164 if (tmpptr == NULL) 2165 goto bailout; 2166 if ((args[i].flags & CTL_BEARG_ASCII) 2167 && (tmpptr[args[i].vallen - 1] != '\0')) { 2168 snprintf(error_str, error_str_len, "Argument " 2169 "%d value is not NUL-terminated", i); 2170 goto bailout; 2171 } 2172 args[i].kvalue = tmpptr; 2173 } else { 2174 args[i].kvalue = malloc(args[i].vallen, 2175 M_CTL, M_WAITOK | M_ZERO); 2176 } 2177 } 2178 2179 return (args); 2180bailout: 2181 2182 ctl_free_args(num_args, args); 2183 2184 return (NULL); 2185} 2186 2187static void 2188ctl_copyout_args(int num_args, struct ctl_be_arg *args) 2189{ 2190 int i; 2191 2192 for (i = 0; i < num_args; i++) { 2193 if (args[i].flags & CTL_BEARG_WR) 2194 copyout(args[i].kvalue, args[i].value, args[i].vallen); 2195 } 2196} 2197 2198/* 2199 * Escape characters that are illegal or not recommended in XML. 
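 * Only '&', '<' and '>' are rewritten, to "&amp;", "&lt;" and
 * "&gt;" respectively; for example the string "a<b&c" comes out
 * as "a&lt;b&amp;c".  Everything else passes through unchanged,
 * and we stop at the first sbuf error.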
2200 */ 2201 int 2202 ctl_sbuf_printf_esc(struct sbuf *sb, char *str) 2203{ 2204 int retval; 2205 2206 retval = 0; 2207 2208 for (; *str; str++) { 2209 switch (*str) { 2210 case '&': 2211 retval = sbuf_printf(sb, "&amp;"); 2212 break; 2213 case '>': 2214 retval = sbuf_printf(sb, "&gt;"); 2215 break; 2216 case '<': 2217 retval = sbuf_printf(sb, "&lt;"); 2218 break; 2219 default: 2220 retval = sbuf_putc(sb, *str); 2221 break; 2222 } 2223 2224 if (retval != 0) 2225 break; 2226 2227 } 2228 2229 return (retval); 2230} 2231 2232static int 2233ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 2234 struct thread *td) 2235{ 2236 struct ctl_softc *softc; 2237 int retval; 2238 2239 softc = control_softc; 2240 2241 retval = 0; 2242 2243 switch (cmd) { 2244 case CTL_IO: { 2245 union ctl_io *io; 2246 void *pool_tmp; 2247 2248 /* 2249 * If we haven't been "enabled", don't allow any SCSI I/O 2250 * to this FETD. 2251 */ 2252 if ((softc->ioctl_info.flags & CTL_IOCTL_FLAG_ENABLED) == 0) { 2253 retval = EPERM; 2254 break; 2255 } 2256 2257 io = ctl_alloc_io(softc->ioctl_info.port.ctl_pool_ref); 2258 if (io == NULL) { 2259 printf("ctl_ioctl: can't allocate ctl_io!\n"); 2260 retval = ENOSPC; 2261 break; 2262 } 2263 2264 /* 2265 * Need to save the pool reference so it doesn't get 2266 * spammed by the user's ctl_io. 2267 */ 2268 pool_tmp = io->io_hdr.pool; 2269 2270 memcpy(io, (void *)addr, sizeof(*io)); 2271 2272 io->io_hdr.pool = pool_tmp; 2273 /* 2274 * No status yet, so make sure the status is set properly. 2275 */ 2276 io->io_hdr.status = CTL_STATUS_NONE; 2277 2278 /* 2279 * The user sets the initiator ID, target and LUN IDs. 2280 */ 2281 io->io_hdr.nexus.targ_port = softc->ioctl_info.port.targ_port; 2282 io->io_hdr.flags |= CTL_FLAG_USER_REQ; 2283 if ((io->io_hdr.io_type == CTL_IO_SCSI) 2284 && (io->scsiio.tag_type != CTL_TAG_UNTAGGED)) 2285 io->scsiio.tag_num = softc->ioctl_info.cur_tag_num++; 2286 2287 retval = ctl_ioctl_submit_wait(io); 2288 2289 if (retval != 0) { 2290 ctl_free_io(io); 2291 break; 2292 } 2293 2294 memcpy((void *)addr, io, sizeof(*io)); 2295 2296 /* return this to our pool */ 2297 ctl_free_io(io); 2298 2299 break; 2300 } 2301 case CTL_ENABLE_PORT: 2302 case CTL_DISABLE_PORT: 2303 case CTL_SET_PORT_WWNS: { 2304 struct ctl_port *port; 2305 struct ctl_port_entry *entry; 2306 2307 entry = (struct ctl_port_entry *)addr; 2308 2309 mtx_lock(&softc->ctl_lock); 2310 STAILQ_FOREACH(port, &softc->port_list, links) { 2311 int action, done; 2312 2313 action = 0; 2314 done = 0; 2315 2316 if ((entry->port_type == CTL_PORT_NONE) 2317 && (entry->targ_port == port->targ_port)) { 2318 /* 2319 * If the user only wants to enable or 2320 * disable or set WWNs on a specific port, 2321 * do the operation and we're done. 2322 */ 2323 action = 1; 2324 done = 1; 2325 } else if (entry->port_type & port->port_type) { 2326 /* 2327 * Compare the user's type mask with the 2328 * particular frontend type to see if we 2329 * have a match. 2330 */ 2331 action = 1; 2332 done = 0; 2333 2334 /* 2335 * Make sure the user isn't trying to set 2336 * WWNs on multiple ports at the same time. 2337 */ 2338 if (cmd == CTL_SET_PORT_WWNS) { 2339 printf("%s: Can't set WWNs on " 2340 "multiple ports\n", __func__); 2341 retval = EINVAL; 2342 break; 2343 } 2344 } 2345 if (action != 0) { 2346 /* 2347 * XXX KDM we have to drop the lock here, 2348 * because the online/offline operations 2349 * can potentially block.
We need to 2350 * reference count the frontends so they 2351 * can't go away, 2352 */ 2353 mtx_unlock(&softc->ctl_lock); 2354 2355 if (cmd == CTL_ENABLE_PORT) { 2356 struct ctl_lun *lun; 2357 2358 STAILQ_FOREACH(lun, &softc->lun_list, 2359 links) { 2360 port->lun_enable(port->targ_lun_arg, 2361 lun->target, 2362 lun->lun); 2363 } 2364 2365 ctl_port_online(port); 2366 } else if (cmd == CTL_DISABLE_PORT) { 2367 struct ctl_lun *lun; 2368 2369 ctl_port_offline(port); 2370 2371 STAILQ_FOREACH(lun, &softc->lun_list, 2372 links) { 2373 port->lun_disable( 2374 port->targ_lun_arg, 2375 lun->target, 2376 lun->lun); 2377 } 2378 } 2379 2380 mtx_lock(&softc->ctl_lock); 2381 2382 if (cmd == CTL_SET_PORT_WWNS) 2383 ctl_port_set_wwns(port, 2384 (entry->flags & CTL_PORT_WWNN_VALID) ? 2385 1 : 0, entry->wwnn, 2386 (entry->flags & CTL_PORT_WWPN_VALID) ? 2387 1 : 0, entry->wwpn); 2388 } 2389 if (done != 0) 2390 break; 2391 } 2392 mtx_unlock(&softc->ctl_lock); 2393 break; 2394 } 2395 case CTL_GET_PORT_LIST: { 2396 struct ctl_port *port; 2397 struct ctl_port_list *list; 2398 int i; 2399 2400 list = (struct ctl_port_list *)addr; 2401 2402 if (list->alloc_len != (list->alloc_num * 2403 sizeof(struct ctl_port_entry))) { 2404 printf("%s: CTL_GET_PORT_LIST: alloc_len %u != " 2405 "alloc_num %u * sizeof(struct ctl_port_entry) " 2406 "%zu\n", __func__, list->alloc_len, 2407 list->alloc_num, sizeof(struct ctl_port_entry)); 2408 retval = EINVAL; 2409 break; 2410 } 2411 list->fill_len = 0; 2412 list->fill_num = 0; 2413 list->dropped_num = 0; 2414 i = 0; 2415 mtx_lock(&softc->ctl_lock); 2416 STAILQ_FOREACH(port, &softc->port_list, links) { 2417 struct ctl_port_entry entry, *list_entry; 2418 2419 if (list->fill_num >= list->alloc_num) { 2420 list->dropped_num++; 2421 continue; 2422 } 2423 2424 entry.port_type = port->port_type; 2425 strlcpy(entry.port_name, port->port_name, 2426 sizeof(entry.port_name)); 2427 entry.targ_port = port->targ_port; 2428 entry.physical_port = port->physical_port; 2429 entry.virtual_port = port->virtual_port; 2430 entry.wwnn = port->wwnn; 2431 entry.wwpn = port->wwpn; 2432 if (port->status & CTL_PORT_STATUS_ONLINE) 2433 entry.online = 1; 2434 else 2435 entry.online = 0; 2436 2437 list_entry = &list->entries[i]; 2438 2439 retval = copyout(&entry, list_entry, sizeof(entry)); 2440 if (retval != 0) { 2441 printf("%s: CTL_GET_PORT_LIST: copyout " 2442 "returned %d\n", __func__, retval); 2443 break; 2444 } 2445 i++; 2446 list->fill_num++; 2447 list->fill_len += sizeof(entry); 2448 } 2449 mtx_unlock(&softc->ctl_lock); 2450 2451 /* 2452 * If this is non-zero, we had a copyout fault, so there's 2453 * probably no point in attempting to set the status inside 2454 * the structure. 
2455 */ 2456 if (retval != 0) 2457 break; 2458 2459 if (list->dropped_num > 0) 2460 list->status = CTL_PORT_LIST_NEED_MORE_SPACE; 2461 else 2462 list->status = CTL_PORT_LIST_OK; 2463 break; 2464 } 2465 case CTL_DUMP_OOA: { 2466 struct ctl_lun *lun; 2467 union ctl_io *io; 2468 char printbuf[128]; 2469 struct sbuf sb; 2470 2471 mtx_lock(&softc->ctl_lock); 2472 printf("Dumping OOA queues:\n"); 2473 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2474 mtx_lock(&lun->lun_lock); 2475 for (io = (union ctl_io *)TAILQ_FIRST( 2476 &lun->ooa_queue); io != NULL; 2477 io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2478 ooa_links)) { 2479 sbuf_new(&sb, printbuf, sizeof(printbuf), 2480 SBUF_FIXEDLEN); 2481 sbuf_printf(&sb, "LUN %jd tag 0x%04x%s%s%s%s: ", 2482 (intmax_t)lun->lun, 2483 io->scsiio.tag_num, 2484 (io->io_hdr.flags & 2485 CTL_FLAG_BLOCKED) ? " BLOCKED" : "", 2486 (io->io_hdr.flags & 2487 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 2488 (io->io_hdr.flags & 2489 CTL_FLAG_ABORT) ? " ABORT" : "", 2490 (io->io_hdr.flags & 2491 CTL_FLAG_IS_WAS_ON_RTR) ? " RTR" : ""); 2492 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 2493 sbuf_finish(&sb); 2494 printf("%s\n", sbuf_data(&sb)); 2495 } 2496 mtx_unlock(&lun->lun_lock); 2497 } 2498 printf("OOA queues dump done\n"); 2499 mtx_unlock(&softc->ctl_lock); 2500 break; 2501 } 2502 case CTL_GET_OOA: { 2503 struct ctl_lun *lun; 2504 struct ctl_ooa *ooa_hdr; 2505 struct ctl_ooa_entry *entries; 2506 uint32_t cur_fill_num; 2507 2508 ooa_hdr = (struct ctl_ooa *)addr; 2509 2510 if ((ooa_hdr->alloc_len == 0) 2511 || (ooa_hdr->alloc_num == 0)) { 2512 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2513 "must be non-zero\n", __func__, 2514 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2515 retval = EINVAL; 2516 break; 2517 } 2518 2519 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2520 sizeof(struct ctl_ooa_entry))) { 2521 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2522 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2523 __func__, ooa_hdr->alloc_len, 2524 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2525 retval = EINVAL; 2526 break; 2527 } 2528 2529 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2530 if (entries == NULL) { 2531 printf("%s: could not allocate %d bytes for OOA " 2532 "dump\n", __func__, ooa_hdr->alloc_len); 2533 retval = ENOMEM; 2534 break; 2535 } 2536 2537 mtx_lock(&softc->ctl_lock); 2538 if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0) 2539 && ((ooa_hdr->lun_num >= CTL_MAX_LUNS) 2540 || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) { 2541 mtx_unlock(&softc->ctl_lock); 2542 free(entries, M_CTL); 2543 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2544 __func__, (uintmax_t)ooa_hdr->lun_num); 2545 retval = EINVAL; 2546 break; 2547 } 2548 2549 cur_fill_num = 0; 2550 2551 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2552 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2553 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2554 ooa_hdr, entries); 2555 if (retval != 0) 2556 break; 2557 } 2558 if (retval != 0) { 2559 mtx_unlock(&softc->ctl_lock); 2560 free(entries, M_CTL); 2561 break; 2562 } 2563 } else { 2564 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2565 2566 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,ooa_hdr, 2567 entries); 2568 } 2569 mtx_unlock(&softc->ctl_lock); 2570 2571 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2572 ooa_hdr->fill_len = ooa_hdr->fill_num * 2573 sizeof(struct ctl_ooa_entry); 2574 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2575 if (retval != 0) { 2576 printf("%s: error
copying out %d bytes for OOA dump\n", 2577 __func__, ooa_hdr->fill_len); 2578 } 2579 2580 getbintime(&ooa_hdr->cur_bt); 2581 2582 if (cur_fill_num > ooa_hdr->alloc_num) { 2583 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2584 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2585 } else { 2586 ooa_hdr->dropped_num = 0; 2587 ooa_hdr->status = CTL_OOA_OK; 2588 } 2589 2590 free(entries, M_CTL); 2591 break; 2592 } 2593 case CTL_CHECK_OOA: { 2594 union ctl_io *io; 2595 struct ctl_lun *lun; 2596 struct ctl_ooa_info *ooa_info; 2597 2598 2599 ooa_info = (struct ctl_ooa_info *)addr; 2600 2601 if (ooa_info->lun_id >= CTL_MAX_LUNS) { 2602 ooa_info->status = CTL_OOA_INVALID_LUN; 2603 break; 2604 } 2605 mtx_lock(&softc->ctl_lock); 2606 lun = softc->ctl_luns[ooa_info->lun_id]; 2607 if (lun == NULL) { 2608 mtx_unlock(&softc->ctl_lock); 2609 ooa_info->status = CTL_OOA_INVALID_LUN; 2610 break; 2611 } 2612 mtx_lock(&lun->lun_lock); 2613 mtx_unlock(&softc->ctl_lock); 2614 ooa_info->num_entries = 0; 2615 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 2616 io != NULL; io = (union ctl_io *)TAILQ_NEXT( 2617 &io->io_hdr, ooa_links)) { 2618 ooa_info->num_entries++; 2619 } 2620 mtx_unlock(&lun->lun_lock); 2621 2622 ooa_info->status = CTL_OOA_SUCCESS; 2623 2624 break; 2625 } 2626 case CTL_HARD_START: 2627 case CTL_HARD_STOP: { 2628 struct ctl_fe_ioctl_startstop_info ss_info; 2629 struct cfi_metatask *metatask; 2630 struct mtx hs_mtx; 2631 2632 mtx_init(&hs_mtx, "HS Mutex", NULL, MTX_DEF); 2633 2634 cv_init(&ss_info.sem, "hard start/stop cv" ); 2635 2636 metatask = cfi_alloc_metatask(/*can_wait*/ 1); 2637 if (metatask == NULL) { 2638 retval = ENOMEM; 2639 mtx_destroy(&hs_mtx); 2640 break; 2641 } 2642 2643 if (cmd == CTL_HARD_START) 2644 metatask->tasktype = CFI_TASK_STARTUP; 2645 else 2646 metatask->tasktype = CFI_TASK_SHUTDOWN; 2647 2648 metatask->callback = ctl_ioctl_hard_startstop_callback; 2649 metatask->callback_arg = &ss_info; 2650 2651 cfi_action(metatask); 2652 2653 /* Wait for the callback */ 2654 mtx_lock(&hs_mtx); 2655 cv_wait_sig(&ss_info.sem, &hs_mtx); 2656 mtx_unlock(&hs_mtx); 2657 2658 /* 2659 * All information has been copied from the metatask by the 2660 * time cv_broadcast() is called, so we free the metatask here. 
2661 */ 2662 cfi_free_metatask(metatask); 2663 2664 memcpy((void *)addr, &ss_info.hs_info, sizeof(ss_info.hs_info)); 2665 2666 mtx_destroy(&hs_mtx); 2667 break; 2668 } 2669 case CTL_BBRREAD: { 2670 struct ctl_bbrread_info *bbr_info; 2671 struct ctl_fe_ioctl_bbrread_info fe_bbr_info; 2672 struct mtx bbr_mtx; 2673 struct cfi_metatask *metatask; 2674 2675 bbr_info = (struct ctl_bbrread_info *)addr; 2676 2677 bzero(&fe_bbr_info, sizeof(fe_bbr_info)); 2678 2679 bzero(&bbr_mtx, sizeof(bbr_mtx)); 2680 mtx_init(&bbr_mtx, "BBR Mutex", NULL, MTX_DEF); 2681 2682 fe_bbr_info.bbr_info = bbr_info; 2683 fe_bbr_info.lock = &bbr_mtx; 2684 2685 cv_init(&fe_bbr_info.sem, "BBR read cv"); 2686 metatask = cfi_alloc_metatask(/*can_wait*/ 1); 2687 2688 if (metatask == NULL) { 2689 mtx_destroy(&bbr_mtx); 2690 cv_destroy(&fe_bbr_info.sem); 2691 retval = ENOMEM; 2692 break; 2693 } 2694 metatask->tasktype = CFI_TASK_BBRREAD; 2695 metatask->callback = ctl_ioctl_bbrread_callback; 2696 metatask->callback_arg = &fe_bbr_info; 2697 metatask->taskinfo.bbrread.lun_num = bbr_info->lun_num; 2698 metatask->taskinfo.bbrread.lba = bbr_info->lba; 2699 metatask->taskinfo.bbrread.len = bbr_info->len; 2700 2701 cfi_action(metatask); 2702 2703 mtx_lock(&bbr_mtx); 2704 while (fe_bbr_info.wakeup_done == 0) 2705 cv_wait_sig(&fe_bbr_info.sem, &bbr_mtx); 2706 mtx_unlock(&bbr_mtx); 2707 2708 bbr_info->status = metatask->status; 2709 bbr_info->bbr_status = metatask->taskinfo.bbrread.status; 2710 bbr_info->scsi_status = metatask->taskinfo.bbrread.scsi_status; 2711 memcpy(&bbr_info->sense_data, 2712 &metatask->taskinfo.bbrread.sense_data, 2713 ctl_min(sizeof(bbr_info->sense_data), 2714 sizeof(metatask->taskinfo.bbrread.sense_data))); 2715 2716 cfi_free_metatask(metatask); 2717 2718 mtx_destroy(&bbr_mtx); 2719 cv_destroy(&fe_bbr_info.sem); 2720 2721 break; 2722 } 2723 case CTL_DELAY_IO: { 2724 struct ctl_io_delay_info *delay_info; 2725#ifdef CTL_IO_DELAY 2726 struct ctl_lun *lun; 2727#endif /* CTL_IO_DELAY */ 2728 2729 delay_info = (struct ctl_io_delay_info *)addr; 2730 2731#ifdef CTL_IO_DELAY 2732 mtx_lock(&softc->ctl_lock); 2733 2734 if ((delay_info->lun_id >= CTL_MAX_LUNS) 2735 || (softc->ctl_luns[delay_info->lun_id] == NULL)) { 2736 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2737 } else { 2738 lun = softc->ctl_luns[delay_info->lun_id]; 2739 mtx_lock(&lun->lun_lock); 2740 2741 delay_info->status = CTL_DELAY_STATUS_OK; 2742 2743 switch (delay_info->delay_type) { 2744 case CTL_DELAY_TYPE_CONT: 2745 break; 2746 case CTL_DELAY_TYPE_ONESHOT: 2747 break; 2748 default: 2749 delay_info->status = 2750 CTL_DELAY_STATUS_INVALID_TYPE; 2751 break; 2752 } 2753 2754 switch (delay_info->delay_loc) { 2755 case CTL_DELAY_LOC_DATAMOVE: 2756 lun->delay_info.datamove_type = 2757 delay_info->delay_type; 2758 lun->delay_info.datamove_delay = 2759 delay_info->delay_secs; 2760 break; 2761 case CTL_DELAY_LOC_DONE: 2762 lun->delay_info.done_type = 2763 delay_info->delay_type; 2764 lun->delay_info.done_delay = 2765 delay_info->delay_secs; 2766 break; 2767 default: 2768 delay_info->status = 2769 CTL_DELAY_STATUS_INVALID_LOC; 2770 break; 2771 } 2772 mtx_unlock(&lun->lun_lock); 2773 } 2774 2775 mtx_unlock(&softc->ctl_lock); 2776#else 2777 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2778#endif /* CTL_IO_DELAY */ 2779 break; 2780 } 2781 case CTL_REALSYNC_SET: { 2782 int *syncstate; 2783 2784 syncstate = (int *)addr; 2785 2786 mtx_lock(&softc->ctl_lock); 2787 switch (*syncstate) { 2788 case 0: 2789 softc->flags &= ~CTL_FLAG_REAL_SYNC; 2790 break; 2791 case
1: 2792 softc->flags |= CTL_FLAG_REAL_SYNC; 2793 break; 2794 default: 2795 retval = EINVAL; 2796 break; 2797 } 2798 mtx_unlock(&softc->ctl_lock); 2799 break; 2800 } 2801 case CTL_REALSYNC_GET: { 2802 int *syncstate; 2803 2804 syncstate = (int*)addr; 2805 2806 mtx_lock(&softc->ctl_lock); 2807 if (softc->flags & CTL_FLAG_REAL_SYNC) 2808 *syncstate = 1; 2809 else 2810 *syncstate = 0; 2811 mtx_unlock(&softc->ctl_lock); 2812 2813 break; 2814 } 2815 case CTL_SETSYNC: 2816 case CTL_GETSYNC: { 2817 struct ctl_sync_info *sync_info; 2818 struct ctl_lun *lun; 2819 2820 sync_info = (struct ctl_sync_info *)addr; 2821 2822 mtx_lock(&softc->ctl_lock); 2823 lun = softc->ctl_luns[sync_info->lun_id]; 2824 if (lun == NULL) { 2825 mtx_unlock(&softc->ctl_lock); 2826 sync_info->status = CTL_GS_SYNC_NO_LUN; break; 2827 } 2828 /* 2829 * Get or set the sync interval. We're not bounds checking 2830 * in the set case, hopefully the user won't do something 2831 * silly. 2832 */ 2833 mtx_lock(&lun->lun_lock); 2834 mtx_unlock(&softc->ctl_lock); 2835 if (cmd == CTL_GETSYNC) 2836 sync_info->sync_interval = lun->sync_interval; 2837 else 2838 lun->sync_interval = sync_info->sync_interval; 2839 mtx_unlock(&lun->lun_lock); 2840 2841 sync_info->status = CTL_GS_SYNC_OK; 2842 2843 break; 2844 } 2845 case CTL_GETSTATS: { 2846 struct ctl_stats *stats; 2847 struct ctl_lun *lun; 2848 int i; 2849 2850 stats = (struct ctl_stats *)addr; 2851 2852 if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) > 2853 stats->alloc_len) { 2854 stats->status = CTL_SS_NEED_MORE_SPACE; 2855 stats->num_luns = softc->num_luns; 2856 break; 2857 } 2858 /* 2859 * XXX KDM no locking here. If the LUN list changes, 2860 * things can blow up. 2861 */ 2862 for (i = 0, lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; 2863 i++, lun = STAILQ_NEXT(lun, links)) { 2864 retval = copyout(&lun->stats, &stats->lun_stats[i], 2865 sizeof(lun->stats)); 2866 if (retval != 0) 2867 break; 2868 } 2869 stats->num_luns = softc->num_luns; 2870 stats->fill_len = sizeof(struct ctl_lun_io_stats) * 2871 softc->num_luns; 2872 stats->status = CTL_SS_OK; 2873#ifdef CTL_TIME_IO 2874 stats->flags = CTL_STATS_FLAG_TIME_VALID; 2875#else 2876 stats->flags = CTL_STATS_FLAG_NONE; 2877#endif 2878 getnanouptime(&stats->timestamp); 2879 break; 2880 } 2881 case CTL_ERROR_INJECT: { 2882 struct ctl_error_desc *err_desc, *new_err_desc; 2883 struct ctl_lun *lun; 2884 2885 err_desc = (struct ctl_error_desc *)addr; 2886 2887 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2888 M_WAITOK | M_ZERO); 2889 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 2890 2891 mtx_lock(&softc->ctl_lock); 2892 lun = softc->ctl_luns[err_desc->lun_id]; 2893 if (lun == NULL) { 2894 mtx_unlock(&softc->ctl_lock); 2895 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 2896 __func__, (uintmax_t)err_desc->lun_id); 2897 retval = EINVAL; 2898 break; 2899 } 2900 mtx_lock(&lun->lun_lock); 2901 mtx_unlock(&softc->ctl_lock); 2902 2903 /* 2904 * We could do some checking here to verify the validity 2905 * of the request, but given the complexity of error 2906 * injection requests, the checking logic would be fairly 2907 * complex. 2908 * 2909 * For now, if the request is invalid, it just won't get 2910 * executed and might get deleted. 2911 */ 2912 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 2913 2914 /* 2915 * XXX KDM check to make sure the serial number is unique, 2916 * in case we somehow manage to wrap. That shouldn't 2917 * happen for a very long time, but it's the right thing to 2918 * do.
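 * The serial number is copied back to userland in
 * err_desc->serial; it is the handle that a later
 * CTL_ERROR_INJECT_DELETE call uses to find and remove
 * this descriptor.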
2919 */ 2920 new_err_desc->serial = lun->error_serial; 2921 err_desc->serial = lun->error_serial; 2922 lun->error_serial++; 2923 2924 mtx_unlock(&lun->lun_lock); 2925 break; 2926 } 2927 case CTL_ERROR_INJECT_DELETE: { 2928 struct ctl_error_desc *delete_desc, *desc, *desc2; 2929 struct ctl_lun *lun; 2930 int delete_done; 2931 2932 delete_desc = (struct ctl_error_desc *)addr; 2933 delete_done = 0; 2934 2935 mtx_lock(&softc->ctl_lock); 2936 lun = softc->ctl_luns[delete_desc->lun_id]; 2937 if (lun == NULL) { 2938 mtx_unlock(&softc->ctl_lock); 2939 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 2940 __func__, (uintmax_t)delete_desc->lun_id); 2941 retval = EINVAL; 2942 break; 2943 } 2944 mtx_lock(&lun->lun_lock); 2945 mtx_unlock(&softc->ctl_lock); 2946 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 2947 if (desc->serial != delete_desc->serial) 2948 continue; 2949 2950 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 2951 links); 2952 free(desc, M_CTL); 2953 delete_done = 1; 2954 } 2955 mtx_unlock(&lun->lun_lock); 2956 if (delete_done == 0) { 2957 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 2958 "error serial %ju on LUN %u\n", __func__, 2959 delete_desc->serial, delete_desc->lun_id); 2960 retval = EINVAL; 2961 break; 2962 } 2963 break; 2964 } 2965 case CTL_DUMP_STRUCTS: { 2966 int i, j, k, idx; 2967 struct ctl_port *port; 2968 struct ctl_frontend *fe; 2969 2970 mtx_lock(&softc->ctl_lock); 2971 printf("CTL Persistent Reservation information start:\n"); 2972 for (i = 0; i < CTL_MAX_LUNS; i++) { 2973 struct ctl_lun *lun; 2974 2975 lun = softc->ctl_luns[i]; 2976 2977 if ((lun == NULL) 2978 || ((lun->flags & CTL_LUN_DISABLED) != 0)) 2979 continue; 2980 2981 for (j = 0; j < (CTL_MAX_PORTS * 2); j++) { 2982 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2983 idx = j * CTL_MAX_INIT_PER_PORT + k; 2984 if (lun->per_res[idx].registered == 0) 2985 continue; 2986 printf(" LUN %d port %d iid %d key " 2987 "%#jx\n", i, j, k, 2988 (uintmax_t)scsi_8btou64( 2989 lun->per_res[idx].res_key.key)); 2990 } 2991 } 2992 } 2993 printf("CTL Persistent Reservation information end\n"); 2994 printf("CTL Ports:\n"); 2995 STAILQ_FOREACH(port, &softc->port_list, links) { 2996 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " 2997 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, 2998 port->frontend->name, port->port_type, 2999 port->physical_port, port->virtual_port, 3000 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); 3001 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3002 if (port->wwpn_iid[j].in_use == 0 && 3003 port->wwpn_iid[j].wwpn == 0 && 3004 port->wwpn_iid[j].name == NULL) 3005 continue; 3006 3007 printf(" iid %u use %d WWPN %#jx '%s'\n", 3008 j, port->wwpn_iid[j].in_use, 3009 (uintmax_t)port->wwpn_iid[j].wwpn, 3010 port->wwpn_iid[j].name); 3011 } 3012 } 3013 printf("CTL Port information end\n"); 3014 mtx_unlock(&softc->ctl_lock); 3015 /* 3016 * XXX KDM calling this without a lock. We'd likely want 3017 * to drop the lock before calling the frontend's dump 3018 * routine anyway. 
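 * (The fe_dump() hooks may take their own locks or sleep,
 * so holding ctl_lock across them would be asking for
 * trouble anyway.)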
3019 */ 3020 printf("CTL Frontends:\n"); 3021 STAILQ_FOREACH(fe, &softc->fe_list, links) { 3022 printf(" Frontend '%s'\n", fe->name); 3023 if (fe->fe_dump != NULL) 3024 fe->fe_dump(); 3025 } 3026 printf("CTL Frontend information end\n"); 3027 break; 3028 } 3029 case CTL_LUN_REQ: { 3030 struct ctl_lun_req *lun_req; 3031 struct ctl_backend_driver *backend; 3032 3033 lun_req = (struct ctl_lun_req *)addr; 3034 3035 backend = ctl_backend_find(lun_req->backend); 3036 if (backend == NULL) { 3037 lun_req->status = CTL_LUN_ERROR; 3038 snprintf(lun_req->error_str, 3039 sizeof(lun_req->error_str), 3040 "Backend \"%s\" not found.", 3041 lun_req->backend); 3042 break; 3043 } 3044 if (lun_req->num_be_args > 0) { 3045 lun_req->kern_be_args = ctl_copyin_args( 3046 lun_req->num_be_args, 3047 lun_req->be_args, 3048 lun_req->error_str, 3049 sizeof(lun_req->error_str)); 3050 if (lun_req->kern_be_args == NULL) { 3051 lun_req->status = CTL_LUN_ERROR; 3052 break; 3053 } 3054 } 3055 3056 retval = backend->ioctl(dev, cmd, addr, flag, td); 3057 3058 if (lun_req->num_be_args > 0) { 3059 ctl_copyout_args(lun_req->num_be_args, 3060 lun_req->kern_be_args); 3061 ctl_free_args(lun_req->num_be_args, 3062 lun_req->kern_be_args); 3063 } 3064 break; 3065 } 3066 case CTL_LUN_LIST: { 3067 struct sbuf *sb; 3068 struct ctl_lun *lun; 3069 struct ctl_lun_list *list; 3070 struct ctl_option *opt; 3071 3072 list = (struct ctl_lun_list *)addr; 3073 3074 /* 3075 * Allocate a fixed length sbuf here, based on the length 3076 * of the user's buffer. We could allocate an auto-extending 3077 * buffer, and then tell the user how much larger our 3078 * amount of data is than his buffer, but that presents 3079 * some problems: 3080 * 3081 * 1. The sbuf(9) routines use a blocking malloc, and so 3082 * we can't hold a lock while calling them with an 3083 * auto-extending buffer. 3084 * 3085 * 2. There is not currently a LUN reference counting 3086 * mechanism, outside of outstanding transactions on 3087 * the LUN's OOA queue. So a LUN could go away on us 3088 * while we're getting the LUN number, backend-specific 3089 * information, etc. Thus, given the way things 3090 * currently work, we need to hold the CTL lock while 3091 * grabbing LUN information. 3092 * 3093 * So, from the user's standpoint, the best thing to do is 3094 * allocate what he thinks is a reasonable buffer length, 3095 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 3096 * double the buffer length and try again. (And repeat 3097 * that until he succeeds.) 3098 */ 3099 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3100 if (sb == NULL) { 3101 list->status = CTL_LUN_LIST_ERROR; 3102 snprintf(list->error_str, sizeof(list->error_str), 3103 "Unable to allocate %d bytes for LUN list", 3104 list->alloc_len); 3105 break; 3106 } 3107 3108 sbuf_printf(sb, "<ctllunlist>\n"); 3109 3110 mtx_lock(&softc->ctl_lock); 3111 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3112 mtx_lock(&lun->lun_lock); 3113 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 3114 (uintmax_t)lun->lun); 3115 3116 /* 3117 * Bail out as soon as we see that we've overfilled 3118 * the buffer. 3119 */ 3120 if (retval != 0) 3121 break; 3122 3123 retval = sbuf_printf(sb, "\t<backend_type>%s" 3124 "</backend_type>\n", 3125 (lun->backend == NULL) ? 
"none" : 3126 lun->backend->name); 3127 3128 if (retval != 0) 3129 break; 3130 3131 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 3132 lun->be_lun->lun_type); 3133 3134 if (retval != 0) 3135 break; 3136 3137 if (lun->backend == NULL) { 3138 retval = sbuf_printf(sb, "</lun>\n"); 3139 if (retval != 0) 3140 break; 3141 continue; 3142 } 3143 3144 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 3145 (lun->be_lun->maxlba > 0) ? 3146 lun->be_lun->maxlba + 1 : 0); 3147 3148 if (retval != 0) 3149 break; 3150 3151 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 3152 lun->be_lun->blocksize); 3153 3154 if (retval != 0) 3155 break; 3156 3157 retval = sbuf_printf(sb, "\t<serial_number>"); 3158 3159 if (retval != 0) 3160 break; 3161 3162 retval = ctl_sbuf_printf_esc(sb, 3163 lun->be_lun->serial_num); 3164 3165 if (retval != 0) 3166 break; 3167 3168 retval = sbuf_printf(sb, "</serial_number>\n"); 3169 3170 if (retval != 0) 3171 break; 3172 3173 retval = sbuf_printf(sb, "\t<device_id>"); 3174 3175 if (retval != 0) 3176 break; 3177 3178 retval = ctl_sbuf_printf_esc(sb,lun->be_lun->device_id); 3179 3180 if (retval != 0) 3181 break; 3182 3183 retval = sbuf_printf(sb, "</device_id>\n"); 3184 3185 if (retval != 0) 3186 break; 3187 3188 if (lun->backend->lun_info != NULL) { 3189 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb); 3190 if (retval != 0) 3191 break; 3192 } 3193 STAILQ_FOREACH(opt, &lun->be_lun->options, links) { 3194 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3195 opt->name, opt->value, opt->name); 3196 if (retval != 0) 3197 break; 3198 } 3199 3200 retval = sbuf_printf(sb, "</lun>\n"); 3201 3202 if (retval != 0) 3203 break; 3204 mtx_unlock(&lun->lun_lock); 3205 } 3206 if (lun != NULL) 3207 mtx_unlock(&lun->lun_lock); 3208 mtx_unlock(&softc->ctl_lock); 3209 3210 if ((retval != 0) 3211 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 3212 retval = 0; 3213 sbuf_delete(sb); 3214 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3215 snprintf(list->error_str, sizeof(list->error_str), 3216 "Out of space, %d bytes is too small", 3217 list->alloc_len); 3218 break; 3219 } 3220 3221 sbuf_finish(sb); 3222 3223 retval = copyout(sbuf_data(sb), list->lun_xml, 3224 sbuf_len(sb) + 1); 3225 3226 list->fill_len = sbuf_len(sb) + 1; 3227 list->status = CTL_LUN_LIST_OK; 3228 sbuf_delete(sb); 3229 break; 3230 } 3231 case CTL_ISCSI: { 3232 struct ctl_iscsi *ci; 3233 struct ctl_frontend *fe; 3234 3235 ci = (struct ctl_iscsi *)addr; 3236 3237 fe = ctl_frontend_find("iscsi"); 3238 if (fe == NULL) { 3239 ci->status = CTL_ISCSI_ERROR; 3240 snprintf(ci->error_str, sizeof(ci->error_str), 3241 "Frontend \"iscsi\" not found."); 3242 break; 3243 } 3244 3245 retval = fe->ioctl(dev, cmd, addr, flag, td); 3246 break; 3247 } 3248 case CTL_PORT_REQ: { 3249 struct ctl_req *req; 3250 struct ctl_frontend *fe; 3251 3252 req = (struct ctl_req *)addr; 3253 3254 fe = ctl_frontend_find(req->driver); 3255 if (fe == NULL) { 3256 req->status = CTL_LUN_ERROR; 3257 snprintf(req->error_str, sizeof(req->error_str), 3258 "Frontend \"%s\" not found.", req->driver); 3259 break; 3260 } 3261 if (req->num_args > 0) { 3262 req->kern_args = ctl_copyin_args(req->num_args, 3263 req->args, req->error_str, sizeof(req->error_str)); 3264 if (req->kern_args == NULL) { 3265 req->status = CTL_LUN_ERROR; 3266 break; 3267 } 3268 } 3269 3270 retval = fe->ioctl(dev, cmd, addr, flag, td); 3271 3272 if (req->num_args > 0) { 3273 ctl_copyout_args(req->num_args, req->kern_args); 3274 ctl_free_args(req->num_args, req->kern_args); 3275 } 3276 
break; 3277 } 3278 case CTL_PORT_LIST: { 3279 struct sbuf *sb; 3280 struct ctl_port *port; 3281 struct ctl_lun_list *list; 3282 struct ctl_option *opt; 3283 3284 list = (struct ctl_lun_list *)addr; 3285 3286 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3287 if (sb == NULL) { 3288 list->status = CTL_LUN_LIST_ERROR; 3289 snprintf(list->error_str, sizeof(list->error_str), 3290 "Unable to allocate %d bytes for LUN list", 3291 list->alloc_len); 3292 break; 3293 } 3294 3295 sbuf_printf(sb, "<ctlportlist>\n"); 3296 3297 mtx_lock(&softc->ctl_lock); 3298 STAILQ_FOREACH(port, &softc->port_list, links) { 3299 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n", 3300 (uintmax_t)port->targ_port); 3301 3302 /* 3303 * Bail out as soon as we see that we've overfilled 3304 * the buffer. 3305 */ 3306 if (retval != 0) 3307 break; 3308 3309 retval = sbuf_printf(sb, "\t<frontend_type>%s" 3310 "</frontend_type>\n", port->frontend->name); 3311 if (retval != 0) 3312 break; 3313 3314 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n", 3315 port->port_type); 3316 if (retval != 0) 3317 break; 3318 3319 retval = sbuf_printf(sb, "\t<online>%s</online>\n", 3320 (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO"); 3321 if (retval != 0) 3322 break; 3323 3324 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n", 3325 port->port_name); 3326 if (retval != 0) 3327 break; 3328 3329 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n", 3330 port->physical_port); 3331 if (retval != 0) 3332 break; 3333 3334 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n", 3335 port->virtual_port); 3336 if (retval != 0) 3337 break; 3338 3339 retval = sbuf_printf(sb, "\t<wwnn>%#jx</wwnn>\n", 3340 (uintmax_t)port->wwnn); 3341 if (retval != 0) 3342 break; 3343 3344 retval = sbuf_printf(sb, "\t<wwpn>%#jx</wwpn>\n", 3345 (uintmax_t)port->wwpn); 3346 if (retval != 0) 3347 break; 3348 3349 if (port->port_info != NULL) { 3350 retval = port->port_info(port->onoff_arg, sb); 3351 if (retval != 0) 3352 break; 3353 } 3354 STAILQ_FOREACH(opt, &port->options, links) { 3355 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3356 opt->name, opt->value, opt->name); 3357 if (retval != 0) 3358 break; 3359 } 3360 3361 retval = sbuf_printf(sb, "</targ_port>\n"); 3362 if (retval != 0) 3363 break; 3364 } 3365 mtx_unlock(&softc->ctl_lock); 3366 3367 if ((retval != 0) 3368 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) { 3369 retval = 0; 3370 sbuf_delete(sb); 3371 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3372 snprintf(list->error_str, sizeof(list->error_str), 3373 "Out of space, %d bytes is too small", 3374 list->alloc_len); 3375 break; 3376 } 3377 3378 sbuf_finish(sb); 3379 3380 retval = copyout(sbuf_data(sb), list->lun_xml, 3381 sbuf_len(sb) + 1); 3382 3383 list->fill_len = sbuf_len(sb) + 1; 3384 list->status = CTL_LUN_LIST_OK; 3385 sbuf_delete(sb); 3386 break; 3387 } 3388 default: { 3389 /* XXX KDM should we fix this? */ 3390#if 0 3391 struct ctl_backend_driver *backend; 3392 unsigned int type; 3393 int found; 3394 3395 found = 0; 3396 3397 /* 3398 * We encode the backend type as the ioctl type for backend 3399 * ioctls. So parse it out here, and then search for a 3400 * backend of this type. 
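 * For example (names invented for illustration only), a
 * backend that registered with backend->type == 0x42 would
 * define its private ioctls along the lines of
 *
 *	#define MYBE_DOIT	_IOWR(0x42, 0x01, struct mybe_doit_args)
 *
 * and _IOC_TYPE() would recover the 0x42 from cmd here.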
3401 */ 3402 type = _IOC_TYPE(cmd); 3403 3404 STAILQ_FOREACH(backend, &softc->be_list, links) { 3405 if (backend->type == type) { 3406 found = 1; 3407 break; 3408 } 3409 } 3410 if (found == 0) { 3411 printf("ctl: unknown ioctl command %#lx or backend " 3412 "%d\n", cmd, type); 3413 retval = EINVAL; 3414 break; 3415 } 3416 retval = backend->ioctl(dev, cmd, addr, flag, td); 3417#endif 3418 retval = ENOTTY; 3419 break; 3420 } 3421 } 3422 return (retval); 3423} 3424 3425uint32_t 3426ctl_get_initindex(struct ctl_nexus *nexus) 3427{ 3428 if (nexus->targ_port < CTL_MAX_PORTS) 3429 return (nexus->initid.id + 3430 (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3431 else 3432 return (nexus->initid.id + 3433 ((nexus->targ_port - CTL_MAX_PORTS) * 3434 CTL_MAX_INIT_PER_PORT)); 3435} 3436 3437uint32_t 3438ctl_get_resindex(struct ctl_nexus *nexus) 3439{ 3440 return (nexus->initid.id + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3441} 3442 3443uint32_t 3444ctl_port_idx(int port_num) 3445{ 3446 if (port_num < CTL_MAX_PORTS) 3447 return(port_num); 3448 else 3449 return(port_num - CTL_MAX_PORTS); 3450} 3451 3452static uint32_t 3453ctl_map_lun(int port_num, uint32_t lun_id) 3454{ 3455 struct ctl_port *port; 3456 3457 port = control_softc->ctl_ports[ctl_port_idx(port_num)]; 3458 if (port == NULL) 3459 return (UINT32_MAX); 3460 if (port->lun_map == NULL) 3461 return (lun_id); 3462 return (port->lun_map(port->targ_lun_arg, lun_id)); 3463} 3464 3465static uint32_t 3466ctl_map_lun_back(int port_num, uint32_t lun_id) 3467{ 3468 struct ctl_port *port; 3469 uint32_t i; 3470 3471 port = control_softc->ctl_ports[ctl_port_idx(port_num)]; 3472 if (port->lun_map == NULL) 3473 return (lun_id); 3474 for (i = 0; i < CTL_MAX_LUNS; i++) { 3475 if (port->lun_map(port->targ_lun_arg, i) == lun_id) 3476 return (i); 3477 } 3478 return (UINT32_MAX); 3479} 3480 3481/* 3482 * Note: This only works for bitmask sizes that are at least 32 bits, and 3483 * that are a power of 2. 3484 */ 3485int 3486ctl_ffz(uint32_t *mask, uint32_t size) 3487{ 3488 uint32_t num_chunks, num_pieces; 3489 int i, j; 3490 3491 num_chunks = (size >> 5); 3492 if (num_chunks == 0) 3493 num_chunks++; 3494 num_pieces = ctl_min((sizeof(uint32_t) * 8), size); 3495 3496 for (i = 0; i < num_chunks; i++) { 3497 for (j = 0; j < num_pieces; j++) { 3498 if ((mask[i] & (1 << j)) == 0) 3499 return ((i << 5) + j); 3500 } 3501 } 3502 3503 return (-1); 3504} 3505 3506int 3507ctl_set_mask(uint32_t *mask, uint32_t bit) 3508{ 3509 uint32_t chunk, piece; 3510 3511 chunk = bit >> 5; 3512 piece = bit % (sizeof(uint32_t) * 8); 3513 3514 if ((mask[chunk] & (1 << piece)) != 0) 3515 return (-1); 3516 else 3517 mask[chunk] |= (1 << piece); 3518 3519 return (0); 3520} 3521 3522int 3523ctl_clear_mask(uint32_t *mask, uint32_t bit) 3524{ 3525 uint32_t chunk, piece; 3526 3527 chunk = bit >> 5; 3528 piece = bit % (sizeof(uint32_t) * 8); 3529 3530 if ((mask[chunk] & (1 << piece)) == 0) 3531 return (-1); 3532 else 3533 mask[chunk] &= ~(1 << piece); 3534 3535 return (0); 3536} 3537 3538int 3539ctl_is_set(uint32_t *mask, uint32_t bit) 3540{ 3541 uint32_t chunk, piece; 3542 3543 chunk = bit >> 5; 3544 piece = bit % (sizeof(uint32_t) * 8); 3545 3546 if ((mask[chunk] & (1 << piece)) == 0) 3547 return (0); 3548 else 3549 return (1); 3550} 3551 3552#ifdef unused 3553/* 3554 * The bus, target and lun are optional, they can be filled in later. 3555 * can_wait is used to determine whether we can wait on the malloc or not. 
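 * A caller that is allowed to sleep would do something like
 *
 *	io = ctl_malloc_io(CTL_IO_SCSI, 0, 0, 0, 1);
 *
 * (the trailing 1 is can_wait), and later hand the memory back
 * with ctl_kfree_io(io).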
3556 */ 3557union ctl_io* 3558ctl_malloc_io(ctl_io_type io_type, uint32_t targ_port, uint32_t targ_target, 3559 uint32_t targ_lun, int can_wait) 3560{ 3561 union ctl_io *io; 3562 3563 if (can_wait) 3564 io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_WAITOK); 3565 else 3566 io = (union ctl_io *)malloc(sizeof(*io), M_CTL, M_NOWAIT); 3567 3568 if (io != NULL) { 3569 io->io_hdr.io_type = io_type; 3570 io->io_hdr.targ_port = targ_port; 3571 /* 3572 * XXX KDM this needs to change/go away. We need to move 3573 * to a preallocated pool of ctl_scsiio structures. 3574 */ 3575 io->io_hdr.nexus.targ_target.id = targ_target; 3576 io->io_hdr.nexus.targ_lun = targ_lun; 3577 } 3578 3579 return (io); 3580} 3581 3582void 3583ctl_kfree_io(union ctl_io *io) 3584{ 3585 free(io, M_CTL); 3586} 3587#endif /* unused */ 3588 3589/* 3590 * ctl_softc, pool_type, total_ctl_io are passed in. 3591 * npool is passed out. 3592 */ 3593int 3594ctl_pool_create(struct ctl_softc *ctl_softc, ctl_pool_type pool_type, 3595 uint32_t total_ctl_io, struct ctl_io_pool **npool) 3596{ 3597 uint32_t i; 3598 union ctl_io *cur_io, *next_io; 3599 struct ctl_io_pool *pool; 3600 int retval; 3601 3602 retval = 0; 3603 3604 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3605 M_NOWAIT | M_ZERO); 3606 if (pool == NULL) { 3607 retval = ENOMEM; 3608 goto bailout; 3609 } 3610 3611 pool->type = pool_type; 3612 pool->ctl_softc = ctl_softc; 3613 3614 mtx_lock(&ctl_softc->pool_lock); 3615 pool->id = ctl_softc->cur_pool_id++; 3616 mtx_unlock(&ctl_softc->pool_lock); 3617 3618 pool->flags = CTL_POOL_FLAG_NONE; 3619 pool->refcount = 1; /* Reference for validity. */ 3620 STAILQ_INIT(&pool->free_queue); 3621 3622 /* 3623 * XXX KDM other options here: 3624 * - allocate a page at a time 3625 * - allocate one big chunk of memory. 3626 * Page allocation might work well, but would take a little more 3627 * tracking. 3628 */ 3629 for (i = 0; i < total_ctl_io; i++) { 3630 cur_io = (union ctl_io *)malloc(sizeof(*cur_io), M_CTLIO, 3631 M_NOWAIT); 3632 if (cur_io == NULL) { 3633 retval = ENOMEM; 3634 break; 3635 } 3636 cur_io->io_hdr.pool = pool; 3637 STAILQ_INSERT_TAIL(&pool->free_queue, &cur_io->io_hdr, links); 3638 pool->total_ctl_io++; 3639 pool->free_ctl_io++; 3640 } 3641 3642 if (retval != 0) { 3643 for (cur_io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue); 3644 cur_io != NULL; cur_io = next_io) { 3645 next_io = (union ctl_io *)STAILQ_NEXT(&cur_io->io_hdr, 3646 links); 3647 STAILQ_REMOVE(&pool->free_queue, &cur_io->io_hdr, 3648 ctl_io_hdr, links); 3649 free(cur_io, M_CTLIO); 3650 } 3651 3652 free(pool, M_CTL); 3653 goto bailout; 3654 } 3655 mtx_lock(&ctl_softc->pool_lock); 3656 ctl_softc->num_pools++; 3657 STAILQ_INSERT_TAIL(&ctl_softc->io_pools, pool, links); 3658 /* 3659 * Increment our usage count if this is an external consumer, so we 3660 * can't get unloaded until the external consumer (most likely a 3661 * FETD) unloads and frees his pool. 3662 * 3663 * XXX KDM will this increment the caller's module use count, or 3664 * mine? 
3665 */ 3666#if 0 3667 if ((pool_type != CTL_POOL_EMERGENCY) 3668 && (pool_type != CTL_POOL_INTERNAL) 3669 && (pool_type != CTL_POOL_4OTHERSC)) 3670 MOD_INC_USE_COUNT; 3671#endif 3672 3673 mtx_unlock(&ctl_softc->pool_lock); 3674 3675 *npool = pool; 3676 3677bailout: 3678 3679 return (retval); 3680} 3681 3682static int 3683ctl_pool_acquire(struct ctl_io_pool *pool) 3684{ 3685 3686 mtx_assert(&pool->ctl_softc->pool_lock, MA_OWNED); 3687 3688 if (pool->flags & CTL_POOL_FLAG_INVALID) 3689 return (EINVAL); 3690 3691 pool->refcount++; 3692 3693 return (0); 3694} 3695 3696static void 3697ctl_pool_release(struct ctl_io_pool *pool) 3698{ 3699 struct ctl_softc *ctl_softc = pool->ctl_softc; 3700 union ctl_io *io; 3701 3702 mtx_assert(&ctl_softc->pool_lock, MA_OWNED); 3703 3704 if (--pool->refcount != 0) 3705 return; 3706 3707 while ((io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue)) != NULL) { 3708 STAILQ_REMOVE(&pool->free_queue, &io->io_hdr, ctl_io_hdr, 3709 links); 3710 free(io, M_CTLIO); 3711 } 3712 3713 STAILQ_REMOVE(&ctl_softc->io_pools, pool, ctl_io_pool, links); 3714 ctl_softc->num_pools--; 3715 3716 /* 3717 * XXX KDM will this decrement the caller's usage count or mine? 3718 */ 3719#if 0 3720 if ((pool->type != CTL_POOL_EMERGENCY) 3721 && (pool->type != CTL_POOL_INTERNAL) 3722 && (pool->type != CTL_POOL_4OTHERSC)) 3723 MOD_DEC_USE_COUNT; 3724#endif 3725 3726 free(pool, M_CTL); 3727} 3728 3729void 3730ctl_pool_free(struct ctl_io_pool *pool) 3731{ 3732 struct ctl_softc *ctl_softc; 3733 3734 if (pool == NULL) 3735 return; 3736 3737 ctl_softc = pool->ctl_softc; 3738 mtx_lock(&ctl_softc->pool_lock); 3739 pool->flags |= CTL_POOL_FLAG_INVALID; 3740 ctl_pool_release(pool); 3741 mtx_unlock(&ctl_softc->pool_lock); 3742} 3743 3744/* 3745 * This routine does not block (except for spinlocks of course). 3746 * It tries to allocate a ctl_io union from the caller's pool as quickly as 3747 * possible. 3748 */ 3749union ctl_io * 3750ctl_alloc_io(void *pool_ref) 3751{ 3752 union ctl_io *io; 3753 struct ctl_softc *ctl_softc; 3754 struct ctl_io_pool *pool, *npool; 3755 struct ctl_io_pool *emergency_pool; 3756 3757 pool = (struct ctl_io_pool *)pool_ref; 3758 3759 if (pool == NULL) { 3760 printf("%s: pool is NULL\n", __func__); 3761 return (NULL); 3762 } 3763 3764 emergency_pool = NULL; 3765 3766 ctl_softc = pool->ctl_softc; 3767 3768 mtx_lock(&ctl_softc->pool_lock); 3769 /* 3770 * First, try to get the io structure from the user's pool. 3771 */ 3772 if (ctl_pool_acquire(pool) == 0) { 3773 io = (union ctl_io *)STAILQ_FIRST(&pool->free_queue); 3774 if (io != NULL) { 3775 STAILQ_REMOVE_HEAD(&pool->free_queue, links); 3776 pool->total_allocated++; 3777 pool->free_ctl_io--; 3778 mtx_unlock(&ctl_softc->pool_lock); 3779 return (io); 3780 } else 3781 ctl_pool_release(pool); 3782 } 3783 /* 3784 * If he doesn't have any io structures left, search for an 3785 * emergency pool and grab one from there. 
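 * So the overall fallback order is: the caller's own pool,
 * then any emergency pool with a free entry, and finally a
 * plain M_NOWAIT malloc; if that last case succeeds while an
 * emergency pool exists, the new ctl_io is adopted by that
 * pool so it gets recycled there when freed.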
3786 */ 3787 STAILQ_FOREACH(npool, &ctl_softc->io_pools, links) { 3788 if (npool->type != CTL_POOL_EMERGENCY) 3789 continue; 3790 3791 if (ctl_pool_acquire(npool) != 0) 3792 continue; 3793 3794 emergency_pool = npool; 3795 3796 io = (union ctl_io *)STAILQ_FIRST(&npool->free_queue); 3797 if (io != NULL) { 3798 STAILQ_REMOVE_HEAD(&npool->free_queue, links); 3799 npool->total_allocated++; 3800 npool->free_ctl_io--; 3801 mtx_unlock(&ctl_softc->pool_lock); 3802 return (io); 3803 } else 3804 ctl_pool_release(npool); 3805 } 3806 3807 /* Drop the spinlock before we malloc */ 3808 mtx_unlock(&ctl_softc->pool_lock); 3809 3810 /* 3811 * The emergency pool (if it exists) didn't have one, so try an 3812 * atomic (i.e. nonblocking) malloc and see if we get lucky. 3813 */ 3814 io = (union ctl_io *)malloc(sizeof(*io), M_CTLIO, M_NOWAIT); 3815 if (io != NULL) { 3816 /* 3817 * If the emergency pool exists but is empty, add this 3818 * ctl_io to its list when it gets freed. 3819 */ 3820 if (emergency_pool != NULL) { 3821 mtx_lock(&ctl_softc->pool_lock); 3822 if (ctl_pool_acquire(emergency_pool) == 0) { 3823 io->io_hdr.pool = emergency_pool; 3824 emergency_pool->total_ctl_io++; 3825 /* 3826 * Need to bump this, otherwise 3827 * total_allocated and total_freed won't 3828 * match when we no longer have anything 3829 * outstanding. 3830 */ 3831 emergency_pool->total_allocated++; 3832 } 3833 mtx_unlock(&ctl_softc->pool_lock); 3834 } else 3835 io->io_hdr.pool = NULL; 3836 } 3837 3838 return (io); 3839} 3840 3841void 3842ctl_free_io(union ctl_io *io) 3843{ 3844 if (io == NULL) 3845 return; 3846 3847 /* 3848 * If this ctl_io has a pool, return it to that pool. 3849 */ 3850 if (io->io_hdr.pool != NULL) { 3851 struct ctl_io_pool *pool; 3852 3853 pool = (struct ctl_io_pool *)io->io_hdr.pool; 3854 mtx_lock(&pool->ctl_softc->pool_lock); 3855 io->io_hdr.io_type = 0xff; 3856 STAILQ_INSERT_TAIL(&pool->free_queue, &io->io_hdr, links); 3857 pool->total_freed++; 3858 pool->free_ctl_io++; 3859 ctl_pool_release(pool); 3860 mtx_unlock(&pool->ctl_softc->pool_lock); 3861 } else { 3862 /* 3863 * Otherwise, just free it. We probably malloced it and 3864 * the emergency pool wasn't available. 3865 */ 3866 free(io, M_CTLIO); 3867 } 3868 3869} 3870 3871void 3872ctl_zero_io(union ctl_io *io) 3873{ 3874 void *pool_ref; 3875 3876 if (io == NULL) 3877 return; 3878 3879 /* 3880 * May need to preserve linked list pointers at some point too. 3881 */ 3882 pool_ref = io->io_hdr.pool; 3883 3884 memset(io, 0, sizeof(*io)); 3885 3886 io->io_hdr.pool = pool_ref; 3887} 3888 3889/* 3890 * This routine is currently used for internal copies of ctl_ios that need 3891 * to persist for some reason after we've already returned status to the 3892 * FETD. (Thus the flag set.) 3893 * 3894 * XXX XXX 3895 * Note that this makes a blind copy of all fields in the ctl_io, except 3896 * for the pool reference. This includes any memory that has been 3897 * allocated! That memory will no longer be valid after done has been 3898 * called, so this would be VERY DANGEROUS for command that actually does 3899 * any reads or writes. Right now (11/7/2005), this is only used for immediate 3900 * start and stop commands, which don't transfer any data, so this is not a 3901 * problem. If it is used for anything else, the caller would also need to 3902 * allocate data buffer space and this routine would need to be modified to 3903 * copy the data buffer(s) as well. 
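 * In short, as the code stands this is only safe for commands
 * that move no data, like the immediate start/stop commands
 * mentioned above.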
3904 */ 3905void 3906ctl_copy_io(union ctl_io *src, union ctl_io *dest) 3907{ 3908 void *pool_ref; 3909 3910 if ((src == NULL) 3911 || (dest == NULL)) 3912 return; 3913 3914 /* 3915 * May need to preserve linked list pointers at some point too. 3916 */ 3917 pool_ref = dest->io_hdr.pool; 3918 3919 memcpy(dest, src, ctl_min(sizeof(*src), sizeof(*dest))); 3920 3921 dest->io_hdr.pool = pool_ref; 3922 /* 3923 * We need to know that this is an internal copy, and doesn't need 3924 * to get passed back to the FETD that allocated it. 3925 */ 3926 dest->io_hdr.flags |= CTL_FLAG_INT_COPY; 3927} 3928 3929#ifdef NEEDTOPORT 3930static void 3931ctl_update_power_subpage(struct copan_power_subpage *page) 3932{ 3933 int num_luns, num_partitions, config_type; 3934 struct ctl_softc *softc; 3935 cs_BOOL_t aor_present, shelf_50pct_power; 3936 cs_raidset_personality_t rs_type; 3937 int max_active_luns; 3938 3939 softc = control_softc; 3940 3941 /* subtract out the processor LUN */ 3942 num_luns = softc->num_luns - 1; 3943 /* 3944 * Default to 7 LUNs active, which was the only number we allowed 3945 * in the past. 3946 */ 3947 max_active_luns = 7; 3948 3949 num_partitions = config_GetRsPartitionInfo(); 3950 config_type = config_GetConfigType(); 3951 shelf_50pct_power = config_GetShelfPowerMode(); 3952 aor_present = config_IsAorRsPresent(); 3953 3954 rs_type = ddb_GetRsRaidType(1); 3955 if ((rs_type != CS_RAIDSET_PERSONALITY_RAID5) 3956 && (rs_type != CS_RAIDSET_PERSONALITY_RAID1)) { 3957 EPRINT(0, "Unsupported RS type %d!", rs_type); 3958 } 3959 3960 3961 page->total_luns = num_luns; 3962 3963 switch (config_type) { 3964 case 40: 3965 /* 3966 * In a 40 drive configuration, it doesn't matter what DC 3967 * cards we have, whether we have AOR enabled or not, 3968 * partitioning or not, or what type of RAIDset we have. 3969 * In that scenario, we can power up every LUN we present 3970 * to the user. 3971 */ 3972 max_active_luns = num_luns; 3973 3974 break; 3975 case 64: 3976 if (shelf_50pct_power == CS_FALSE) { 3977 /* 25% power */ 3978 if (aor_present == CS_TRUE) { 3979 if (rs_type == 3980 CS_RAIDSET_PERSONALITY_RAID5) { 3981 max_active_luns = 7; 3982 } else if (rs_type == 3983 CS_RAIDSET_PERSONALITY_RAID1){ 3984 max_active_luns = 14; 3985 } else { 3986 /* XXX KDM now what?? */ 3987 } 3988 } else { 3989 if (rs_type == 3990 CS_RAIDSET_PERSONALITY_RAID5) { 3991 max_active_luns = 8; 3992 } else if (rs_type == 3993 CS_RAIDSET_PERSONALITY_RAID1){ 3994 max_active_luns = 16; 3995 } else { 3996 /* XXX KDM now what?? */ 3997 } 3998 } 3999 } else { 4000 /* 50% power */ 4001 /* 4002 * With 50% power in a 64 drive configuration, we 4003 * can power all LUNs we present. 4004 */ 4005 max_active_luns = num_luns; 4006 } 4007 break; 4008 case 112: 4009 if (shelf_50pct_power == CS_FALSE) { 4010 /* 25% power */ 4011 if (aor_present == CS_TRUE) { 4012 if (rs_type == 4013 CS_RAIDSET_PERSONALITY_RAID5) { 4014 max_active_luns = 7; 4015 } else if (rs_type == 4016 CS_RAIDSET_PERSONALITY_RAID1){ 4017 max_active_luns = 14; 4018 } else { 4019 /* XXX KDM now what?? */ 4020 } 4021 } else { 4022 if (rs_type == 4023 CS_RAIDSET_PERSONALITY_RAID5) { 4024 max_active_luns = 8; 4025 } else if (rs_type == 4026 CS_RAIDSET_PERSONALITY_RAID1){ 4027 max_active_luns = 16; 4028 } else { 4029 /* XXX KDM now what?? 
*/ 4030 } 4031 } 4032 } else { 4033 /* 50% power */ 4034 if (aor_present == CS_TRUE) { 4035 if (rs_type == 4036 CS_RAIDSET_PERSONALITY_RAID5) { 4037 max_active_luns = 14; 4038 } else if (rs_type == 4039 CS_RAIDSET_PERSONALITY_RAID1){ 4040 /* 4041 * We're assuming here that disk 4042 * caching is enabled, and so we're 4043 * able to power up half of each 4044 * LUN, and cache all writes. 4045 */ 4046 max_active_luns = num_luns; 4047 } else { 4048 /* XXX KDM now what?? */ 4049 } 4050 } else { 4051 if (rs_type == 4052 CS_RAIDSET_PERSONALITY_RAID5) { 4053 max_active_luns = 15; 4054 } else if (rs_type == 4055 CS_RAIDSET_PERSONALITY_RAID1){ 4056 max_active_luns = 30; 4057 } else { 4058 /* XXX KDM now what?? */ 4059 } 4060 } 4061 } 4062 break; 4063 default: 4064 /* 4065 * In this case, we have an unknown configuration, so we 4066 * just use the default from above. 4067 */ 4068 break; 4069 } 4070 4071 page->max_active_luns = max_active_luns; 4072#if 0 4073 printf("%s: total_luns = %d, max_active_luns = %d\n", __func__, 4074 page->total_luns, page->max_active_luns); 4075#endif 4076} 4077#endif /* NEEDTOPORT */ 4078 4079/* 4080 * This routine could be used in the future to load default and/or saved 4081 * mode page parameters for a particular lun. 4082 */ 4083static int 4084ctl_init_page_index(struct ctl_lun *lun) 4085{ 4086 int i; 4087 struct ctl_page_index *page_index; 4088 struct ctl_softc *softc; 4089 4090 memcpy(&lun->mode_pages.index, page_index_template, 4091 sizeof(page_index_template)); 4092 4093 softc = lun->ctl_softc; 4094 4095 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 4096 4097 page_index = &lun->mode_pages.index[i]; 4098 /* 4099 * If this is a disk-only mode page, there's no point in 4100 * setting it up. For some pages, we have to have some 4101 * basic information about the disk in order to calculate the 4102 * mode page data. 4103 */ 4104 if ((lun->be_lun->lun_type != T_DIRECT) 4105 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) 4106 continue; 4107 4108 switch (page_index->page_code & SMPH_PC_MASK) { 4109 case SMS_FORMAT_DEVICE_PAGE: { 4110 struct scsi_format_page *format_page; 4111 4112 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 4113 panic("subpage is incorrect!"); 4114 4115 /* 4116 * Sectors per track are set above. Bytes per 4117 * sector need to be set here on a per-LUN basis.
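 *
 * For example, on a LUN with 512-byte sectors the scsi_ulto2b()
 * calls below store the big-endian encoding {0x02, 0x00} into
 * bytes_per_sector.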
4118 */ 4119 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], 4120 &format_page_default, 4121 sizeof(format_page_default)); 4122 memcpy(&lun->mode_pages.format_page[ 4123 CTL_PAGE_CHANGEABLE], &format_page_changeable, 4124 sizeof(format_page_changeable)); 4125 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], 4126 &format_page_default, 4127 sizeof(format_page_default)); 4128 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], 4129 &format_page_default, 4130 sizeof(format_page_default)); 4131 4132 format_page = &lun->mode_pages.format_page[ 4133 CTL_PAGE_CURRENT]; 4134 scsi_ulto2b(lun->be_lun->blocksize, 4135 format_page->bytes_per_sector); 4136 4137 format_page = &lun->mode_pages.format_page[ 4138 CTL_PAGE_DEFAULT]; 4139 scsi_ulto2b(lun->be_lun->blocksize, 4140 format_page->bytes_per_sector); 4141 4142 format_page = &lun->mode_pages.format_page[ 4143 CTL_PAGE_SAVED]; 4144 scsi_ulto2b(lun->be_lun->blocksize, 4145 format_page->bytes_per_sector); 4146 4147 page_index->page_data = 4148 (uint8_t *)lun->mode_pages.format_page; 4149 break; 4150 } 4151 case SMS_RIGID_DISK_PAGE: { 4152 struct scsi_rigid_disk_page *rigid_disk_page; 4153 uint32_t sectors_per_cylinder; 4154 uint64_t cylinders; 4155#ifndef __XSCALE__ 4156 int shift; 4157#endif /* !__XSCALE__ */ 4158 4159 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 4160 panic("invalid subpage value %d", 4161 page_index->subpage); 4162 4163 /* 4164 * Rotation rate and sectors per track are set 4165 * above. We calculate the cylinders here based on 4166 * capacity. Due to the number of heads and 4167 * sectors per track we're using, smaller arrays 4168 * may turn out to have 0 cylinders. Linux and 4169 * FreeBSD don't pay attention to these mode pages 4170 * to figure out capacity, but Solaris does. It 4171 * seems to deal with 0 cylinders just fine, and 4172 * works out a fake geometry based on the capacity. 4173 */ 4174 memcpy(&lun->mode_pages.rigid_disk_page[ 4175 CTL_PAGE_CURRENT], &rigid_disk_page_default, 4176 sizeof(rigid_disk_page_default)); 4177 memcpy(&lun->mode_pages.rigid_disk_page[ 4178 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, 4179 sizeof(rigid_disk_page_changeable)); 4180 memcpy(&lun->mode_pages.rigid_disk_page[ 4181 CTL_PAGE_DEFAULT], &rigid_disk_page_default, 4182 sizeof(rigid_disk_page_default)); 4183 memcpy(&lun->mode_pages.rigid_disk_page[ 4184 CTL_PAGE_SAVED], &rigid_disk_page_default, 4185 sizeof(rigid_disk_page_default)); 4186 4187 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * 4188 CTL_DEFAULT_HEADS; 4189 4190 /* 4191 * The divide method here will be more accurate, 4192 * probably, but results in floating point being 4193 * used in the kernel on i386 (__udivdi3()). On the 4194 * XScale, though, __udivdi3() is implemented in 4195 * software. 4196 * 4197 * The shift method for cylinder calculation is 4198 * accurate if sectors_per_cylinder is a power of 4199 * 2. Otherwise it might be slightly off -- you 4200 * might have a bit of a truncation problem. 4201 */ 4202#ifdef __XSCALE__ 4203 cylinders = (lun->be_lun->maxlba + 1) / 4204 sectors_per_cylinder; 4205#else 4206 for (shift = 31; shift > 0; shift--) { 4207 if (sectors_per_cylinder & (1 << shift)) 4208 break; 4209 } 4210 cylinders = (lun->be_lun->maxlba + 1) >> shift; 4211#endif 4212 4213 /* 4214 * We've basically got 3 bytes, or 24 bits for the 4215 * cylinder size in the mode page. If we're over, 4216 * just round down to 2^24. 
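 *
 * Worked example: if CTL_DEFAULT_SECTORS_PER_TRACK were 256 and
 * CTL_DEFAULT_HEADS were 128 (the actual values live in
 * ctl_private.h), sectors_per_cylinder would be 32768 = 2^15, the
 * loop below would find shift = 15, and the shift method would be
 * exact.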
4217 */ 4218 if (cylinders > 0xffffff) 4219 cylinders = 0xffffff; 4220 4221 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 4222 CTL_PAGE_CURRENT]; 4223 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 4224 4225 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 4226 CTL_PAGE_DEFAULT]; 4227 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 4228 4229 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 4230 CTL_PAGE_SAVED]; 4231 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 4232 4233 page_index->page_data = 4234 (uint8_t *)lun->mode_pages.rigid_disk_page; 4235 break; 4236 } 4237 case SMS_CACHING_PAGE: { 4238 4239 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 4240 panic("invalid subpage value %d", 4241 page_index->subpage); 4242 /* 4243 * Defaults should be okay here, no calculations 4244 * needed. 4245 */ 4246 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], 4247 &caching_page_default, 4248 sizeof(caching_page_default)); 4249 memcpy(&lun->mode_pages.caching_page[ 4250 CTL_PAGE_CHANGEABLE], &caching_page_changeable, 4251 sizeof(caching_page_changeable)); 4252 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], 4253 &caching_page_default, 4254 sizeof(caching_page_default)); 4255 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], 4256 &caching_page_default, 4257 sizeof(caching_page_default)); 4258 page_index->page_data = 4259 (uint8_t *)lun->mode_pages.caching_page; 4260 break; 4261 } 4262 case SMS_CONTROL_MODE_PAGE: { 4263 4264 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 4265 panic("invalid subpage value %d", 4266 page_index->subpage); 4267 4268 /* 4269 * Defaults should be okay here, no calculations 4270 * needed. 4271 */ 4272 memcpy(&lun->mode_pages.control_page[CTL_PAGE_CURRENT], 4273 &control_page_default, 4274 sizeof(control_page_default)); 4275 memcpy(&lun->mode_pages.control_page[ 4276 CTL_PAGE_CHANGEABLE], &control_page_changeable, 4277 sizeof(control_page_changeable)); 4278 memcpy(&lun->mode_pages.control_page[CTL_PAGE_DEFAULT], 4279 &control_page_default, 4280 sizeof(control_page_default)); 4281 memcpy(&lun->mode_pages.control_page[CTL_PAGE_SAVED], 4282 &control_page_default, 4283 sizeof(control_page_default)); 4284 page_index->page_data = 4285 (uint8_t *)lun->mode_pages.control_page; 4286 break; 4287 4288 } 4289 case SMS_VENDOR_SPECIFIC_PAGE:{ 4290 switch (page_index->subpage) { 4291 case PWR_SUBPAGE_CODE: { 4292 struct copan_power_subpage *current_page, 4293 *saved_page; 4294 4295 memcpy(&lun->mode_pages.power_subpage[ 4296 CTL_PAGE_CURRENT], 4297 &power_page_default, 4298 sizeof(power_page_default)); 4299 memcpy(&lun->mode_pages.power_subpage[ 4300 CTL_PAGE_CHANGEABLE], 4301 &power_page_changeable, 4302 sizeof(power_page_changeable)); 4303 memcpy(&lun->mode_pages.power_subpage[ 4304 CTL_PAGE_DEFAULT], 4305 &power_page_default, 4306 sizeof(power_page_default)); 4307 memcpy(&lun->mode_pages.power_subpage[ 4308 CTL_PAGE_SAVED], 4309 &power_page_default, 4310 sizeof(power_page_default)); 4311 page_index->page_data = 4312 (uint8_t *)lun->mode_pages.power_subpage; 4313 4314 current_page = (struct copan_power_subpage *) 4315 (page_index->page_data + 4316 (page_index->page_len * 4317 CTL_PAGE_CURRENT)); 4318 saved_page = (struct copan_power_subpage *) 4319 (page_index->page_data + 4320 (page_index->page_len * 4321 CTL_PAGE_SAVED)); 4322 break; 4323 } 4324 case APS_SUBPAGE_CODE: { 4325 struct copan_aps_subpage *current_page, 4326 *saved_page; 4327 4328 // This gets set multiple times but 4329 // it should always be the same. 
It's 4330 // only done during init so who cares. 4331 index_to_aps_page = i; 4332 4333 memcpy(&lun->mode_pages.aps_subpage[ 4334 CTL_PAGE_CURRENT], 4335 &aps_page_default, 4336 sizeof(aps_page_default)); 4337 memcpy(&lun->mode_pages.aps_subpage[ 4338 CTL_PAGE_CHANGEABLE], 4339 &aps_page_changeable, 4340 sizeof(aps_page_changeable)); 4341 memcpy(&lun->mode_pages.aps_subpage[ 4342 CTL_PAGE_DEFAULT], 4343 &aps_page_default, 4344 sizeof(aps_page_default)); 4345 memcpy(&lun->mode_pages.aps_subpage[ 4346 CTL_PAGE_SAVED], 4347 &aps_page_default, 4348 sizeof(aps_page_default)); 4349 page_index->page_data = 4350 (uint8_t *)lun->mode_pages.aps_subpage; 4351 4352 current_page = (struct copan_aps_subpage *) 4353 (page_index->page_data + 4354 (page_index->page_len * 4355 CTL_PAGE_CURRENT)); 4356 saved_page = (struct copan_aps_subpage *) 4357 (page_index->page_data + 4358 (page_index->page_len * 4359 CTL_PAGE_SAVED)); 4360 break; 4361 } 4362 case DBGCNF_SUBPAGE_CODE: { 4363 struct copan_debugconf_subpage *current_page, 4364 *saved_page; 4365 4366 memcpy(&lun->mode_pages.debugconf_subpage[ 4367 CTL_PAGE_CURRENT], 4368 &debugconf_page_default, 4369 sizeof(debugconf_page_default)); 4370 memcpy(&lun->mode_pages.debugconf_subpage[ 4371 CTL_PAGE_CHANGEABLE], 4372 &debugconf_page_changeable, 4373 sizeof(debugconf_page_changeable)); 4374 memcpy(&lun->mode_pages.debugconf_subpage[ 4375 CTL_PAGE_DEFAULT], 4376 &debugconf_page_default, 4377 sizeof(debugconf_page_default)); 4378 memcpy(&lun->mode_pages.debugconf_subpage[ 4379 CTL_PAGE_SAVED], 4380 &debugconf_page_default, 4381 sizeof(debugconf_page_default)); 4382 page_index->page_data = 4383 (uint8_t *)lun->mode_pages.debugconf_subpage; 4384 4385 current_page = (struct copan_debugconf_subpage *) 4386 (page_index->page_data + 4387 (page_index->page_len * 4388 CTL_PAGE_CURRENT)); 4389 saved_page = (struct copan_debugconf_subpage *) 4390 (page_index->page_data + 4391 (page_index->page_len * 4392 CTL_PAGE_SAVED)); 4393 break; 4394 } 4395 default: 4396 panic("invalid subpage value %d", 4397 page_index->subpage); 4398 break; 4399 } 4400 break; 4401 } 4402 default: 4403 panic("invalid page value %d", 4404 page_index->page_code & SMPH_PC_MASK); 4405 break; 4406 } 4407 } 4408 4409 return (CTL_RETVAL_COMPLETE); 4410} 4411 4412/* 4413 * LUN allocation. 4414 * 4415 * Requirements: 4416 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he 4417 * wants us to allocate the LUN and he can block. 4418 * - ctl_softc is always set 4419 * - be_lun is set if the LUN has a backend (needed for disk LUNs) 4420 * 4421 * Returns 0 for success, non-zero (errno) for failure. 4422 */ 4423static int 4424ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun, 4425 struct ctl_be_lun *const be_lun, struct ctl_id target_id) 4426{ 4427 struct ctl_lun *nlun, *lun; 4428 struct ctl_port *port; 4429 struct scsi_vpd_id_descriptor *desc; 4430 struct scsi_vpd_id_t10 *t10id; 4431 const char *scsiname, *vendor; 4432 int lun_number, i, lun_malloced; 4433 int devidlen, idlen1, idlen2 = 0, len; 4434 4435 if (be_lun == NULL) 4436 return (EINVAL); 4437 4438 /* 4439 * We currently only support Direct Access or Processor LUN types. 
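 * (T_DIRECT covers disk-like LUNs with backing storage; T_PROCESSOR
 * covers processor-type LUNs, such as CTL's own processor device,
 * which carry no user data.)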
4440 */ 4441 switch (be_lun->lun_type) { 4442 case T_DIRECT: 4443 break; 4444 case T_PROCESSOR: 4445 break; 4446 case T_SEQUENTIAL: 4447 case T_CHANGER: 4448 default: 4449 be_lun->lun_config_status(be_lun->be_lun, 4450 CTL_LUN_CONFIG_FAILURE); 4451 return (EINVAL); 4452 } 4453 if (ctl_lun == NULL) { 4454 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK); 4455 lun_malloced = 1; 4456 } else { 4457 lun_malloced = 0; 4458 lun = ctl_lun; 4459 } 4460 4461 memset(lun, 0, sizeof(*lun)); 4462 if (lun_malloced) 4463 lun->flags = CTL_LUN_MALLOCED; 4464 4465 /* Generate LUN ID. */ 4466 devidlen = max(CTL_DEVID_MIN_LEN, 4467 strnlen(be_lun->device_id, CTL_DEVID_LEN)); 4468 idlen1 = sizeof(*t10id) + devidlen; 4469 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; 4470 scsiname = ctl_get_opt(&be_lun->options, "scsiname"); 4471 if (scsiname != NULL) { 4472 idlen2 = roundup2(strlen(scsiname) + 1, 4); 4473 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; 4474 } 4475 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, 4476 M_CTL, M_WAITOK | M_ZERO); 4477 lun->lun_devid->len = len; 4478 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; 4479 desc->proto_codeset = SVPD_ID_CODESET_ASCII; 4480 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 4481 desc->length = idlen1; 4482 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 4483 memset(t10id->vendor, ' ', sizeof(t10id->vendor)); 4484 if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) { 4485 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 4486 } else { 4487 strncpy((char *)t10id->vendor, vendor, 4488 min(sizeof(t10id->vendor), strlen(vendor))); 4489 } 4490 strncpy((char *)t10id->vendor_spec_id, 4491 (char *)be_lun->device_id, devidlen); 4492 if (scsiname != NULL) { 4493 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4494 desc->length); 4495 desc->proto_codeset = SVPD_ID_CODESET_UTF8; 4496 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4497 SVPD_ID_TYPE_SCSI_NAME; 4498 desc->length = idlen2; 4499 strlcpy(desc->identifier, scsiname, idlen2); 4500 } 4501 4502 mtx_lock(&ctl_softc->ctl_lock); 4503 /* 4504 * See if the caller requested a particular LUN number. If so, see 4505 * if it is available. Otherwise, allocate the first available LUN. 4506 */ 4507 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4508 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) 4509 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4510 mtx_unlock(&ctl_softc->ctl_lock); 4511 if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) { 4512 printf("ctl: requested LUN ID %d is higher " 4513 "than CTL_MAX_LUNS - 1 (%d)\n", 4514 be_lun->req_lun_id, CTL_MAX_LUNS - 1); 4515 } else { 4516 /* 4517 * XXX KDM return an error, or just assign 4518 * another LUN ID in this case??
4519 */ 4520 printf("ctl: requested LUN ID %d is already " 4521 "in use\n", be_lun->req_lun_id); 4522 } 4523 if (lun->flags & CTL_LUN_MALLOCED) 4524 free(lun, M_CTL); 4525 be_lun->lun_config_status(be_lun->be_lun, 4526 CTL_LUN_CONFIG_FAILURE); 4527 return (ENOSPC); 4528 } 4529 lun_number = be_lun->req_lun_id; 4530 } else { 4531 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, CTL_MAX_LUNS); 4532 if (lun_number == -1) { 4533 mtx_unlock(&ctl_softc->ctl_lock); 4534 printf("ctl: can't allocate LUN on target %ju, out of " 4535 "LUNs\n", (uintmax_t)target_id.id); 4536 if (lun->flags & CTL_LUN_MALLOCED) 4537 free(lun, M_CTL); 4538 be_lun->lun_config_status(be_lun->be_lun, 4539 CTL_LUN_CONFIG_FAILURE); 4540 return (ENOSPC); 4541 } 4542 } 4543 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4544 4545 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4546 lun->target = target_id; 4547 lun->lun = lun_number; 4548 lun->be_lun = be_lun; 4549 /* 4550 * The processor LUN is always enabled. Disk LUNs come on line 4551 * disabled, and must be enabled by the backend. 4552 */ 4553 lun->flags |= CTL_LUN_DISABLED; 4554 lun->backend = be_lun->be; 4555 be_lun->ctl_lun = lun; 4556 be_lun->lun_id = lun_number; 4557 atomic_add_int(&be_lun->be->num_luns, 1); 4558 if (be_lun->flags & CTL_LUN_FLAG_POWERED_OFF) 4559 lun->flags |= CTL_LUN_STOPPED; 4560 4561 if (be_lun->flags & CTL_LUN_FLAG_INOPERABLE) 4562 lun->flags |= CTL_LUN_INOPERABLE; 4563 4564 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4565 lun->flags |= CTL_LUN_PRIMARY_SC; 4566 4567 lun->ctl_softc = ctl_softc; 4568 TAILQ_INIT(&lun->ooa_queue); 4569 TAILQ_INIT(&lun->blocked_queue); 4570 STAILQ_INIT(&lun->error_list); 4571 4572 /* 4573 * Initialize the mode page index. 4574 */ 4575 ctl_init_page_index(lun); 4576 4577 /* 4578 * Set the poweron UA for all initiators on this LUN only. 4579 */ 4580 for (i = 0; i < CTL_MAX_INITIATORS; i++) 4581 lun->pending_sense[i].ua_pending = CTL_UA_POWERON; 4582 4583 /* 4584 * Now, before we insert this lun on the lun list, set the lun 4585 * inventory changed UA for all other luns. 4586 */ 4587 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4588 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 4589 nlun->pending_sense[i].ua_pending |= CTL_UA_LUN_CHANGE; 4590 } 4591 } 4592 4593 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4594 4595 ctl_softc->ctl_luns[lun_number] = lun; 4596 4597 ctl_softc->num_luns++; 4598 4599 /* Setup statistics gathering */ 4600 lun->stats.device_type = be_lun->lun_type; 4601 lun->stats.lun_number = lun_number; 4602 if (lun->stats.device_type == T_DIRECT) 4603 lun->stats.blocksize = be_lun->blocksize; 4604 else 4605 lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE; 4606 for (i = 0;i < CTL_MAX_PORTS;i++) 4607 lun->stats.ports[i].targ_port = i; 4608 4609 mtx_unlock(&ctl_softc->ctl_lock); 4610 4611 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); 4612 4613 /* 4614 * Run through each registered FETD and bring it online if it isn't 4615 * already. Enable the target ID if it hasn't been enabled, and 4616 * enable this particular LUN. 
4617 */ 4618 STAILQ_FOREACH(port, &ctl_softc->port_list, links) { 4619 int retval; 4620 4621 retval = port->lun_enable(port->targ_lun_arg, target_id,lun_number); 4622 if (retval != 0) { 4623 printf("ctl_alloc_lun: FETD %s port %d returned error " 4624 "%d for lun_enable on target %ju lun %d\n", 4625 port->port_name, port->targ_port, retval, 4626 (uintmax_t)target_id.id, lun_number); 4627 } else 4628 port->status |= CTL_PORT_STATUS_LUN_ONLINE; 4629 } 4630 return (0); 4631} 4632 4633/* 4634 * Delete a LUN. 4635 * Assumptions: 4636 * - LUN has already been marked invalid and any pending I/O has been taken 4637 * care of. 4638 */ 4639static int 4640ctl_free_lun(struct ctl_lun *lun) 4641{ 4642 struct ctl_softc *softc; 4643#if 0 4644 struct ctl_port *port; 4645#endif 4646 struct ctl_lun *nlun; 4647 int i; 4648 4649 softc = lun->ctl_softc; 4650 4651 mtx_assert(&softc->ctl_lock, MA_OWNED); 4652 4653 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4654 4655 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4656 4657 softc->ctl_luns[lun->lun] = NULL; 4658 4659 if (!TAILQ_EMPTY(&lun->ooa_queue)) 4660 panic("Freeing a LUN %p with outstanding I/O!!\n", lun); 4661 4662 softc->num_luns--; 4663 4664 /* 4665 * XXX KDM this scheme only works for a single target/multiple LUN 4666 * setup. It needs to be revamped for a multiple target scheme. 4667 * 4668 * XXX KDM this results in port->lun_disable() getting called twice, 4669 * once when ctl_disable_lun() is called, and a second time here. 4670 * We really need to re-think the LUN disable semantics. There 4671 * should probably be several steps/levels to LUN removal: 4672 * - disable 4673 * - invalidate 4674 * - free 4675 * 4676 * Right now we only have a disable method when communicating to 4677 * the front end ports, at least for individual LUNs. 4678 */ 4679#if 0 4680 STAILQ_FOREACH(port, &softc->port_list, links) { 4681 int retval; 4682 4683 retval = port->lun_disable(port->targ_lun_arg, lun->target, 4684 lun->lun); 4685 if (retval != 0) { 4686 printf("ctl_free_lun: FETD %s port %d returned error " 4687 "%d for lun_disable on target %ju lun %jd\n", 4688 port->port_name, port->targ_port, retval, 4689 (uintmax_t)lun->target.id, (intmax_t)lun->lun); 4690 } 4691 4692 if (STAILQ_FIRST(&softc->lun_list) == NULL) { 4693 port->status &= ~CTL_PORT_STATUS_LUN_ONLINE; 4694 4695 retval = port->targ_disable(port->targ_lun_arg,lun->target); 4696 if (retval != 0) { 4697 printf("ctl_free_lun: FETD %s port %d " 4698 "returned error %d for targ_disable on " 4699 "target %ju\n", port->port_name, 4700 port->targ_port, retval, 4701 (uintmax_t)lun->target.id); 4702 } else 4703 port->status &= ~CTL_PORT_STATUS_TARG_ONLINE; 4704 4705 if ((port->status & CTL_PORT_STATUS_TARG_ONLINE) != 0) 4706 continue; 4707 4708#if 0 4709 port->port_offline(port->onoff_arg); 4710 port->status &= ~CTL_PORT_STATUS_ONLINE; 4711#endif 4712 } 4713 } 4714#endif 4715 4716 /* 4717 * Tell the backend to free resources, if this LUN has a backend. 
4718 */ 4719 atomic_subtract_int(&lun->be_lun->be->num_luns, 1); 4720 lun->be_lun->lun_shutdown(lun->be_lun->be_lun); 4721 4722 mtx_destroy(&lun->lun_lock); 4723 free(lun->lun_devid, M_CTL); 4724 if (lun->flags & CTL_LUN_MALLOCED) 4725 free(lun, M_CTL); 4726 4727 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4728 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 4729 nlun->pending_sense[i].ua_pending |= CTL_UA_LUN_CHANGE; 4730 } 4731 } 4732 4733 return (0); 4734} 4735 4736static void 4737ctl_create_lun(struct ctl_be_lun *be_lun) 4738{ 4739 struct ctl_softc *ctl_softc; 4740 4741 ctl_softc = control_softc; 4742 4743 /* 4744 * ctl_alloc_lun() should handle all potential failure cases. 4745 */ 4746 ctl_alloc_lun(ctl_softc, NULL, be_lun, ctl_softc->target); 4747} 4748 4749int 4750ctl_add_lun(struct ctl_be_lun *be_lun) 4751{ 4752 struct ctl_softc *ctl_softc = control_softc; 4753 4754 mtx_lock(&ctl_softc->ctl_lock); 4755 STAILQ_INSERT_TAIL(&ctl_softc->pending_lun_queue, be_lun, links); 4756 mtx_unlock(&ctl_softc->ctl_lock); 4757 wakeup(&ctl_softc->pending_lun_queue); 4758 4759 return (0); 4760} 4761 4762int 4763ctl_enable_lun(struct ctl_be_lun *be_lun) 4764{ 4765 struct ctl_softc *ctl_softc; 4766 struct ctl_port *port, *nport; 4767 struct ctl_lun *lun; 4768 int retval; 4769 4770 ctl_softc = control_softc; 4771 4772 lun = (struct ctl_lun *)be_lun->ctl_lun; 4773 4774 mtx_lock(&ctl_softc->ctl_lock); 4775 mtx_lock(&lun->lun_lock); 4776 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4777 /* 4778 * eh? Why did we get called if the LUN is already 4779 * enabled? 4780 */ 4781 mtx_unlock(&lun->lun_lock); 4782 mtx_unlock(&ctl_softc->ctl_lock); 4783 return (0); 4784 } 4785 lun->flags &= ~CTL_LUN_DISABLED; 4786 mtx_unlock(&lun->lun_lock); 4787 4788 for (port = STAILQ_FIRST(&ctl_softc->port_list); port != NULL; port = nport) { 4789 nport = STAILQ_NEXT(port, links); 4790 4791 /* 4792 * Drop the lock while we call the FETD's enable routine. 4793 * This can lead to a callback into CTL (at least in the 4794 * case of the internal initiator frontend). 4795 */ 4796 mtx_unlock(&ctl_softc->ctl_lock); 4797 retval = port->lun_enable(port->targ_lun_arg, lun->target, lun->lun); 4798 mtx_lock(&ctl_softc->ctl_lock); 4799 if (retval != 0) { 4800 printf("%s: FETD %s port %d returned error " 4801 "%d for lun_enable on target %ju lun %jd\n", 4802 __func__, port->port_name, port->targ_port, retval, 4803 (uintmax_t)lun->target.id, (intmax_t)lun->lun); 4804 } 4805#if 0 4806 else { 4807 /* NOTE: TODO: why does lun enable affect port status? */ 4808 port->status |= CTL_PORT_STATUS_LUN_ONLINE; 4809 } 4810#endif 4811 } 4812 4813 mtx_unlock(&ctl_softc->ctl_lock); 4814 4815 return (0); 4816} 4817 4818int 4819ctl_disable_lun(struct ctl_be_lun *be_lun) 4820{ 4821 struct ctl_softc *ctl_softc; 4822 struct ctl_port *port; 4823 struct ctl_lun *lun; 4824 int retval; 4825 4826 ctl_softc = control_softc; 4827 4828 lun = (struct ctl_lun *)be_lun->ctl_lun; 4829 4830 mtx_lock(&ctl_softc->ctl_lock); 4831 mtx_lock(&lun->lun_lock); 4832 if (lun->flags & CTL_LUN_DISABLED) { 4833 mtx_unlock(&lun->lun_lock); 4834 mtx_unlock(&ctl_softc->ctl_lock); 4835 return (0); 4836 } 4837 lun->flags |= CTL_LUN_DISABLED; 4838 mtx_unlock(&lun->lun_lock); 4839 4840 STAILQ_FOREACH(port, &ctl_softc->port_list, links) { 4841 mtx_unlock(&ctl_softc->ctl_lock); 4842 /* 4843 * Drop the lock before we call the frontend's disable 4844 * routine, to avoid lock order reversals. 4845 * 4846 * XXX KDM what happens if the frontend list changes while 4847 * we're traversing it?
It's unlikely, but should be handled. 4848 */ 4849 retval = port->lun_disable(port->targ_lun_arg, lun->target, 4850 lun->lun); 4851 mtx_lock(&ctl_softc->ctl_lock); 4852 if (retval != 0) { 4853 printf("ctl_disable_lun: FETD %s port %d returned error " 4854 "%d for lun_disable on target %ju lun %jd\n", 4855 port->port_name, port->targ_port, retval, 4856 (uintmax_t)lun->target.id, (intmax_t)lun->lun); 4857 } 4858 } 4859 4860 mtx_unlock(&ctl_softc->ctl_lock); 4861 4862 return (0); 4863} 4864 4865int 4866ctl_start_lun(struct ctl_be_lun *be_lun) 4867{ 4868 struct ctl_softc *ctl_softc; 4869 struct ctl_lun *lun; 4870 4871 ctl_softc = control_softc; 4872 4873 lun = (struct ctl_lun *)be_lun->ctl_lun; 4874 4875 mtx_lock(&lun->lun_lock); 4876 lun->flags &= ~CTL_LUN_STOPPED; 4877 mtx_unlock(&lun->lun_lock); 4878 4879 return (0); 4880} 4881 4882int 4883ctl_stop_lun(struct ctl_be_lun *be_lun) 4884{ 4885 struct ctl_softc *ctl_softc; 4886 struct ctl_lun *lun; 4887 4888 ctl_softc = control_softc; 4889 4890 lun = (struct ctl_lun *)be_lun->ctl_lun; 4891 4892 mtx_lock(&lun->lun_lock); 4893 lun->flags |= CTL_LUN_STOPPED; 4894 mtx_unlock(&lun->lun_lock); 4895 4896 return (0); 4897} 4898 4899int 4900ctl_lun_offline(struct ctl_be_lun *be_lun) 4901{ 4902 struct ctl_softc *ctl_softc; 4903 struct ctl_lun *lun; 4904 4905 ctl_softc = control_softc; 4906 4907 lun = (struct ctl_lun *)be_lun->ctl_lun; 4908 4909 mtx_lock(&lun->lun_lock); 4910 lun->flags |= CTL_LUN_OFFLINE; 4911 mtx_unlock(&lun->lun_lock); 4912 4913 return (0); 4914} 4915 4916int 4917ctl_lun_online(struct ctl_be_lun *be_lun) 4918{ 4919 struct ctl_softc *ctl_softc; 4920 struct ctl_lun *lun; 4921 4922 ctl_softc = control_softc; 4923 4924 lun = (struct ctl_lun *)be_lun->ctl_lun; 4925 4926 mtx_lock(&lun->lun_lock); 4927 lun->flags &= ~CTL_LUN_OFFLINE; 4928 mtx_unlock(&lun->lun_lock); 4929 4930 return (0); 4931} 4932 4933int 4934ctl_invalidate_lun(struct ctl_be_lun *be_lun) 4935{ 4936 struct ctl_softc *ctl_softc; 4937 struct ctl_lun *lun; 4938 4939 ctl_softc = control_softc; 4940 4941 lun = (struct ctl_lun *)be_lun->ctl_lun; 4942 4943 mtx_lock(&lun->lun_lock); 4944 4945 /* 4946 * The LUN needs to be disabled before it can be marked invalid. 4947 */ 4948 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4949 mtx_unlock(&lun->lun_lock); 4950 return (-1); 4951 } 4952 /* 4953 * Mark the LUN invalid. 4954 */ 4955 lun->flags |= CTL_LUN_INVALID; 4956 4957 /* 4958 * If there is nothing in the OOA queue, go ahead and free the LUN. 4959 * If we have something in the OOA queue, we'll free it when the 4960 * last I/O completes.
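 *
 * Taken together with ctl_disable_lun() above, a backend's teardown
 * sequence is thus roughly:
 *
 *	ctl_disable_lun(be_lun);
 *	ctl_invalidate_lun(be_lun);	(frees now, or after the last I/O)
 *
 * since this function refuses to invalidate a LUN that has not been
 * disabled first.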
4961 */ 4962 if (TAILQ_EMPTY(&lun->ooa_queue)) { 4963 mtx_unlock(&lun->lun_lock); 4964 mtx_lock(&ctl_softc->ctl_lock); 4965 ctl_free_lun(lun); 4966 mtx_unlock(&ctl_softc->ctl_lock); 4967 } else 4968 mtx_unlock(&lun->lun_lock); 4969 4970 return (0); 4971} 4972 4973int 4974ctl_lun_inoperable(struct ctl_be_lun *be_lun) 4975{ 4976 struct ctl_softc *ctl_softc; 4977 struct ctl_lun *lun; 4978 4979 ctl_softc = control_softc; 4980 lun = (struct ctl_lun *)be_lun->ctl_lun; 4981 4982 mtx_lock(&lun->lun_lock); 4983 lun->flags |= CTL_LUN_INOPERABLE; 4984 mtx_unlock(&lun->lun_lock); 4985 4986 return (0); 4987} 4988 4989int 4990ctl_lun_operable(struct ctl_be_lun *be_lun) 4991{ 4992 struct ctl_softc *ctl_softc; 4993 struct ctl_lun *lun; 4994 4995 ctl_softc = control_softc; 4996 lun = (struct ctl_lun *)be_lun->ctl_lun; 4997 4998 mtx_lock(&lun->lun_lock); 4999 lun->flags &= ~CTL_LUN_INOPERABLE; 5000 mtx_unlock(&lun->lun_lock); 5001 5002 return (0); 5003} 5004 5005int 5006ctl_lun_power_lock(struct ctl_be_lun *be_lun, struct ctl_nexus *nexus, 5007 int lock) 5008{ 5009 struct ctl_softc *softc; 5010 struct ctl_lun *lun; 5011 struct copan_aps_subpage *current_sp; 5012 struct ctl_page_index *page_index; 5013 int i; 5014 5015 softc = control_softc; 5016 5017 mtx_lock(&softc->ctl_lock); 5018 5019 lun = (struct ctl_lun *)be_lun->ctl_lun; 5020 mtx_lock(&lun->lun_lock); 5021 5022 page_index = NULL; 5023 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 5024 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) != 5025 APS_PAGE_CODE) 5026 continue; 5027 5028 if (lun->mode_pages.index[i].subpage != APS_SUBPAGE_CODE) 5029 continue; 5030 page_index = &lun->mode_pages.index[i]; 5031 } 5032 5033 if (page_index == NULL) { 5034 mtx_unlock(&lun->lun_lock); 5035 mtx_unlock(&softc->ctl_lock); 5036 printf("%s: APS subpage not found for lun %ju!\n", __func__, 5037 (uintmax_t)lun->lun); 5038 return (1); 5039 } 5040#if 0 5041 if ((softc->aps_locked_lun != 0) 5042 && (softc->aps_locked_lun != lun->lun)) { 5043 printf("%s: attempt to lock LUN %llu when %llu is already " 5044 "locked\n", __func__, (unsigned long long)lun->lun, (unsigned long long)softc->aps_locked_lun); 5045 mtx_unlock(&lun->lun_lock); 5046 mtx_unlock(&softc->ctl_lock); 5047 return (1); 5048 } 5049#endif 5050 5051 current_sp = (struct copan_aps_subpage *)(page_index->page_data + 5052 (page_index->page_len * CTL_PAGE_CURRENT)); 5053 5054 if (lock != 0) { 5055 current_sp->lock_active = APS_LOCK_ACTIVE; 5056 softc->aps_locked_lun = lun->lun; 5057 } else { 5058 current_sp->lock_active = 0; 5059 softc->aps_locked_lun = 0; 5060 } 5061 5062 5063 /* 5064 * If we're in HA mode, try to send the lock message to the other 5065 * side.
5066 */ 5067 if (ctl_is_single == 0) { 5068 int isc_retval; 5069 union ctl_ha_msg lock_msg; 5070 5071 lock_msg.hdr.nexus = *nexus; 5072 lock_msg.hdr.msg_type = CTL_MSG_APS_LOCK; 5073 if (lock != 0) 5074 lock_msg.aps.lock_flag = 1; 5075 else 5076 lock_msg.aps.lock_flag = 0; 5077 isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &lock_msg, 5078 sizeof(lock_msg), 0); 5079 if (isc_retval > CTL_HA_STATUS_SUCCESS) { 5080 printf("%s: APS (lock=%d) error returned from " 5081 "ctl_ha_msg_send: %d\n", __func__, lock, isc_retval); 5082 mtx_unlock(&lun->lun_lock); 5083 mtx_unlock(&softc->ctl_lock); 5084 return (1); 5085 } 5086 } 5087 5088 mtx_unlock(&lun->lun_lock); 5089 mtx_unlock(&softc->ctl_lock); 5090 5091 return (0); 5092} 5093 5094void 5095ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 5096{ 5097 struct ctl_lun *lun; 5098 struct ctl_softc *softc; 5099 int i; 5100 5101 softc = control_softc; 5102 5103 lun = (struct ctl_lun *)be_lun->ctl_lun; 5104 5105 mtx_lock(&lun->lun_lock); 5106 5107 for (i = 0; i < CTL_MAX_INITIATORS; i++) 5108 lun->pending_sense[i].ua_pending |= CTL_UA_CAPACITY_CHANGED; 5109 5110 mtx_unlock(&lun->lun_lock); 5111} 5112 5113/* 5114 * Backend "memory move is complete" callback for requests that never 5115 * make it down to, say, RAIDCore's configuration code. 5116 */ 5117int 5118ctl_config_move_done(union ctl_io *io) 5119{ 5120 int retval; 5121 5122 retval = CTL_RETVAL_COMPLETE; 5123 5124 5125 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 5126 /* 5127 * XXX KDM this shouldn't happen, but what if it does? 5128 */ 5129 if (io->io_hdr.io_type != CTL_IO_SCSI) 5130 panic("I/O type isn't CTL_IO_SCSI!"); 5131 5132 if ((io->io_hdr.port_status == 0) 5133 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) 5134 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) 5135 io->io_hdr.status = CTL_SUCCESS; 5136 else if ((io->io_hdr.port_status != 0) 5137 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) 5138 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)){ 5139 /* 5140 * For hardware error sense keys, the sense key 5141 * specific value is defined to be a retry count, 5142 * but we use it to pass back an internal FETD 5143 * error code. XXX KDM Hopefully the FETD is only 5144 * using 16 bits for an error code, since that's 5145 * all the space we have in the sks field. 5146 */ 5147 ctl_set_internal_failure(&io->scsiio, 5148 /*sks_valid*/ 1, 5149 /*retry_count*/ 5150 io->io_hdr.port_status); 5151 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5152 free(io->scsiio.kern_data_ptr, M_CTL); 5153 ctl_done(io); 5154 goto bailout; 5155 } 5156 5157 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) 5158 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) 5159 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 5160 /* 5161 * XXX KDM just assuming a single pointer here, and not a 5162 * S/G list. If we start using S/G lists for config data, 5163 * we'll need to know how to clean them up here as well. 5164 */ 5165 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5166 free(io->scsiio.kern_data_ptr, M_CTL); 5167 /* Hopefully the user has already set the status... */ 5168 ctl_done(io); 5169 } else { 5170 /* 5171 * XXX KDM now we need to continue data movement. Some 5172 * options: 5173 * - call ctl_scsiio() again? We don't do this for data 5174 * writes, because for those at least we know ahead of 5175 * time where the write will go and how long it is.
For 5176 * config writes, though, that information is largely 5177 * contained within the write itself, thus we need to 5178 * parse out the data again. 5179 * 5180 * - Call some other function once the data is in? 5181 */ 5182 5183 /* 5184 * XXX KDM call ctl_scsiio() again for now, and check flag 5185 * bits to see whether we're allocated or not. 5186 */ 5187 retval = ctl_scsiio(&io->scsiio); 5188 } 5189bailout: 5190 return (retval); 5191} 5192 5193/* 5194 * This gets called by a backend driver when it is done with a 5195 * data_submit method. 5196 */ 5197void 5198ctl_data_submit_done(union ctl_io *io) 5199{ 5200 /* 5201 * If the IO_CONT flag is set, we need to call the supplied 5202 * function to continue processing the I/O, instead of completing 5203 * the I/O just yet. 5204 * 5205 * If there is an error, though, we don't want to keep processing. 5206 * Instead, just send status back to the initiator. 5207 */ 5208 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5209 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5210 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5211 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5212 io->scsiio.io_cont(io); 5213 return; 5214 } 5215 ctl_done(io); 5216} 5217 5218/* 5219 * This gets called by a backend driver when it is done with a 5220 * configuration write. 5221 */ 5222void 5223ctl_config_write_done(union ctl_io *io) 5224{ 5225 /* 5226 * If the IO_CONT flag is set, we need to call the supplied 5227 * function to continue processing the I/O, instead of completing 5228 * the I/O just yet. 5229 * 5230 * If there is an error, though, we don't want to keep processing. 5231 * Instead, just send status back to the initiator. 5232 */ 5233 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) 5234 && (((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE) 5235 || ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS))) { 5236 io->scsiio.io_cont(io); 5237 return; 5238 } 5239 /* 5240 * Since a configuration write can be done for commands that actually 5241 * have data allocated, like write buffer, and commands that have 5242 * no data, like start/stop unit, we need to check here. 5243 */ 5244 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) 5245 free(io->scsiio.kern_data_ptr, M_CTL); 5246 ctl_done(io); 5247} 5248 5249/* 5250 * SCSI release command. 5251 */ 5252int 5253ctl_scsi_release(struct ctl_scsiio *ctsio) 5254{ 5255 int length, longid, thirdparty_id, resv_id; 5256 struct ctl_softc *ctl_softc; 5257 struct ctl_lun *lun; 5258 5259 length = 0; 5260 resv_id = 0; 5261 5262 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 5263 5264 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5265 ctl_softc = control_softc; 5266 5267 switch (ctsio->cdb[0]) { 5268 case RELEASE_10: { 5269 struct scsi_release_10 *cdb; 5270 5271 cdb = (struct scsi_release_10 *)ctsio->cdb; 5272 5273 if (cdb->byte2 & SR10_LONGID) 5274 longid = 1; 5275 else 5276 thirdparty_id = cdb->thirdparty_id; 5277 5278 resv_id = cdb->resv_id; 5279 length = scsi_2btoul(cdb->length); 5280 break; 5281 } 5282 } 5283 5284 5285 /* 5286 * XXX KDM right now, we only support LUN reservation. We don't 5287 * support 3rd party reservations, or extent reservations, which 5288 * might actually need the parameter list. If we've gotten this 5289 * far, we've got a LUN reservation. Anything else got kicked out 5290 * above. So, according to SPC, ignore the length. 
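 *
 * (Note that since length is forced to 0 just below, the allocation
 * and datamove path that follows is effectively dead code, kept
 * around for the day extent or third-party reservations are
 * implemented.)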
5291 */ 5292 length = 0; 5293 5294 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5295 && (length > 0)) { 5296 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5297 ctsio->kern_data_len = length; 5298 ctsio->kern_total_len = length; 5299 ctsio->kern_data_resid = 0; 5300 ctsio->kern_rel_offset = 0; 5301 ctsio->kern_sg_entries = 0; 5302 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5303 ctsio->be_move_done = ctl_config_move_done; 5304 ctl_datamove((union ctl_io *)ctsio); 5305 5306 return (CTL_RETVAL_COMPLETE); 5307 } 5308 5309 if (length > 0) 5310 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 5311 5312 mtx_lock(&lun->lun_lock); 5313 5314 /* 5315 * According to SPC, it is not an error for an initiator to attempt 5316 * to release a reservation on a LUN that isn't reserved, or that 5317 * is reserved by another initiator. The reservation can only be 5318 * released, though, by the initiator who made it or by one of 5319 * several reset type events. 5320 */ 5321 if (lun->flags & CTL_LUN_RESERVED) { 5322 if ((ctsio->io_hdr.nexus.initid.id == lun->rsv_nexus.initid.id) 5323 && (ctsio->io_hdr.nexus.targ_port == lun->rsv_nexus.targ_port) 5324 && (ctsio->io_hdr.nexus.targ_target.id == 5325 lun->rsv_nexus.targ_target.id)) { 5326 lun->flags &= ~CTL_LUN_RESERVED; 5327 } 5328 } 5329 5330 mtx_unlock(&lun->lun_lock); 5331 5332 ctsio->scsi_status = SCSI_STATUS_OK; 5333 ctsio->io_hdr.status = CTL_SUCCESS; 5334 5335 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5336 free(ctsio->kern_data_ptr, M_CTL); 5337 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5338 } 5339 5340 ctl_done((union ctl_io *)ctsio); 5341 return (CTL_RETVAL_COMPLETE); 5342} 5343 5344int 5345ctl_scsi_reserve(struct ctl_scsiio *ctsio) 5346{ 5347 int extent, thirdparty, longid; 5348 int resv_id, length; 5349 uint64_t thirdparty_id; 5350 struct ctl_softc *ctl_softc; 5351 struct ctl_lun *lun; 5352 5353 extent = 0; 5354 thirdparty = 0; 5355 longid = 0; 5356 resv_id = 0; 5357 length = 0; 5358 thirdparty_id = 0; 5359 5360 CTL_DEBUG_PRINT(("ctl_reserve\n")); 5361 5362 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5363 ctl_softc = control_softc; 5364 5365 switch (ctsio->cdb[0]) { 5366 case RESERVE_10: { 5367 struct scsi_reserve_10 *cdb; 5368 5369 cdb = (struct scsi_reserve_10 *)ctsio->cdb; 5370 5371 if (cdb->byte2 & SR10_LONGID) 5372 longid = 1; 5373 else 5374 thirdparty_id = cdb->thirdparty_id; 5375 5376 resv_id = cdb->resv_id; 5377 length = scsi_2btoul(cdb->length); 5378 break; 5379 } 5380 } 5381 5382 /* 5383 * XXX KDM right now, we only support LUN reservation. We don't 5384 * support 3rd party reservations, or extent reservations, which 5385 * might actually need the parameter list. If we've gotten this 5386 * far, we've got a LUN reservation. Anything else got kicked out 5387 * above. So, according to SPC, ignore the length.
5388 */ 5389 length = 0; 5390 5391 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5392 && (length > 0)) { 5393 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5394 ctsio->kern_data_len = length; 5395 ctsio->kern_total_len = length; 5396 ctsio->kern_data_resid = 0; 5397 ctsio->kern_rel_offset = 0; 5398 ctsio->kern_sg_entries = 0; 5399 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5400 ctsio->be_move_done = ctl_config_move_done; 5401 ctl_datamove((union ctl_io *)ctsio); 5402 5403 return (CTL_RETVAL_COMPLETE); 5404 } 5405 5406 if (length > 0) 5407 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 5408 5409 mtx_lock(&lun->lun_lock); 5410 if (lun->flags & CTL_LUN_RESERVED) { 5411 if ((ctsio->io_hdr.nexus.initid.id != lun->rsv_nexus.initid.id) 5412 || (ctsio->io_hdr.nexus.targ_port != lun->rsv_nexus.targ_port) 5413 || (ctsio->io_hdr.nexus.targ_target.id != 5414 lun->rsv_nexus.targ_target.id)) { 5415 ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT; 5416 ctsio->io_hdr.status = CTL_SCSI_ERROR; 5417 goto bailout; 5418 } 5419 } 5420 5421 lun->flags |= CTL_LUN_RESERVED; 5422 lun->rsv_nexus = ctsio->io_hdr.nexus; 5423 5424 ctsio->scsi_status = SCSI_STATUS_OK; 5425 ctsio->io_hdr.status = CTL_SUCCESS; 5426 5427bailout: 5428 mtx_unlock(&lun->lun_lock); 5429 5430 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5431 free(ctsio->kern_data_ptr, M_CTL); 5432 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5433 } 5434 5435 ctl_done((union ctl_io *)ctsio); 5436 return (CTL_RETVAL_COMPLETE); 5437} 5438 5439int 5440ctl_start_stop(struct ctl_scsiio *ctsio) 5441{ 5442 struct scsi_start_stop_unit *cdb; 5443 struct ctl_lun *lun; 5444 struct ctl_softc *ctl_softc; 5445 int retval; 5446 5447 CTL_DEBUG_PRINT(("ctl_start_stop\n")); 5448 5449 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5450 ctl_softc = control_softc; 5451 retval = 0; 5452 5453 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; 5454 5455 /* 5456 * XXX KDM 5457 * We don't support the immediate bit on a stop unit. In order to 5458 * do that, we would need to code up a way to know that a stop is 5459 * pending, and hold off any new commands until it completes, one 5460 * way or another. Then we could accept or reject those commands 5461 * depending on its status. We would almost need to do the reverse 5462 * of what we do below for an immediate start -- return the copy of 5463 * the ctl_io to the FETD with status to send to the host (and to 5464 * free the copy!) and then free the original I/O once the stop 5465 * actually completes. That way, the OOA queue mechanism can work 5466 * to block commands that shouldn't proceed. Another alternative 5467 * would be to put the copy in the queue in place of the original, 5468 * and return the original back to the caller. That could be 5469 * slightly safer.
5470 */ 5471 if ((cdb->byte2 & SSS_IMMED) 5472 && ((cdb->how & SSS_START) == 0)) { 5473 ctl_set_invalid_field(ctsio, 5474 /*sks_valid*/ 1, 5475 /*command*/ 1, 5476 /*field*/ 1, 5477 /*bit_valid*/ 1, 5478 /*bit*/ 0); 5479 ctl_done((union ctl_io *)ctsio); 5480 return (CTL_RETVAL_COMPLETE); 5481 } 5482 5483 if ((lun->flags & CTL_LUN_PR_RESERVED) 5484 && ((cdb->how & SSS_START)==0)) { 5485 uint32_t residx; 5486 5487 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 5488 if (!lun->per_res[residx].registered 5489 || (lun->pr_res_idx!=residx && lun->res_type < 4)) { 5490 5491 ctl_set_reservation_conflict(ctsio); 5492 ctl_done((union ctl_io *)ctsio); 5493 return (CTL_RETVAL_COMPLETE); 5494 } 5495 } 5496 5497 /* 5498 * If there is no backend on this device, we can't start or stop 5499 * it. In theory we shouldn't get any start/stop commands in the 5500 * first place at this level if the LUN doesn't have a backend. 5501 * That should get stopped by the command decode code. 5502 */ 5503 if (lun->backend == NULL) { 5504 ctl_set_invalid_opcode(ctsio); 5505 ctl_done((union ctl_io *)ctsio); 5506 return (CTL_RETVAL_COMPLETE); 5507 } 5508 5509 /* 5510 * XXX KDM Copan-specific offline behavior. 5511 * Figure out a reasonable way to port this? 5512 */ 5513#ifdef NEEDTOPORT 5514 mtx_lock(&lun->lun_lock); 5515 5516 if (((cdb->byte2 & SSS_ONOFFLINE) == 0) 5517 && (lun->flags & CTL_LUN_OFFLINE)) { 5518 /* 5519 * If the LUN is offline, and the on/offline bit isn't set, 5520 * reject the start or stop. Otherwise, let it through. 5521 */ 5522 mtx_unlock(&lun->lun_lock); 5523 ctl_set_lun_not_ready(ctsio); 5524 ctl_done((union ctl_io *)ctsio); 5525 } else { 5526 mtx_unlock(&lun->lun_lock); 5527#endif /* NEEDTOPORT */ 5528 /* 5529 * This could be a start or a stop when we're online, 5530 * or a stop/offline or start/online. A start or stop when 5531 * we're offline is covered in the case above. 5532 */ 5533 /* 5534 * In the non-immediate case, we send the request to 5535 * the backend and return status to the user when 5536 * it is done. 5537 * 5538 * In the immediate case, we allocate a new ctl_io 5539 * to hold a copy of the request, and send that to 5540 * the backend. We then set good status on the 5541 * user's request and return it immediately. 5542 */ 5543 if (cdb->byte2 & SSS_IMMED) { 5544 union ctl_io *new_io; 5545 5546 new_io = ctl_alloc_io(ctsio->io_hdr.pool); 5547 if (new_io == NULL) { 5548 ctl_set_busy(ctsio); 5549 ctl_done((union ctl_io *)ctsio); 5550 } else { 5551 ctl_copy_io((union ctl_io *)ctsio, 5552 new_io); 5553 retval = lun->backend->config_write(new_io); 5554 ctl_set_success(ctsio); 5555 ctl_done((union ctl_io *)ctsio); 5556 } 5557 } else { 5558 retval = lun->backend->config_write( 5559 (union ctl_io *)ctsio); 5560 } 5561#ifdef NEEDTOPORT 5562 } 5563#endif 5564 return (retval); 5565} 5566 5567/* 5568 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 5569 * we don't really do anything with the LBA and length fields if the user 5570 * passes them in. Instead we'll just flush out the cache for the entire 5571 * LUN. 
5572 */ 5573int 5574ctl_sync_cache(struct ctl_scsiio *ctsio) 5575{ 5576 struct ctl_lun *lun; 5577 struct ctl_softc *ctl_softc; 5578 uint64_t starting_lba; 5579 uint32_t block_count; 5580 int retval; 5581 5582 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 5583 5584 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5585 ctl_softc = control_softc; 5586 retval = 0; 5587 5588 switch (ctsio->cdb[0]) { 5589 case SYNCHRONIZE_CACHE: { 5590 struct scsi_sync_cache *cdb; 5591 cdb = (struct scsi_sync_cache *)ctsio->cdb; 5592 5593 starting_lba = scsi_4btoul(cdb->begin_lba); 5594 block_count = scsi_2btoul(cdb->lb_count); 5595 break; 5596 } 5597 case SYNCHRONIZE_CACHE_16: { 5598 struct scsi_sync_cache_16 *cdb; 5599 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5600 5601 starting_lba = scsi_8btou64(cdb->begin_lba); 5602 block_count = scsi_4btoul(cdb->lb_count); 5603 break; 5604 } 5605 default: 5606 ctl_set_invalid_opcode(ctsio); 5607 ctl_done((union ctl_io *)ctsio); 5608 goto bailout; 5609 break; /* NOTREACHED */ 5610 } 5611 5612 /* 5613 * We check the LBA and length, but don't do anything with them. 5614 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5615 * get flushed. This check will just help satisfy anyone who wants 5616 * to see an error for an out of range LBA. 5617 */ 5618 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5619 ctl_set_lba_out_of_range(ctsio); 5620 ctl_done((union ctl_io *)ctsio); 5621 goto bailout; 5622 } 5623 5624 /* 5625 * If this LUN has no backend, we can't flush the cache anyway. 5626 */ 5627 if (lun->backend == NULL) { 5628 ctl_set_invalid_opcode(ctsio); 5629 ctl_done((union ctl_io *)ctsio); 5630 goto bailout; 5631 } 5632 5633 /* 5634 * Check to see whether we're configured to send the SYNCHRONIZE 5635 * CACHE command directly to the back end. 
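 *
 * For example, with CTL_FLAG_REAL_SYNC set and sync_interval = 4,
 * every fourth SYNCHRONIZE CACHE on this LUN goes down to the
 * backend and the other three complete immediately with good status.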
5636 */ 5637 mtx_lock(&lun->lun_lock); 5638 if ((ctl_softc->flags & CTL_FLAG_REAL_SYNC) 5639 && (++(lun->sync_count) >= lun->sync_interval)) { 5640 lun->sync_count = 0; 5641 mtx_unlock(&lun->lun_lock); 5642 retval = lun->backend->config_write((union ctl_io *)ctsio); 5643 } else { 5644 mtx_unlock(&lun->lun_lock); 5645 ctl_set_success(ctsio); 5646 ctl_done((union ctl_io *)ctsio); 5647 } 5648 5649bailout: 5650 5651 return (retval); 5652} 5653 5654int 5655ctl_format(struct ctl_scsiio *ctsio) 5656{ 5657 struct scsi_format *cdb; 5658 struct ctl_lun *lun; 5659 struct ctl_softc *ctl_softc; 5660 int length, defect_list_len; 5661 5662 CTL_DEBUG_PRINT(("ctl_format\n")); 5663 5664 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5665 ctl_softc = control_softc; 5666 5667 cdb = (struct scsi_format *)ctsio->cdb; 5668 5669 length = 0; 5670 if (cdb->byte2 & SF_FMTDATA) { 5671 if (cdb->byte2 & SF_LONGLIST) 5672 length = sizeof(struct scsi_format_header_long); 5673 else 5674 length = sizeof(struct scsi_format_header_short); 5675 } 5676 5677 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5678 && (length > 0)) { 5679 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5680 ctsio->kern_data_len = length; 5681 ctsio->kern_total_len = length; 5682 ctsio->kern_data_resid = 0; 5683 ctsio->kern_rel_offset = 0; 5684 ctsio->kern_sg_entries = 0; 5685 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5686 ctsio->be_move_done = ctl_config_move_done; 5687 ctl_datamove((union ctl_io *)ctsio); 5688 5689 return (CTL_RETVAL_COMPLETE); 5690 } 5691 5692 defect_list_len = 0; 5693 5694 if (cdb->byte2 & SF_FMTDATA) { 5695 if (cdb->byte2 & SF_LONGLIST) { 5696 struct scsi_format_header_long *header; 5697 5698 header = (struct scsi_format_header_long *) 5699 ctsio->kern_data_ptr; 5700 5701 defect_list_len = scsi_4btoul(header->defect_list_len); 5702 if (defect_list_len != 0) { 5703 ctl_set_invalid_field(ctsio, 5704 /*sks_valid*/ 1, 5705 /*command*/ 0, 5706 /*field*/ 2, 5707 /*bit_valid*/ 0, 5708 /*bit*/ 0); 5709 goto bailout; 5710 } 5711 } else { 5712 struct scsi_format_header_short *header; 5713 5714 header = (struct scsi_format_header_short *) 5715 ctsio->kern_data_ptr; 5716 5717 defect_list_len = scsi_2btoul(header->defect_list_len); 5718 if (defect_list_len != 0) { 5719 ctl_set_invalid_field(ctsio, 5720 /*sks_valid*/ 1, 5721 /*command*/ 0, 5722 /*field*/ 2, 5723 /*bit_valid*/ 0, 5724 /*bit*/ 0); 5725 goto bailout; 5726 } 5727 } 5728 } 5729 5730 /* 5731 * The format command will clear out the "Medium format corrupted" 5732 * status if set by the configuration code. That status is really 5733 * just a way to notify the host that we have lost the media, and 5734 * get them to issue a command that will basically make them think 5735 * they're blowing away the media. 
5736 */ 5737 mtx_lock(&lun->lun_lock); 5738 lun->flags &= ~CTL_LUN_INOPERABLE; 5739 mtx_unlock(&lun->lun_lock); 5740 5741 ctsio->scsi_status = SCSI_STATUS_OK; 5742 ctsio->io_hdr.status = CTL_SUCCESS; 5743bailout: 5744 5745 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5746 free(ctsio->kern_data_ptr, M_CTL); 5747 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5748 } 5749 5750 ctl_done((union ctl_io *)ctsio); 5751 return (CTL_RETVAL_COMPLETE); 5752} 5753 5754int 5755ctl_read_buffer(struct ctl_scsiio *ctsio) 5756{ 5757 struct scsi_read_buffer *cdb; 5758 struct ctl_lun *lun; 5759 int buffer_offset, len; 5760 static uint8_t descr[4]; 5761 static uint8_t echo_descr[4] = { 0 }; 5762 5763 CTL_DEBUG_PRINT(("ctl_read_buffer\n")); 5764 5765 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5766 cdb = (struct scsi_read_buffer *)ctsio->cdb; 5767 5768 if (lun->flags & CTL_LUN_PR_RESERVED) { 5769 uint32_t residx; 5770 5771 /* 5772 * XXX KDM need a lock here. 5773 */ 5774 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 5775 if ((lun->res_type == SPR_TYPE_EX_AC 5776 && residx != lun->pr_res_idx) 5777 || ((lun->res_type == SPR_TYPE_EX_AC_RO 5778 || lun->res_type == SPR_TYPE_EX_AC_AR) 5779 && !lun->per_res[residx].registered)) { 5780 ctl_set_reservation_conflict(ctsio); 5781 ctl_done((union ctl_io *)ctsio); 5782 return (CTL_RETVAL_COMPLETE); 5783 } 5784 } 5785 5786 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA && 5787 (cdb->byte2 & RWB_MODE) != RWB_MODE_ECHO_DESCR && 5788 (cdb->byte2 & RWB_MODE) != RWB_MODE_DESCR) { 5789 ctl_set_invalid_field(ctsio, 5790 /*sks_valid*/ 1, 5791 /*command*/ 1, 5792 /*field*/ 1, 5793 /*bit_valid*/ 1, 5794 /*bit*/ 4); 5795 ctl_done((union ctl_io *)ctsio); 5796 return (CTL_RETVAL_COMPLETE); 5797 } 5798 5799 len = scsi_3btoul(cdb->length); 5800 buffer_offset = scsi_3btoul(cdb->offset); 5801 5802 if (buffer_offset + len > sizeof(lun->write_buffer)) { 5803 ctl_set_invalid_field(ctsio, 5804 /*sks_valid*/ 1, 5805 /*command*/ 1, 5806 /*field*/ 6, 5807 /*bit_valid*/ 0, 5808 /*bit*/ 0); 5809 ctl_done((union ctl_io *)ctsio); 5810 return (CTL_RETVAL_COMPLETE); 5811 } 5812 5813 if ((cdb->byte2 & RWB_MODE) == RWB_MODE_DESCR) { 5814 descr[0] = 0; 5815 scsi_ulto3b(sizeof(lun->write_buffer), &descr[1]); 5816 ctsio->kern_data_ptr = descr; 5817 len = min(len, sizeof(descr)); 5818 } else if ((cdb->byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { 5819 ctsio->kern_data_ptr = echo_descr; 5820 len = min(len, sizeof(echo_descr)); 5821 } else 5822 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5823 ctsio->kern_data_len = len; 5824 ctsio->kern_total_len = len; 5825 ctsio->kern_data_resid = 0; 5826 ctsio->kern_rel_offset = 0; 5827 ctsio->kern_sg_entries = 0; 5828 ctsio->be_move_done = ctl_config_move_done; 5829 ctl_datamove((union ctl_io *)ctsio); 5830 5831 return (CTL_RETVAL_COMPLETE); 5832} 5833 5834int 5835ctl_write_buffer(struct ctl_scsiio *ctsio) 5836{ 5837 struct scsi_write_buffer *cdb; 5838 struct ctl_lun *lun; 5839 int buffer_offset, len; 5840 5841 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5842 5843 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5844 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5845 5846 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) { 5847 ctl_set_invalid_field(ctsio, 5848 /*sks_valid*/ 1, 5849 /*command*/ 1, 5850 /*field*/ 1, 5851 /*bit_valid*/ 1, 5852 /*bit*/ 4); 5853 ctl_done((union ctl_io *)ctsio); 5854 return (CTL_RETVAL_COMPLETE); 5855 } 5856 5857 len = scsi_3btoul(cdb->length); 5858 buffer_offset = scsi_3btoul(cdb->offset); 5859 
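	/*
	 * Reject transfers that would run past the end of the per-LUN
	 * write buffer. READ BUFFER and WRITE BUFFER data modes both
	 * address this same buffer, so the bound matches the check in
	 * ctl_read_buffer() above.
	 */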
5860 if (buffer_offset + len > sizeof(lun->write_buffer)) { 5861 ctl_set_invalid_field(ctsio, 5862 /*sks_valid*/ 1, 5863 /*command*/ 1, 5864 /*field*/ 6, 5865 /*bit_valid*/ 0, 5866 /*bit*/ 0); 5867 ctl_done((union ctl_io *)ctsio); 5868 return (CTL_RETVAL_COMPLETE); 5869 } 5870 5871 /* 5872 * If we've got a kernel request that hasn't been malloced yet, 5873 * malloc it and tell the caller the data buffer is here. 5874 */ 5875 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5876 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5877 ctsio->kern_data_len = len; 5878 ctsio->kern_total_len = len; 5879 ctsio->kern_data_resid = 0; 5880 ctsio->kern_rel_offset = 0; 5881 ctsio->kern_sg_entries = 0; 5882 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5883 ctsio->be_move_done = ctl_config_move_done; 5884 ctl_datamove((union ctl_io *)ctsio); 5885 5886 return (CTL_RETVAL_COMPLETE); 5887 } 5888 5889 ctl_done((union ctl_io *)ctsio); 5890 5891 return (CTL_RETVAL_COMPLETE); 5892} 5893 5894int 5895ctl_write_same(struct ctl_scsiio *ctsio) 5896{ 5897 struct ctl_lun *lun; 5898 struct ctl_lba_len_flags *lbalen; 5899 uint64_t lba; 5900 uint32_t num_blocks; 5901 int len, retval; 5902 uint8_t byte2; 5903 5904 retval = CTL_RETVAL_COMPLETE; 5905 5906 CTL_DEBUG_PRINT(("ctl_write_same\n")); 5907 5908 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5909 5910 switch (ctsio->cdb[0]) { 5911 case WRITE_SAME_10: { 5912 struct scsi_write_same_10 *cdb; 5913 5914 cdb = (struct scsi_write_same_10 *)ctsio->cdb; 5915 5916 lba = scsi_4btoul(cdb->addr); 5917 num_blocks = scsi_2btoul(cdb->length); 5918 byte2 = cdb->byte2; 5919 break; 5920 } 5921 case WRITE_SAME_16: { 5922 struct scsi_write_same_16 *cdb; 5923 5924 cdb = (struct scsi_write_same_16 *)ctsio->cdb; 5925 5926 lba = scsi_8btou64(cdb->addr); 5927 num_blocks = scsi_4btoul(cdb->length); 5928 byte2 = cdb->byte2; 5929 break; 5930 } 5931 default: 5932 /* 5933 * We got a command we don't support. This shouldn't 5934 * happen, commands should be filtered out above us. 5935 */ 5936 ctl_set_invalid_opcode(ctsio); 5937 ctl_done((union ctl_io *)ctsio); 5938 5939 return (CTL_RETVAL_COMPLETE); 5940 break; /* NOTREACHED */ 5941 } 5942 5943 /* 5944 * The first check is to make sure we're in bounds, the second 5945 * check is to catch wrap-around problems. If the lba + num blocks 5946 * is less than the lba, then we've wrapped around and the block 5947 * range is invalid anyway. 5948 */ 5949 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5950 || ((lba + num_blocks) < lba)) { 5951 ctl_set_lba_out_of_range(ctsio); 5952 ctl_done((union ctl_io *)ctsio); 5953 return (CTL_RETVAL_COMPLETE); 5954 } 5955 5956 /* Zero number of blocks means "to the last logical block" */ 5957 if (num_blocks == 0) { 5958 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { 5959 ctl_set_invalid_field(ctsio, 5960 /*sks_valid*/ 0, 5961 /*command*/ 1, 5962 /*field*/ 0, 5963 /*bit_valid*/ 0, 5964 /*bit*/ 0); 5965 ctl_done((union ctl_io *)ctsio); 5966 return (CTL_RETVAL_COMPLETE); 5967 } 5968 num_blocks = (lun->be_lun->maxlba + 1) - lba; 5969 } 5970 5971 len = lun->be_lun->blocksize; 5972 5973 /* 5974 * If we've got a kernel request that hasn't been malloced yet, 5975 * malloc it and tell the caller the data buffer is here. 
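 *
 * This is the standard two-pass CTL config-write pattern: on the
 * first pass we allocate the buffer, point be_move_done at
 * ctl_config_move_done() and kick off the datamove; once the data
 * has arrived, ctl_config_move_done() re-dispatches the command via
 * ctl_scsiio(), CTL_FLAG_ALLOCATED is then set, and we fall through
 * below to do the real work.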
5976	 */
5977	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
5978		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
5979		ctsio->kern_data_len = len;
5980		ctsio->kern_total_len = len;
5981		ctsio->kern_data_resid = 0;
5982		ctsio->kern_rel_offset = 0;
5983		ctsio->kern_sg_entries = 0;
5984		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
5985		ctsio->be_move_done = ctl_config_move_done;
5986		ctl_datamove((union ctl_io *)ctsio);
5987
5988		return (CTL_RETVAL_COMPLETE);
5989	}
5990
5991	lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
5992	lbalen->lba = lba;
5993	lbalen->len = num_blocks;
5994	lbalen->flags = byte2;
5995	retval = lun->backend->config_write((union ctl_io *)ctsio);
5996
5997	return (retval);
5998}
5999
6000int
6001ctl_unmap(struct ctl_scsiio *ctsio)
6002{
6003	struct ctl_lun *lun;
6004	struct scsi_unmap *cdb;
6005	struct ctl_ptr_len_flags *ptrlen;
6006	struct scsi_unmap_header *hdr;
6007	struct scsi_unmap_desc *buf, *end;
6008	uint64_t lba;
6009	uint32_t num_blocks;
6010	int len, retval;
6011	uint8_t byte2;
6012
6013	retval = CTL_RETVAL_COMPLETE;
6014
6015	CTL_DEBUG_PRINT(("ctl_unmap\n"));
6016
6017	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6018	cdb = (struct scsi_unmap *)ctsio->cdb;
6019
6020	len = scsi_2btoul(cdb->length);
6021	byte2 = cdb->byte2;
6022
6023	/*
6024	 * If we've got a kernel request that hasn't been malloced yet,
6025	 * malloc it and tell the caller the data buffer is here.
6026	 */
6027	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
6028		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
6029		ctsio->kern_data_len = len;
6030		ctsio->kern_total_len = len;
6031		ctsio->kern_data_resid = 0;
6032		ctsio->kern_rel_offset = 0;
6033		ctsio->kern_sg_entries = 0;
6034		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
6035		ctsio->be_move_done = ctl_config_move_done;
6036		ctl_datamove((union ctl_io *)ctsio);
6037
6038		return (CTL_RETVAL_COMPLETE);
6039	}
6040
6041	len = ctsio->kern_total_len - ctsio->kern_data_resid;
6042	hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr;
6043	if (len < sizeof (*hdr) ||
6044	    len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) ||
6045	    len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) ||
6046	    scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) {
6047		ctl_set_invalid_field(ctsio,
6048				      /*sks_valid*/ 0,
6049				      /*command*/ 0,
6050				      /*field*/ 0,
6051				      /*bit_valid*/ 0,
6052				      /*bit*/ 0);
6053		ctl_done((union ctl_io *)ctsio);
6054		return (CTL_RETVAL_COMPLETE);
6055	}
6056	len = scsi_2btoul(hdr->desc_length);
6057	buf = (struct scsi_unmap_desc *)(hdr + 1);
6058	end = buf + len / sizeof(*buf);
6059
6060	ptrlen = (struct ctl_ptr_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
6061	ptrlen->ptr = (void *)buf;
6062	ptrlen->len = len;
6063	ptrlen->flags = byte2;
6064
6065	for (; buf < end; buf++) {
6066		lba = scsi_8btou64(buf->lba);
6067		num_blocks = scsi_4btoul(buf->length);
6068		if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
6069		 || ((lba + num_blocks) < lba)) {
6070			ctl_set_lba_out_of_range(ctsio);
6071			ctl_done((union ctl_io *)ctsio);
6072			return (CTL_RETVAL_COMPLETE);
6073		}
6074	}
6075
6076	retval = lun->backend->config_write((union ctl_io *)ctsio);
6077
6078	return (retval);
6079}
6080
6081/*
6082 * Note that this function currently doesn't actually do anything inside
6083 * CTL to enforce things if the DQue bit is turned on.
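 *
 * (Editor's note: in other words, a MODE SELECT would record a DQue
 * change if the changeable mask ever allowed one, but the I/O path
 * would keep queueing commands exactly as before.)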
6084 *
6085 * Also note that this function can't be used in the default case, because
6086 * the DQue bit isn't set in the changeable mask for the control mode page
6087 * anyway. This is just here as an example of how to implement a page
6088 * handler, and a placeholder in case we want to allow the user to turn
6089 * tagged queueing on and off.
6090 *
6091 * The D_SENSE bit handling is functional, however, and will turn
6092 * descriptor sense on and off for a given LUN.
6093 */
6094int
6095ctl_control_page_handler(struct ctl_scsiio *ctsio,
6096			 struct ctl_page_index *page_index, uint8_t *page_ptr)
6097{
6098	struct scsi_control_page *current_cp, *saved_cp, *user_cp;
6099	struct ctl_lun *lun;
6100	struct ctl_softc *softc;
6101	int set_ua;
6102	uint32_t initidx;
6103
6104	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6105	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
6106	set_ua = 0;
6107
6108	user_cp = (struct scsi_control_page *)page_ptr;
6109	current_cp = (struct scsi_control_page *)
6110		(page_index->page_data + (page_index->page_len *
6111		CTL_PAGE_CURRENT));
6112	saved_cp = (struct scsi_control_page *)
6113		(page_index->page_data + (page_index->page_len *
6114		CTL_PAGE_SAVED));
6115
6116	softc = control_softc;
6117
6118	mtx_lock(&lun->lun_lock);
6119	if (((current_cp->rlec & SCP_DSENSE) == 0)
6120	 && ((user_cp->rlec & SCP_DSENSE) != 0)) {
6121		/*
6122		 * Descriptor sense is currently turned off and the user
6123		 * wants to turn it on.
6124		 */
6125		current_cp->rlec |= SCP_DSENSE;
6126		saved_cp->rlec |= SCP_DSENSE;
6127		lun->flags |= CTL_LUN_SENSE_DESC;
6128		set_ua = 1;
6129	} else if (((current_cp->rlec & SCP_DSENSE) != 0)
6130	 && ((user_cp->rlec & SCP_DSENSE) == 0)) {
6131		/*
6132		 * Descriptor sense is currently turned on, and the user
6133		 * wants to turn it off.
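		 *
		 * (Editor's note: flipping SCP_DSENSE here also updates
		 * the CTL_LUN_SENSE_DESC LUN flag, which is what the
		 * sense-building code actually consults, and set_ua
		 * queues a mode-change unit attention for the other
		 * initiators below.)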
6134		 */
6135		current_cp->rlec &= ~SCP_DSENSE;
6136		saved_cp->rlec &= ~SCP_DSENSE;
6137		lun->flags &= ~CTL_LUN_SENSE_DESC;
6138		set_ua = 1;
6139	}
6140	if (current_cp->queue_flags & SCP_QUEUE_DQUE) {
6141		if (user_cp->queue_flags & SCP_QUEUE_DQUE) {
6142#ifdef NEEDTOPORT
6143			csevent_log(CSC_CTL | CSC_SHELF_SW |
6144				    CTL_UNTAG_TO_UNTAG,
6145				    csevent_LogType_Trace,
6146				    csevent_Severity_Information,
6147				    csevent_AlertLevel_Green,
6148				    csevent_FRU_Firmware,
6149				    csevent_FRU_Unknown,
6150				    "Received untagged to untagged transition");
6151#endif /* NEEDTOPORT */
6152		} else {
6153#ifdef NEEDTOPORT
6154			csevent_log(CSC_CTL | CSC_SHELF_SW |
6155				    CTL_UNTAG_TO_TAG,
6156				    csevent_LogType_ConfigChange,
6157				    csevent_Severity_Information,
6158				    csevent_AlertLevel_Green,
6159				    csevent_FRU_Firmware,
6160				    csevent_FRU_Unknown,
6161				    "Received untagged to tagged "
6162				    "queueing transition");
6163#endif /* NEEDTOPORT */
6164
6165			current_cp->queue_flags &= ~SCP_QUEUE_DQUE;
6166			saved_cp->queue_flags &= ~SCP_QUEUE_DQUE;
6167			set_ua = 1;
6168		}
6169	} else {
6170		if (user_cp->queue_flags & SCP_QUEUE_DQUE) {
6171#ifdef NEEDTOPORT
6172			csevent_log(CSC_CTL | CSC_SHELF_SW |
6173				    CTL_TAG_TO_UNTAG,
6174				    csevent_LogType_ConfigChange,
6175				    csevent_Severity_Warning,
6176				    csevent_AlertLevel_Yellow,
6177				    csevent_FRU_Firmware,
6178				    csevent_FRU_Unknown,
6179				    "Received tagged queueing to untagged "
6180				    "transition");
6181#endif /* NEEDTOPORT */
6182
6183			current_cp->queue_flags |= SCP_QUEUE_DQUE;
6184			saved_cp->queue_flags |= SCP_QUEUE_DQUE;
6185			set_ua = 1;
6186		} else {
6187#ifdef NEEDTOPORT
6188			csevent_log(CSC_CTL | CSC_SHELF_SW |
6189				    CTL_TAG_TO_TAG,
6190				    csevent_LogType_Trace,
6191				    csevent_Severity_Information,
6192				    csevent_AlertLevel_Green,
6193				    csevent_FRU_Firmware,
6194				    csevent_FRU_Unknown,
6195				    "Received tagged queueing to tagged "
6196				    "queueing transition");
6197#endif /* NEEDTOPORT */
6198		}
6199	}
6200	if (set_ua != 0) {
6201		int i;
6202		/*
6203		 * Let other initiators know that the mode
6204		 * parameters for this LUN have changed.
6205		 */
6206		for (i = 0; i < CTL_MAX_INITIATORS; i++) {
6207			if (i == initidx)
6208				continue;
6209
6210			lun->pending_sense[i].ua_pending |=
6211			    CTL_UA_MODE_CHANGE;
6212		}
6213	}
6214	mtx_unlock(&lun->lun_lock);
6215
6216	return (0);
6217}
6218
6219int
6220ctl_power_sp_handler(struct ctl_scsiio *ctsio,
6221		     struct ctl_page_index *page_index, uint8_t *page_ptr)
6222{
6223	return (0);
6224}
6225
6226int
6227ctl_power_sp_sense_handler(struct ctl_scsiio *ctsio,
6228			   struct ctl_page_index *page_index, int pc)
6229{
6230	struct copan_power_subpage *page;
6231
6232	page = (struct copan_power_subpage *)(page_index->page_data +
6233	    (page_index->page_len * pc));
6234
6235	switch (pc) {
6236	case SMS_PAGE_CTRL_CHANGEABLE >> 6:
6237		/*
6238		 * We don't update the changeable bits for this page.
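		 * (Editor's note: the CHANGEABLE page control is how an
		 * initiator discovers which bits a MODE SELECT may
		 * modify, so returning this copy as initialized, rather
		 * than refreshing it, advertises a fixed mask.)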
6239		 */
6240		break;
6241	case SMS_PAGE_CTRL_CURRENT >> 6:
6242	case SMS_PAGE_CTRL_DEFAULT >> 6:
6243	case SMS_PAGE_CTRL_SAVED >> 6:
6244#ifdef NEEDTOPORT
6245		ctl_update_power_subpage(page);
6246#endif
6247		break;
6248	default:
6249#ifdef NEEDTOPORT
6250		EPRINT(0, "Invalid PC %d!!", pc);
6251#endif
6252		break;
6253	}
6254	return (0);
6255}
6256
6257
6258int
6259ctl_aps_sp_handler(struct ctl_scsiio *ctsio,
6260		   struct ctl_page_index *page_index, uint8_t *page_ptr)
6261{
6262	struct copan_aps_subpage *user_sp;
6263	struct copan_aps_subpage *current_sp;
6264	union ctl_modepage_info *modepage_info;
6265	struct ctl_softc *softc;
6266	struct ctl_lun *lun;
6267	int retval;
6268
6269	retval = CTL_RETVAL_COMPLETE;
6270	current_sp = (struct copan_aps_subpage *)(page_index->page_data +
6271	    (page_index->page_len * CTL_PAGE_CURRENT));
6272	softc = control_softc;
6273	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6274
6275	user_sp = (struct copan_aps_subpage *)page_ptr;
6276
6277	modepage_info = (union ctl_modepage_info *)
6278		ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
6279
6280	modepage_info->header.page_code = page_index->page_code & SMPH_PC_MASK;
6281	modepage_info->header.subpage = page_index->subpage;
6282	modepage_info->aps.lock_active = user_sp->lock_active;
6283
6284	mtx_lock(&softc->ctl_lock);
6285
6286	/*
6287	 * If there is a request to lock the LUN and another LUN is already
6288	 * locked, this is an error. If the requested LUN is already locked,
6289	 * ignore the request. If no LUN is locked, attempt to lock it.
6290	 * If there is a request to unlock the LUN and the LUN is currently
6291	 * locked, attempt to unlock it. Otherwise (i.e. if another LUN is
6292	 * locked, or if no LUN is locked) ignore the request.
6293	 */
6294	if (user_sp->lock_active & APS_LOCK_ACTIVE) {
6295		if (softc->aps_locked_lun == lun->lun) {
6296			/*
6297			 * This LUN is already locked, so we're done.
6298			 */
6299			retval = CTL_RETVAL_COMPLETE;
6300		} else if (softc->aps_locked_lun == 0) {
6301			/*
6302			 * No one has the lock, pass the request to the
6303			 * backend.
6304			 */
6305			retval = lun->backend->config_write(
6306				(union ctl_io *)ctsio);
6307		} else {
6308			/*
6309			 * Someone else has the lock, throw out the request.
6310			 */
6311			ctl_set_already_locked(ctsio);
6312			free(ctsio->kern_data_ptr, M_CTL);
6313			ctl_done((union ctl_io *)ctsio);
6314
6315			/*
6316			 * Set the return value so that ctl_do_mode_select()
6317			 * won't try to complete the command. We already
6318			 * completed it here.
6319			 */
6320			retval = CTL_RETVAL_ERROR;
6321		}
6322	} else if (softc->aps_locked_lun == lun->lun) {
6323		/*
6324		 * This LUN is locked, so pass the unlock request to the
6325		 * backend.
6326		 */
6327		retval = lun->backend->config_write((union ctl_io *)ctsio);
6328	}
6329	mtx_unlock(&softc->ctl_lock);
6330
6331	return (retval);
6332}
6333
6334int
6335ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio,
6336				struct ctl_page_index *page_index,
6337				uint8_t *page_ptr)
6338{
6339	uint8_t *c;
6340	int i;
6341
6342	c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs;
6343	ctl_time_io_secs =
6344		(c[0] << 8) |
6345		(c[1] << 0) |
6346		0;
6347	CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs));
6348	printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs);
6349	printf("page data:");
6350	for (i=0; i<8; i++)
6351		printf(" %.2x",page_ptr[i]);
6352	printf("\n");
6353	return (0);
6354}
6355
6356int
6357ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio,
6358			       struct ctl_page_index *page_index,
6359			       int pc)
6360{
6361	struct copan_debugconf_subpage *page;
6362
6363	page = (struct copan_debugconf_subpage *)(page_index->page_data +
6364	    (page_index->page_len * pc));
6365
6366	switch (pc) {
6367	case SMS_PAGE_CTRL_CHANGEABLE >> 6:
6368	case SMS_PAGE_CTRL_DEFAULT >> 6:
6369	case SMS_PAGE_CTRL_SAVED >> 6:
6370		/*
6371		 * We don't update the changeable or default bits for this page.
6372		 */
6373		break;
6374	case SMS_PAGE_CTRL_CURRENT >> 6:
6375		page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8;
6376		page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0;
6377		break;
6378	default:
6379#ifdef NEEDTOPORT
6380		EPRINT(0, "Invalid PC %d!!", pc);
6381#endif /* NEEDTOPORT */
6382		break;
6383	}
6384	return (0);
6385}
6386
6387
6388static int
6389ctl_do_mode_select(union ctl_io *io)
6390{
6391	struct scsi_mode_page_header *page_header;
6392	struct ctl_page_index *page_index;
6393	struct ctl_scsiio *ctsio;
6394	int control_dev, page_len;
6395	int page_len_offset, page_len_size;
6396	union ctl_modepage_info *modepage_info;
6397	struct ctl_lun *lun;
6398	int *len_left, *len_used;
6399	int retval, i;
6400
6401	ctsio = &io->scsiio;
6402	page_index = NULL;
6403	page_len = 0;
6404	retval = CTL_RETVAL_COMPLETE;
6405
6406	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6407
6408	if (lun->be_lun->lun_type != T_DIRECT)
6409		control_dev = 1;
6410	else
6411		control_dev = 0;
6412
6413	modepage_info = (union ctl_modepage_info *)
6414		ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
6415	len_left = &modepage_info->header.len_left;
6416	len_used = &modepage_info->header.len_used;
6417
6418do_next_page:
6419
6420	page_header = (struct scsi_mode_page_header *)
6421		(ctsio->kern_data_ptr + *len_used);
6422
6423	if (*len_left == 0) {
6424		free(ctsio->kern_data_ptr, M_CTL);
6425		ctl_set_success(ctsio);
6426		ctl_done((union ctl_io *)ctsio);
6427		return (CTL_RETVAL_COMPLETE);
6428	} else if (*len_left < sizeof(struct scsi_mode_page_header)) {
6429
6430		free(ctsio->kern_data_ptr, M_CTL);
6431		ctl_set_param_len_error(ctsio);
6432		ctl_done((union ctl_io *)ctsio);
6433		return (CTL_RETVAL_COMPLETE);
6434
6435	} else if ((page_header->page_code & SMPH_SPF)
6436		&& (*len_left < sizeof(struct scsi_mode_page_header_sp))) {
6437
6438		free(ctsio->kern_data_ptr, M_CTL);
6439		ctl_set_param_len_error(ctsio);
6440		ctl_done((union ctl_io *)ctsio);
6441		return (CTL_RETVAL_COMPLETE);
6442	}
6443
6444
6445	/*
6446	 * XXX KDM should we do something with the block descriptor?
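	 * (Editor's note: the block descriptor, when present, carries a
	 * density code, a block count, and a block length.  ctl_mode_select()
	 * computes bd_len from the mode header and starts *len_used past it,
	 * so the descriptor contents are currently ignored here.)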
6447 */ 6448 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6449 6450 if ((control_dev != 0) 6451 && (lun->mode_pages.index[i].page_flags & 6452 CTL_PAGE_FLAG_DISK_ONLY)) 6453 continue; 6454 6455 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) != 6456 (page_header->page_code & SMPH_PC_MASK)) 6457 continue; 6458 6459 /* 6460 * If neither page has a subpage code, then we've got a 6461 * match. 6462 */ 6463 if (((lun->mode_pages.index[i].page_code & SMPH_SPF) == 0) 6464 && ((page_header->page_code & SMPH_SPF) == 0)) { 6465 page_index = &lun->mode_pages.index[i]; 6466 page_len = page_header->page_length; 6467 break; 6468 } 6469 6470 /* 6471 * If both pages have subpages, then the subpage numbers 6472 * have to match. 6473 */ 6474 if ((lun->mode_pages.index[i].page_code & SMPH_SPF) 6475 && (page_header->page_code & SMPH_SPF)) { 6476 struct scsi_mode_page_header_sp *sph; 6477 6478 sph = (struct scsi_mode_page_header_sp *)page_header; 6479 6480 if (lun->mode_pages.index[i].subpage == 6481 sph->subpage) { 6482 page_index = &lun->mode_pages.index[i]; 6483 page_len = scsi_2btoul(sph->page_length); 6484 break; 6485 } 6486 } 6487 } 6488 6489 /* 6490 * If we couldn't find the page, or if we don't have a mode select 6491 * handler for it, send back an error to the user. 6492 */ 6493 if ((page_index == NULL) 6494 || (page_index->select_handler == NULL)) { 6495 ctl_set_invalid_field(ctsio, 6496 /*sks_valid*/ 1, 6497 /*command*/ 0, 6498 /*field*/ *len_used, 6499 /*bit_valid*/ 0, 6500 /*bit*/ 0); 6501 free(ctsio->kern_data_ptr, M_CTL); 6502 ctl_done((union ctl_io *)ctsio); 6503 return (CTL_RETVAL_COMPLETE); 6504 } 6505 6506 if (page_index->page_code & SMPH_SPF) { 6507 page_len_offset = 2; 6508 page_len_size = 2; 6509 } else { 6510 page_len_size = 1; 6511 page_len_offset = 1; 6512 } 6513 6514 /* 6515 * If the length the initiator gives us isn't the one we specify in 6516 * the mode page header, or if they didn't specify enough data in 6517 * the CDB to avoid truncating this page, kick out the request. 6518 */ 6519 if ((page_len != (page_index->page_len - page_len_offset - 6520 page_len_size)) 6521 || (*len_left < page_index->page_len)) { 6522 6523 6524 ctl_set_invalid_field(ctsio, 6525 /*sks_valid*/ 1, 6526 /*command*/ 0, 6527 /*field*/ *len_used + page_len_offset, 6528 /*bit_valid*/ 0, 6529 /*bit*/ 0); 6530 free(ctsio->kern_data_ptr, M_CTL); 6531 ctl_done((union ctl_io *)ctsio); 6532 return (CTL_RETVAL_COMPLETE); 6533 } 6534 6535 /* 6536 * Run through the mode page, checking to make sure that the bits 6537 * the user changed are actually legal for him to change. 6538 */ 6539 for (i = 0; i < page_index->page_len; i++) { 6540 uint8_t *user_byte, *change_mask, *current_byte; 6541 int bad_bit; 6542 int j; 6543 6544 user_byte = (uint8_t *)page_header + i; 6545 change_mask = page_index->page_data + 6546 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; 6547 current_byte = page_index->page_data + 6548 (page_index->page_len * CTL_PAGE_CURRENT) + i; 6549 6550 /* 6551 * Check to see whether the user set any bits in this byte 6552 * that he is not allowed to set. 6553 */ 6554 if ((*user_byte & ~(*change_mask)) == 6555 (*current_byte & ~(*change_mask))) 6556 continue; 6557 6558 /* 6559 * Go through bit by bit to determine which one is illegal. 
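		 * For example: if a byte's changeable mask is 0x00, its
		 * current value is 0x00, and the initiator wrote 0x04,
		 * then bit 2 is the offending bit reported in the
		 * sense-key-specific data below.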
6560		 */
6561		bad_bit = 0;
6562		for (j = 7; j >= 0; j--) {
6563			if ((((1 << j) & ~(*change_mask)) & *user_byte) !=
6564			    (((1 << j) & ~(*change_mask)) & *current_byte)) {
6565				bad_bit = j;
6566				break;
6567			}
6568		}
6569		ctl_set_invalid_field(ctsio,
6570				      /*sks_valid*/ 1,
6571				      /*command*/ 0,
6572				      /*field*/ *len_used + i,
6573				      /*bit_valid*/ 1,
6574				      /*bit*/ bad_bit);
6575		free(ctsio->kern_data_ptr, M_CTL);
6576		ctl_done((union ctl_io *)ctsio);
6577		return (CTL_RETVAL_COMPLETE);
6578	}
6579
6580	/*
6581	 * Decrement these before we call the page handler, since we may
6582	 * end up getting called back one way or another before the handler
6583	 * returns to this context.
6584	 */
6585	*len_left -= page_index->page_len;
6586	*len_used += page_index->page_len;
6587
6588	retval = page_index->select_handler(ctsio, page_index,
6589					    (uint8_t *)page_header);
6590
6591	/*
6592	 * If the page handler returns CTL_RETVAL_QUEUED, then we need to
6593	 * wait until this queued command completes to finish processing
6594	 * the mode page. If it returns anything other than
6595	 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have
6596	 * already set the sense information, freed the data pointer, and
6597	 * completed the io for us.
6598	 */
6599	if (retval != CTL_RETVAL_COMPLETE)
6600		goto bailout_no_done;
6601
6602	/*
6603	 * If the initiator sent us more than one page, parse the next one.
6604	 */
6605	if (*len_left > 0)
6606		goto do_next_page;
6607
6608	ctl_set_success(ctsio);
6609	free(ctsio->kern_data_ptr, M_CTL);
6610	ctl_done((union ctl_io *)ctsio);
6611
6612bailout_no_done:
6613
6614	return (CTL_RETVAL_COMPLETE);
6615
6616}
6617
6618int
6619ctl_mode_select(struct ctl_scsiio *ctsio)
6620{
6621	int param_len, pf, sp;
6622	int header_size, bd_len;
6623	int len_left, len_used;
6624	struct ctl_page_index *page_index;
6625	struct ctl_lun *lun;
6626	int control_dev, page_len;
6627	union ctl_modepage_info *modepage_info;
6628	int retval;
6629
6630	pf = 0;
6631	sp = 0;
6632	page_len = 0;
6633	len_used = 0;
6634	len_left = 0;
6635	retval = 0;
6636	bd_len = 0;
6637	page_index = NULL;
6638
6639	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
6640
6641	if (lun->be_lun->lun_type != T_DIRECT)
6642		control_dev = 1;
6643	else
6644		control_dev = 0;
6645
6646	switch (ctsio->cdb[0]) {
6647	case MODE_SELECT_6: {
6648		struct scsi_mode_select_6 *cdb;
6649
6650		cdb = (struct scsi_mode_select_6 *)ctsio->cdb;
6651
6652		pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
6653		sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
6654
6655		param_len = cdb->length;
6656		header_size = sizeof(struct scsi_mode_header_6);
6657		break;
6658	}
6659	case MODE_SELECT_10: {
6660		struct scsi_mode_select_10 *cdb;
6661
6662		cdb = (struct scsi_mode_select_10 *)ctsio->cdb;
6663
6664		pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
6665		sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
6666
6667		param_len = scsi_2btoul(cdb->length);
6668		header_size = sizeof(struct scsi_mode_header_10);
6669		break;
6670	}
6671	default:
6672		ctl_set_invalid_opcode(ctsio);
6673		ctl_done((union ctl_io *)ctsio);
6674		return (CTL_RETVAL_COMPLETE);
6675		break; /* NOTREACHED */
6676	}
6677
6678	/*
6679	 * From SPC-3:
6680	 * "A parameter list length of zero indicates that the Data-Out Buffer
6681	 * shall be empty. This condition shall not be considered as an error."
6682 */ 6683 if (param_len == 0) { 6684 ctl_set_success(ctsio); 6685 ctl_done((union ctl_io *)ctsio); 6686 return (CTL_RETVAL_COMPLETE); 6687 } 6688 6689 /* 6690 * Since we'll hit this the first time through, prior to 6691 * allocation, we don't need to free a data buffer here. 6692 */ 6693 if (param_len < header_size) { 6694 ctl_set_param_len_error(ctsio); 6695 ctl_done((union ctl_io *)ctsio); 6696 return (CTL_RETVAL_COMPLETE); 6697 } 6698 6699 /* 6700 * Allocate the data buffer and grab the user's data. In theory, 6701 * we shouldn't have to sanity check the parameter list length here 6702 * because the maximum size is 64K. We should be able to malloc 6703 * that much without too many problems. 6704 */ 6705 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6706 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 6707 ctsio->kern_data_len = param_len; 6708 ctsio->kern_total_len = param_len; 6709 ctsio->kern_data_resid = 0; 6710 ctsio->kern_rel_offset = 0; 6711 ctsio->kern_sg_entries = 0; 6712 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6713 ctsio->be_move_done = ctl_config_move_done; 6714 ctl_datamove((union ctl_io *)ctsio); 6715 6716 return (CTL_RETVAL_COMPLETE); 6717 } 6718 6719 switch (ctsio->cdb[0]) { 6720 case MODE_SELECT_6: { 6721 struct scsi_mode_header_6 *mh6; 6722 6723 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; 6724 bd_len = mh6->blk_desc_len; 6725 break; 6726 } 6727 case MODE_SELECT_10: { 6728 struct scsi_mode_header_10 *mh10; 6729 6730 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; 6731 bd_len = scsi_2btoul(mh10->blk_desc_len); 6732 break; 6733 } 6734 default: 6735 panic("Invalid CDB type %#x", ctsio->cdb[0]); 6736 break; 6737 } 6738 6739 if (param_len < (header_size + bd_len)) { 6740 free(ctsio->kern_data_ptr, M_CTL); 6741 ctl_set_param_len_error(ctsio); 6742 ctl_done((union ctl_io *)ctsio); 6743 return (CTL_RETVAL_COMPLETE); 6744 } 6745 6746 /* 6747 * Set the IO_CONT flag, so that if this I/O gets passed to 6748 * ctl_config_write_done(), it'll get passed back to 6749 * ctl_do_mode_select() for further processing, or completion if 6750 * we're all done. 6751 */ 6752 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6753 ctsio->io_cont = ctl_do_mode_select; 6754 6755 modepage_info = (union ctl_modepage_info *) 6756 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6757 6758 memset(modepage_info, 0, sizeof(*modepage_info)); 6759 6760 len_left = param_len - header_size - bd_len; 6761 len_used = header_size + bd_len; 6762 6763 modepage_info->header.len_left = len_left; 6764 modepage_info->header.len_used = len_used; 6765 6766 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6767} 6768 6769int 6770ctl_mode_sense(struct ctl_scsiio *ctsio) 6771{ 6772 struct ctl_lun *lun; 6773 int pc, page_code, dbd, llba, subpage; 6774 int alloc_len, page_len, header_len, total_len; 6775 struct scsi_mode_block_descr *block_desc; 6776 struct ctl_page_index *page_index; 6777 int control_dev; 6778 6779 dbd = 0; 6780 llba = 0; 6781 block_desc = NULL; 6782 page_index = NULL; 6783 6784 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6785 6786 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6787 6788 if (lun->be_lun->lun_type != T_DIRECT) 6789 control_dev = 1; 6790 else 6791 control_dev = 0; 6792 6793 if (lun->flags & CTL_LUN_PR_RESERVED) { 6794 uint32_t residx; 6795 6796 /* 6797 * XXX KDM need a lock here. 
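	 * (Editor's note on the check below: under an Exclusive Access
	 * reservation only the reservation holder may issue this command,
	 * while under the Exclusive Access, Registrants Only / All
	 * Registrants variants any registered nexus may.)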
6798 */ 6799 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 6800 if ((lun->res_type == SPR_TYPE_EX_AC 6801 && residx != lun->pr_res_idx) 6802 || ((lun->res_type == SPR_TYPE_EX_AC_RO 6803 || lun->res_type == SPR_TYPE_EX_AC_AR) 6804 && !lun->per_res[residx].registered)) { 6805 ctl_set_reservation_conflict(ctsio); 6806 ctl_done((union ctl_io *)ctsio); 6807 return (CTL_RETVAL_COMPLETE); 6808 } 6809 } 6810 6811 switch (ctsio->cdb[0]) { 6812 case MODE_SENSE_6: { 6813 struct scsi_mode_sense_6 *cdb; 6814 6815 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6816 6817 header_len = sizeof(struct scsi_mode_hdr_6); 6818 if (cdb->byte2 & SMS_DBD) 6819 dbd = 1; 6820 else 6821 header_len += sizeof(struct scsi_mode_block_descr); 6822 6823 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6824 page_code = cdb->page & SMS_PAGE_CODE; 6825 subpage = cdb->subpage; 6826 alloc_len = cdb->length; 6827 break; 6828 } 6829 case MODE_SENSE_10: { 6830 struct scsi_mode_sense_10 *cdb; 6831 6832 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6833 6834 header_len = sizeof(struct scsi_mode_hdr_10); 6835 6836 if (cdb->byte2 & SMS_DBD) 6837 dbd = 1; 6838 else 6839 header_len += sizeof(struct scsi_mode_block_descr); 6840 if (cdb->byte2 & SMS10_LLBAA) 6841 llba = 1; 6842 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6843 page_code = cdb->page & SMS_PAGE_CODE; 6844 subpage = cdb->subpage; 6845 alloc_len = scsi_2btoul(cdb->length); 6846 break; 6847 } 6848 default: 6849 ctl_set_invalid_opcode(ctsio); 6850 ctl_done((union ctl_io *)ctsio); 6851 return (CTL_RETVAL_COMPLETE); 6852 break; /* NOTREACHED */ 6853 } 6854 6855 /* 6856 * We have to make a first pass through to calculate the size of 6857 * the pages that match the user's query. Then we allocate enough 6858 * memory to hold it, and actually copy the data into the buffer. 6859 */ 6860 switch (page_code) { 6861 case SMS_ALL_PAGES_PAGE: { 6862 int i; 6863 6864 page_len = 0; 6865 6866 /* 6867 * At the moment, values other than 0 and 0xff here are 6868 * reserved according to SPC-3. 6869 */ 6870 if ((subpage != SMS_SUBPAGE_PAGE_0) 6871 && (subpage != SMS_SUBPAGE_ALL)) { 6872 ctl_set_invalid_field(ctsio, 6873 /*sks_valid*/ 1, 6874 /*command*/ 1, 6875 /*field*/ 3, 6876 /*bit_valid*/ 0, 6877 /*bit*/ 0); 6878 ctl_done((union ctl_io *)ctsio); 6879 return (CTL_RETVAL_COMPLETE); 6880 } 6881 6882 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6883 if ((control_dev != 0) 6884 && (lun->mode_pages.index[i].page_flags & 6885 CTL_PAGE_FLAG_DISK_ONLY)) 6886 continue; 6887 6888 /* 6889 * We don't use this subpage if the user didn't 6890 * request all subpages. 
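			 * (A subpage byte of 0x00 requests only the
			 * page_0 format pages; 0xff requests every
			 * subpage of every page as well.)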
6891 */ 6892 if ((lun->mode_pages.index[i].subpage != 0) 6893 && (subpage == SMS_SUBPAGE_PAGE_0)) 6894 continue; 6895 6896#if 0 6897 printf("found page %#x len %d\n", 6898 lun->mode_pages.index[i].page_code & 6899 SMPH_PC_MASK, 6900 lun->mode_pages.index[i].page_len); 6901#endif 6902 page_len += lun->mode_pages.index[i].page_len; 6903 } 6904 break; 6905 } 6906 default: { 6907 int i; 6908 6909 page_len = 0; 6910 6911 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6912 /* Look for the right page code */ 6913 if ((lun->mode_pages.index[i].page_code & 6914 SMPH_PC_MASK) != page_code) 6915 continue; 6916 6917 /* Look for the right subpage or the subpage wildcard*/ 6918 if ((lun->mode_pages.index[i].subpage != subpage) 6919 && (subpage != SMS_SUBPAGE_ALL)) 6920 continue; 6921 6922 /* Make sure the page is supported for this dev type */ 6923 if ((control_dev != 0) 6924 && (lun->mode_pages.index[i].page_flags & 6925 CTL_PAGE_FLAG_DISK_ONLY)) 6926 continue; 6927 6928#if 0 6929 printf("found page %#x len %d\n", 6930 lun->mode_pages.index[i].page_code & 6931 SMPH_PC_MASK, 6932 lun->mode_pages.index[i].page_len); 6933#endif 6934 6935 page_len += lun->mode_pages.index[i].page_len; 6936 } 6937 6938 if (page_len == 0) { 6939 ctl_set_invalid_field(ctsio, 6940 /*sks_valid*/ 1, 6941 /*command*/ 1, 6942 /*field*/ 2, 6943 /*bit_valid*/ 1, 6944 /*bit*/ 5); 6945 ctl_done((union ctl_io *)ctsio); 6946 return (CTL_RETVAL_COMPLETE); 6947 } 6948 break; 6949 } 6950 } 6951 6952 total_len = header_len + page_len; 6953#if 0 6954 printf("header_len = %d, page_len = %d, total_len = %d\n", 6955 header_len, page_len, total_len); 6956#endif 6957 6958 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6959 ctsio->kern_sg_entries = 0; 6960 ctsio->kern_data_resid = 0; 6961 ctsio->kern_rel_offset = 0; 6962 if (total_len < alloc_len) { 6963 ctsio->residual = alloc_len - total_len; 6964 ctsio->kern_data_len = total_len; 6965 ctsio->kern_total_len = total_len; 6966 } else { 6967 ctsio->residual = 0; 6968 ctsio->kern_data_len = alloc_len; 6969 ctsio->kern_total_len = alloc_len; 6970 } 6971 6972 switch (ctsio->cdb[0]) { 6973 case MODE_SENSE_6: { 6974 struct scsi_mode_hdr_6 *header; 6975 6976 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6977 6978 header->datalen = ctl_min(total_len - 1, 254); 6979 6980 if (dbd) 6981 header->block_descr_len = 0; 6982 else 6983 header->block_descr_len = 6984 sizeof(struct scsi_mode_block_descr); 6985 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6986 break; 6987 } 6988 case MODE_SENSE_10: { 6989 struct scsi_mode_hdr_10 *header; 6990 int datalen; 6991 6992 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6993 6994 datalen = ctl_min(total_len - 2, 65533); 6995 scsi_ulto2b(datalen, header->datalen); 6996 if (dbd) 6997 scsi_ulto2b(0, header->block_descr_len); 6998 else 6999 scsi_ulto2b(sizeof(struct scsi_mode_block_descr), 7000 header->block_descr_len); 7001 block_desc = (struct scsi_mode_block_descr *)&header[1]; 7002 break; 7003 } 7004 default: 7005 panic("invalid CDB type %#x", ctsio->cdb[0]); 7006 break; /* NOTREACHED */ 7007 } 7008 7009 /* 7010 * If we've got a disk, use its blocksize in the block 7011 * descriptor. Otherwise, just set it to 0. 
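	 * (Editor's note: control_dev == 0 is the disk case here, i.e. the
	 * LUN type is T_DIRECT, so the descriptor carries the backing
	 * store's real block size.)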
7012	 */
7013	if (dbd == 0) {
7014		if (control_dev == 0)
7015			scsi_ulto3b(lun->be_lun->blocksize,
7016				    block_desc->block_len);
7017		else
7018			scsi_ulto3b(0, block_desc->block_len);
7019	}
7020
7021	switch (page_code) {
7022	case SMS_ALL_PAGES_PAGE: {
7023		int i, data_used;
7024
7025		data_used = header_len;
7026		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
7027			struct ctl_page_index *page_index;
7028
7029			page_index = &lun->mode_pages.index[i];
7030
7031			if ((control_dev != 0)
7032			 && (page_index->page_flags &
7033			     CTL_PAGE_FLAG_DISK_ONLY))
7034				continue;
7035
7036			/*
7037			 * We don't use this subpage if the user didn't
7038			 * request all subpages. We already checked (above)
7039			 * to make sure the user only specified a subpage
7040			 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case.
7041			 */
7042			if ((page_index->subpage != 0)
7043			 && (subpage == SMS_SUBPAGE_PAGE_0))
7044				continue;
7045
7046			/*
7047			 * Call the handler, if it exists, to update the
7048			 * page to the latest values.
7049			 */
7050			if (page_index->sense_handler != NULL)
7051				page_index->sense_handler(ctsio, page_index, pc);
7052
7053			memcpy(ctsio->kern_data_ptr + data_used,
7054			       page_index->page_data +
7055			       (page_index->page_len * pc),
7056			       page_index->page_len);
7057			data_used += page_index->page_len;
7058		}
7059		break;
7060	}
7061	default: {
7062		int i, data_used;
7063
7064		data_used = header_len;
7065
7066		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
7067			struct ctl_page_index *page_index;
7068
7069			page_index = &lun->mode_pages.index[i];
7070
7071			/* Look for the right page code */
7072			if ((page_index->page_code & SMPH_PC_MASK) != page_code)
7073				continue;
7074
7075			/* Look for the right subpage or the subpage wildcard */
7076			if ((page_index->subpage != subpage)
7077			 && (subpage != SMS_SUBPAGE_ALL))
7078				continue;
7079
7080			/* Make sure the page is supported for this dev type */
7081			if ((control_dev != 0)
7082			 && (page_index->page_flags &
7083			     CTL_PAGE_FLAG_DISK_ONLY))
7084				continue;
7085
7086			/*
7087			 * Call the handler, if it exists, to update the
7088			 * page to the latest values.
7089			 */
7090			if (page_index->sense_handler != NULL)
7091				page_index->sense_handler(ctsio, page_index, pc);
7092
7093			memcpy(ctsio->kern_data_ptr + data_used,
7094			       page_index->page_data +
7095			       (page_index->page_len * pc),
7096			       page_index->page_len);
7097			data_used += page_index->page_len;
7098		}
7099		break;
7100	}
7101	}
7102
7103	ctsio->scsi_status = SCSI_STATUS_OK;
7104
7105	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
7106	ctsio->be_move_done = ctl_config_move_done;
7107	ctl_datamove((union ctl_io *)ctsio);
7108
7109	return (CTL_RETVAL_COMPLETE);
7110}
7111
7112int
7113ctl_read_capacity(struct ctl_scsiio *ctsio)
7114{
7115	struct scsi_read_capacity *cdb;
7116	struct scsi_read_capacity_data *data;
7117	struct ctl_lun *lun;
7118	uint32_t lba;
7119
7120	CTL_DEBUG_PRINT(("ctl_read_capacity\n"));
7121
7122	cdb = (struct scsi_read_capacity *)ctsio->cdb;
7123
7124	lba = scsi_4btoul(cdb->addr);
7125	if (((cdb->pmi & SRC_PMI) == 0)
7126	 && (lba != 0)) {
7127		ctl_set_invalid_field(/*ctsio*/ ctsio,
7128				      /*sks_valid*/ 1,
7129				      /*command*/ 1,
7130				      /*field*/ 2,
7131				      /*bit_valid*/ 0,
7132				      /*bit*/ 0);
7133		ctl_done((union ctl_io *)ctsio);
7134		return (CTL_RETVAL_COMPLETE);
7135	}
7136
7137	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
7138
7139	ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
7140	data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr;
7141	ctsio->residual = 0;
7142	ctsio->kern_data_len = sizeof(*data);
7143	ctsio->kern_total_len = sizeof(*data);
7144	ctsio->kern_data_resid = 0;
7145	ctsio->kern_rel_offset = 0;
7146	ctsio->kern_sg_entries = 0;
7147
7148	/*
7149	 * If the maximum LBA is greater than 0xfffffffe, the user must
7150	 * issue a SERVICE ACTION IN (16) command, with the read capacity
7151	 * service action set.
7152	 */
7153	if (lun->be_lun->maxlba > 0xfffffffe)
7154		scsi_ulto4b(0xffffffff, data->addr);
7155	else
7156		scsi_ulto4b(lun->be_lun->maxlba, data->addr);
7157
7158	/*
7159	 * XXX KDM this may not be 512 bytes...
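	 * (Editor's note: a LUN with 2^32 or more blocks has already
	 * reported 0xffffffff in the returned LBA field above, which
	 * tells the initiator to retry with READ CAPACITY(16) to get
	 * the real values.)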
7160 */ 7161 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7162 7163 ctsio->scsi_status = SCSI_STATUS_OK; 7164 7165 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7166 ctsio->be_move_done = ctl_config_move_done; 7167 ctl_datamove((union ctl_io *)ctsio); 7168 7169 return (CTL_RETVAL_COMPLETE); 7170} 7171 7172int 7173ctl_read_capacity_16(struct ctl_scsiio *ctsio) 7174{ 7175 struct scsi_read_capacity_16 *cdb; 7176 struct scsi_read_capacity_data_long *data; 7177 struct ctl_lun *lun; 7178 uint64_t lba; 7179 uint32_t alloc_len; 7180 7181 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 7182 7183 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 7184 7185 alloc_len = scsi_4btoul(cdb->alloc_len); 7186 lba = scsi_8btou64(cdb->addr); 7187 7188 if ((cdb->reladr & SRC16_PMI) 7189 && (lba != 0)) { 7190 ctl_set_invalid_field(/*ctsio*/ ctsio, 7191 /*sks_valid*/ 1, 7192 /*command*/ 1, 7193 /*field*/ 2, 7194 /*bit_valid*/ 0, 7195 /*bit*/ 0); 7196 ctl_done((union ctl_io *)ctsio); 7197 return (CTL_RETVAL_COMPLETE); 7198 } 7199 7200 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7201 7202 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 7203 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 7204 7205 if (sizeof(*data) < alloc_len) { 7206 ctsio->residual = alloc_len - sizeof(*data); 7207 ctsio->kern_data_len = sizeof(*data); 7208 ctsio->kern_total_len = sizeof(*data); 7209 } else { 7210 ctsio->residual = 0; 7211 ctsio->kern_data_len = alloc_len; 7212 ctsio->kern_total_len = alloc_len; 7213 } 7214 ctsio->kern_data_resid = 0; 7215 ctsio->kern_rel_offset = 0; 7216 ctsio->kern_sg_entries = 0; 7217 7218 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 7219 /* XXX KDM this may not be 512 bytes... */ 7220 scsi_ulto4b(lun->be_lun->blocksize, data->length); 7221 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 7222 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); 7223 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 7224 data->lalba_lbp[0] |= SRC16_LBPME; 7225 7226 ctsio->scsi_status = SCSI_STATUS_OK; 7227 7228 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7229 ctsio->be_move_done = ctl_config_move_done; 7230 ctl_datamove((union ctl_io *)ctsio); 7231 7232 return (CTL_RETVAL_COMPLETE); 7233} 7234 7235int 7236ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) 7237{ 7238 struct scsi_maintenance_in *cdb; 7239 int retval; 7240 int alloc_len, ext, total_len = 0, g, p, pc, pg; 7241 int num_target_port_groups, num_target_ports, single; 7242 struct ctl_lun *lun; 7243 struct ctl_softc *softc; 7244 struct ctl_port *port; 7245 struct scsi_target_group_data *rtg_ptr; 7246 struct scsi_target_group_data_extended *rtg_ext_ptr; 7247 struct scsi_target_port_group_descriptor *tpg_desc; 7248 7249 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); 7250 7251 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 7252 softc = control_softc; 7253 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7254 7255 retval = CTL_RETVAL_COMPLETE; 7256 7257 switch (cdb->byte2 & STG_PDF_MASK) { 7258 case STG_PDF_LENGTH: 7259 ext = 0; 7260 break; 7261 case STG_PDF_EXTENDED: 7262 ext = 1; 7263 break; 7264 default: 7265 ctl_set_invalid_field(/*ctsio*/ ctsio, 7266 /*sks_valid*/ 1, 7267 /*command*/ 1, 7268 /*field*/ 2, 7269 /*bit_valid*/ 1, 7270 /*bit*/ 5); 7271 ctl_done((union ctl_io *)ctsio); 7272 return(retval); 7273 } 7274 7275 single = ctl_is_single; 7276 if (single) 7277 num_target_port_groups = 1; 7278 else 7279 num_target_port_groups = 
NUM_TARGET_PORT_GROUPS; 7280 num_target_ports = 0; 7281 mtx_lock(&softc->ctl_lock); 7282 STAILQ_FOREACH(port, &softc->port_list, links) { 7283 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7284 continue; 7285 if (ctl_map_lun_back(port->targ_port, lun->lun) >= CTL_MAX_LUNS) 7286 continue; 7287 num_target_ports++; 7288 } 7289 mtx_unlock(&softc->ctl_lock); 7290 7291 if (ext) 7292 total_len = sizeof(struct scsi_target_group_data_extended); 7293 else 7294 total_len = sizeof(struct scsi_target_group_data); 7295 total_len += sizeof(struct scsi_target_port_group_descriptor) * 7296 num_target_port_groups + 7297 sizeof(struct scsi_target_port_descriptor) * 7298 num_target_ports * num_target_port_groups; 7299 7300 alloc_len = scsi_4btoul(cdb->length); 7301 7302 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7303 7304 ctsio->kern_sg_entries = 0; 7305 7306 if (total_len < alloc_len) { 7307 ctsio->residual = alloc_len - total_len; 7308 ctsio->kern_data_len = total_len; 7309 ctsio->kern_total_len = total_len; 7310 } else { 7311 ctsio->residual = 0; 7312 ctsio->kern_data_len = alloc_len; 7313 ctsio->kern_total_len = alloc_len; 7314 } 7315 ctsio->kern_data_resid = 0; 7316 ctsio->kern_rel_offset = 0; 7317 7318 if (ext) { 7319 rtg_ext_ptr = (struct scsi_target_group_data_extended *) 7320 ctsio->kern_data_ptr; 7321 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); 7322 rtg_ext_ptr->format_type = 0x10; 7323 rtg_ext_ptr->implicit_transition_time = 0; 7324 tpg_desc = &rtg_ext_ptr->groups[0]; 7325 } else { 7326 rtg_ptr = (struct scsi_target_group_data *) 7327 ctsio->kern_data_ptr; 7328 scsi_ulto4b(total_len - 4, rtg_ptr->length); 7329 tpg_desc = &rtg_ptr->groups[0]; 7330 } 7331 7332 pg = ctsio->io_hdr.nexus.targ_port / CTL_MAX_PORTS; 7333 mtx_lock(&softc->ctl_lock); 7334 for (g = 0; g < num_target_port_groups; g++) { 7335 if (g == pg) 7336 tpg_desc->pref_state = TPG_PRIMARY | 7337 TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7338 else 7339 tpg_desc->pref_state = 7340 TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7341 tpg_desc->support = TPG_AO_SUP; 7342 if (!single) 7343 tpg_desc->support |= TPG_AN_SUP; 7344 scsi_ulto2b(g + 1, tpg_desc->target_port_group); 7345 tpg_desc->status = TPG_IMPLICIT; 7346 pc = 0; 7347 STAILQ_FOREACH(port, &softc->port_list, links) { 7348 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7349 continue; 7350 if (ctl_map_lun_back(port->targ_port, lun->lun) >= 7351 CTL_MAX_LUNS) 7352 continue; 7353 p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS; 7354 scsi_ulto2b(p, tpg_desc->descriptors[pc]. 
7355 relative_target_port_identifier); 7356 pc++; 7357 } 7358 tpg_desc->target_port_count = pc; 7359 tpg_desc = (struct scsi_target_port_group_descriptor *) 7360 &tpg_desc->descriptors[pc]; 7361 } 7362 mtx_unlock(&softc->ctl_lock); 7363 7364 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7365 ctsio->be_move_done = ctl_config_move_done; 7366 7367 CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n", 7368 ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1], 7369 ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3], 7370 ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5], 7371 ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7])); 7372 7373 ctl_datamove((union ctl_io *)ctsio); 7374 return(retval); 7375} 7376 7377int 7378ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) 7379{ 7380 struct ctl_lun *lun; 7381 struct scsi_report_supported_opcodes *cdb; 7382 const struct ctl_cmd_entry *entry, *sentry; 7383 struct scsi_report_supported_opcodes_all *all; 7384 struct scsi_report_supported_opcodes_descr *descr; 7385 struct scsi_report_supported_opcodes_one *one; 7386 int retval; 7387 int alloc_len, total_len; 7388 int opcode, service_action, i, j, num; 7389 7390 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); 7391 7392 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; 7393 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7394 7395 retval = CTL_RETVAL_COMPLETE; 7396 7397 opcode = cdb->requested_opcode; 7398 service_action = scsi_2btoul(cdb->requested_service_action); 7399 switch (cdb->options & RSO_OPTIONS_MASK) { 7400 case RSO_OPTIONS_ALL: 7401 num = 0; 7402 for (i = 0; i < 256; i++) { 7403 entry = &ctl_cmd_table[i]; 7404 if (entry->flags & CTL_CMD_FLAG_SA5) { 7405 for (j = 0; j < 32; j++) { 7406 sentry = &((const struct ctl_cmd_entry *) 7407 entry->execute)[j]; 7408 if (ctl_cmd_applicable( 7409 lun->be_lun->lun_type, sentry)) 7410 num++; 7411 } 7412 } else { 7413 if (ctl_cmd_applicable(lun->be_lun->lun_type, 7414 entry)) 7415 num++; 7416 } 7417 } 7418 total_len = sizeof(struct scsi_report_supported_opcodes_all) + 7419 num * sizeof(struct scsi_report_supported_opcodes_descr); 7420 break; 7421 case RSO_OPTIONS_OC: 7422 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { 7423 ctl_set_invalid_field(/*ctsio*/ ctsio, 7424 /*sks_valid*/ 1, 7425 /*command*/ 1, 7426 /*field*/ 2, 7427 /*bit_valid*/ 1, 7428 /*bit*/ 2); 7429 ctl_done((union ctl_io *)ctsio); 7430 return (CTL_RETVAL_COMPLETE); 7431 } 7432 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7433 break; 7434 case RSO_OPTIONS_OC_SA: 7435 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || 7436 service_action >= 32) { 7437 ctl_set_invalid_field(/*ctsio*/ ctsio, 7438 /*sks_valid*/ 1, 7439 /*command*/ 1, 7440 /*field*/ 2, 7441 /*bit_valid*/ 1, 7442 /*bit*/ 2); 7443 ctl_done((union ctl_io *)ctsio); 7444 return (CTL_RETVAL_COMPLETE); 7445 } 7446 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7447 break; 7448 default: 7449 ctl_set_invalid_field(/*ctsio*/ ctsio, 7450 /*sks_valid*/ 1, 7451 /*command*/ 1, 7452 /*field*/ 2, 7453 /*bit_valid*/ 1, 7454 /*bit*/ 2); 7455 ctl_done((union ctl_io *)ctsio); 7456 return (CTL_RETVAL_COMPLETE); 7457 } 7458 7459 alloc_len = scsi_4btoul(cdb->length); 7460 7461 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7462 7463 ctsio->kern_sg_entries = 0; 7464 7465 if (total_len < alloc_len) { 7466 ctsio->residual = alloc_len - total_len; 7467 ctsio->kern_data_len = total_len; 7468 ctsio->kern_total_len = total_len; 7469 } else { 7470 ctsio->residual 
= 0; 7471 ctsio->kern_data_len = alloc_len; 7472 ctsio->kern_total_len = alloc_len; 7473 } 7474 ctsio->kern_data_resid = 0; 7475 ctsio->kern_rel_offset = 0; 7476 7477 switch (cdb->options & RSO_OPTIONS_MASK) { 7478 case RSO_OPTIONS_ALL: 7479 all = (struct scsi_report_supported_opcodes_all *) 7480 ctsio->kern_data_ptr; 7481 num = 0; 7482 for (i = 0; i < 256; i++) { 7483 entry = &ctl_cmd_table[i]; 7484 if (entry->flags & CTL_CMD_FLAG_SA5) { 7485 for (j = 0; j < 32; j++) { 7486 sentry = &((const struct ctl_cmd_entry *) 7487 entry->execute)[j]; 7488 if (!ctl_cmd_applicable( 7489 lun->be_lun->lun_type, sentry)) 7490 continue; 7491 descr = &all->descr[num++]; 7492 descr->opcode = i; 7493 scsi_ulto2b(j, descr->service_action); 7494 descr->flags = RSO_SERVACTV; 7495 scsi_ulto2b(sentry->length, 7496 descr->cdb_length); 7497 } 7498 } else { 7499 if (!ctl_cmd_applicable(lun->be_lun->lun_type, 7500 entry)) 7501 continue; 7502 descr = &all->descr[num++]; 7503 descr->opcode = i; 7504 scsi_ulto2b(0, descr->service_action); 7505 descr->flags = 0; 7506 scsi_ulto2b(entry->length, descr->cdb_length); 7507 } 7508 } 7509 scsi_ulto4b( 7510 num * sizeof(struct scsi_report_supported_opcodes_descr), 7511 all->length); 7512 break; 7513 case RSO_OPTIONS_OC: 7514 one = (struct scsi_report_supported_opcodes_one *) 7515 ctsio->kern_data_ptr; 7516 entry = &ctl_cmd_table[opcode]; 7517 goto fill_one; 7518 case RSO_OPTIONS_OC_SA: 7519 one = (struct scsi_report_supported_opcodes_one *) 7520 ctsio->kern_data_ptr; 7521 entry = &ctl_cmd_table[opcode]; 7522 entry = &((const struct ctl_cmd_entry *) 7523 entry->execute)[service_action]; 7524fill_one: 7525 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 7526 one->support = 3; 7527 scsi_ulto2b(entry->length, one->cdb_length); 7528 one->cdb_usage[0] = opcode; 7529 memcpy(&one->cdb_usage[1], entry->usage, 7530 entry->length - 1); 7531 } else 7532 one->support = 1; 7533 break; 7534 } 7535 7536 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7537 ctsio->be_move_done = ctl_config_move_done; 7538 7539 ctl_datamove((union ctl_io *)ctsio); 7540 return(retval); 7541} 7542 7543int 7544ctl_report_supported_tmf(struct ctl_scsiio *ctsio) 7545{ 7546 struct ctl_lun *lun; 7547 struct scsi_report_supported_tmf *cdb; 7548 struct scsi_report_supported_tmf_data *data; 7549 int retval; 7550 int alloc_len, total_len; 7551 7552 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); 7553 7554 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; 7555 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7556 7557 retval = CTL_RETVAL_COMPLETE; 7558 7559 total_len = sizeof(struct scsi_report_supported_tmf_data); 7560 alloc_len = scsi_4btoul(cdb->length); 7561 7562 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7563 7564 ctsio->kern_sg_entries = 0; 7565 7566 if (total_len < alloc_len) { 7567 ctsio->residual = alloc_len - total_len; 7568 ctsio->kern_data_len = total_len; 7569 ctsio->kern_total_len = total_len; 7570 } else { 7571 ctsio->residual = 0; 7572 ctsio->kern_data_len = alloc_len; 7573 ctsio->kern_total_len = alloc_len; 7574 } 7575 ctsio->kern_data_resid = 0; 7576 ctsio->kern_rel_offset = 0; 7577 7578 data = (struct scsi_report_supported_tmf_data *)ctsio->kern_data_ptr; 7579 data->byte1 |= RST_ATS | RST_ATSS | RST_LURS | RST_TRS; 7580 data->byte2 |= RST_ITNRS; 7581 7582 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7583 ctsio->be_move_done = ctl_config_move_done; 7584 7585 ctl_datamove((union ctl_io *)ctsio); 7586 return (retval); 7587} 7588 7589int 
7590ctl_report_timestamp(struct ctl_scsiio *ctsio) 7591{ 7592 struct ctl_lun *lun; 7593 struct scsi_report_timestamp *cdb; 7594 struct scsi_report_timestamp_data *data; 7595 struct timeval tv; 7596 int64_t timestamp; 7597 int retval; 7598 int alloc_len, total_len; 7599 7600 CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); 7601 7602 cdb = (struct scsi_report_timestamp *)ctsio->cdb; 7603 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7604 7605 retval = CTL_RETVAL_COMPLETE; 7606 7607 total_len = sizeof(struct scsi_report_timestamp_data); 7608 alloc_len = scsi_4btoul(cdb->length); 7609 7610 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7611 7612 ctsio->kern_sg_entries = 0; 7613 7614 if (total_len < alloc_len) { 7615 ctsio->residual = alloc_len - total_len; 7616 ctsio->kern_data_len = total_len; 7617 ctsio->kern_total_len = total_len; 7618 } else { 7619 ctsio->residual = 0; 7620 ctsio->kern_data_len = alloc_len; 7621 ctsio->kern_total_len = alloc_len; 7622 } 7623 ctsio->kern_data_resid = 0; 7624 ctsio->kern_rel_offset = 0; 7625 7626 data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; 7627 scsi_ulto2b(sizeof(*data) - 2, data->length); 7628 data->origin = RTS_ORIG_OUTSIDE; 7629 getmicrotime(&tv); 7630 timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; 7631 scsi_ulto4b(timestamp >> 16, data->timestamp); 7632 scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); 7633 7634 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7635 ctsio->be_move_done = ctl_config_move_done; 7636 7637 ctl_datamove((union ctl_io *)ctsio); 7638 return (retval); 7639} 7640 7641int 7642ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) 7643{ 7644 struct scsi_per_res_in *cdb; 7645 int alloc_len, total_len = 0; 7646 /* struct scsi_per_res_in_rsrv in_data; */ 7647 struct ctl_lun *lun; 7648 struct ctl_softc *softc; 7649 7650 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); 7651 7652 softc = control_softc; 7653 7654 cdb = (struct scsi_per_res_in *)ctsio->cdb; 7655 7656 alloc_len = scsi_2btoul(cdb->length); 7657 7658 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7659 7660retry: 7661 mtx_lock(&lun->lun_lock); 7662 switch (cdb->action) { 7663 case SPRI_RK: /* read keys */ 7664 total_len = sizeof(struct scsi_per_res_in_keys) + 7665 lun->pr_key_count * 7666 sizeof(struct scsi_per_res_key); 7667 break; 7668 case SPRI_RR: /* read reservation */ 7669 if (lun->flags & CTL_LUN_PR_RESERVED) 7670 total_len = sizeof(struct scsi_per_res_in_rsrv); 7671 else 7672 total_len = sizeof(struct scsi_per_res_in_header); 7673 break; 7674 case SPRI_RC: /* report capabilities */ 7675 total_len = sizeof(struct scsi_per_res_cap); 7676 break; 7677 case SPRI_RS: /* read full status */ 7678 total_len = sizeof(struct scsi_per_res_in_header) + 7679 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7680 lun->pr_key_count; 7681 break; 7682 default: 7683 panic("Invalid PR type %x", cdb->action); 7684 } 7685 mtx_unlock(&lun->lun_lock); 7686 7687 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7688 7689 if (total_len < alloc_len) { 7690 ctsio->residual = alloc_len - total_len; 7691 ctsio->kern_data_len = total_len; 7692 ctsio->kern_total_len = total_len; 7693 } else { 7694 ctsio->residual = 0; 7695 ctsio->kern_data_len = alloc_len; 7696 ctsio->kern_total_len = alloc_len; 7697 } 7698 7699 ctsio->kern_data_resid = 0; 7700 ctsio->kern_rel_offset = 0; 7701 ctsio->kern_sg_entries = 0; 7702 7703 mtx_lock(&lun->lun_lock); 7704 switch (cdb->action) { 7705 case SPRI_RK: { // 
read keys 7706 struct scsi_per_res_in_keys *res_keys; 7707 int i, key_count; 7708 7709 res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7710 7711 /* 7712 * We had to drop the lock to allocate our buffer, which 7713 * leaves time for someone to come in with another 7714 * persistent reservation. (That is unlikely, though, 7715 * since this should be the only persistent reservation 7716 * command active right now.) 7717 */ 7718 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7719 (lun->pr_key_count * 7720 sizeof(struct scsi_per_res_key)))){ 7721 mtx_unlock(&lun->lun_lock); 7722 free(ctsio->kern_data_ptr, M_CTL); 7723 printf("%s: reservation length changed, retrying\n", 7724 __func__); 7725 goto retry; 7726 } 7727 7728 scsi_ulto4b(lun->PRGeneration, res_keys->header.generation); 7729 7730 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7731 lun->pr_key_count, res_keys->header.length); 7732 7733 for (i = 0, key_count = 0; i < 2*CTL_MAX_INITIATORS; i++) { 7734 if (!lun->per_res[i].registered) 7735 continue; 7736 7737 /* 7738 * We used lun->pr_key_count to calculate the 7739 * size to allocate. If it turns out the number of 7740 * initiators with the registered flag set is 7741 * larger than that (i.e. they haven't been kept in 7742 * sync), we've got a problem. 7743 */ 7744 if (key_count >= lun->pr_key_count) { 7745#ifdef NEEDTOPORT 7746 csevent_log(CSC_CTL | CSC_SHELF_SW | 7747 CTL_PR_ERROR, 7748 csevent_LogType_Fault, 7749 csevent_AlertLevel_Yellow, 7750 csevent_FRU_ShelfController, 7751 csevent_FRU_Firmware, 7752 csevent_FRU_Unknown, 7753 "registered keys %d >= key " 7754 "count %d", key_count, 7755 lun->pr_key_count); 7756#endif 7757 key_count++; 7758 continue; 7759 } 7760 memcpy(res_keys->keys[key_count].key, 7761 lun->per_res[i].res_key.key, 7762 ctl_min(sizeof(res_keys->keys[key_count].key), 7763 sizeof(lun->per_res[i].res_key))); 7764 key_count++; 7765 } 7766 break; 7767 } 7768 case SPRI_RR: { // read reservation 7769 struct scsi_per_res_in_rsrv *res; 7770 int tmp_len, header_only; 7771 7772 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7773 7774 scsi_ulto4b(lun->PRGeneration, res->header.generation); 7775 7776 if (lun->flags & CTL_LUN_PR_RESERVED) 7777 { 7778 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7779 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7780 res->header.length); 7781 header_only = 0; 7782 } else { 7783 tmp_len = sizeof(struct scsi_per_res_in_header); 7784 scsi_ulto4b(0, res->header.length); 7785 header_only = 1; 7786 } 7787 7788 /* 7789 * We had to drop the lock to allocate our buffer, which 7790 * leaves time for someone to come in with another 7791 * persistent reservation. (That is unlikely, though, 7792 * since this should be the only persistent reservation 7793 * command active right now.) 7794 */ 7795 if (tmp_len != total_len) { 7796 mtx_unlock(&lun->lun_lock); 7797 free(ctsio->kern_data_ptr, M_CTL); 7798 printf("%s: reservation status changed, retrying\n", 7799 __func__); 7800 goto retry; 7801 } 7802 7803 /* 7804 * No reservation held, so we're done. 7805 */ 7806 if (header_only != 0) 7807 break; 7808 7809 /* 7810 * If the registration is an All Registrants type, the key 7811 * is 0, since it doesn't really matter. 
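		 * (The key bytes in the buffer were already zeroed by the
		 * M_ZERO allocation above, so simply skipping the memcpy
		 * below reports a key of zero.)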
7812 */ 7813 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7814 memcpy(res->data.reservation, 7815 &lun->per_res[lun->pr_res_idx].res_key, 7816 sizeof(struct scsi_per_res_key)); 7817 } 7818 res->data.scopetype = lun->res_type; 7819 break; 7820 } 7821 case SPRI_RC: //report capabilities 7822 { 7823 struct scsi_per_res_cap *res_cap; 7824 uint16_t type_mask; 7825 7826 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7827 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7828 res_cap->flags2 |= SPRI_TMV | SPRI_ALLOW_3; 7829 type_mask = SPRI_TM_WR_EX_AR | 7830 SPRI_TM_EX_AC_RO | 7831 SPRI_TM_WR_EX_RO | 7832 SPRI_TM_EX_AC | 7833 SPRI_TM_WR_EX | 7834 SPRI_TM_EX_AC_AR; 7835 scsi_ulto2b(type_mask, res_cap->type_mask); 7836 break; 7837 } 7838 case SPRI_RS: { // read full status 7839 struct scsi_per_res_in_full *res_status; 7840 struct scsi_per_res_in_full_desc *res_desc; 7841 struct ctl_port *port; 7842 int i, len; 7843 7844 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; 7845 7846 /* 7847 * We had to drop the lock to allocate our buffer, which 7848 * leaves time for someone to come in with another 7849 * persistent reservation. (That is unlikely, though, 7850 * since this should be the only persistent reservation 7851 * command active right now.) 7852 */ 7853 if (total_len < (sizeof(struct scsi_per_res_in_header) + 7854 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7855 lun->pr_key_count)){ 7856 mtx_unlock(&lun->lun_lock); 7857 free(ctsio->kern_data_ptr, M_CTL); 7858 printf("%s: reservation length changed, retrying\n", 7859 __func__); 7860 goto retry; 7861 } 7862 7863 scsi_ulto4b(lun->PRGeneration, res_status->header.generation); 7864 7865 res_desc = &res_status->desc[0]; 7866 for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) { 7867 if (!lun->per_res[i].registered) 7868 continue; 7869 7870 memcpy(&res_desc->res_key, &lun->per_res[i].res_key.key, 7871 sizeof(res_desc->res_key)); 7872 if ((lun->flags & CTL_LUN_PR_RESERVED) && 7873 (lun->pr_res_idx == i || 7874 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { 7875 res_desc->flags = SPRI_FULL_R_HOLDER; 7876 res_desc->scopetype = lun->res_type; 7877 } 7878 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, 7879 res_desc->rel_trgt_port_id); 7880 len = 0; 7881 port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT]; 7882 if (port != NULL) 7883 len = ctl_create_iid(port, 7884 i % CTL_MAX_INIT_PER_PORT, 7885 res_desc->transport_id); 7886 scsi_ulto4b(len, res_desc->additional_length); 7887 res_desc = (struct scsi_per_res_in_full_desc *) 7888 &res_desc->transport_id[len]; 7889 } 7890 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], 7891 res_status->header.length); 7892 break; 7893 } 7894 default: 7895 /* 7896 * This is a bug, because we just checked for this above, 7897 * and should have returned an error. 7898 */ 7899 panic("Invalid PR type %x", cdb->action); 7900 break; /* NOTREACHED */ 7901 } 7902 mtx_unlock(&lun->lun_lock); 7903 7904 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7905 ctsio->be_move_done = ctl_config_move_done; 7906 7907 CTL_DEBUG_PRINT(("buf = %x %x %x %x %x %x %x %x\n", 7908 ctsio->kern_data_ptr[0], ctsio->kern_data_ptr[1], 7909 ctsio->kern_data_ptr[2], ctsio->kern_data_ptr[3], 7910 ctsio->kern_data_ptr[4], ctsio->kern_data_ptr[5], 7911 ctsio->kern_data_ptr[6], ctsio->kern_data_ptr[7])); 7912 7913 ctl_datamove((union ctl_io *)ctsio); 7914 7915 return (CTL_RETVAL_COMPLETE); 7916} 7917 7918/* 7919 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7920 * it should return. 
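 *
 * (Editor's note: every early-exit path below therefore completes the
 * ctsio itself, via ctl_set_*() and ctl_done(), before returning
 * non-zero.)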
7921 */ 7922static int 7923ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7924 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7925 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7926 struct scsi_per_res_out_parms* param) 7927{ 7928 union ctl_ha_msg persis_io; 7929 int retval, i; 7930 int isc_retval; 7931 7932 retval = 0; 7933 7934 mtx_lock(&lun->lun_lock); 7935 if (sa_res_key == 0) { 7936 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7937 /* validate scope and type */ 7938 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7939 SPR_LU_SCOPE) { 7940 mtx_unlock(&lun->lun_lock); 7941 ctl_set_invalid_field(/*ctsio*/ ctsio, 7942 /*sks_valid*/ 1, 7943 /*command*/ 1, 7944 /*field*/ 2, 7945 /*bit_valid*/ 1, 7946 /*bit*/ 4); 7947 ctl_done((union ctl_io *)ctsio); 7948 return (1); 7949 } 7950 7951 if (type>8 || type==2 || type==4 || type==0) { 7952 mtx_unlock(&lun->lun_lock); 7953 ctl_set_invalid_field(/*ctsio*/ ctsio, 7954 /*sks_valid*/ 1, 7955 /*command*/ 1, 7956 /*field*/ 2, 7957 /*bit_valid*/ 1, 7958 /*bit*/ 0); 7959 ctl_done((union ctl_io *)ctsio); 7960 return (1); 7961 } 7962 7963 /* temporarily unregister this nexus */ 7964 lun->per_res[residx].registered = 0; 7965 7966 /* 7967 * Unregister everybody else and build UA for 7968 * them 7969 */ 7970 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7971 if (lun->per_res[i].registered == 0) 7972 continue; 7973 7974 if (!persis_offset 7975 && i <CTL_MAX_INITIATORS) 7976 lun->pending_sense[i].ua_pending |= 7977 CTL_UA_REG_PREEMPT; 7978 else if (persis_offset 7979 && i >= persis_offset) 7980 lun->pending_sense[i-persis_offset 7981 ].ua_pending |= 7982 CTL_UA_REG_PREEMPT; 7983 lun->per_res[i].registered = 0; 7984 memset(&lun->per_res[i].res_key, 0, 7985 sizeof(struct scsi_per_res_key)); 7986 } 7987 lun->per_res[residx].registered = 1; 7988 lun->pr_key_count = 1; 7989 lun->res_type = type; 7990 if (lun->res_type != SPR_TYPE_WR_EX_AR 7991 && lun->res_type != SPR_TYPE_EX_AC_AR) 7992 lun->pr_res_idx = residx; 7993 7994 /* send msg to other side */ 7995 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7996 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7997 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7998 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7999 persis_io.pr.pr_info.res_type = type; 8000 memcpy(persis_io.pr.pr_info.sa_res_key, 8001 param->serv_act_res_key, 8002 sizeof(param->serv_act_res_key)); 8003 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8004 &persis_io, sizeof(persis_io), 0)) > 8005 CTL_HA_STATUS_SUCCESS) { 8006 printf("CTL:Persis Out error returned " 8007 "from ctl_ha_msg_send %d\n", 8008 isc_retval); 8009 } 8010 } else { 8011 /* not all registrants */ 8012 mtx_unlock(&lun->lun_lock); 8013 free(ctsio->kern_data_ptr, M_CTL); 8014 ctl_set_invalid_field(ctsio, 8015 /*sks_valid*/ 1, 8016 /*command*/ 0, 8017 /*field*/ 8, 8018 /*bit_valid*/ 0, 8019 /*bit*/ 0); 8020 ctl_done((union ctl_io *)ctsio); 8021 return (1); 8022 } 8023 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8024 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 8025 int found = 0; 8026 8027 if (res_key == sa_res_key) { 8028 /* special case */ 8029 /* 8030 * The spec implies this is not good but doesn't 8031 * say what to do. There are two choices either 8032 * generate a res conflict or check condition 8033 * with illegal field in parameter data. Since 8034 * that is what is done when the sa_res_key is 8035 * zero I'll take that approach since this has 8036 * to do with the sa_res_key. 
8037 */ 8038 mtx_unlock(&lun->lun_lock); 8039 free(ctsio->kern_data_ptr, M_CTL); 8040 ctl_set_invalid_field(ctsio, 8041 /*sks_valid*/ 1, 8042 /*command*/ 0, 8043 /*field*/ 8, 8044 /*bit_valid*/ 0, 8045 /*bit*/ 0); 8046 ctl_done((union ctl_io *)ctsio); 8047 return (1); 8048 } 8049 8050 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8051 if (lun->per_res[i].registered 8052 && memcmp(param->serv_act_res_key, 8053 lun->per_res[i].res_key.key, 8054 sizeof(struct scsi_per_res_key)) != 0) 8055 continue; 8056 8057 found = 1; 8058 lun->per_res[i].registered = 0; 8059 memset(&lun->per_res[i].res_key, 0, 8060 sizeof(struct scsi_per_res_key)); 8061 lun->pr_key_count--; 8062 8063 if (!persis_offset 8064 && i < CTL_MAX_INITIATORS) 8065 lun->pending_sense[i].ua_pending |= 8066 CTL_UA_REG_PREEMPT; 8067 else if (persis_offset 8068 && i >= persis_offset) 8069 lun->pending_sense[i-persis_offset].ua_pending|= 8070 CTL_UA_REG_PREEMPT; 8071 } 8072 if (!found) { 8073 mtx_unlock(&lun->lun_lock); 8074 free(ctsio->kern_data_ptr, M_CTL); 8075 ctl_set_reservation_conflict(ctsio); 8076 ctl_done((union ctl_io *)ctsio); 8077 return (CTL_RETVAL_COMPLETE); 8078 } 8079 /* send msg to other side */ 8080 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8081 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8082 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8083 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8084 persis_io.pr.pr_info.res_type = type; 8085 memcpy(persis_io.pr.pr_info.sa_res_key, 8086 param->serv_act_res_key, 8087 sizeof(param->serv_act_res_key)); 8088 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8089 &persis_io, sizeof(persis_io), 0)) > 8090 CTL_HA_STATUS_SUCCESS) { 8091 printf("CTL:Persis Out error returned from " 8092 "ctl_ha_msg_send %d\n", isc_retval); 8093 } 8094 } else { 8095 /* Reserved but not all registrants */ 8096 /* sa_res_key is res holder */ 8097 if (memcmp(param->serv_act_res_key, 8098 lun->per_res[lun->pr_res_idx].res_key.key, 8099 sizeof(struct scsi_per_res_key)) == 0) { 8100 /* validate scope and type */ 8101 if ((cdb->scope_type & SPR_SCOPE_MASK) != 8102 SPR_LU_SCOPE) { 8103 mtx_unlock(&lun->lun_lock); 8104 ctl_set_invalid_field(/*ctsio*/ ctsio, 8105 /*sks_valid*/ 1, 8106 /*command*/ 1, 8107 /*field*/ 2, 8108 /*bit_valid*/ 1, 8109 /*bit*/ 4); 8110 ctl_done((union ctl_io *)ctsio); 8111 return (1); 8112 } 8113 8114 if (type>8 || type==2 || type==4 || type==0) { 8115 mtx_unlock(&lun->lun_lock); 8116 ctl_set_invalid_field(/*ctsio*/ ctsio, 8117 /*sks_valid*/ 1, 8118 /*command*/ 1, 8119 /*field*/ 2, 8120 /*bit_valid*/ 1, 8121 /*bit*/ 0); 8122 ctl_done((union ctl_io *)ctsio); 8123 return (1); 8124 } 8125 8126 /* 8127 * Do the following: 8128 * if sa_res_key != res_key remove all 8129 * registrants w/sa_res_key and generate UA 8130 * for these registrants(Registrations 8131 * Preempted) if it wasn't an exclusive 8132 * reservation generate UA(Reservations 8133 * Preempted) for all other registered nexuses 8134 * if the type has changed. Establish the new 8135 * reservation and holder. If res_key and 8136 * sa_res_key are the same do the above 8137 * except don't unregister the res holder. 
8138 */ 8139 8140 /* 8141 * Temporarily unregister so it won't get 8142 * removed or UA generated 8143 */ 8144 lun->per_res[residx].registered = 0; 8145 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8146 if (lun->per_res[i].registered == 0) 8147 continue; 8148 8149 if (memcmp(param->serv_act_res_key, 8150 lun->per_res[i].res_key.key, 8151 sizeof(struct scsi_per_res_key)) == 0) { 8152 lun->per_res[i].registered = 0; 8153 memset(&lun->per_res[i].res_key, 8154 0, 8155 sizeof(struct scsi_per_res_key)); 8156 lun->pr_key_count--; 8157 8158 if (!persis_offset 8159 && i < CTL_MAX_INITIATORS) 8160 lun->pending_sense[i 8161 ].ua_pending |= 8162 CTL_UA_REG_PREEMPT; 8163 else if (persis_offset 8164 && i >= persis_offset) 8165 lun->pending_sense[ 8166 i-persis_offset].ua_pending |= 8167 CTL_UA_REG_PREEMPT; 8168 } else if (type != lun->res_type 8169 && (lun->res_type == SPR_TYPE_WR_EX_RO 8170 || lun->res_type ==SPR_TYPE_EX_AC_RO)){ 8171 if (!persis_offset 8172 && i < CTL_MAX_INITIATORS) 8173 lun->pending_sense[i 8174 ].ua_pending |= 8175 CTL_UA_RES_RELEASE; 8176 else if (persis_offset 8177 && i >= persis_offset) 8178 lun->pending_sense[ 8179 i-persis_offset 8180 ].ua_pending |= 8181 CTL_UA_RES_RELEASE; 8182 } 8183 } 8184 lun->per_res[residx].registered = 1; 8185 lun->res_type = type; 8186 if (lun->res_type != SPR_TYPE_WR_EX_AR 8187 && lun->res_type != SPR_TYPE_EX_AC_AR) 8188 lun->pr_res_idx = residx; 8189 else 8190 lun->pr_res_idx = 8191 CTL_PR_ALL_REGISTRANTS; 8192 8193 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8194 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8195 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8196 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8197 persis_io.pr.pr_info.res_type = type; 8198 memcpy(persis_io.pr.pr_info.sa_res_key, 8199 param->serv_act_res_key, 8200 sizeof(param->serv_act_res_key)); 8201 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8202 &persis_io, sizeof(persis_io), 0)) > 8203 CTL_HA_STATUS_SUCCESS) { 8204 printf("CTL:Persis Out error returned " 8205 "from ctl_ha_msg_send %d\n", 8206 isc_retval); 8207 } 8208 } else { 8209 /* 8210 * sa_res_key is not the res holder just 8211 * remove registrants 8212 */ 8213 int found=0; 8214 8215 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8216 if (memcmp(param->serv_act_res_key, 8217 lun->per_res[i].res_key.key, 8218 sizeof(struct scsi_per_res_key)) != 0) 8219 continue; 8220 8221 found = 1; 8222 lun->per_res[i].registered = 0; 8223 memset(&lun->per_res[i].res_key, 0, 8224 sizeof(struct scsi_per_res_key)); 8225 lun->pr_key_count--; 8226 8227 if (!persis_offset 8228 && i < CTL_MAX_INITIATORS) 8229 lun->pending_sense[i].ua_pending |= 8230 CTL_UA_REG_PREEMPT; 8231 else if (persis_offset 8232 && i >= persis_offset) 8233 lun->pending_sense[ 8234 i-persis_offset].ua_pending |= 8235 CTL_UA_REG_PREEMPT; 8236 } 8237 8238 if (!found) { 8239 mtx_unlock(&lun->lun_lock); 8240 free(ctsio->kern_data_ptr, M_CTL); 8241 ctl_set_reservation_conflict(ctsio); 8242 ctl_done((union ctl_io *)ctsio); 8243 return (1); 8244 } 8245 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8246 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8247 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8248 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8249 persis_io.pr.pr_info.res_type = type; 8250 memcpy(persis_io.pr.pr_info.sa_res_key, 8251 param->serv_act_res_key, 8252 sizeof(param->serv_act_res_key)); 8253 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8254 &persis_io, sizeof(persis_io), 0)) > 8255 CTL_HA_STATUS_SUCCESS) { 8256 printf("CTL:Persis Out error returned " 8257 "from 
ctl_ha_msg_send %d\n", 8258 isc_retval); 8259 } 8260 } 8261 } 8262 8263 lun->PRGeneration++; 8264 mtx_unlock(&lun->lun_lock); 8265 8266 return (retval); 8267} 8268 8269static void 8270ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 8271{ 8272 int i; 8273 8274 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8275 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 8276 || memcmp(&lun->per_res[lun->pr_res_idx].res_key, 8277 msg->pr.pr_info.sa_res_key, 8278 sizeof(struct scsi_per_res_key)) != 0) { 8279 uint64_t sa_res_key; 8280 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 8281 8282 if (sa_res_key == 0) { 8283 /* temporarily unregister this nexus */ 8284 lun->per_res[msg->pr.pr_info.residx].registered = 0; 8285 8286 /* 8287 * Unregister everybody else and build UA for 8288 * them 8289 */ 8290 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8291 if (lun->per_res[i].registered == 0) 8292 continue; 8293 8294 if (!persis_offset 8295 && i < CTL_MAX_INITIATORS) 8296 lun->pending_sense[i].ua_pending |= 8297 CTL_UA_REG_PREEMPT; 8298 else if (persis_offset && i >= persis_offset) 8299 lun->pending_sense[i - 8300 persis_offset].ua_pending |= 8301 CTL_UA_REG_PREEMPT; 8302 lun->per_res[i].registered = 0; 8303 memset(&lun->per_res[i].res_key, 0, 8304 sizeof(struct scsi_per_res_key)); 8305 } 8306 8307 lun->per_res[msg->pr.pr_info.residx].registered = 1; 8308 lun->pr_key_count = 1; 8309 lun->res_type = msg->pr.pr_info.res_type; 8310 if (lun->res_type != SPR_TYPE_WR_EX_AR 8311 && lun->res_type != SPR_TYPE_EX_AC_AR) 8312 lun->pr_res_idx = msg->pr.pr_info.residx; 8313 } else { 8314 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8315 if (memcmp(msg->pr.pr_info.sa_res_key, 8316 lun->per_res[i].res_key.key, 8317 sizeof(struct scsi_per_res_key)) != 0) 8318 continue; 8319 8320 lun->per_res[i].registered = 0; 8321 memset(&lun->per_res[i].res_key, 0, 8322 sizeof(struct scsi_per_res_key)); 8323 lun->pr_key_count--; 8324 8325 if (!persis_offset 8326 && i < CTL_MAX_INITIATORS) 8327 lun->pending_sense[i].ua_pending |= 8328 CTL_UA_REG_PREEMPT; 8329 else if (persis_offset 8330 && i >= persis_offset) 8331 lun->pending_sense[i - 8332 persis_offset].ua_pending |= 8333 CTL_UA_REG_PREEMPT; 8334 } 8335 } 8336 } else { 8337 /* 8338 * Temporarily unregister so it won't get removed 8339 * or UA generated 8340 */ 8341 lun->per_res[msg->pr.pr_info.residx].registered = 0; 8342 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8343 if (lun->per_res[i].registered == 0) 8344 continue; 8345 8346 if (memcmp(msg->pr.pr_info.sa_res_key, 8347 lun->per_res[i].res_key.key, 8348 sizeof(struct scsi_per_res_key)) == 0) { 8349 lun->per_res[i].registered = 0; 8350 memset(&lun->per_res[i].res_key, 0, 8351 sizeof(struct scsi_per_res_key)); 8352 lun->pr_key_count--; 8353 if (!persis_offset 8354 && i < CTL_MAX_INITIATORS) 8355 lun->pending_sense[i].ua_pending |= 8356 CTL_UA_REG_PREEMPT; 8357 else if (persis_offset 8358 && i >= persis_offset) 8359 lun->pending_sense[i - 8360 persis_offset].ua_pending |= 8361 CTL_UA_REG_PREEMPT; 8362 } else if (msg->pr.pr_info.res_type != lun->res_type 8363 && (lun->res_type == SPR_TYPE_WR_EX_RO 8364 || lun->res_type == SPR_TYPE_EX_AC_RO)) { 8365 if (!persis_offset 8366 && i < CTL_MAX_INITIATORS) 8367 lun->pending_sense[i 8368 ].ua_pending |= 8369 CTL_UA_RES_RELEASE; 8370 else if (persis_offset 8371 && i >= persis_offset) 8372 lun->pending_sense[i - 8373 persis_offset].ua_pending |= 8374 CTL_UA_RES_RELEASE; 8375 } 8376 } 8377 lun->per_res[msg->pr.pr_info.residx].registered = 1; 8378 lun->res_type = msg->pr.pr_info.res_type; 8379 if 
(lun->res_type != SPR_TYPE_WR_EX_AR 8380 && lun->res_type != SPR_TYPE_EX_AC_AR) 8381 lun->pr_res_idx = msg->pr.pr_info.residx; 8382 else 8383 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8384 } 8385 lun->PRGeneration++; 8386 8387} 8388 8389 8390int 8391ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 8392{ 8393 int retval; 8394 int isc_retval; 8395 u_int32_t param_len; 8396 struct scsi_per_res_out *cdb; 8397 struct ctl_lun *lun; 8398 struct scsi_per_res_out_parms* param; 8399 struct ctl_softc *softc; 8400 uint32_t residx; 8401 uint64_t res_key, sa_res_key; 8402 uint8_t type; 8403 union ctl_ha_msg persis_io; 8404 int i; 8405 8406 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 8407 8408 retval = CTL_RETVAL_COMPLETE; 8409 8410 softc = control_softc; 8411 8412 cdb = (struct scsi_per_res_out *)ctsio->cdb; 8413 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8414 8415 /* 8416 * We only support whole-LUN scope. The scope & type are ignored for 8417 * register, register and ignore existing key and clear. 8418 * We sometimes ignore scope and type on preempts too!! 8419 * Verify reservation type here as well. 8420 */ 8421 type = cdb->scope_type & SPR_TYPE_MASK; 8422 if ((cdb->action == SPRO_RESERVE) 8423 || (cdb->action == SPRO_RELEASE)) { 8424 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 8425 ctl_set_invalid_field(/*ctsio*/ ctsio, 8426 /*sks_valid*/ 1, 8427 /*command*/ 1, 8428 /*field*/ 2, 8429 /*bit_valid*/ 1, 8430 /*bit*/ 4); 8431 ctl_done((union ctl_io *)ctsio); 8432 return (CTL_RETVAL_COMPLETE); 8433 } 8434 8435 if (type>8 || type==2 || type==4 || type==0) { 8436 ctl_set_invalid_field(/*ctsio*/ ctsio, 8437 /*sks_valid*/ 1, 8438 /*command*/ 1, 8439 /*field*/ 2, 8440 /*bit_valid*/ 1, 8441 /*bit*/ 0); 8442 ctl_done((union ctl_io *)ctsio); 8443 return (CTL_RETVAL_COMPLETE); 8444 } 8445 } 8446 8447 param_len = scsi_4btoul(cdb->length); 8448 8449 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 8450 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 8451 ctsio->kern_data_len = param_len; 8452 ctsio->kern_total_len = param_len; 8453 ctsio->kern_data_resid = 0; 8454 ctsio->kern_rel_offset = 0; 8455 ctsio->kern_sg_entries = 0; 8456 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 8457 ctsio->be_move_done = ctl_config_move_done; 8458 ctl_datamove((union ctl_io *)ctsio); 8459 8460 return (CTL_RETVAL_COMPLETE); 8461 } 8462 8463 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 8464 8465 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 8466 res_key = scsi_8btou64(param->res_key.key); 8467 sa_res_key = scsi_8btou64(param->serv_act_res_key); 8468 8469 /* 8470 * Validate the reservation key here except for SPRO_REG_IGNO 8471 * This must be done for all other service actions 8472 */ 8473 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 8474 mtx_lock(&lun->lun_lock); 8475 if (lun->per_res[residx].registered) { 8476 if (memcmp(param->res_key.key, 8477 lun->per_res[residx].res_key.key, 8478 ctl_min(sizeof(param->res_key), 8479 sizeof(lun->per_res[residx].res_key))) != 0) { 8480 /* 8481 * The current key passed in doesn't match 8482 * the one the initiator previously 8483 * registered. 
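 * SPC-3 calls for RESERVATION CONFLICT status in that case: the
 * command is terminated below without changing any registration or
 * reservation.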
8484 */ 8485 mtx_unlock(&lun->lun_lock); 8486 free(ctsio->kern_data_ptr, M_CTL); 8487 ctl_set_reservation_conflict(ctsio); 8488 ctl_done((union ctl_io *)ctsio); 8489 return (CTL_RETVAL_COMPLETE); 8490 } 8491 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 8492 /* 8493 * We are not registered 8494 */ 8495 mtx_unlock(&lun->lun_lock); 8496 free(ctsio->kern_data_ptr, M_CTL); 8497 ctl_set_reservation_conflict(ctsio); 8498 ctl_done((union ctl_io *)ctsio); 8499 return (CTL_RETVAL_COMPLETE); 8500 } else if (res_key != 0) { 8501 /* 8502 * We are not registered and trying to register but 8503 * the register key isn't zero. 8504 */ 8505 mtx_unlock(&lun->lun_lock); 8506 free(ctsio->kern_data_ptr, M_CTL); 8507 ctl_set_reservation_conflict(ctsio); 8508 ctl_done((union ctl_io *)ctsio); 8509 return (CTL_RETVAL_COMPLETE); 8510 } 8511 mtx_unlock(&lun->lun_lock); 8512 } 8513 8514 switch (cdb->action & SPRO_ACTION_MASK) { 8515 case SPRO_REGISTER: 8516 case SPRO_REG_IGNO: { 8517 8518#if 0 8519 printf("Registration received\n"); 8520#endif 8521 8522 /* 8523 * We don't support any of these options, as we report in 8524 * the read capabilities request (see 8525 * ctl_persistent_reserve_in(), above). 8526 */ 8527 if ((param->flags & SPR_SPEC_I_PT) 8528 || (param->flags & SPR_ALL_TG_PT) 8529 || (param->flags & SPR_APTPL)) { 8530 int bit_ptr; 8531 8532 if (param->flags & SPR_APTPL) 8533 bit_ptr = 0; 8534 else if (param->flags & SPR_ALL_TG_PT) 8535 bit_ptr = 2; 8536 else /* SPR_SPEC_I_PT */ 8537 bit_ptr = 3; 8538 8539 free(ctsio->kern_data_ptr, M_CTL); 8540 ctl_set_invalid_field(ctsio, 8541 /*sks_valid*/ 1, 8542 /*command*/ 0, 8543 /*field*/ 20, 8544 /*bit_valid*/ 1, 8545 /*bit*/ bit_ptr); 8546 ctl_done((union ctl_io *)ctsio); 8547 return (CTL_RETVAL_COMPLETE); 8548 } 8549 8550 mtx_lock(&lun->lun_lock); 8551 8552 /* 8553 * The initiator wants to clear the 8554 * key/unregister. 8555 */ 8556 if (sa_res_key == 0) { 8557 if ((res_key == 0 8558 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 8559 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 8560 && !lun->per_res[residx].registered)) { 8561 mtx_unlock(&lun->lun_lock); 8562 goto done; 8563 } 8564 8565 lun->per_res[residx].registered = 0; 8566 memset(&lun->per_res[residx].res_key, 8567 0, sizeof(lun->per_res[residx].res_key)); 8568 lun->pr_key_count--; 8569 8570 if (residx == lun->pr_res_idx) { 8571 lun->flags &= ~CTL_LUN_PR_RESERVED; 8572 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8573 8574 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8575 || lun->res_type == SPR_TYPE_EX_AC_RO) 8576 && lun->pr_key_count) { 8577 /* 8578 * If the reservation is a registrants 8579 * only type we need to generate a UA 8580 * for other registered inits. 
The 8581 * sense code should be RESERVATIONS 8582 * RELEASED 8583 */ 8584 8585 for (i = 0; i < CTL_MAX_INITIATORS;i++){ 8586 if (lun->per_res[ 8587 i+persis_offset].registered 8588 == 0) 8589 continue; 8590 lun->pending_sense[i 8591 ].ua_pending |= 8592 CTL_UA_RES_RELEASE; 8593 } 8594 } 8595 lun->res_type = 0; 8596 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8597 if (lun->pr_key_count==0) { 8598 lun->flags &= ~CTL_LUN_PR_RESERVED; 8599 lun->res_type = 0; 8600 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8601 } 8602 } 8603 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8604 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8605 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8606 persis_io.pr.pr_info.residx = residx; 8607 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8608 &persis_io, sizeof(persis_io), 0 )) > 8609 CTL_HA_STATUS_SUCCESS) { 8610 printf("CTL:Persis Out error returned from " 8611 "ctl_ha_msg_send %d\n", isc_retval); 8612 } 8613 } else /* sa_res_key != 0 */ { 8614 8615 /* 8616 * If we aren't registered currently then increment 8617 * the key count and set the registered flag. 8618 */ 8619 if (!lun->per_res[residx].registered) { 8620 lun->pr_key_count++; 8621 lun->per_res[residx].registered = 1; 8622 } 8623 8624 memcpy(&lun->per_res[residx].res_key, 8625 param->serv_act_res_key, 8626 ctl_min(sizeof(param->serv_act_res_key), 8627 sizeof(lun->per_res[residx].res_key))); 8628 8629 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8630 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8631 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8632 persis_io.pr.pr_info.residx = residx; 8633 memcpy(persis_io.pr.pr_info.sa_res_key, 8634 param->serv_act_res_key, 8635 sizeof(param->serv_act_res_key)); 8636 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8637 &persis_io, sizeof(persis_io), 0)) > 8638 CTL_HA_STATUS_SUCCESS) { 8639 printf("CTL:Persis Out error returned from " 8640 "ctl_ha_msg_send %d\n", isc_retval); 8641 } 8642 } 8643 lun->PRGeneration++; 8644 mtx_unlock(&lun->lun_lock); 8645 8646 break; 8647 } 8648 case SPRO_RESERVE: 8649#if 0 8650 printf("Reserve executed type %d\n", type); 8651#endif 8652 mtx_lock(&lun->lun_lock); 8653 if (lun->flags & CTL_LUN_PR_RESERVED) { 8654 /* 8655 * if this isn't the reservation holder and it's 8656 * not an "all registrants" type, or if the type is 8657 * different, then we have a conflict 8658 */ 8659 if ((lun->pr_res_idx != residx 8660 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) 8661 || lun->res_type != type) { 8662 mtx_unlock(&lun->lun_lock); 8663 free(ctsio->kern_data_ptr, M_CTL); 8664 ctl_set_reservation_conflict(ctsio); 8665 ctl_done((union ctl_io *)ctsio); 8666 return (CTL_RETVAL_COMPLETE); 8667 } 8668 mtx_unlock(&lun->lun_lock); 8669 } else /* create a reservation */ { 8670 /* 8671 * If it's not an "all registrants" type, record 8672 * the reservation holder 8673 */ 8674 if (type != SPR_TYPE_WR_EX_AR 8675 && type != SPR_TYPE_EX_AC_AR) 8676 lun->pr_res_idx = residx; /* Res holder */ 8677 else 8678 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8679 8680 lun->flags |= CTL_LUN_PR_RESERVED; 8681 lun->res_type = type; 8682 8683 mtx_unlock(&lun->lun_lock); 8684 8685 /* send msg to other side */ 8686 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8687 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8688 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8689 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8690 persis_io.pr.pr_info.res_type = type; 8691 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8692 &persis_io, sizeof(persis_io), 0)) > 8693 CTL_HA_STATUS_SUCCESS) { 8694 
printf("CTL:Persis Out error returned from " 8695 "ctl_ha_msg_send %d\n", isc_retval); 8696 } 8697 } 8698 break; 8699 8700 case SPRO_RELEASE: 8701 mtx_lock(&lun->lun_lock); 8702 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8703 /* No reservation exists return good status */ 8704 mtx_unlock(&lun->lun_lock); 8705 goto done; 8706 } 8707 /* 8708 * Is this nexus a reservation holder? 8709 */ 8710 if (lun->pr_res_idx != residx 8711 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 8712 /* 8713 * not a res holder return good status but 8714 * do nothing 8715 */ 8716 mtx_unlock(&lun->lun_lock); 8717 goto done; 8718 } 8719 8720 if (lun->res_type != type) { 8721 mtx_unlock(&lun->lun_lock); 8722 free(ctsio->kern_data_ptr, M_CTL); 8723 ctl_set_illegal_pr_release(ctsio); 8724 ctl_done((union ctl_io *)ctsio); 8725 return (CTL_RETVAL_COMPLETE); 8726 } 8727 8728 /* okay to release */ 8729 lun->flags &= ~CTL_LUN_PR_RESERVED; 8730 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8731 lun->res_type = 0; 8732 8733 /* 8734 * if this isn't an exclusive access 8735 * res generate UA for all other 8736 * registrants. 8737 */ 8738 if (type != SPR_TYPE_EX_AC 8739 && type != SPR_TYPE_WR_EX) { 8740 /* 8741 * temporarily unregister so we don't generate UA 8742 */ 8743 lun->per_res[residx].registered = 0; 8744 8745 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8746 if (lun->per_res[i+persis_offset].registered 8747 == 0) 8748 continue; 8749 lun->pending_sense[i].ua_pending |= 8750 CTL_UA_RES_RELEASE; 8751 } 8752 8753 lun->per_res[residx].registered = 1; 8754 } 8755 mtx_unlock(&lun->lun_lock); 8756 /* Send msg to other side */ 8757 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8758 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8759 persis_io.pr.pr_info.action = CTL_PR_RELEASE; 8760 if ((isc_retval=ctl_ha_msg_send( CTL_HA_CHAN_CTL, &persis_io, 8761 sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { 8762 printf("CTL:Persis Out error returned from " 8763 "ctl_ha_msg_send %d\n", isc_retval); 8764 } 8765 break; 8766 8767 case SPRO_CLEAR: 8768 /* send msg to other side */ 8769 8770 mtx_lock(&lun->lun_lock); 8771 lun->flags &= ~CTL_LUN_PR_RESERVED; 8772 lun->res_type = 0; 8773 lun->pr_key_count = 0; 8774 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8775 8776 8777 memset(&lun->per_res[residx].res_key, 8778 0, sizeof(lun->per_res[residx].res_key)); 8779 lun->per_res[residx].registered = 0; 8780 8781 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) 8782 if (lun->per_res[i].registered) { 8783 if (!persis_offset && i < CTL_MAX_INITIATORS) 8784 lun->pending_sense[i].ua_pending |= 8785 CTL_UA_RES_PREEMPT; 8786 else if (persis_offset && i >= persis_offset) 8787 lun->pending_sense[i-persis_offset 8788 ].ua_pending |= CTL_UA_RES_PREEMPT; 8789 8790 memset(&lun->per_res[i].res_key, 8791 0, sizeof(struct scsi_per_res_key)); 8792 lun->per_res[i].registered = 0; 8793 } 8794 lun->PRGeneration++; 8795 mtx_unlock(&lun->lun_lock); 8796 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8797 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8798 persis_io.pr.pr_info.action = CTL_PR_CLEAR; 8799 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8800 sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { 8801 printf("CTL:Persis Out error returned from " 8802 "ctl_ha_msg_send %d\n", isc_retval); 8803 } 8804 break; 8805 8806 case SPRO_PREEMPT: { 8807 int nretval; 8808 8809 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 8810 residx, ctsio, cdb, param); 8811 if (nretval != 0) 8812 return (CTL_RETVAL_COMPLETE); 8813 break; 8814 } 8815 default: 8816 panic("Invalid PR 
type %x", cdb->action); 8817 } 8818 8819done: 8820 free(ctsio->kern_data_ptr, M_CTL); 8821 ctl_set_success(ctsio); 8822 ctl_done((union ctl_io *)ctsio); 8823 8824 return (retval); 8825} 8826 8827/* 8828 * This routine is for handling a message from the other SC pertaining to 8829 * persistent reserve out. All the error checking will have been done 8830 * so only perorming the action need be done here to keep the two 8831 * in sync. 8832 */ 8833static void 8834ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg) 8835{ 8836 struct ctl_lun *lun; 8837 struct ctl_softc *softc; 8838 int i; 8839 uint32_t targ_lun; 8840 8841 softc = control_softc; 8842 8843 targ_lun = msg->hdr.nexus.targ_mapped_lun; 8844 lun = softc->ctl_luns[targ_lun]; 8845 mtx_lock(&lun->lun_lock); 8846 switch(msg->pr.pr_info.action) { 8847 case CTL_PR_REG_KEY: 8848 if (!lun->per_res[msg->pr.pr_info.residx].registered) { 8849 lun->per_res[msg->pr.pr_info.residx].registered = 1; 8850 lun->pr_key_count++; 8851 } 8852 lun->PRGeneration++; 8853 memcpy(&lun->per_res[msg->pr.pr_info.residx].res_key, 8854 msg->pr.pr_info.sa_res_key, 8855 sizeof(struct scsi_per_res_key)); 8856 break; 8857 8858 case CTL_PR_UNREG_KEY: 8859 lun->per_res[msg->pr.pr_info.residx].registered = 0; 8860 memset(&lun->per_res[msg->pr.pr_info.residx].res_key, 8861 0, sizeof(struct scsi_per_res_key)); 8862 lun->pr_key_count--; 8863 8864 /* XXX Need to see if the reservation has been released */ 8865 /* if so do we need to generate UA? */ 8866 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8867 lun->flags &= ~CTL_LUN_PR_RESERVED; 8868 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8869 8870 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8871 || lun->res_type == SPR_TYPE_EX_AC_RO) 8872 && lun->pr_key_count) { 8873 /* 8874 * If the reservation is a registrants 8875 * only type we need to generate a UA 8876 * for other registered inits. The 8877 * sense code should be RESERVATIONS 8878 * RELEASED 8879 */ 8880 8881 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8882 if (lun->per_res[i+ 8883 persis_offset].registered == 0) 8884 continue; 8885 8886 lun->pending_sense[i 8887 ].ua_pending |= 8888 CTL_UA_RES_RELEASE; 8889 } 8890 } 8891 lun->res_type = 0; 8892 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8893 if (lun->pr_key_count==0) { 8894 lun->flags &= ~CTL_LUN_PR_RESERVED; 8895 lun->res_type = 0; 8896 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8897 } 8898 } 8899 lun->PRGeneration++; 8900 break; 8901 8902 case CTL_PR_RESERVE: 8903 lun->flags |= CTL_LUN_PR_RESERVED; 8904 lun->res_type = msg->pr.pr_info.res_type; 8905 lun->pr_res_idx = msg->pr.pr_info.residx; 8906 8907 break; 8908 8909 case CTL_PR_RELEASE: 8910 /* 8911 * if this isn't an exclusive access res generate UA for all 8912 * other registrants. 
8913 */ 8914 if (lun->res_type != SPR_TYPE_EX_AC 8915 && lun->res_type != SPR_TYPE_WR_EX) { 8916 for (i = 0; i < CTL_MAX_INITIATORS; i++) 8917 if (lun->per_res[i+persis_offset].registered) 8918 lun->pending_sense[i].ua_pending |= 8919 CTL_UA_RES_RELEASE; 8920 } 8921 8922 lun->flags &= ~CTL_LUN_PR_RESERVED; 8923 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8924 lun->res_type = 0; 8925 break; 8926 8927 case CTL_PR_PREEMPT: 8928 ctl_pro_preempt_other(lun, msg); 8929 break; 8930 case CTL_PR_CLEAR: 8931 lun->flags &= ~CTL_LUN_PR_RESERVED; 8932 lun->res_type = 0; 8933 lun->pr_key_count = 0; 8934 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8935 8936 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8937 if (lun->per_res[i].registered == 0) 8938 continue; 8939 if (!persis_offset 8940 && i < CTL_MAX_INITIATORS) 8941 lun->pending_sense[i].ua_pending |= 8942 CTL_UA_RES_PREEMPT; 8943 else if (persis_offset 8944 && i >= persis_offset) 8945 lun->pending_sense[i-persis_offset].ua_pending|= 8946 CTL_UA_RES_PREEMPT; 8947 memset(&lun->per_res[i].res_key, 0, 8948 sizeof(struct scsi_per_res_key)); 8949 lun->per_res[i].registered = 0; 8950 } 8951 lun->PRGeneration++; 8952 break; 8953 } 8954 8955 mtx_unlock(&lun->lun_lock); 8956} 8957 8958int 8959ctl_read_write(struct ctl_scsiio *ctsio) 8960{ 8961 struct ctl_lun *lun; 8962 struct ctl_lba_len_flags *lbalen; 8963 uint64_t lba; 8964 uint32_t num_blocks; 8965 int fua, dpo; 8966 int retval; 8967 int isread; 8968 8969 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8970 8971 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); 8972 8973 fua = 0; 8974 dpo = 0; 8975 8976 retval = CTL_RETVAL_COMPLETE; 8977 8978 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 8979 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; 8980 if (lun->flags & CTL_LUN_PR_RESERVED && isread) { 8981 uint32_t residx; 8982 8983 /* 8984 * XXX KDM need a lock here. 8985 */ 8986 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 8987 if ((lun->res_type == SPR_TYPE_EX_AC 8988 && residx != lun->pr_res_idx) 8989 || ((lun->res_type == SPR_TYPE_EX_AC_RO 8990 || lun->res_type == SPR_TYPE_EX_AC_AR) 8991 && !lun->per_res[residx].registered)) { 8992 ctl_set_reservation_conflict(ctsio); 8993 ctl_done((union ctl_io *)ctsio); 8994 return (CTL_RETVAL_COMPLETE); 8995 } 8996 } 8997 8998 switch (ctsio->cdb[0]) { 8999 case READ_6: 9000 case WRITE_6: { 9001 struct scsi_rw_6 *cdb; 9002 9003 cdb = (struct scsi_rw_6 *)ctsio->cdb; 9004 9005 lba = scsi_3btoul(cdb->addr); 9006 /* only 5 bits are valid in the most significant address byte */ 9007 lba &= 0x1fffff; 9008 num_blocks = cdb->length; 9009 /* 9010 * This is correct according to SBC-2. 9011 */ 9012 if (num_blocks == 0) 9013 num_blocks = 256; 9014 break; 9015 } 9016 case READ_10: 9017 case WRITE_10: { 9018 struct scsi_rw_10 *cdb; 9019 9020 cdb = (struct scsi_rw_10 *)ctsio->cdb; 9021 9022 if (cdb->byte2 & SRW10_FUA) 9023 fua = 1; 9024 if (cdb->byte2 & SRW10_DPO) 9025 dpo = 1; 9026 9027 lba = scsi_4btoul(cdb->addr); 9028 num_blocks = scsi_2btoul(cdb->length); 9029 break; 9030 } 9031 case WRITE_VERIFY_10: { 9032 struct scsi_write_verify_10 *cdb; 9033 9034 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 9035 9036 /* 9037 * XXX KDM we should do actual write verify support at some 9038 * point. This is obviously fake, we're just translating 9039 * things to a write. So we don't even bother checking the 9040 * BYTCHK field, since we don't do any verification. If 9041 * the user asks for it, we'll just pretend we did it. 
9042 */ 9043 if (cdb->byte2 & SWV_DPO) 9044 dpo = 1; 9045 9046 lba = scsi_4btoul(cdb->addr); 9047 num_blocks = scsi_2btoul(cdb->length); 9048 break; 9049 } 9050 case READ_12: 9051 case WRITE_12: { 9052 struct scsi_rw_12 *cdb; 9053 9054 cdb = (struct scsi_rw_12 *)ctsio->cdb; 9055 9056 if (cdb->byte2 & SRW12_FUA) 9057 fua = 1; 9058 if (cdb->byte2 & SRW12_DPO) 9059 dpo = 1; 9060 lba = scsi_4btoul(cdb->addr); 9061 num_blocks = scsi_4btoul(cdb->length); 9062 break; 9063 } 9064 case WRITE_VERIFY_12: { 9065 struct scsi_write_verify_12 *cdb; 9066 9067 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 9068 9069 if (cdb->byte2 & SWV_DPO) 9070 dpo = 1; 9071 9072 lba = scsi_4btoul(cdb->addr); 9073 num_blocks = scsi_4btoul(cdb->length); 9074 9075 break; 9076 } 9077 case READ_16: 9078 case WRITE_16: { 9079 struct scsi_rw_16 *cdb; 9080 9081 cdb = (struct scsi_rw_16 *)ctsio->cdb; 9082 9083 if (cdb->byte2 & SRW12_FUA) 9084 fua = 1; 9085 if (cdb->byte2 & SRW12_DPO) 9086 dpo = 1; 9087 9088 lba = scsi_8btou64(cdb->addr); 9089 num_blocks = scsi_4btoul(cdb->length); 9090 break; 9091 } 9092 case WRITE_VERIFY_16: { 9093 struct scsi_write_verify_16 *cdb; 9094 9095 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 9096 9097 if (cdb->byte2 & SWV_DPO) 9098 dpo = 1; 9099 9100 lba = scsi_8btou64(cdb->addr); 9101 num_blocks = scsi_4btoul(cdb->length); 9102 break; 9103 } 9104 default: 9105 /* 9106 * We got a command we don't support. This shouldn't 9107 * happen, commands should be filtered out above us. 9108 */ 9109 ctl_set_invalid_opcode(ctsio); 9110 ctl_done((union ctl_io *)ctsio); 9111 9112 return (CTL_RETVAL_COMPLETE); 9113 break; /* NOTREACHED */ 9114 } 9115 9116 /* 9117 * XXX KDM what do we do with the DPO and FUA bits? FUA might be 9118 * interesting for us, but if RAIDCore is in write-back mode, 9119 * getting it to do write-through for a particular transaction may 9120 * not be possible. 9121 */ 9122 9123 /* 9124 * The first check is to make sure we're in bounds, the second 9125 * check is to catch wrap-around problems. If the lba + num blocks 9126 * is less than the lba, then we've wrapped around and the block 9127 * range is invalid anyway. 9128 */ 9129 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 9130 || ((lba + num_blocks) < lba)) { 9131 ctl_set_lba_out_of_range(ctsio); 9132 ctl_done((union ctl_io *)ctsio); 9133 return (CTL_RETVAL_COMPLETE); 9134 } 9135 9136 /* 9137 * According to SBC-3, a transfer length of 0 is not an error. 9138 * Note that this cannot happen with WRITE(6) or READ(6), since 0 9139 * translates to 256 blocks for those commands. 9140 */ 9141 if (num_blocks == 0) { 9142 ctl_set_success(ctsio); 9143 ctl_done((union ctl_io *)ctsio); 9144 return (CTL_RETVAL_COMPLETE); 9145 } 9146 9147 lbalen = (struct ctl_lba_len_flags *) 9148 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9149 lbalen->lba = lba; 9150 lbalen->len = num_blocks; 9151 lbalen->flags = isread ? 
CTL_LLF_READ : CTL_LLF_WRITE; 9152 9153 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 9154 ctsio->kern_rel_offset = 0; 9155 9156 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 9157 9158 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9159 9160 return (retval); 9161} 9162 9163static int 9164ctl_cnw_cont(union ctl_io *io) 9165{ 9166 struct ctl_scsiio *ctsio; 9167 struct ctl_lun *lun; 9168 struct ctl_lba_len_flags *lbalen; 9169 int retval; 9170 9171 ctsio = &io->scsiio; 9172 ctsio->io_hdr.status = CTL_STATUS_NONE; 9173 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 9174 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9175 lbalen = (struct ctl_lba_len_flags *) 9176 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9177 lbalen->flags = CTL_LLF_WRITE; 9178 9179 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); 9180 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9181 return (retval); 9182} 9183 9184int 9185ctl_cnw(struct ctl_scsiio *ctsio) 9186{ 9187 struct ctl_lun *lun; 9188 struct ctl_lba_len_flags *lbalen; 9189 uint64_t lba; 9190 uint32_t num_blocks; 9191 int fua, dpo; 9192 int retval; 9193 9194 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9195 9196 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); 9197 9198 fua = 0; 9199 dpo = 0; 9200 9201 retval = CTL_RETVAL_COMPLETE; 9202 9203 switch (ctsio->cdb[0]) { 9204 case COMPARE_AND_WRITE: { 9205 struct scsi_compare_and_write *cdb; 9206 9207 cdb = (struct scsi_compare_and_write *)ctsio->cdb; 9208 9209 if (cdb->byte2 & SRW10_FUA) 9210 fua = 1; 9211 if (cdb->byte2 & SRW10_DPO) 9212 dpo = 1; 9213 lba = scsi_8btou64(cdb->addr); 9214 num_blocks = cdb->length; 9215 break; 9216 } 9217 default: 9218 /* 9219 * We got a command we don't support. This shouldn't 9220 * happen, commands should be filtered out above us. 9221 */ 9222 ctl_set_invalid_opcode(ctsio); 9223 ctl_done((union ctl_io *)ctsio); 9224 9225 return (CTL_RETVAL_COMPLETE); 9226 break; /* NOTREACHED */ 9227 } 9228 9229 /* 9230 * XXX KDM what do we do with the DPO and FUA bits? FUA might be 9231 * interesting for us, but if RAIDCore is in write-back mode, 9232 * getting it to do write-through for a particular transaction may 9233 * not be possible. 9234 */ 9235 9236 /* 9237 * The first check is to make sure we're in bounds, the second 9238 * check is to catch wrap-around problems. If the lba + num blocks 9239 * is less than the lba, then we've wrapped around and the block 9240 * range is invalid anyway. 9241 */ 9242 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 9243 || ((lba + num_blocks) < lba)) { 9244 ctl_set_lba_out_of_range(ctsio); 9245 ctl_done((union ctl_io *)ctsio); 9246 return (CTL_RETVAL_COMPLETE); 9247 } 9248 9249 /* 9250 * According to SBC-3, a transfer length of 0 is not an error. 9251 */ 9252 if (num_blocks == 0) { 9253 ctl_set_success(ctsio); 9254 ctl_done((union ctl_io *)ctsio); 9255 return (CTL_RETVAL_COMPLETE); 9256 } 9257 9258 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; 9259 ctsio->kern_rel_offset = 0; 9260 9261 /* 9262 * Set the IO_CONT flag, so that if this I/O gets passed to 9263 * ctl_data_submit_done(), it'll get passed back to 9264 * ctl_cnw_cont() for further processing. 
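 * COMPARE AND WRITE is therefore handled in two passes: the
 * data_submit() below runs the compare half (CTL_LLF_COMPARE; note
 * that kern_total_len above covers both halves of the transfer), and
 * ctl_cnw_cont() then flips the flags to CTL_LLF_WRITE and resubmits
 * the I/O for the write half.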
9265 */ 9266 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 9267 ctsio->io_cont = ctl_cnw_cont; 9268 9269 lbalen = (struct ctl_lba_len_flags *) 9270 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9271 lbalen->lba = lba; 9272 lbalen->len = num_blocks; 9273 lbalen->flags = CTL_LLF_COMPARE; 9274 9275 CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); 9276 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9277 return (retval); 9278} 9279 9280int 9281ctl_verify(struct ctl_scsiio *ctsio) 9282{ 9283 struct ctl_lun *lun; 9284 struct ctl_lba_len_flags *lbalen; 9285 uint64_t lba; 9286 uint32_t num_blocks; 9287 int bytchk, dpo; 9288 int retval; 9289 9290 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9291 9292 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); 9293 9294 bytchk = 0; 9295 dpo = 0; 9296 retval = CTL_RETVAL_COMPLETE; 9297 9298 switch (ctsio->cdb[0]) { 9299 case VERIFY_10: { 9300 struct scsi_verify_10 *cdb; 9301 9302 cdb = (struct scsi_verify_10 *)ctsio->cdb; 9303 if (cdb->byte2 & SVFY_BYTCHK) 9304 bytchk = 1; 9305 if (cdb->byte2 & SVFY_DPO) 9306 dpo = 1; 9307 lba = scsi_4btoul(cdb->addr); 9308 num_blocks = scsi_2btoul(cdb->length); 9309 break; 9310 } 9311 case VERIFY_12: { 9312 struct scsi_verify_12 *cdb; 9313 9314 cdb = (struct scsi_verify_12 *)ctsio->cdb; 9315 if (cdb->byte2 & SVFY_BYTCHK) 9316 bytchk = 1; 9317 if (cdb->byte2 & SVFY_DPO) 9318 dpo = 1; 9319 lba = scsi_4btoul(cdb->addr); 9320 num_blocks = scsi_4btoul(cdb->length); 9321 break; 9322 } 9323 case VERIFY_16: { 9324 struct scsi_rw_16 *cdb; 9325 9326 cdb = (struct scsi_rw_16 *)ctsio->cdb; 9327 if (cdb->byte2 & SVFY_BYTCHK) 9328 bytchk = 1; 9329 if (cdb->byte2 & SVFY_DPO) 9330 dpo = 1; 9331 lba = scsi_8btou64(cdb->addr); 9332 num_blocks = scsi_4btoul(cdb->length); 9333 break; 9334 } 9335 default: 9336 /* 9337 * We got a command we don't support. This shouldn't 9338 * happen, commands should be filtered out above us. 9339 */ 9340 ctl_set_invalid_opcode(ctsio); 9341 ctl_done((union ctl_io *)ctsio); 9342 return (CTL_RETVAL_COMPLETE); 9343 } 9344 9345 /* 9346 * The first check is to make sure we're in bounds, the second 9347 * check is to catch wrap-around problems. If the lba + num blocks 9348 * is less than the lba, then we've wrapped around and the block 9349 * range is invalid anyway. 9350 */ 9351 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 9352 || ((lba + num_blocks) < lba)) { 9353 ctl_set_lba_out_of_range(ctsio); 9354 ctl_done((union ctl_io *)ctsio); 9355 return (CTL_RETVAL_COMPLETE); 9356 } 9357 9358 /* 9359 * According to SBC-3, a transfer length of 0 is not an error. 
9360 */ 9361 if (num_blocks == 0) { 9362 ctl_set_success(ctsio); 9363 ctl_done((union ctl_io *)ctsio); 9364 return (CTL_RETVAL_COMPLETE); 9365 } 9366 9367 lbalen = (struct ctl_lba_len_flags *) 9368 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9369 lbalen->lba = lba; 9370 lbalen->len = num_blocks; 9371 if (bytchk) { 9372 lbalen->flags = CTL_LLF_COMPARE; 9373 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 9374 } else { 9375 lbalen->flags = CTL_LLF_VERIFY; 9376 ctsio->kern_total_len = 0; 9377 } 9378 ctsio->kern_rel_offset = 0; 9379 9380 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); 9381 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9382 return (retval); 9383} 9384 9385int 9386ctl_report_luns(struct ctl_scsiio *ctsio) 9387{ 9388 struct scsi_report_luns *cdb; 9389 struct scsi_report_luns_data *lun_data; 9390 struct ctl_lun *lun, *request_lun; 9391 int num_luns, retval; 9392 uint32_t alloc_len, lun_datalen; 9393 int num_filled, well_known; 9394 uint32_t initidx, targ_lun_id, lun_id; 9395 9396 retval = CTL_RETVAL_COMPLETE; 9397 well_known = 0; 9398 9399 cdb = (struct scsi_report_luns *)ctsio->cdb; 9400 9401 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 9402 9403 mtx_lock(&control_softc->ctl_lock); 9404 num_luns = control_softc->num_luns; 9405 mtx_unlock(&control_softc->ctl_lock); 9406 9407 switch (cdb->select_report) { 9408 case RPL_REPORT_DEFAULT: 9409 case RPL_REPORT_ALL: 9410 break; 9411 case RPL_REPORT_WELLKNOWN: 9412 well_known = 1; 9413 num_luns = 0; 9414 break; 9415 default: 9416 ctl_set_invalid_field(ctsio, 9417 /*sks_valid*/ 1, 9418 /*command*/ 1, 9419 /*field*/ 2, 9420 /*bit_valid*/ 0, 9421 /*bit*/ 0); 9422 ctl_done((union ctl_io *)ctsio); 9423 return (retval); 9424 break; /* NOTREACHED */ 9425 } 9426 9427 alloc_len = scsi_4btoul(cdb->length); 9428 /* 9429 * The initiator has to allocate at least 16 bytes for this request, 9430 * so he can at least get the header and the first LUN. Otherwise 9431 * we reject the request (per SPC-3 rev 14, section 6.21). 9432 */ 9433 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 9434 sizeof(struct scsi_report_luns_lundata))) { 9435 ctl_set_invalid_field(ctsio, 9436 /*sks_valid*/ 1, 9437 /*command*/ 1, 9438 /*field*/ 6, 9439 /*bit_valid*/ 0, 9440 /*bit*/ 0); 9441 ctl_done((union ctl_io *)ctsio); 9442 return (retval); 9443 } 9444 9445 request_lun = (struct ctl_lun *) 9446 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9447 9448 lun_datalen = sizeof(*lun_data) + 9449 (num_luns * sizeof(struct scsi_report_luns_lundata)); 9450 9451 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 9452 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 9453 ctsio->kern_sg_entries = 0; 9454 9455 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9456 9457 mtx_lock(&control_softc->ctl_lock); 9458 for (targ_lun_id = 0, num_filled = 0; targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns; targ_lun_id++) { 9459 lun_id = ctl_map_lun(ctsio->io_hdr.nexus.targ_port, targ_lun_id); 9460 if (lun_id >= CTL_MAX_LUNS) 9461 continue; 9462 lun = control_softc->ctl_luns[lun_id]; 9463 if (lun == NULL) 9464 continue; 9465 9466 if (targ_lun_id <= 0xff) { 9467 /* 9468 * Peripheral addressing method, bus number 0. 9469 */ 9470 lun_data->luns[num_filled].lundata[0] = 9471 RPL_LUNDATA_ATYP_PERIPH; 9472 lun_data->luns[num_filled].lundata[1] = targ_lun_id; 9473 num_filled++; 9474 } else if (targ_lun_id <= 0x3fff) { 9475 /* 9476 * Flat addressing method. 
9477 */ 9478 lun_data->luns[num_filled].lundata[0] = 9479 RPL_LUNDATA_ATYP_FLAT | 9480 (targ_lun_id & RPL_LUNDATA_FLAT_LUN_MASK); 9481#ifdef OLDCTLHEADERS 9482 (SRLD_ADDR_FLAT << SRLD_ADDR_SHIFT) | 9483 (targ_lun_id & SRLD_BUS_LUN_MASK); 9484#endif 9485 lun_data->luns[num_filled].lundata[1] = 9486#ifdef OLDCTLHEADERS 9487 targ_lun_id >> SRLD_BUS_LUN_BITS; 9488#endif 9489 targ_lun_id >> RPL_LUNDATA_FLAT_LUN_BITS; 9490 num_filled++; 9491 } else { 9492 printf("ctl_report_luns: bogus LUN number %jd, " 9493 "skipping\n", (intmax_t)targ_lun_id); 9494 } 9495 /* 9496 * According to SPC-3, rev 14 section 6.21: 9497 * 9498 * "The execution of a REPORT LUNS command to any valid and 9499 * installed logical unit shall clear the REPORTED LUNS DATA 9500 * HAS CHANGED unit attention condition for all logical 9501 * units of that target with respect to the requesting 9502 * initiator. A valid and installed logical unit is one 9503 * having a PERIPHERAL QUALIFIER of 000b in the standard 9504 * INQUIRY data (see 6.4.2)." 9505 * 9506 * If request_lun is NULL, the LUN this report luns command 9507 * was issued to is either disabled or doesn't exist. In that 9508 * case, we shouldn't clear any pending lun change unit 9509 * attention. 9510 */ 9511 if (request_lun != NULL) { 9512 mtx_lock(&lun->lun_lock); 9513 lun->pending_sense[initidx].ua_pending &= 9514 ~CTL_UA_LUN_CHANGE; 9515 mtx_unlock(&lun->lun_lock); 9516 } 9517 } 9518 mtx_unlock(&control_softc->ctl_lock); 9519 9520 /* 9521 * It's quite possible that we've returned fewer LUNs than we allocated 9522 * space for. Trim it. 9523 */ 9524 lun_datalen = sizeof(*lun_data) + 9525 (num_filled * sizeof(struct scsi_report_luns_lundata)); 9526 9527 if (lun_datalen < alloc_len) { 9528 ctsio->residual = alloc_len - lun_datalen; 9529 ctsio->kern_data_len = lun_datalen; 9530 ctsio->kern_total_len = lun_datalen; 9531 } else { 9532 ctsio->residual = 0; 9533 ctsio->kern_data_len = alloc_len; 9534 ctsio->kern_total_len = alloc_len; 9535 } 9536 ctsio->kern_data_resid = 0; 9537 ctsio->kern_rel_offset = 0; 9538 ctsio->kern_sg_entries = 0; 9539 9540 /* 9541 * We set this to the actual data length, regardless of how much 9542 * space we actually have to return results. If the user looks at 9543 * this value, he'll know whether or not he allocated enough space 9544 * and reissue the command if necessary. We don't support well 9545 * known logical units, so if the user asks for that, return none. 9546 */ 9547 scsi_ulto4b(lun_datalen - 8, lun_data->length); 9548 9549 /* 9550 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 9551 * this request. 9552 */ 9553 ctsio->scsi_status = SCSI_STATUS_OK; 9554 9555 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9556 ctsio->be_move_done = ctl_config_move_done; 9557 ctl_datamove((union ctl_io *)ctsio); 9558 9559 return (retval); 9560} 9561 9562int 9563ctl_request_sense(struct ctl_scsiio *ctsio) 9564{ 9565 struct scsi_request_sense *cdb; 9566 struct scsi_sense_data *sense_ptr; 9567 struct ctl_lun *lun; 9568 uint32_t initidx; 9569 int have_error; 9570 scsi_sense_data_type sense_format; 9571 9572 cdb = (struct scsi_request_sense *)ctsio->cdb; 9573 9574 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9575 9576 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 9577 9578 /* 9579 * Determine which sense format the user wants. 
9580 */ 9581 if (cdb->byte2 & SRS_DESC) 9582 sense_format = SSD_TYPE_DESC; 9583 else 9584 sense_format = SSD_TYPE_FIXED; 9585 9586 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 9587 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 9588 ctsio->kern_sg_entries = 0; 9589 9590 /* 9591 * struct scsi_sense_data, which is currently set to 256 bytes, is 9592 * larger than the largest allowed value for the length field in the 9593 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 9594 */ 9595 ctsio->residual = 0; 9596 ctsio->kern_data_len = cdb->length; 9597 ctsio->kern_total_len = cdb->length; 9598 9599 ctsio->kern_data_resid = 0; 9600 ctsio->kern_rel_offset = 0; 9601 ctsio->kern_sg_entries = 0; 9602 9603 /* 9604 * If we don't have a LUN, we don't have any pending sense. 9605 */ 9606 if (lun == NULL) 9607 goto no_sense; 9608 9609 have_error = 0; 9610 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9611 /* 9612 * Check for pending sense, and then for pending unit attentions. 9613 * Pending sense gets returned first, then pending unit attentions. 9614 */ 9615 mtx_lock(&lun->lun_lock); 9616 if (ctl_is_set(lun->have_ca, initidx)) { 9617 scsi_sense_data_type stored_format; 9618 9619 /* 9620 * Check to see which sense format was used for the stored 9621 * sense data. 9622 */ 9623 stored_format = scsi_sense_type( 9624 &lun->pending_sense[initidx].sense); 9625 9626 /* 9627 * If the user requested a different sense format than the 9628 * one we stored, then we need to convert it to the other 9629 * format. If we're going from descriptor to fixed format 9630 * sense data, we may lose things in translation, depending 9631 * on what options were used. 9632 * 9633 * If the stored format is SSD_TYPE_NONE (i.e. invalid), 9634 * for some reason we'll just copy it out as-is. 9635 */ 9636 if ((stored_format == SSD_TYPE_FIXED) 9637 && (sense_format == SSD_TYPE_DESC)) 9638 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 9639 &lun->pending_sense[initidx].sense, 9640 (struct scsi_sense_data_desc *)sense_ptr); 9641 else if ((stored_format == SSD_TYPE_DESC) 9642 && (sense_format == SSD_TYPE_FIXED)) 9643 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 9644 &lun->pending_sense[initidx].sense, 9645 (struct scsi_sense_data_fixed *)sense_ptr); 9646 else 9647 memcpy(sense_ptr, &lun->pending_sense[initidx].sense, 9648 ctl_min(sizeof(*sense_ptr), 9649 sizeof(lun->pending_sense[initidx].sense))); 9650 9651 ctl_clear_mask(lun->have_ca, initidx); 9652 have_error = 1; 9653 } else if (lun->pending_sense[initidx].ua_pending != CTL_UA_NONE) { 9654 ctl_ua_type ua_type; 9655 9656 ua_type = ctl_build_ua(lun->pending_sense[initidx].ua_pending, 9657 sense_ptr, sense_format); 9658 if (ua_type != CTL_UA_NONE) { 9659 have_error = 1; 9660 /* We're reporting this UA, so clear it */ 9661 lun->pending_sense[initidx].ua_pending &= ~ua_type; 9662 } 9663 } 9664 mtx_unlock(&lun->lun_lock); 9665 9666 /* 9667 * We already have a pending error, return it. 9668 */ 9669 if (have_error != 0) { 9670 /* 9671 * We report the SCSI status as OK, since the status of the 9672 * request sense command itself is OK. 9673 */ 9674 ctsio->scsi_status = SCSI_STATUS_OK; 9675 9676 /* 9677 * We report 0 for the sense length, because we aren't doing 9678 * autosense in this case. We're reporting sense as 9679 * parameter data. 
9680 */ 9681 ctsio->sense_len = 0; 9682 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9683 ctsio->be_move_done = ctl_config_move_done; 9684 ctl_datamove((union ctl_io *)ctsio); 9685 9686 return (CTL_RETVAL_COMPLETE); 9687 } 9688 9689no_sense: 9690 9691 /* 9692 * No sense information to report, so we report that everything is 9693 * okay. 9694 */ 9695 ctl_set_sense_data(sense_ptr, 9696 lun, 9697 sense_format, 9698 /*current_error*/ 1, 9699 /*sense_key*/ SSD_KEY_NO_SENSE, 9700 /*asc*/ 0x00, 9701 /*ascq*/ 0x00, 9702 SSD_ELEM_NONE); 9703 9704 ctsio->scsi_status = SCSI_STATUS_OK; 9705 9706 /* 9707 * We report 0 for the sense length, because we aren't doing 9708 * autosense in this case. We're reporting sense as parameter data. 9709 */ 9710 ctsio->sense_len = 0; 9711 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9712 ctsio->be_move_done = ctl_config_move_done; 9713 ctl_datamove((union ctl_io *)ctsio); 9714 9715 return (CTL_RETVAL_COMPLETE); 9716} 9717 9718int 9719ctl_tur(struct ctl_scsiio *ctsio) 9720{ 9721 struct ctl_lun *lun; 9722 9723 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9724 9725 CTL_DEBUG_PRINT(("ctl_tur\n")); 9726 9727 if (lun == NULL) 9728 return (EINVAL); 9729 9730 ctsio->scsi_status = SCSI_STATUS_OK; 9731 ctsio->io_hdr.status = CTL_SUCCESS; 9732 9733 ctl_done((union ctl_io *)ctsio); 9734 9735 return (CTL_RETVAL_COMPLETE); 9736} 9737 9738#ifdef notyet 9739static int 9740ctl_cmddt_inquiry(struct ctl_scsiio *ctsio) 9741{ 9742 9743} 9744#endif 9745 9746static int 9747ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9748{ 9749 struct scsi_vpd_supported_pages *pages; 9750 int sup_page_size; 9751 struct ctl_lun *lun; 9752 9753 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9754 9755 sup_page_size = sizeof(struct scsi_vpd_supported_pages) * 9756 SCSI_EVPD_NUM_SUPPORTED_PAGES; 9757 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 9758 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 9759 ctsio->kern_sg_entries = 0; 9760 9761 if (sup_page_size < alloc_len) { 9762 ctsio->residual = alloc_len - sup_page_size; 9763 ctsio->kern_data_len = sup_page_size; 9764 ctsio->kern_total_len = sup_page_size; 9765 } else { 9766 ctsio->residual = 0; 9767 ctsio->kern_data_len = alloc_len; 9768 ctsio->kern_total_len = alloc_len; 9769 } 9770 ctsio->kern_data_resid = 0; 9771 ctsio->kern_rel_offset = 0; 9772 ctsio->kern_sg_entries = 0; 9773 9774 /* 9775 * The control device is always connected. The disk device, on the 9776 * other hand, may not be online all the time. Need to change this 9777 * to figure out whether the disk device is actually online or not. 
9778 */ 9779 if (lun != NULL) 9780 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 9781 lun->be_lun->lun_type; 9782 else 9783 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9784 9785 pages->length = SCSI_EVPD_NUM_SUPPORTED_PAGES; 9786 /* Supported VPD pages */ 9787 pages->page_list[0] = SVPD_SUPPORTED_PAGES; 9788 /* Serial Number */ 9789 pages->page_list[1] = SVPD_UNIT_SERIAL_NUMBER; 9790 /* Device Identification */ 9791 pages->page_list[2] = SVPD_DEVICE_ID; 9792 /* SCSI Ports */ 9793 pages->page_list[3] = SVPD_SCSI_PORTS; 9794 /* Block limits */ 9795 pages->page_list[4] = SVPD_BLOCK_LIMITS; 9796 /* Logical Block Provisioning */ 9797 pages->page_list[5] = SVPD_LBP; 9798 9799 ctsio->scsi_status = SCSI_STATUS_OK; 9800 9801 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9802 ctsio->be_move_done = ctl_config_move_done; 9803 ctl_datamove((union ctl_io *)ctsio); 9804 9805 return (CTL_RETVAL_COMPLETE); 9806} 9807 9808static int 9809ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 9810{ 9811 struct scsi_vpd_unit_serial_number *sn_ptr; 9812 struct ctl_lun *lun; 9813 9814 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9815 9816 ctsio->kern_data_ptr = malloc(sizeof(*sn_ptr), M_CTL, M_WAITOK | M_ZERO); 9817 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9818 ctsio->kern_sg_entries = 0; 9819 9820 if (sizeof(*sn_ptr) < alloc_len) { 9821 ctsio->residual = alloc_len - sizeof(*sn_ptr); 9822 ctsio->kern_data_len = sizeof(*sn_ptr); 9823 ctsio->kern_total_len = sizeof(*sn_ptr); 9824 } else { 9825 ctsio->residual = 0; 9826 ctsio->kern_data_len = alloc_len; 9827 ctsio->kern_total_len = alloc_len; 9828 } 9829 ctsio->kern_data_resid = 0; 9830 ctsio->kern_rel_offset = 0; 9831 ctsio->kern_sg_entries = 0; 9832 9833 /* 9834 * The control device is always connected. The disk device, on the 9835 * other hand, may not be online all the time. Need to change this 9836 * to figure out whether the disk device is actually online or not. 9837 */ 9838 if (lun != NULL) 9839 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9840 lun->be_lun->lun_type; 9841 else 9842 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9843 9844 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9845 sn_ptr->length = ctl_min(sizeof(*sn_ptr) - 4, CTL_SN_LEN); 9846 /* 9847 * If we don't have a LUN, we just leave the serial number as 9848 * all spaces. 
9849 */ 9850 memset(sn_ptr->serial_num, 0x20, sizeof(sn_ptr->serial_num)); 9851 if (lun != NULL) { 9852 strncpy((char *)sn_ptr->serial_num, 9853 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9854 } 9855 ctsio->scsi_status = SCSI_STATUS_OK; 9856 9857 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9858 ctsio->be_move_done = ctl_config_move_done; 9859 ctl_datamove((union ctl_io *)ctsio); 9860 9861 return (CTL_RETVAL_COMPLETE); 9862} 9863 9864 9865static int 9866ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9867{ 9868 struct scsi_vpd_device_id *devid_ptr; 9869 struct scsi_vpd_id_descriptor *desc; 9870 struct ctl_softc *ctl_softc; 9871 struct ctl_lun *lun; 9872 struct ctl_port *port; 9873 int data_len; 9874 uint8_t proto; 9875 9876 ctl_softc = control_softc; 9877 9878 port = ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]; 9879 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9880 9881 data_len = sizeof(struct scsi_vpd_device_id) + 9882 sizeof(struct scsi_vpd_id_descriptor) + 9883 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9884 sizeof(struct scsi_vpd_id_descriptor) + 9885 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9886 if (lun && lun->lun_devid) 9887 data_len += lun->lun_devid->len; 9888 if (port->port_devid) 9889 data_len += port->port_devid->len; 9890 if (port->target_devid) 9891 data_len += port->target_devid->len; 9892 9893 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9894 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9895 ctsio->kern_sg_entries = 0; 9896 9897 if (data_len < alloc_len) { 9898 ctsio->residual = alloc_len - data_len; 9899 ctsio->kern_data_len = data_len; 9900 ctsio->kern_total_len = data_len; 9901 } else { 9902 ctsio->residual = 0; 9903 ctsio->kern_data_len = alloc_len; 9904 ctsio->kern_total_len = alloc_len; 9905 } 9906 ctsio->kern_data_resid = 0; 9907 ctsio->kern_rel_offset = 0; 9908 ctsio->kern_sg_entries = 0; 9909 9910 /* 9911 * The control device is always connected. The disk device, on the 9912 * other hand, may not be online all the time. 9913 */ 9914 if (lun != NULL) 9915 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9916 lun->be_lun->lun_type; 9917 else 9918 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9919 devid_ptr->page_code = SVPD_DEVICE_ID; 9920 scsi_ulto2b(data_len - 4, devid_ptr->length); 9921 9922 if (port->port_type == CTL_PORT_FC) 9923 proto = SCSI_PROTO_FC << 4; 9924 else if (port->port_type == CTL_PORT_ISCSI) 9925 proto = SCSI_PROTO_ISCSI << 4; 9926 else 9927 proto = SCSI_PROTO_SPI << 4; 9928 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9929 9930 /* 9931 * We're using a LUN association here. i.e., this device ID is a 9932 * per-LUN identifier. 9933 */ 9934 if (lun && lun->lun_devid) { 9935 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); 9936 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9937 lun->lun_devid->len); 9938 } 9939 9940 /* 9941 * This is for the WWPN which is a port association. 
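 * (Whatever identifier the FETD registered in port_devid, typically the WWPN on Fibre Channel ports, is copied out verbatim.)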
9942 */ 9943 if (port->port_devid) { 9944 memcpy(desc, port->port_devid->data, port->port_devid->len); 9945 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9946 port->port_devid->len); 9947 } 9948 9949 /* 9950 * This is for the Relative Target Port(type 4h) identifier 9951 */ 9952 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9953 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9954 SVPD_ID_TYPE_RELTARG; 9955 desc->length = 4; 9956 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); 9957 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9958 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 9959 9960 /* 9961 * This is for the Target Port Group(type 5h) identifier 9962 */ 9963 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9964 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9965 SVPD_ID_TYPE_TPORTGRP; 9966 desc->length = 4; 9967 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port / CTL_MAX_PORTS + 1, 9968 &desc->identifier[2]); 9969 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9970 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 9971 9972 /* 9973 * This is for the Target identifier 9974 */ 9975 if (port->target_devid) { 9976 memcpy(desc, port->target_devid->data, port->target_devid->len); 9977 } 9978 9979 ctsio->scsi_status = SCSI_STATUS_OK; 9980 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9981 ctsio->be_move_done = ctl_config_move_done; 9982 ctl_datamove((union ctl_io *)ctsio); 9983 9984 return (CTL_RETVAL_COMPLETE); 9985} 9986 9987static int 9988ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 9989{ 9990 struct ctl_softc *softc = control_softc; 9991 struct scsi_vpd_scsi_ports *sp; 9992 struct scsi_vpd_port_designation *pd; 9993 struct scsi_vpd_port_designation_cont *pdc; 9994 struct ctl_lun *lun; 9995 struct ctl_port *port; 9996 int data_len, num_target_ports, id_len, g, pg, p; 9997 int num_target_port_groups, single; 9998 9999 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10000 10001 single = ctl_is_single; 10002 if (single) 10003 num_target_port_groups = 1; 10004 else 10005 num_target_port_groups = NUM_TARGET_PORT_GROUPS; 10006 num_target_ports = 0; 10007 id_len = 0; 10008 mtx_lock(&softc->ctl_lock); 10009 STAILQ_FOREACH(port, &softc->port_list, links) { 10010 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 10011 continue; 10012 if (ctl_map_lun_back(port->targ_port, lun->lun) >= 10013 CTL_MAX_LUNS) 10014 continue; 10015 num_target_ports++; 10016 if (port->port_devid) 10017 id_len += port->port_devid->len; 10018 } 10019 mtx_unlock(&softc->ctl_lock); 10020 10021 data_len = sizeof(struct scsi_vpd_scsi_ports) + num_target_port_groups * 10022 num_target_ports * (sizeof(struct scsi_vpd_port_designation) + 10023 sizeof(struct scsi_vpd_port_designation_cont)) + id_len; 10024 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10025 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; 10026 ctsio->kern_sg_entries = 0; 10027 10028 if (data_len < alloc_len) { 10029 ctsio->residual = alloc_len - data_len; 10030 ctsio->kern_data_len = data_len; 10031 ctsio->kern_total_len = data_len; 10032 } else { 10033 ctsio->residual = 0; 10034 ctsio->kern_data_len = alloc_len; 10035 ctsio->kern_total_len = alloc_len; 10036 } 10037 ctsio->kern_data_resid = 0; 10038 ctsio->kern_rel_offset = 0; 10039 ctsio->kern_sg_entries = 0; 10040 10041 /* 10042 * The control device is always connected. The disk device, on the 10043 * other hand, may not be online all the time. 
Need to change this 10044 * to figure out whether the disk device is actually online or not. 10045 */ 10046 if (lun != NULL) 10047 sp->device = (SID_QUAL_LU_CONNECTED << 5) | 10048 lun->be_lun->lun_type; 10049 else 10050 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10051 10052 sp->page_code = SVPD_SCSI_PORTS; 10053 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 10054 sp->page_length); 10055 pd = &sp->design[0]; 10056 10057 mtx_lock(&softc->ctl_lock); 10058 if (softc->flags & CTL_FLAG_MASTER_SHELF) 10059 pg = 0; 10060 else 10061 pg = 1; 10062 for (g = 0; g < num_target_port_groups; g++) { 10063 STAILQ_FOREACH(port, &softc->port_list, links) { 10064 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 10065 continue; 10066 if (ctl_map_lun_back(port->targ_port, lun->lun) >= 10067 CTL_MAX_LUNS) 10068 continue; 10069 p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS; 10070 scsi_ulto2b(p, pd->relative_port_id); 10071 scsi_ulto2b(0, pd->initiator_transportid_length); 10072 pdc = (struct scsi_vpd_port_designation_cont *) 10073 &pd->initiator_transportid[0]; 10074 if (port->port_devid && g == pg) { 10075 id_len = port->port_devid->len; 10076 scsi_ulto2b(port->port_devid->len, 10077 pdc->target_port_descriptors_length); 10078 memcpy(pdc->target_port_descriptors, 10079 port->port_devid->data, port->port_devid->len); 10080 } else { 10081 id_len = 0; 10082 scsi_ulto2b(0, pdc->target_port_descriptors_length); 10083 } 10084 pd = (struct scsi_vpd_port_designation *) 10085 ((uint8_t *)pdc->target_port_descriptors + id_len); 10086 } 10087 } 10088 mtx_unlock(&softc->ctl_lock); 10089 10090 ctsio->scsi_status = SCSI_STATUS_OK; 10091 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10092 ctsio->be_move_done = ctl_config_move_done; 10093 ctl_datamove((union ctl_io *)ctsio); 10094 10095 return (CTL_RETVAL_COMPLETE); 10096} 10097 10098static int 10099ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) 10100{ 10101 struct scsi_vpd_block_limits *bl_ptr; 10102 struct ctl_lun *lun; 10103 int bs; 10104 10105 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10106 bs = lun->be_lun->blocksize; 10107 10108 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); 10109 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; 10110 ctsio->kern_sg_entries = 0; 10111 10112 if (sizeof(*bl_ptr) < alloc_len) { 10113 ctsio->residual = alloc_len - sizeof(*bl_ptr); 10114 ctsio->kern_data_len = sizeof(*bl_ptr); 10115 ctsio->kern_total_len = sizeof(*bl_ptr); 10116 } else { 10117 ctsio->residual = 0; 10118 ctsio->kern_data_len = alloc_len; 10119 ctsio->kern_total_len = alloc_len; 10120 } 10121 ctsio->kern_data_resid = 0; 10122 ctsio->kern_rel_offset = 0; 10123 ctsio->kern_sg_entries = 0; 10124 10125 /* 10126 * The control device is always connected. The disk device, on the 10127 * other hand, may not be online all the time. Need to change this 10128 * to figure out whether the disk device is actually online or not. 
10129 */ 10130 if (lun != NULL) 10131 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10132 lun->be_lun->lun_type; 10133 else 10134 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10135 10136 bl_ptr->page_code = SVPD_BLOCK_LIMITS; 10137 scsi_ulto2b(sizeof(*bl_ptr), bl_ptr->page_length); 10138 bl_ptr->max_cmp_write_len = 0xff; 10139 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); 10140 scsi_ulto4b(MAXPHYS / bs, bl_ptr->opt_txfer_len); 10141 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 10142 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_lba_cnt); 10143 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_blk_cnt); 10144 } 10145 scsi_u64to8b(UINT64_MAX, bl_ptr->max_write_same_length); 10146 10147 ctsio->scsi_status = SCSI_STATUS_OK; 10148 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10149 ctsio->be_move_done = ctl_config_move_done; 10150 ctl_datamove((union ctl_io *)ctsio); 10151 10152 return (CTL_RETVAL_COMPLETE); 10153} 10154 10155static int 10156ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) 10157{ 10158 struct scsi_vpd_logical_block_prov *lbp_ptr; 10159 struct ctl_lun *lun; 10160 int bs; 10161 10162 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10163 bs = lun->be_lun->blocksize; 10164 10165 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); 10166 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; 10167 ctsio->kern_sg_entries = 0; 10168 10169 if (sizeof(*lbp_ptr) < alloc_len) { 10170 ctsio->residual = alloc_len - sizeof(*lbp_ptr); 10171 ctsio->kern_data_len = sizeof(*lbp_ptr); 10172 ctsio->kern_total_len = sizeof(*lbp_ptr); 10173 } else { 10174 ctsio->residual = 0; 10175 ctsio->kern_data_len = alloc_len; 10176 ctsio->kern_total_len = alloc_len; 10177 } 10178 ctsio->kern_data_resid = 0; 10179 ctsio->kern_rel_offset = 0; 10180 ctsio->kern_sg_entries = 0; 10181 10182 /* 10183 * The control device is always connected. The disk device, on the 10184 * other hand, may not be online all the time. Need to change this 10185 * to figure out whether the disk device is actually online or not. 
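 * (Note that the UNMAP, WS16 and WS10 bits below are only advertised when the backend has set CTL_LUN_FLAG_UNMAP.)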
10186 */ 10187 if (lun != NULL) 10188 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10189 lun->be_lun->lun_type; 10190 else 10191 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10192 10193 lbp_ptr->page_code = SVPD_LBP; 10194 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 10195 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | SVPD_LBP_WS10; 10196 10197 ctsio->scsi_status = SCSI_STATUS_OK; 10198 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10199 ctsio->be_move_done = ctl_config_move_done; 10200 ctl_datamove((union ctl_io *)ctsio); 10201 10202 return (CTL_RETVAL_COMPLETE); 10203} 10204 10205static int 10206ctl_inquiry_evpd(struct ctl_scsiio *ctsio) 10207{ 10208 struct scsi_inquiry *cdb; 10209 struct ctl_lun *lun; 10210 int alloc_len, retval; 10211 10212 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10213 cdb = (struct scsi_inquiry *)ctsio->cdb; 10214 10215 retval = CTL_RETVAL_COMPLETE; 10216 10217 alloc_len = scsi_2btoul(cdb->length); 10218 10219 switch (cdb->page_code) { 10220 case SVPD_SUPPORTED_PAGES: 10221 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); 10222 break; 10223 case SVPD_UNIT_SERIAL_NUMBER: 10224 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); 10225 break; 10226 case SVPD_DEVICE_ID: 10227 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); 10228 break; 10229 case SVPD_SCSI_PORTS: 10230 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); 10231 break; 10232 case SVPD_BLOCK_LIMITS: 10233 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); 10234 break; 10235 case SVPD_LBP: 10236 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); 10237 break; 10238 default: 10239 ctl_set_invalid_field(ctsio, 10240 /*sks_valid*/ 1, 10241 /*command*/ 1, 10242 /*field*/ 2, 10243 /*bit_valid*/ 0, 10244 /*bit*/ 0); 10245 ctl_done((union ctl_io *)ctsio); 10246 retval = CTL_RETVAL_COMPLETE; 10247 break; 10248 } 10249 10250 return (retval); 10251} 10252 10253static int 10254ctl_inquiry_std(struct ctl_scsiio *ctsio) 10255{ 10256 struct scsi_inquiry_data *inq_ptr; 10257 struct scsi_inquiry *cdb; 10258 struct ctl_softc *ctl_softc; 10259 struct ctl_lun *lun; 10260 char *val; 10261 uint32_t alloc_len; 10262 int is_fc; 10263 10264 ctl_softc = control_softc; 10265 10266 /* 10267 * Figure out whether we're talking to a Fibre Channel port or not. 10268 * We treat the ioctl front end, and any SCSI adapters, as packetized 10269 * SCSI front ends. 10270 */ 10271 if (ctl_softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type != 10272 CTL_PORT_FC) 10273 is_fc = 0; 10274 else 10275 is_fc = 1; 10276 10277 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10278 cdb = (struct scsi_inquiry *)ctsio->cdb; 10279 alloc_len = scsi_2btoul(cdb->length); 10280 10281 /* 10282 * We malloc the full inquiry data size here and fill it 10283 * in. If the user only asks for less, we'll give him 10284 * that much. 10285 */ 10286 ctsio->kern_data_ptr = malloc(sizeof(*inq_ptr), M_CTL, M_WAITOK | M_ZERO); 10287 inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; 10288 ctsio->kern_sg_entries = 0; 10289 ctsio->kern_data_resid = 0; 10290 ctsio->kern_rel_offset = 0; 10291 10292 if (sizeof(*inq_ptr) < alloc_len) { 10293 ctsio->residual = alloc_len - sizeof(*inq_ptr); 10294 ctsio->kern_data_len = sizeof(*inq_ptr); 10295 ctsio->kern_total_len = sizeof(*inq_ptr); 10296 } else { 10297 ctsio->residual = 0; 10298 ctsio->kern_data_len = alloc_len; 10299 ctsio->kern_total_len = alloc_len; 10300 } 10301 10302 /* 10303 * If we have a LUN configured, report it as connected. 
Otherwise, 10304	 * report that it is offline or no device is supported, depending 10305	 * on the value of inquiry_pq_no_lun. 10306	 * 10307	 * According to the spec (SPC-4 r34), the peripheral qualifier 10308	 * SID_QUAL_LU_OFFLINE (001b) is used in the following scenario: 10309	 * 10310	 * "A peripheral device having the specified peripheral device type 10311	 * is not connected to this logical unit. However, the device 10312	 * server is capable of supporting the specified peripheral device 10313	 * type on this logical unit." 10314	 * 10315	 * According to the same spec, the peripheral qualifier 10316	 * SID_QUAL_BAD_LU (011b) is used in this scenario: 10317	 * 10318	 * "The device server is not capable of supporting a peripheral 10319	 * device on this logical unit. For this peripheral qualifier the 10320	 * peripheral device type shall be set to 1Fh. All other peripheral 10321	 * device type values are reserved for this peripheral qualifier." 10322	 * 10323	 * Given the text, it would seem that we probably want to report that 10324	 * the LUN is offline here. There is no LUN connected, but we can 10325	 * support a LUN at the given LUN number. 10326	 * 10327	 * In the real world, though, it sounds like things are a little 10328	 * different: 10329	 * 10330	 * - Linux, when presented with a LUN with the offline peripheral 10331	 * qualifier, will create an sg driver instance for it. So when 10332	 * you attach it to CTL, you wind up with a ton of sg driver 10333	 * instances. (One for every LUN that Linux bothered to probe.) 10334	 * Linux does this despite the fact that it issues a REPORT LUNS 10335	 * to LUN 0 to get the inventory of supported LUNs. 10336	 * 10337	 * - There is other anecdotal evidence (from Emulex folks) about 10338	 * arrays that use the offline peripheral qualifier for LUNs that 10339	 * are on the "passive" path in an active/passive array. 10340	 * 10341	 * So the solution is to provide a hopefully reasonable default 10342	 * (return bad/no LUN) and allow the user to change the behavior 10343	 * with a tunable/sysctl variable. 10344	 */ 10345	if (lun != NULL) 10346		inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10347				  lun->be_lun->lun_type; 10348	else if (ctl_softc->inquiry_pq_no_lun == 0) 10349		inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10350	else 10351		inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; 10352 10353	/* RMB in byte 2 is 0 */ 10354	inq_ptr->version = SCSI_REV_SPC3; 10355 10356	/* 10357	 * According to SAM-3, even if a device only supports a single 10358	 * level of LUN addressing, it should still set the HISUP bit: 10359	 * 10360	 * 4.9.1 Logical unit numbers overview 10361	 * 10362	 * All logical unit number formats described in this standard are 10363	 * hierarchical in structure even when only a single level in that 10364	 * hierarchy is used. The HISUP bit shall be set to one in the 10365	 * standard INQUIRY data (see SPC-2) when any logical unit number 10366	 * format described in this standard is used. Non-hierarchical 10367	 * formats are outside the scope of this standard. 10368	 * 10369	 * Therefore we set the HiSup bit here. 10370	 * 10371	 * The response format is 2, per SPC-3.
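 * (In byte 3 of the standard INQUIRY data, the low nibble is the RESPONSE DATA FORMAT field and SID_HiSup is bit 4, hence the OR of the two constants below.)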
10372 */ 10373 inq_ptr->response_format = SID_HiSup | 2; 10374 10375 inq_ptr->additional_length = sizeof(*inq_ptr) - 4; 10376 CTL_DEBUG_PRINT(("additional_length = %d\n", 10377 inq_ptr->additional_length)); 10378 10379 inq_ptr->spc3_flags = SPC3_SID_TPGS_IMPLICIT; 10380 /* 16 bit addressing */ 10381 if (is_fc == 0) 10382 inq_ptr->spc2_flags = SPC2_SID_ADDR16; 10383 /* XXX set the SID_MultiP bit here if we're actually going to 10384 respond on multiple ports */ 10385 inq_ptr->spc2_flags |= SPC2_SID_MultiP; 10386 10387 /* 16 bit data bus, synchronous transfers */ 10388 /* XXX these flags don't apply for FC */ 10389 if (is_fc == 0) 10390 inq_ptr->flags = SID_WBus16 | SID_Sync; 10391 /* 10392 * XXX KDM do we want to support tagged queueing on the control 10393 * device at all? 10394 */ 10395 if ((lun == NULL) 10396 || (lun->be_lun->lun_type != T_PROCESSOR)) 10397 inq_ptr->flags |= SID_CmdQue; 10398 /* 10399 * Per SPC-3, unused bytes in ASCII strings are filled with spaces. 10400 * We have 8 bytes for the vendor name, and 16 bytes for the device 10401 * name and 4 bytes for the revision. 10402 */ 10403 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10404 "vendor")) == NULL) { 10405 strcpy(inq_ptr->vendor, CTL_VENDOR); 10406 } else { 10407 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); 10408 strncpy(inq_ptr->vendor, val, 10409 min(sizeof(inq_ptr->vendor), strlen(val))); 10410 } 10411 if (lun == NULL) { 10412 strcpy(inq_ptr->product, CTL_DIRECT_PRODUCT); 10413 } else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) { 10414 switch (lun->be_lun->lun_type) { 10415 case T_DIRECT: 10416 strcpy(inq_ptr->product, CTL_DIRECT_PRODUCT); 10417 break; 10418 case T_PROCESSOR: 10419 strcpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT); 10420 break; 10421 default: 10422 strcpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT); 10423 break; 10424 } 10425 } else { 10426 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); 10427 strncpy(inq_ptr->product, val, 10428 min(sizeof(inq_ptr->product), strlen(val))); 10429 } 10430 10431 /* 10432 * XXX make this a macro somewhere so it automatically gets 10433 * incremented when we make changes. 10434 */ 10435 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10436 "revision")) == NULL) { 10437 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); 10438 } else { 10439 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); 10440 strncpy(inq_ptr->revision, val, 10441 min(sizeof(inq_ptr->revision), strlen(val))); 10442 } 10443 10444 /* 10445 * For parallel SCSI, we support double transition and single 10446 * transition clocking. We also support QAS (Quick Arbitration 10447 * and Selection) and Information Unit transfers on both the 10448 * control and array devices. 10449 */ 10450 if (is_fc == 0) 10451 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | 10452 SID_SPI_IUS; 10453 10454 /* SAM-3 */ 10455 scsi_ulto2b(0x0060, inq_ptr->version1); 10456 /* SPC-3 (no version claimed) XXX should we claim a version? */ 10457 scsi_ulto2b(0x0300, inq_ptr->version2); 10458 if (is_fc) { 10459 /* FCP-2 ANSI INCITS.350:2003 */ 10460 scsi_ulto2b(0x0917, inq_ptr->version3); 10461 } else { 10462 /* SPI-4 ANSI INCITS.362:200x */ 10463 scsi_ulto2b(0x0B56, inq_ptr->version3); 10464 } 10465 10466 if (lun == NULL) { 10467 /* SBC-2 (no version claimed) XXX should we claim a version? 
*/ 10468 scsi_ulto2b(0x0320, inq_ptr->version4); 10469 } else { 10470 switch (lun->be_lun->lun_type) { 10471 case T_DIRECT: 10472 /* 10473 * SBC-2 (no version claimed) XXX should we claim a 10474 * version? 10475 */ 10476 scsi_ulto2b(0x0320, inq_ptr->version4); 10477 break; 10478 case T_PROCESSOR: 10479 default: 10480 break; 10481 } 10482 } 10483 10484 ctsio->scsi_status = SCSI_STATUS_OK; 10485 if (ctsio->kern_data_len > 0) { 10486 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10487 ctsio->be_move_done = ctl_config_move_done; 10488 ctl_datamove((union ctl_io *)ctsio); 10489 } else { 10490 ctsio->io_hdr.status = CTL_SUCCESS; 10491 ctl_done((union ctl_io *)ctsio); 10492 } 10493 10494 return (CTL_RETVAL_COMPLETE); 10495} 10496 10497int 10498ctl_inquiry(struct ctl_scsiio *ctsio) 10499{ 10500 struct scsi_inquiry *cdb; 10501 int retval; 10502 10503 cdb = (struct scsi_inquiry *)ctsio->cdb; 10504 10505 retval = 0; 10506 10507 CTL_DEBUG_PRINT(("ctl_inquiry\n")); 10508 10509 /* 10510 * Right now, we don't support the CmdDt inquiry information. 10511 * This would be nice to support in the future. When we do 10512 * support it, we should change this test so that it checks to make 10513 * sure SI_EVPD and SI_CMDDT aren't both set at the same time. 10514 */ 10515#ifdef notyet 10516 if (((cdb->byte2 & SI_EVPD) 10517 && (cdb->byte2 & SI_CMDDT))) 10518#endif 10519 if (cdb->byte2 & SI_CMDDT) { 10520 /* 10521 * Point to the SI_CMDDT bit. We might change this 10522 * when we support SI_CMDDT, but since both bits would be 10523 * "wrong", this should probably just stay as-is then. 10524 */ 10525 ctl_set_invalid_field(ctsio, 10526 /*sks_valid*/ 1, 10527 /*command*/ 1, 10528 /*field*/ 1, 10529 /*bit_valid*/ 1, 10530 /*bit*/ 1); 10531 ctl_done((union ctl_io *)ctsio); 10532 return (CTL_RETVAL_COMPLETE); 10533 } 10534 if (cdb->byte2 & SI_EVPD) 10535 retval = ctl_inquiry_evpd(ctsio); 10536#ifdef notyet 10537 else if (cdb->byte2 & SI_CMDDT) 10538 retval = ctl_inquiry_cmddt(ctsio); 10539#endif 10540 else 10541 retval = ctl_inquiry_std(ctsio); 10542 10543 return (retval); 10544} 10545 10546/* 10547 * For known CDB types, parse the LBA and length. 
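 * For example, READ(6)/WRITE(6) pack a 21-bit LBA into CDB bytes 1-3 with an 8-bit transfer length, while the 10-, 12- and 16-byte variants carry big-endian 4- or 8-byte LBAs and 2- or 4-byte lengths; the switch below simply decodes each known layout.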
10548 */ 10549static int 10550ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint32_t *len) 10551{ 10552 if (io->io_hdr.io_type != CTL_IO_SCSI) 10553 return (1); 10554 10555 switch (io->scsiio.cdb[0]) { 10556 case COMPARE_AND_WRITE: { 10557 struct scsi_compare_and_write *cdb; 10558 10559 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; 10560 10561 *lba = scsi_8btou64(cdb->addr); 10562 *len = cdb->length; 10563 break; 10564 } 10565 case READ_6: 10566 case WRITE_6: { 10567 struct scsi_rw_6 *cdb; 10568 10569 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; 10570 10571 *lba = scsi_3btoul(cdb->addr); 10572 /* only 5 bits are valid in the most significant address byte */ 10573 *lba &= 0x1fffff; 10574 *len = cdb->length; 10575 break; 10576 } 10577 case READ_10: 10578 case WRITE_10: { 10579 struct scsi_rw_10 *cdb; 10580 10581 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; 10582 10583 *lba = scsi_4btoul(cdb->addr); 10584 *len = scsi_2btoul(cdb->length); 10585 break; 10586 } 10587 case WRITE_VERIFY_10: { 10588 struct scsi_write_verify_10 *cdb; 10589 10590 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; 10591 10592 *lba = scsi_4btoul(cdb->addr); 10593 *len = scsi_2btoul(cdb->length); 10594 break; 10595 } 10596 case READ_12: 10597 case WRITE_12: { 10598 struct scsi_rw_12 *cdb; 10599 10600 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; 10601 10602 *lba = scsi_4btoul(cdb->addr); 10603 *len = scsi_4btoul(cdb->length); 10604 break; 10605 } 10606 case WRITE_VERIFY_12: { 10607 struct scsi_write_verify_12 *cdb; 10608 10609 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; 10610 10611 *lba = scsi_4btoul(cdb->addr); 10612 *len = scsi_4btoul(cdb->length); 10613 break; 10614 } 10615 case READ_16: 10616 case WRITE_16: { 10617 struct scsi_rw_16 *cdb; 10618 10619 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; 10620 10621 *lba = scsi_8btou64(cdb->addr); 10622 *len = scsi_4btoul(cdb->length); 10623 break; 10624 } 10625 case WRITE_VERIFY_16: { 10626 struct scsi_write_verify_16 *cdb; 10627 10628 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; 10629 10630 10631 *lba = scsi_8btou64(cdb->addr); 10632 *len = scsi_4btoul(cdb->length); 10633 break; 10634 } 10635 case WRITE_SAME_10: { 10636 struct scsi_write_same_10 *cdb; 10637 10638 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb; 10639 10640 *lba = scsi_4btoul(cdb->addr); 10641 *len = scsi_2btoul(cdb->length); 10642 break; 10643 } 10644 case WRITE_SAME_16: { 10645 struct scsi_write_same_16 *cdb; 10646 10647 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; 10648 10649 *lba = scsi_8btou64(cdb->addr); 10650 *len = scsi_4btoul(cdb->length); 10651 break; 10652 } 10653 case VERIFY_10: { 10654 struct scsi_verify_10 *cdb; 10655 10656 cdb = (struct scsi_verify_10 *)io->scsiio.cdb; 10657 10658 *lba = scsi_4btoul(cdb->addr); 10659 *len = scsi_2btoul(cdb->length); 10660 break; 10661 } 10662 case VERIFY_12: { 10663 struct scsi_verify_12 *cdb; 10664 10665 cdb = (struct scsi_verify_12 *)io->scsiio.cdb; 10666 10667 *lba = scsi_4btoul(cdb->addr); 10668 *len = scsi_4btoul(cdb->length); 10669 break; 10670 } 10671 case VERIFY_16: { 10672 struct scsi_verify_16 *cdb; 10673 10674 cdb = (struct scsi_verify_16 *)io->scsiio.cdb; 10675 10676 *lba = scsi_8btou64(cdb->addr); 10677 *len = scsi_4btoul(cdb->length); 10678 break; 10679 } 10680 default: 10681 return (1); 10682 break; /* NOTREACHED */ 10683 } 10684 10685 return (0); 10686} 10687 10688static ctl_action 10689ctl_extent_check_lba(uint64_t lba1, uint32_t len1, uint64_t lba2, uint32_t len2) 10690{ 10691 uint64_t endlba1, endlba2; 10692 
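	/* Two extents overlap unless one ends strictly before the other begins. A worked example: lba1=0/len1=8 and lba2=8/len2=8 do not overlap (endlba1 == 7 < lba2 == 8), while lba1=0/len1=9 and lba2=8/len2=8 do. */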
10693 endlba1 = lba1 + len1 - 1; 10694 endlba2 = lba2 + len2 - 1; 10695 10696 if ((endlba1 < lba2) 10697 || (endlba2 < lba1)) 10698 return (CTL_ACTION_PASS); 10699 else 10700 return (CTL_ACTION_BLOCK); 10701} 10702 10703static ctl_action 10704ctl_extent_check(union ctl_io *io1, union ctl_io *io2) 10705{ 10706 uint64_t lba1, lba2; 10707 uint32_t len1, len2; 10708 int retval; 10709 10710 retval = ctl_get_lba_len(io1, &lba1, &len1); 10711 if (retval != 0) 10712 return (CTL_ACTION_ERROR); 10713 10714 retval = ctl_get_lba_len(io2, &lba2, &len2); 10715 if (retval != 0) 10716 return (CTL_ACTION_ERROR); 10717 10718 return (ctl_extent_check_lba(lba1, len1, lba2, len2)); 10719} 10720 10721static ctl_action 10722ctl_check_for_blockage(union ctl_io *pending_io, union ctl_io *ooa_io) 10723{ 10724 const struct ctl_cmd_entry *pending_entry, *ooa_entry; 10725 ctl_serialize_action *serialize_row; 10726 10727 /* 10728 * The initiator attempted multiple untagged commands at the same 10729 * time. Can't do that. 10730 */ 10731 if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10732 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10733 && ((pending_io->io_hdr.nexus.targ_port == 10734 ooa_io->io_hdr.nexus.targ_port) 10735 && (pending_io->io_hdr.nexus.initid.id == 10736 ooa_io->io_hdr.nexus.initid.id)) 10737 && ((ooa_io->io_hdr.flags & CTL_FLAG_ABORT) == 0)) 10738 return (CTL_ACTION_OVERLAP); 10739 10740 /* 10741 * The initiator attempted to send multiple tagged commands with 10742 * the same ID. (It's fine if different initiators have the same 10743 * tag ID.) 10744 * 10745 * Even if all of those conditions are true, we don't kill the I/O 10746 * if the command ahead of us has been aborted. We won't end up 10747 * sending it to the FETD, and it's perfectly legal to resend a 10748 * command with the same tag number as long as the previous 10749 * instance of this tag number has been aborted somehow. 10750 */ 10751 if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10752 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10753 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) 10754 && ((pending_io->io_hdr.nexus.targ_port == 10755 ooa_io->io_hdr.nexus.targ_port) 10756 && (pending_io->io_hdr.nexus.initid.id == 10757 ooa_io->io_hdr.nexus.initid.id)) 10758 && ((ooa_io->io_hdr.flags & CTL_FLAG_ABORT) == 0)) 10759 return (CTL_ACTION_OVERLAP_TAG); 10760 10761 /* 10762 * If we get a head of queue tag, SAM-3 says that we should 10763 * immediately execute it. 10764 * 10765 * What happens if this command would normally block for some other 10766 * reason? e.g. a request sense with a head of queue tag 10767 * immediately after a write. Normally that would block, but this 10768 * will result in its getting executed immediately... 10769 * 10770 * We currently return "pass" instead of "skip", so we'll end up 10771 * going through the rest of the queue to check for overlapped tags. 10772 * 10773 * XXX KDM check for other types of blockage first?? 10774 */ 10775 if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10776 return (CTL_ACTION_PASS); 10777 10778 /* 10779 * Ordered tags have to block until all items ahead of them 10780 * have completed. If we get called with an ordered tag, we always 10781 * block, if something else is ahead of us in the queue. 10782 */ 10783 if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED) 10784 return (CTL_ACTION_BLOCK); 10785 10786 /* 10787 * Simple tags get blocked until all head of queue and ordered tags 10788 * ahead of them have completed. 
I'm lumping untagged commands in 10789	 * with simple tags here. XXX KDM is that the right thing to do? 10790	 */ 10791	if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10792	  || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE)) 10793	 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10794	  || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED))) 10795		return (CTL_ACTION_BLOCK); 10796 10797	pending_entry = ctl_get_cmd_entry(&pending_io->scsiio); 10798	ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio); 10799 10800	serialize_row = ctl_serialize_table[ooa_entry->seridx]; 10801 10802	switch (serialize_row[pending_entry->seridx]) { 10803	case CTL_SER_BLOCK: 10804		return (CTL_ACTION_BLOCK); 10805		break; /* NOTREACHED */ 10806	case CTL_SER_EXTENT: 10807		return (ctl_extent_check(pending_io, ooa_io)); 10808		break; /* NOTREACHED */ 10809	case CTL_SER_PASS: 10810		return (CTL_ACTION_PASS); 10811		break; /* NOTREACHED */ 10812	case CTL_SER_SKIP: 10813		return (CTL_ACTION_SKIP); 10814		break; 10815	default: 10816		panic("invalid serialization value %d", 10817		      serialize_row[pending_entry->seridx]); 10818		break; /* NOTREACHED */ 10819	} 10820 10821	return (CTL_ACTION_ERROR); 10822} 10823 10824/* 10825 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. 10826 * Assumptions: 10827 * - pending_io is generally either incoming, or on the blocked queue 10828 * - starting I/O is the I/O we want to start the check with. 10829 */ 10830static ctl_action 10831ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 10832	      union ctl_io *starting_io) 10833{ 10834	union ctl_io *ooa_io; 10835	ctl_action action; 10836 10837	mtx_assert(&lun->lun_lock, MA_OWNED); 10838 10839	/* 10840	 * Run back along the OOA queue, starting with the current 10841	 * blocked I/O and going through every I/O before it on the 10842	 * queue. If starting_io is NULL, we'll just end up returning 10843	 * CTL_ACTION_PASS. 10844	 */ 10845	for (ooa_io = starting_io; ooa_io != NULL; 10846	     ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq, 10847	     ooa_links)){ 10848 10849		/* 10850		 * This routine just checks to see whether 10851		 * cur_blocked is blocked by ooa_io, which is ahead 10852		 * of it in the queue. It doesn't queue/dequeue 10853		 * cur_blocked. 10854		 */ 10855		action = ctl_check_for_blockage(pending_io, ooa_io); 10856		switch (action) { 10857		case CTL_ACTION_BLOCK: 10858		case CTL_ACTION_OVERLAP: 10859		case CTL_ACTION_OVERLAP_TAG: 10860		case CTL_ACTION_SKIP: 10861		case CTL_ACTION_ERROR: 10862			return (action); 10863			break; /* NOTREACHED */ 10864		case CTL_ACTION_PASS: 10865			break; 10866		default: 10867			panic("invalid action %d", action); 10868			break; /* NOTREACHED */ 10869		} 10870	} 10871 10872	return (CTL_ACTION_PASS); 10873} 10874 10875/* 10876 * Assumptions: 10877 * - An I/O has just completed, and has been removed from the per-LUN OOA 10878 *   queue, so some items on the blocked queue may now be unblocked. 10879 */ 10880static int 10881ctl_check_blocked(struct ctl_lun *lun) 10882{ 10883	union ctl_io *cur_blocked, *next_blocked; 10884 10885	mtx_assert(&lun->lun_lock, MA_OWNED); 10886 10887	/* 10888	 * Run forward from the head of the blocked queue, checking each 10889	 * entry against the I/Os prior to it on the OOA queue to see if 10890	 * there is still any blockage. 10891	 * 10892	 * We cannot use the TAILQ_FOREACH() macro, because it can't deal 10893	 * with our removing an element from it while it is traversing the 10894	 * list.
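 * (The open-coded loop below is the usual remove-safe traversal, equivalent to queue(3)'s TAILQ_FOREACH_SAFE(): next_blocked is saved up front so that cur_blocked can be unlinked mid-loop.)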
10895 */ 10896 for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); 10897 cur_blocked != NULL; cur_blocked = next_blocked) { 10898 union ctl_io *prev_ooa; 10899 ctl_action action; 10900 10901 next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr, 10902 blocked_links); 10903 10904 prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr, 10905 ctl_ooaq, ooa_links); 10906 10907 /* 10908 * If cur_blocked happens to be the first item in the OOA 10909 * queue now, prev_ooa will be NULL, and the action 10910 * returned will just be CTL_ACTION_PASS. 10911 */ 10912 action = ctl_check_ooa(lun, cur_blocked, prev_ooa); 10913 10914 switch (action) { 10915 case CTL_ACTION_BLOCK: 10916 /* Nothing to do here, still blocked */ 10917 break; 10918 case CTL_ACTION_OVERLAP: 10919 case CTL_ACTION_OVERLAP_TAG: 10920 /* 10921 * This shouldn't happen! In theory we've already 10922 * checked this command for overlap... 10923 */ 10924 break; 10925 case CTL_ACTION_PASS: 10926 case CTL_ACTION_SKIP: { 10927 struct ctl_softc *softc; 10928 const struct ctl_cmd_entry *entry; 10929 uint32_t initidx; 10930 int isc_retval; 10931 10932 /* 10933 * The skip case shouldn't happen, this transaction 10934 * should have never made it onto the blocked queue. 10935 */ 10936 /* 10937 * This I/O is no longer blocked, we can remove it 10938 * from the blocked queue. Since this is a TAILQ 10939 * (doubly linked list), we can do O(1) removals 10940 * from any place on the list. 10941 */ 10942 TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr, 10943 blocked_links); 10944 cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 10945 10946 if (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC){ 10947 /* 10948 * Need to send IO back to original side to 10949 * run 10950 */ 10951 union ctl_ha_msg msg_info; 10952 10953 msg_info.hdr.original_sc = 10954 cur_blocked->io_hdr.original_sc; 10955 msg_info.hdr.serializing_sc = cur_blocked; 10956 msg_info.hdr.msg_type = CTL_MSG_R2R; 10957 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 10958 &msg_info, sizeof(msg_info), 0)) > 10959 CTL_HA_STATUS_SUCCESS) { 10960 printf("CTL:Check Blocked error from " 10961 "ctl_ha_msg_send %d\n", 10962 isc_retval); 10963 } 10964 break; 10965 } 10966 entry = ctl_get_cmd_entry(&cur_blocked->scsiio); 10967 softc = control_softc; 10968 10969 initidx = ctl_get_initindex(&cur_blocked->io_hdr.nexus); 10970 10971 /* 10972 * Check this I/O for LUN state changes that may 10973 * have happened while this command was blocked. 10974 * The LUN state may have been changed by a command 10975 * ahead of us in the queue, so we need to re-check 10976 * for any states that can be caused by SCSI 10977 * commands. 10978 */ 10979 if (ctl_scsiio_lun_check(softc, lun, entry, 10980 &cur_blocked->scsiio) == 0) { 10981 cur_blocked->io_hdr.flags |= 10982 CTL_FLAG_IS_WAS_ON_RTR; 10983 ctl_enqueue_rtr(cur_blocked); 10984 } else 10985 ctl_done(cur_blocked); 10986 break; 10987 } 10988 default: 10989 /* 10990 * This probably shouldn't happen -- we shouldn't 10991 * get CTL_ACTION_ERROR, or anything else. 10992 */ 10993 break; 10994 } 10995 } 10996 10997 return (CTL_RETVAL_COMPLETE); 10998} 10999 11000/* 11001 * This routine (with one exception) checks LUN flags that can be set by 11002 * commands ahead of us in the OOA queue. These flags have to be checked 11003 * when a command initially comes in, and when we pull a command off the 11004 * blocked queue and are preparing to execute it. 
The reason we have to 11005 * check these flags for commands on the blocked queue is that the LUN 11006 * state may have been changed by a command ahead of us while we're on the 11007 * blocked queue. 11008 * 11009 * Ordering is somewhat important with these checks, so please pay 11010 * careful attention to the placement of any new checks. 11011 */ 11012static int 11013ctl_scsiio_lun_check(struct ctl_softc *ctl_softc, struct ctl_lun *lun, 11014    const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) 11015{ 11016	int retval; 11017 11018	retval = 0; 11019 11020	mtx_assert(&lun->lun_lock, MA_OWNED); 11021 11022	/* 11023	 * If this shelf is a secondary shelf controller, we have to reject 11024	 * any media access commands. 11025	 */ 11026#if 0 11027	/* No longer needed for HA */ 11028	if (((ctl_softc->flags & CTL_FLAG_MASTER_SHELF) == 0) 11029	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_SECONDARY) == 0)) { 11030		ctl_set_lun_standby(ctsio); 11031		retval = 1; 11032		goto bailout; 11033	} 11034#endif 11035 11036	/* 11037	 * Check for a reservation conflict. If this command isn't allowed 11038	 * even on reserved LUNs, and if this initiator isn't the one who 11039	 * reserved us, reject the command with a reservation conflict. 11040	 */ 11041	if ((lun->flags & CTL_LUN_RESERVED) 11042	 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { 11043		if ((ctsio->io_hdr.nexus.initid.id != lun->rsv_nexus.initid.id) 11044		 || (ctsio->io_hdr.nexus.targ_port != lun->rsv_nexus.targ_port) 11045		 || (ctsio->io_hdr.nexus.targ_target.id != 11046		     lun->rsv_nexus.targ_target.id)) { 11047			ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT; 11048			ctsio->io_hdr.status = CTL_SCSI_ERROR; 11049			retval = 1; 11050			goto bailout; 11051		} 11052	} 11053 11054	if ((lun->flags & CTL_LUN_PR_RESERVED) 11055	 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV) == 0)) { 11056		uint32_t residx; 11057 11058		residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 11059		/* 11060		 * If we aren't registered, or it's a reservation-holder type 11061		 * reservation and this isn't the reservation holder, then 11062		 * set a conflict. 11063		 * NOTE: Commands which might be allowed on write exclusive 11064		 * type reservations are checked in the particular command 11065		 * for a conflict. Read and SSU are the only ones. 11066		 */ 11067		if (!lun->per_res[residx].registered 11068		 || (residx != lun->pr_res_idx && lun->res_type < 4)) { 11069			ctsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT; 11070			ctsio->io_hdr.status = CTL_SCSI_ERROR; 11071			retval = 1; 11072			goto bailout; 11073		} 11074 11075	} 11076 11077	if ((lun->flags & CTL_LUN_OFFLINE) 11078	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_OFFLINE) == 0)) { 11079		ctl_set_lun_not_ready(ctsio); 11080		retval = 1; 11081		goto bailout; 11082	} 11083 11084	/* 11085	 * If the LUN is stopped, see if this particular command is allowed 11086	 * for a stopped lun. Otherwise, reject it with 0x04,0x02. 11087	 */ 11088	if ((lun->flags & CTL_LUN_STOPPED) 11089	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) { 11090		/* "Logical unit not ready, initializing cmd.
required" */ 11091		ctl_set_lun_stopped(ctsio); 11092		retval = 1; 11093		goto bailout; 11094	} 11095 11096	if ((lun->flags & CTL_LUN_INOPERABLE) 11097	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) { 11098		/* "Medium format corrupted" */ 11099		ctl_set_medium_format_corrupted(ctsio); 11100		retval = 1; 11101		goto bailout; 11102	} 11103 11104bailout: 11105	return (retval); 11106 11107} 11108 11109static void 11110ctl_failover_io(union ctl_io *io, int have_lock) 11111{ 11112	ctl_set_busy(&io->scsiio); 11113	ctl_done(io); 11114} 11115 11116static void 11117ctl_failover(void) 11118{ 11119	struct ctl_lun *lun; 11120	struct ctl_softc *ctl_softc; 11121	union ctl_io *next_io, *pending_io; 11122	union ctl_io *io; 11123	int lun_idx; 11124	int i; 11125 11126	ctl_softc = control_softc; 11127 11128	mtx_lock(&ctl_softc->ctl_lock); 11129	/* 11130	 * Remove any cmds from the other SC from the rtr queue. These 11131	 * will obviously only be for LUNs for which we're the primary. 11132	 * We can't send status or get/send data for these commands. 11133	 * Since they haven't been executed yet, we can just remove them. 11134	 * We'll either abort them or delete them below, depending on 11135	 * which HA mode we're in. 11136	 */ 11137#ifdef notyet 11138	mtx_lock(&ctl_softc->queue_lock); 11139	for (io = (union ctl_io *)STAILQ_FIRST(&ctl_softc->rtr_queue); 11140	     io != NULL; io = next_io) { 11141		next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links); 11142		if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 11143			STAILQ_REMOVE(&ctl_softc->rtr_queue, &io->io_hdr, 11144				      ctl_io_hdr, links); 11145	} 11146	mtx_unlock(&ctl_softc->queue_lock); 11147#endif 11148 11149	for (lun_idx=0; lun_idx < ctl_softc->num_luns; lun_idx++) { 11150		lun = ctl_softc->ctl_luns[lun_idx]; 11151		if (lun==NULL) 11152			continue; 11153 11154		/* 11155		 * Processor LUNs are primary on both sides. 11156		 * XXX will this always be true? 11157		 */ 11158		if (lun->be_lun->lun_type == T_PROCESSOR) 11159			continue; 11160 11161		if ((lun->flags & CTL_LUN_PRIMARY_SC) 11162		 && (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)) { 11163			printf("FAILOVER: primary lun %d\n", lun_idx); 11164		        /* 11165			 * Remove all commands from the other SC. First from the 11166			 * blocked queue, then from the ooa queue. Once we have 11167			 * removed them, call ctl_check_blocked() to see if there 11168			 * is anything that can run. 11169			 */ 11170			for (io = (union ctl_io *)TAILQ_FIRST( 11171			     &lun->blocked_queue); io != NULL; io = next_io) { 11172 11173		        	next_io = (union ctl_io *)TAILQ_NEXT( 11174				    &io->io_hdr, blocked_links); 11175 11176				if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) { 11177					TAILQ_REMOVE(&lun->blocked_queue, 11178						     &io->io_hdr,blocked_links); 11179					io->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 11180					TAILQ_REMOVE(&lun->ooa_queue, 11181						     &io->io_hdr, ooa_links); 11182 11183					ctl_free_io(io); 11184				} 11185			} 11186 11187			for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 11188	     		     io != NULL; io = next_io) { 11189 11190		        	next_io = (union ctl_io *)TAILQ_NEXT( 11191				    &io->io_hdr, ooa_links); 11192 11193				if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) { 11194 11195					TAILQ_REMOVE(&lun->ooa_queue, 11196						&io->io_hdr, 11197			     			ooa_links); 11198 11199					ctl_free_io(io); 11200				} 11201			} 11202			ctl_check_blocked(lun); 11203		} else if ((lun->flags & CTL_LUN_PRIMARY_SC) 11204			&& (ctl_softc->ha_mode == CTL_HA_MODE_XFER)) { 11205 11206			printf("FAILOVER: primary lun %d\n", lun_idx); 11207			/* 11208			 * Abort all commands from the other SC. We can't 11209			 * send status back for them now.
These should get 11210			 * cleaned up when they are completed or come out 11211			 * for a datamove operation. 11212			 */ 11213			for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 11214	     		     io != NULL; io = next_io) { 11215		        	next_io = (union ctl_io *)TAILQ_NEXT( 11216				    &io->io_hdr, ooa_links); 11217 11218				if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 11219					io->io_hdr.flags |= CTL_FLAG_ABORT; 11220			} 11221		} else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0) 11222			&& (ctl_softc->ha_mode == CTL_HA_MODE_XFER)) { 11223 11224			printf("FAILOVER: secondary lun %d\n", lun_idx); 11225 11226			lun->flags |= CTL_LUN_PRIMARY_SC; 11227 11228			/* 11229			 * We send all I/O that was sent to this controller 11230			 * and redirected to the other side back with 11231			 * busy status, and have the initiator retry it. 11232			 * Figuring out how much data has been transferred, 11233			 * etc., and picking up where we left off would be 11234			 * very tricky. 11235			 * 11236			 * XXX KDM need to remove I/O from the blocked 11237			 * queue as well! 11238			 */ 11239			for (pending_io = (union ctl_io *)TAILQ_FIRST( 11240			     &lun->ooa_queue); pending_io != NULL; 11241			     pending_io = next_io) { 11242 11243				next_io =  (union ctl_io *)TAILQ_NEXT( 11244					&pending_io->io_hdr, ooa_links); 11245 11246				pending_io->io_hdr.flags &= 11247					~CTL_FLAG_SENT_2OTHER_SC; 11248 11249				if (pending_io->io_hdr.flags & 11250				    CTL_FLAG_IO_ACTIVE) { 11251					pending_io->io_hdr.flags |= 11252						CTL_FLAG_FAILOVER; 11253				} else { 11254					ctl_set_busy(&pending_io->scsiio); 11255					ctl_done(pending_io); 11256				} 11257			} 11258 11259			/* 11260			 * Build Unit Attention 11261			 */ 11262			for (i = 0; i < CTL_MAX_INITIATORS; i++) { 11263				lun->pending_sense[i].ua_pending |= 11264				                     CTL_UA_ASYM_ACC_CHANGE; 11265			} 11266		} else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0) 11267			&& (ctl_softc->ha_mode == CTL_HA_MODE_SER_ONLY)) { 11268			printf("FAILOVER: secondary lun %d\n", lun_idx); 11269			/* 11270			 * If the first I/O on the OOA queue is not on the 11271			 * RtR queue, add it.
11272			 */ 11273			lun->flags |= CTL_LUN_PRIMARY_SC; 11274 11275			pending_io = (union ctl_io *)TAILQ_FIRST( 11276			    &lun->ooa_queue); 11277			if (pending_io==NULL) { 11278				printf("Nothing on OOA queue\n"); 11279				continue; 11280			} 11281 11282			pending_io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11283			if ((pending_io->io_hdr.flags & 11284			     CTL_FLAG_IS_WAS_ON_RTR) == 0) { 11285				pending_io->io_hdr.flags |= 11286				    CTL_FLAG_IS_WAS_ON_RTR; 11287				ctl_enqueue_rtr(pending_io); 11288			} 11289#if 0 11290			else 11291			{ 11292				printf("Tag 0x%04x is running\n", 11293				      pending_io->scsiio.tag_num); 11294			} 11295#endif 11296 11297			next_io = (union ctl_io *)TAILQ_NEXT( 11298			    &pending_io->io_hdr, ooa_links); 11299			for (pending_io=next_io; pending_io != NULL; 11300			     pending_io = next_io) { 11301				pending_io->io_hdr.flags &= 11302				    ~CTL_FLAG_SENT_2OTHER_SC; 11303				next_io = (union ctl_io *)TAILQ_NEXT( 11304					&pending_io->io_hdr, ooa_links); 11305				if (pending_io->io_hdr.flags & 11306				    CTL_FLAG_IS_WAS_ON_RTR) { 11307#if 0 11308				        printf("Tag 0x%04x is running\n", 11309				      		pending_io->scsiio.tag_num); 11310#endif 11311					continue; 11312				} 11313 11314				switch (ctl_check_ooa(lun, pending_io, 11315				    (union ctl_io *)TAILQ_PREV( 11316				    &pending_io->io_hdr, ctl_ooaq, 11317				    ooa_links))) { 11318 11319				case CTL_ACTION_BLOCK: 11320					TAILQ_INSERT_TAIL(&lun->blocked_queue, 11321							  &pending_io->io_hdr, 11322							  blocked_links); 11323					pending_io->io_hdr.flags |= 11324					    CTL_FLAG_BLOCKED; 11325					break; 11326				case CTL_ACTION_PASS: 11327				case CTL_ACTION_SKIP: 11328					pending_io->io_hdr.flags |= 11329					    CTL_FLAG_IS_WAS_ON_RTR; 11330					ctl_enqueue_rtr(pending_io); 11331					break; 11332				case CTL_ACTION_OVERLAP: 11333					ctl_set_overlapped_cmd( 11334					    (struct ctl_scsiio *)pending_io); 11335					ctl_done(pending_io); 11336					break; 11337				case CTL_ACTION_OVERLAP_TAG: 11338					ctl_set_overlapped_tag( 11339					    (struct ctl_scsiio *)pending_io, 11340					    pending_io->scsiio.tag_num & 0xff); 11341					ctl_done(pending_io); 11342					break; 11343				case CTL_ACTION_ERROR: 11344				default: 11345					ctl_set_internal_failure( 11346						(struct ctl_scsiio *)pending_io, 11347						/*sks_valid*/ 0, 11348						/*retry_count*/ 0); 11349					ctl_done(pending_io); 11350					break; 11351				} 11352			} 11353 11354			/* 11355			 * Build Unit Attention 11356			 */ 11357			for (i = 0; i < CTL_MAX_INITIATORS; i++) { 11358				lun->pending_sense[i].ua_pending |= 11359				                     CTL_UA_ASYM_ACC_CHANGE; 11360			} 11361		} else { 11362			panic("Unhandled HA mode failover, LUN flags = %#x, " 11363			      "ha_mode = %#x", lun->flags, ctl_softc->ha_mode); 11364		} 11365	} 11366	ctl_pause_rtr = 0; 11367	mtx_unlock(&ctl_softc->ctl_lock); 11368} 11369 11370static int 11371ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio) 11372{ 11373	struct ctl_lun *lun; 11374	const struct ctl_cmd_entry *entry; 11375	uint32_t initidx, targ_lun; 11376	int retval; 11377 11378	retval = 0; 11379 11380	lun = NULL; 11381 11382	targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 11383	if ((targ_lun < CTL_MAX_LUNS) 11384	 && (ctl_softc->ctl_luns[targ_lun] != NULL)) { 11385		lun = ctl_softc->ctl_luns[targ_lun]; 11386		/* 11387		 * If the LUN is invalid, pretend that it doesn't exist. 11388		 * It will go away as soon as all pending I/O has been 11389		 * completed.
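 * (The CTL_LUN_DISABLED check below implements this: a disabled LUN is treated exactly like a nonexistent one.)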
11390		 */ 11391		if (lun->flags & CTL_LUN_DISABLED) { 11392			lun = NULL; 11393		} else { 11394			ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun; 11395			ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = 11396				lun->be_lun; 11397			if (lun->be_lun->lun_type == T_PROCESSOR) { 11398				ctsio->io_hdr.flags |= CTL_FLAG_CONTROL_DEV; 11399			} 11400 11401			/* 11402			 * Every I/O goes into the OOA queue for a 11403			 * particular LUN, and stays there until completion. 11404			 */ 11405			mtx_lock(&lun->lun_lock); 11406			TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, 11407			    ooa_links); 11408		} 11409	} else { 11410		ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL; 11411		ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL; 11412	} 11413 11414	/* Get command entry and return error if it is unsupported. */ 11415	entry = ctl_validate_command(ctsio); 11416	if (entry == NULL) { 11417		if (lun) 11418			mtx_unlock(&lun->lun_lock); 11419		return (retval); 11420	} 11421 11422	ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 11423	ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; 11424 11425	/* 11426	 * Check to see whether we can send this command to LUNs that don't 11427	 * exist. This should pretty much only be the case for inquiry 11428	 * and request sense. Further checks, below, really require having 11429	 * a LUN, so we can't really check the command anymore. Just put 11430	 * it on the rtr queue. 11431	 */ 11432	if (lun == NULL) { 11433		if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) { 11434			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11435			ctl_enqueue_rtr((union ctl_io *)ctsio); 11436			return (retval); 11437		} 11438 11439		ctl_set_unsupported_lun(ctsio); 11440		ctl_done((union ctl_io *)ctsio); 11441		CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n")); 11442		return (retval); 11443	} else { 11444		/* 11445		 * Make sure we support this particular command on this LUN. 11446		 * e.g., we don't support writes to the control LUN. 11447		 */ 11448		if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 11449			mtx_unlock(&lun->lun_lock); 11450			ctl_set_invalid_opcode(ctsio); 11451			ctl_done((union ctl_io *)ctsio); 11452			return (retval); 11453		} 11454	} 11455 11456	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11457 11458	/* 11459	 * If we've got a request sense, it'll clear the contingent 11460	 * allegiance condition. Otherwise, if we have a CA condition for 11461	 * this initiator, clear it, because it sent down a command other 11462	 * than request sense. 11463	 */ 11464	if ((ctsio->cdb[0] != REQUEST_SENSE) 11465	 && (ctl_is_set(lun->have_ca, initidx))) 11466		ctl_clear_mask(lun->have_ca, initidx); 11467 11468	/* 11469	 * If the command has this flag set, it handles its own unit 11470	 * attention reporting, so we shouldn't do anything. Otherwise we 11471	 * check for any pending unit attentions, and send them back to the 11472	 * initiator. We only do this when a command initially comes in, 11473	 * not when we pull it off the blocked queue. 11474	 * 11475	 * According to SAM-3, section 5.3.2, the order that things get 11476	 * presented back to the host is basically unit attentions caused 11477	 * by some sort of reset event, busy status, reservation conflicts 11478	 * or task set full, and finally any other status. 11479	 * 11480	 * One issue here is that some of the unit attentions we report 11481	 * don't fall into the "reset" category (e.g. "reported luns data 11482	 * has changed"). So reporting it here, before the reservation 11483	 * check, may be technically wrong.
I guess the only thing to do 11484	 * would be to check for and report the reset events here, and then 11485	 * check for the other unit attention types after we check for a 11486	 * reservation conflict. 11487	 * 11488	 * XXX KDM need to fix this 11489	 */ 11490	if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { 11491		ctl_ua_type ua_type; 11492 11493		ua_type = lun->pending_sense[initidx].ua_pending; 11494		if (ua_type != CTL_UA_NONE) { 11495			scsi_sense_data_type sense_format; 11496 11497			if (lun != NULL) 11498				sense_format = (lun->flags & 11499				    CTL_LUN_SENSE_DESC) ? SSD_TYPE_DESC : 11500				    SSD_TYPE_FIXED; 11501			else 11502				sense_format = SSD_TYPE_FIXED; 11503 11504			ua_type = ctl_build_ua(ua_type, &ctsio->sense_data, 11505			    sense_format); 11506			if (ua_type != CTL_UA_NONE) { 11507				ctsio->scsi_status = SCSI_STATUS_CHECK_COND; 11508				ctsio->io_hdr.status = CTL_SCSI_ERROR | 11509				    CTL_AUTOSENSE; 11510				ctsio->sense_len = SSD_FULL_SIZE; 11511				lun->pending_sense[initidx].ua_pending &= 11512					~ua_type; 11513				mtx_unlock(&lun->lun_lock); 11514				ctl_done((union ctl_io *)ctsio); 11515				return (retval); 11516			} 11517		} 11518	} 11519 11520 11521	if (ctl_scsiio_lun_check(ctl_softc, lun, entry, ctsio) != 0) { 11522		mtx_unlock(&lun->lun_lock); 11523		ctl_done((union ctl_io *)ctsio); 11524		return (retval); 11525	} 11526 11527	/* 11528	 * XXX CHD this is where we want to send IO to other side if 11529	 * this LUN is secondary on this SC. We will need to make a copy 11530	 * of the IO and flag the IO on this side as SENT_2OTHER and flag 11531	 * the copy we send as FROM_OTHER. 11532	 * We also need to stuff the address of the original IO so we can 11533	 * find it easily. Something similar will need to be done on the 11534	 * other side so when we are done we can find the copy. 11535	 */ 11536	if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { 11537		union ctl_ha_msg msg_info; 11538		int isc_retval; 11539 11540		ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11541 11542		msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 11543		msg_info.hdr.original_sc = (union ctl_io *)ctsio; 11544#if 0 11545		printf("1. ctsio %p\n", ctsio); 11546#endif 11547		msg_info.hdr.serializing_sc = NULL; 11548		msg_info.hdr.nexus = ctsio->io_hdr.nexus; 11549		msg_info.scsi.tag_num = ctsio->tag_num; 11550		msg_info.scsi.tag_type = ctsio->tag_type; 11551		memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 11552 11553		ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11554 11555		if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11556		    (void *)&msg_info, sizeof(msg_info), 0)) > 11557		    CTL_HA_STATUS_SUCCESS) { 11558			printf("CTL:precheck, ctl_ha_msg_send returned %d\n", 11559			       isc_retval); 11560			printf("CTL:opcode is %x\n", ctsio->cdb[0]); 11561		} else { 11562#if 0 11563			printf("CTL:Precheck sent msg, opcode is %x\n",opcode); 11564#endif 11565		} 11566 11567		/* 11568		 * XXX KDM this I/O is off the incoming queue, but hasn't 11569		 * been inserted on any other queue. We may need to come 11570		 * up with a holding queue while we wait for serialization 11571		 * so that we have an idea of what we're waiting for from 11572		 * the other side.
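 * (For now the only linkage is the original_sc pointer stuffed into the HA message above, which the other side echoes back in its replies so this I/O can be located again.)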
11573 */ 11574 mtx_unlock(&lun->lun_lock); 11575 return (retval); 11576 } 11577 11578 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 11579 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, 11580 ctl_ooaq, ooa_links))) { 11581 case CTL_ACTION_BLOCK: 11582 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 11583 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 11584 blocked_links); 11585 mtx_unlock(&lun->lun_lock); 11586 return (retval); 11587 case CTL_ACTION_PASS: 11588 case CTL_ACTION_SKIP: 11589 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11590 mtx_unlock(&lun->lun_lock); 11591 ctl_enqueue_rtr((union ctl_io *)ctsio); 11592 break; 11593 case CTL_ACTION_OVERLAP: 11594 mtx_unlock(&lun->lun_lock); 11595 ctl_set_overlapped_cmd(ctsio); 11596 ctl_done((union ctl_io *)ctsio); 11597 break; 11598 case CTL_ACTION_OVERLAP_TAG: 11599 mtx_unlock(&lun->lun_lock); 11600 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 11601 ctl_done((union ctl_io *)ctsio); 11602 break; 11603 case CTL_ACTION_ERROR: 11604 default: 11605 mtx_unlock(&lun->lun_lock); 11606 ctl_set_internal_failure(ctsio, 11607 /*sks_valid*/ 0, 11608 /*retry_count*/ 0); 11609 ctl_done((union ctl_io *)ctsio); 11610 break; 11611 } 11612 return (retval); 11613} 11614 11615const struct ctl_cmd_entry * 11616ctl_get_cmd_entry(struct ctl_scsiio *ctsio) 11617{ 11618 const struct ctl_cmd_entry *entry; 11619 int service_action; 11620 11621 entry = &ctl_cmd_table[ctsio->cdb[0]]; 11622 if (entry->flags & CTL_CMD_FLAG_SA5) { 11623 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; 11624 entry = &((const struct ctl_cmd_entry *) 11625 entry->execute)[service_action]; 11626 } 11627 return (entry); 11628} 11629 11630const struct ctl_cmd_entry * 11631ctl_validate_command(struct ctl_scsiio *ctsio) 11632{ 11633 const struct ctl_cmd_entry *entry; 11634 int i; 11635 uint8_t diff; 11636 11637 entry = ctl_get_cmd_entry(ctsio); 11638 if (entry->execute == NULL) { 11639 ctl_set_invalid_opcode(ctsio); 11640 ctl_done((union ctl_io *)ctsio); 11641 return (NULL); 11642 } 11643 KASSERT(entry->length > 0, 11644 ("Not defined length for command 0x%02x/0x%02x", 11645 ctsio->cdb[0], ctsio->cdb[1])); 11646 for (i = 1; i < entry->length; i++) { 11647 diff = ctsio->cdb[i] & ~entry->usage[i - 1]; 11648 if (diff == 0) 11649 continue; 11650 ctl_set_invalid_field(ctsio, 11651 /*sks_valid*/ 1, 11652 /*command*/ 1, 11653 /*field*/ i, 11654 /*bit_valid*/ 1, 11655 /*bit*/ fls(diff) - 1); 11656 ctl_done((union ctl_io *)ctsio); 11657 return (NULL); 11658 } 11659 return (entry); 11660} 11661 11662static int 11663ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) 11664{ 11665 11666 switch (lun_type) { 11667 case T_PROCESSOR: 11668 if (((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) && 11669 ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0)) 11670 return (0); 11671 break; 11672 case T_DIRECT: 11673 if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0) && 11674 ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0)) 11675 return (0); 11676 break; 11677 default: 11678 return (0); 11679 } 11680 return (1); 11681} 11682 11683static int 11684ctl_scsiio(struct ctl_scsiio *ctsio) 11685{ 11686 int retval; 11687 const struct ctl_cmd_entry *entry; 11688 11689 retval = CTL_RETVAL_COMPLETE; 11690 11691 CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); 11692 11693 entry = ctl_get_cmd_entry(ctsio); 11694 11695 /* 11696 * If this I/O has been aborted, just send it straight to 11697 * ctl_done() without executing it. 
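 * (The completion path notices CTL_FLAG_ABORT and reports the command as aborted rather than executing it.)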
11698 */ 11699 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { 11700 ctl_done((union ctl_io *)ctsio); 11701 goto bailout; 11702 } 11703 11704 /* 11705 * All the checks should have been handled by ctl_scsiio_precheck(). 11706 * We should be clear now to just execute the I/O. 11707 */ 11708 retval = entry->execute(ctsio); 11709 11710bailout: 11711 return (retval); 11712} 11713 11714/* 11715 * Since we only implement one target right now, a bus reset simply resets 11716 * our single target. 11717 */ 11718static int 11719ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io) 11720{ 11721 return(ctl_target_reset(ctl_softc, io, CTL_UA_BUS_RESET)); 11722} 11723 11724static int 11725ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io, 11726 ctl_ua_type ua_type) 11727{ 11728 struct ctl_lun *lun; 11729 int retval; 11730 11731 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11732 union ctl_ha_msg msg_info; 11733 11734 io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11735 msg_info.hdr.nexus = io->io_hdr.nexus; 11736 if (ua_type==CTL_UA_TARG_RESET) 11737 msg_info.task.task_action = CTL_TASK_TARGET_RESET; 11738 else 11739 msg_info.task.task_action = CTL_TASK_BUS_RESET; 11740 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11741 msg_info.hdr.original_sc = NULL; 11742 msg_info.hdr.serializing_sc = NULL; 11743 if (CTL_HA_STATUS_SUCCESS != ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11744 (void *)&msg_info, sizeof(msg_info), 0)) { 11745 } 11746 } 11747 retval = 0; 11748 11749 mtx_lock(&ctl_softc->ctl_lock); 11750 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) 11751 retval += ctl_lun_reset(lun, io, ua_type); 11752 mtx_unlock(&ctl_softc->ctl_lock); 11753 11754 return (retval); 11755} 11756 11757/* 11758 * The LUN should always be set. The I/O is optional, and is used to 11759 * distinguish between I/Os sent by this initiator, and by other 11760 * initiators. We set unit attention for initiators other than this one. 11761 * SAM-3 is vague on this point. It does say that a unit attention should 11762 * be established for other initiators when a LUN is reset (see section 11763 * 5.7.3), but it doesn't specifically say that the unit attention should 11764 * be established for this particular initiator when a LUN is reset. Here 11765 * is the relevant text, from SAM-3 rev 8: 11766 * 11767 * 5.7.2 When a SCSI initiator port aborts its own tasks 11768 * 11769 * When a SCSI initiator port causes its own task(s) to be aborted, no 11770 * notification that the task(s) have been aborted shall be returned to 11771 * the SCSI initiator port other than the completion response for the 11772 * command or task management function action that caused the task(s) to 11773 * be aborted and notification(s) associated with related effects of the 11774 * action (e.g., a reset unit attention condition). 11775 * 11776 * XXX KDM for now, we're setting unit attention for all initiators. 11777 */ 11778static int 11779ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type) 11780{ 11781 union ctl_io *xio; 11782#if 0 11783 uint32_t initindex; 11784#endif 11785 int i; 11786 11787 mtx_lock(&lun->lun_lock); 11788 /* 11789 * Run through the OOA queue and abort each I/O. 
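	 * (The loop below only flags each I/O with CTL_FLAG_ABORT; the
	 * actual cleanup happens later, when each flagged command reaches
	 * ctl_process_done() and has its status rewritten to
	 * CTL_CMD_ABORTED.)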
 */
#if 0
	TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) {
#endif
	for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
	     xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
		xio->io_hdr.flags |= CTL_FLAG_ABORT;
	}

	/*
	 * This version sets unit attention for every initiator except
	 * the one that issued the reset.
	 */
#if 0
	initindex = ctl_get_initindex(&io->io_hdr.nexus);
	for (i = 0; i < CTL_MAX_INITIATORS; i++) {
		if (initindex == i)
			continue;
		lun->pending_sense[i].ua_pending |= ua_type;
	}
#endif

	/*
	 * A reset (any kind, really) clears reservations established with
	 * RESERVE/RELEASE.  It does not clear reservations established
	 * with PERSISTENT RESERVE OUT, but we don't support that at the
	 * moment anyway.  See SPC-2, section 5.6.  SPC-3 doesn't address
	 * reservations made with the RESERVE/RELEASE commands, because
	 * those commands are obsolete in SPC-3.
	 */
	lun->flags &= ~CTL_LUN_RESERVED;

	for (i = 0; i < CTL_MAX_INITIATORS; i++) {
		ctl_clear_mask(lun->have_ca, i);
		lun->pending_sense[i].ua_pending |= ua_type;
	}
	mtx_unlock(&lun->lun_lock);

	return (0);
}

static int
ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id,
    int other_sc)
{
	union ctl_io *xio;
	int found;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	/*
	 * Run through the OOA queue and attempt to find the given I/O.
	 * The target port, initiator ID, tag type and tag number have to
	 * match the values that we got from the initiator.  If we have an
	 * untagged command to abort, simply abort the first untagged command
	 * we come to.  We only allow one untagged command at a time of course.
	 */
	found = 0;
	for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
	     xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {

		if ((targ_port == xio->io_hdr.nexus.targ_port) &&
		    (init_id == xio->io_hdr.nexus.initid.id)) {
			xio->io_hdr.flags |= CTL_FLAG_ABORT;
			found = 1;
			if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) {
				union ctl_ha_msg msg_info;

				msg_info.hdr.nexus = xio->io_hdr.nexus;
				msg_info.task.task_action = CTL_TASK_ABORT_TASK;
				msg_info.task.tag_num = xio->scsiio.tag_num;
				msg_info.task.tag_type = xio->scsiio.tag_type;
				msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
				msg_info.hdr.original_sc = NULL;
				msg_info.hdr.serializing_sc = NULL;
				ctl_ha_msg_send(CTL_HA_CHAN_CTL,
				    (void *)&msg_info, sizeof(msg_info), 0);
			}
		}
	}
	return (found);
}

static int
ctl_abort_task_set(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_lun *lun;
	uint32_t targ_lun;

	/*
	 * Look up the LUN.
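	 * (nexus.targ_mapped_lun was already translated from the
	 * frontend-specific LUN ID by ctl_map_lun() in ctl_queue(), so it
	 * can be used directly as an index into ctl_luns[].)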
11880 */ 11881 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11882 mtx_lock(&softc->ctl_lock); 11883 if ((targ_lun < CTL_MAX_LUNS) && (softc->ctl_luns[targ_lun] != NULL)) 11884 lun = softc->ctl_luns[targ_lun]; 11885 else { 11886 mtx_unlock(&softc->ctl_lock); 11887 return (1); 11888 } 11889 11890 mtx_lock(&lun->lun_lock); 11891 mtx_unlock(&softc->ctl_lock); 11892 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11893 io->io_hdr.nexus.initid.id, 11894 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11895 mtx_unlock(&lun->lun_lock); 11896 return (0); 11897} 11898 11899static int 11900ctl_i_t_nexus_reset(union ctl_io *io) 11901{ 11902 struct ctl_softc *softc = control_softc; 11903 struct ctl_lun *lun; 11904 uint32_t initindex; 11905 11906 initindex = ctl_get_initindex(&io->io_hdr.nexus); 11907 mtx_lock(&softc->ctl_lock); 11908 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11909 mtx_lock(&lun->lun_lock); 11910 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11911 io->io_hdr.nexus.initid.id, 11912 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11913 ctl_clear_mask(lun->have_ca, initindex); 11914 lun->pending_sense[initindex].ua_pending |= CTL_UA_I_T_NEXUS_LOSS; 11915 mtx_unlock(&lun->lun_lock); 11916 } 11917 mtx_unlock(&softc->ctl_lock); 11918 return (0); 11919} 11920 11921static int 11922ctl_abort_task(union ctl_io *io) 11923{ 11924 union ctl_io *xio; 11925 struct ctl_lun *lun; 11926 struct ctl_softc *ctl_softc; 11927#if 0 11928 struct sbuf sb; 11929 char printbuf[128]; 11930#endif 11931 int found; 11932 uint32_t targ_lun; 11933 11934 ctl_softc = control_softc; 11935 found = 0; 11936 11937 /* 11938 * Look up the LUN. 11939 */ 11940 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11941 mtx_lock(&ctl_softc->ctl_lock); 11942 if ((targ_lun < CTL_MAX_LUNS) 11943 && (ctl_softc->ctl_luns[targ_lun] != NULL)) 11944 lun = ctl_softc->ctl_luns[targ_lun]; 11945 else { 11946 mtx_unlock(&ctl_softc->ctl_lock); 11947 goto bailout; 11948 } 11949 11950#if 0 11951 printf("ctl_abort_task: called for lun %lld, tag %d type %d\n", 11952 lun->lun, io->taskio.tag_num, io->taskio.tag_type); 11953#endif 11954 11955 mtx_lock(&lun->lun_lock); 11956 mtx_unlock(&ctl_softc->ctl_lock); 11957 /* 11958 * Run through the OOA queue and attempt to find the given I/O. 11959 * The target port, initiator ID, tag type and tag number have to 11960 * match the values that we got from the initiator. If we have an 11961 * untagged command to abort, simply abort the first untagged command 11962 * we come to. We only allow one untagged command at a time of course. 11963 */ 11964#if 0 11965 TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) { 11966#endif 11967 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11968 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11969#if 0 11970 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN); 11971 11972 sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ", 11973 lun->lun, xio->scsiio.tag_num, 11974 xio->scsiio.tag_type, 11975 (xio->io_hdr.blocked_links.tqe_prev 11976 == NULL) ? "" : " BLOCKED", 11977 (xio->io_hdr.flags & 11978 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 11979 (xio->io_hdr.flags & 11980 CTL_FLAG_ABORT) ? " ABORT" : "", 11981 (xio->io_hdr.flags & 11982 CTL_FLAG_IS_WAS_ON_RTR ? 
" RTR" : "")); 11983 ctl_scsi_command_string(&xio->scsiio, NULL, &sb); 11984 sbuf_finish(&sb); 11985 printf("%s\n", sbuf_data(&sb)); 11986#endif 11987 11988 if ((xio->io_hdr.nexus.targ_port == io->io_hdr.nexus.targ_port) 11989 && (xio->io_hdr.nexus.initid.id == 11990 io->io_hdr.nexus.initid.id)) { 11991 /* 11992 * If the abort says that the task is untagged, the 11993 * task in the queue must be untagged. Otherwise, 11994 * we just check to see whether the tag numbers 11995 * match. This is because the QLogic firmware 11996 * doesn't pass back the tag type in an abort 11997 * request. 11998 */ 11999#if 0 12000 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) 12001 && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) 12002 || (xio->scsiio.tag_num == io->taskio.tag_num)) { 12003#endif 12004 /* 12005 * XXX KDM we've got problems with FC, because it 12006 * doesn't send down a tag type with aborts. So we 12007 * can only really go by the tag number... 12008 * This may cause problems with parallel SCSI. 12009 * Need to figure that out!! 12010 */ 12011 if (xio->scsiio.tag_num == io->taskio.tag_num) { 12012 xio->io_hdr.flags |= CTL_FLAG_ABORT; 12013 found = 1; 12014 if ((io->io_hdr.flags & 12015 CTL_FLAG_FROM_OTHER_SC) == 0 && 12016 !(lun->flags & CTL_LUN_PRIMARY_SC)) { 12017 union ctl_ha_msg msg_info; 12018 12019 io->io_hdr.flags |= 12020 CTL_FLAG_SENT_2OTHER_SC; 12021 msg_info.hdr.nexus = io->io_hdr.nexus; 12022 msg_info.task.task_action = 12023 CTL_TASK_ABORT_TASK; 12024 msg_info.task.tag_num = 12025 io->taskio.tag_num; 12026 msg_info.task.tag_type = 12027 io->taskio.tag_type; 12028 msg_info.hdr.msg_type = 12029 CTL_MSG_MANAGE_TASKS; 12030 msg_info.hdr.original_sc = NULL; 12031 msg_info.hdr.serializing_sc = NULL; 12032#if 0 12033 printf("Sent Abort to other side\n"); 12034#endif 12035 if (CTL_HA_STATUS_SUCCESS != 12036 ctl_ha_msg_send(CTL_HA_CHAN_CTL, 12037 (void *)&msg_info, 12038 sizeof(msg_info), 0)) { 12039 } 12040 } 12041#if 0 12042 printf("ctl_abort_task: found I/O to abort\n"); 12043#endif 12044 break; 12045 } 12046 } 12047 } 12048 mtx_unlock(&lun->lun_lock); 12049 12050bailout: 12051 12052 if (found == 0) { 12053 /* 12054 * This isn't really an error. It's entirely possible for 12055 * the abort and command completion to cross on the wire. 12056 * This is more of an informative/diagnostic error. 12057 */ 12058#if 0 12059 printf("ctl_abort_task: ABORT sent for nonexistent I/O: " 12060 "%d:%d:%d:%d tag %d type %d\n", 12061 io->io_hdr.nexus.initid.id, 12062 io->io_hdr.nexus.targ_port, 12063 io->io_hdr.nexus.targ_target.id, 12064 io->io_hdr.nexus.targ_lun, io->taskio.tag_num, 12065 io->taskio.tag_type); 12066#endif 12067 return (1); 12068 } else 12069 return (0); 12070} 12071 12072/* 12073 * This routine cannot block! It must be callable from an interrupt 12074 * handler as well as from the work thread. 
 */
static void
ctl_run_task(union ctl_io *io)
{
	struct ctl_softc *ctl_softc;
	int retval;
	const char *task_desc;

	CTL_DEBUG_PRINT(("ctl_run_task\n"));

	ctl_softc = control_softc;
	retval = 0;

	KASSERT(io->io_hdr.io_type == CTL_IO_TASK,
	    ("ctl_run_task: Unexpected io_type %d\n",
	     io->io_hdr.io_type));

	task_desc = ctl_scsi_task_string(&io->taskio);
	if (task_desc != NULL) {
#ifdef NEEDTOPORT
		csevent_log(CSC_CTL | CSC_SHELF_SW |
			    CTL_TASK_REPORT,
			    csevent_LogType_Trace,
			    csevent_Severity_Information,
			    csevent_AlertLevel_Green,
			    csevent_FRU_Firmware,
			    csevent_FRU_Unknown,
			    "CTL: received task: %s",task_desc);
#endif
	} else {
#ifdef NEEDTOPORT
		csevent_log(CSC_CTL | CSC_SHELF_SW |
			    CTL_TASK_REPORT,
			    csevent_LogType_Trace,
			    csevent_Severity_Information,
			    csevent_AlertLevel_Green,
			    csevent_FRU_Firmware,
			    csevent_FRU_Unknown,
			    "CTL: received unknown task "
			    "type: %d (%#x)",
			    io->taskio.task_action,
			    io->taskio.task_action);
#endif
	}
	switch (io->taskio.task_action) {
	case CTL_TASK_ABORT_TASK:
		retval = ctl_abort_task(io);
		break;
	case CTL_TASK_ABORT_TASK_SET:
		retval = ctl_abort_task_set(io);
		break;
	case CTL_TASK_CLEAR_ACA:
		break;
	case CTL_TASK_CLEAR_TASK_SET:
		break;
	case CTL_TASK_I_T_NEXUS_RESET:
		retval = ctl_i_t_nexus_reset(io);
		break;
	case CTL_TASK_LUN_RESET: {
		struct ctl_lun *lun;
		uint32_t targ_lun;

		targ_lun = io->io_hdr.nexus.targ_mapped_lun;
		mtx_lock(&ctl_softc->ctl_lock);
		if ((targ_lun < CTL_MAX_LUNS)
		 && (ctl_softc->ctl_luns[targ_lun] != NULL))
			lun = ctl_softc->ctl_luns[targ_lun];
		else {
			mtx_unlock(&ctl_softc->ctl_lock);
			retval = 1;
			break;
		}

		if (!(io->io_hdr.flags &
		    CTL_FLAG_FROM_OTHER_SC)) {
			union ctl_ha_msg msg_info;

			io->io_hdr.flags |=
				CTL_FLAG_SENT_2OTHER_SC;
			msg_info.hdr.msg_type =
				CTL_MSG_MANAGE_TASKS;
			msg_info.hdr.nexus = io->io_hdr.nexus;
			msg_info.task.task_action =
				CTL_TASK_LUN_RESET;
			msg_info.hdr.original_sc = NULL;
			msg_info.hdr.serializing_sc = NULL;
			if (CTL_HA_STATUS_SUCCESS !=
			    ctl_ha_msg_send(CTL_HA_CHAN_CTL,
			    (void *)&msg_info,
			    sizeof(msg_info), 0)) {
			}
		}

		retval = ctl_lun_reset(lun, io,
				       CTL_UA_LUN_RESET);
		mtx_unlock(&ctl_softc->ctl_lock);
		break;
	}
	case CTL_TASK_TARGET_RESET:
		retval = ctl_target_reset(ctl_softc, io, CTL_UA_TARG_RESET);
		break;
	case CTL_TASK_BUS_RESET:
		retval = ctl_bus_reset(ctl_softc, io);
		break;
	case CTL_TASK_PORT_LOGIN:
		break;
	case CTL_TASK_PORT_LOGOUT:
		break;
	default:
		printf("ctl_run_task: got unknown task management event %d\n",
		       io->taskio.task_action);
		break;
	}
	if (retval == 0)
		io->io_hdr.status = CTL_SUCCESS;
	else
		io->io_hdr.status = CTL_ERROR;

	/*
	 * This will queue this I/O to the done queue, but the
	 * work thread won't be able to process it until we
	 * return and the lock is released.
	 */
	ctl_done(io);
}

/*
 * For HA operation.  Handle commands that come in from the other
 * controller.
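 * (The switch below dispatches on io_hdr.msg_type: CTL_MSG_SERIALIZE,
 * R2R, FINISH_IO, PERS_ACTION, BAD_JUJU and the two DATAMOVE variants;
 * anything else is logged and freed.)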
12205 */ 12206static void 12207ctl_handle_isc(union ctl_io *io) 12208{ 12209 int free_io; 12210 struct ctl_lun *lun; 12211 struct ctl_softc *ctl_softc; 12212 uint32_t targ_lun; 12213 12214 ctl_softc = control_softc; 12215 12216 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12217 lun = ctl_softc->ctl_luns[targ_lun]; 12218 12219 switch (io->io_hdr.msg_type) { 12220 case CTL_MSG_SERIALIZE: 12221 free_io = ctl_serialize_other_sc_cmd(&io->scsiio); 12222 break; 12223 case CTL_MSG_R2R: { 12224 const struct ctl_cmd_entry *entry; 12225 12226 /* 12227 * This is only used in SER_ONLY mode. 12228 */ 12229 free_io = 0; 12230 entry = ctl_get_cmd_entry(&io->scsiio); 12231 mtx_lock(&lun->lun_lock); 12232 if (ctl_scsiio_lun_check(ctl_softc, lun, 12233 entry, (struct ctl_scsiio *)io) != 0) { 12234 mtx_unlock(&lun->lun_lock); 12235 ctl_done(io); 12236 break; 12237 } 12238 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 12239 mtx_unlock(&lun->lun_lock); 12240 ctl_enqueue_rtr(io); 12241 break; 12242 } 12243 case CTL_MSG_FINISH_IO: 12244 if (ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 12245 free_io = 0; 12246 ctl_done(io); 12247 } else { 12248 free_io = 1; 12249 mtx_lock(&lun->lun_lock); 12250 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, 12251 ooa_links); 12252 ctl_check_blocked(lun); 12253 mtx_unlock(&lun->lun_lock); 12254 } 12255 break; 12256 case CTL_MSG_PERS_ACTION: 12257 ctl_hndl_per_res_out_on_other_sc( 12258 (union ctl_ha_msg *)&io->presio.pr_msg); 12259 free_io = 1; 12260 break; 12261 case CTL_MSG_BAD_JUJU: 12262 free_io = 0; 12263 ctl_done(io); 12264 break; 12265 case CTL_MSG_DATAMOVE: 12266 /* Only used in XFER mode */ 12267 free_io = 0; 12268 ctl_datamove_remote(io); 12269 break; 12270 case CTL_MSG_DATAMOVE_DONE: 12271 /* Only used in XFER mode */ 12272 free_io = 0; 12273 io->scsiio.be_move_done(io); 12274 break; 12275 default: 12276 free_io = 1; 12277 printf("%s: Invalid message type %d\n", 12278 __func__, io->io_hdr.msg_type); 12279 break; 12280 } 12281 if (free_io) 12282 ctl_free_io(io); 12283 12284} 12285 12286 12287/* 12288 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 12289 * there is no match. 12290 */ 12291static ctl_lun_error_pattern 12292ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) 12293{ 12294 const struct ctl_cmd_entry *entry; 12295 ctl_lun_error_pattern filtered_pattern, pattern; 12296 12297 pattern = desc->error_pattern; 12298 12299 /* 12300 * XXX KDM we need more data passed into this function to match a 12301 * custom pattern, and we actually need to implement custom pattern 12302 * matching. 12303 */ 12304 if (pattern & CTL_LUN_PAT_CMD) 12305 return (CTL_LUN_PAT_CMD); 12306 12307 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) 12308 return (CTL_LUN_PAT_ANY); 12309 12310 entry = ctl_get_cmd_entry(ctsio); 12311 12312 filtered_pattern = entry->pattern & pattern; 12313 12314 /* 12315 * If the user requested specific flags in the pattern (e.g. 12316 * CTL_LUN_PAT_RANGE), make sure the command supports all of those 12317 * flags. 12318 * 12319 * If the user did not specify any flags, it doesn't matter whether 12320 * or not the command supports the flags. 12321 */ 12322 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != 12323 (pattern & ~CTL_LUN_PAT_MASK)) 12324 return (CTL_LUN_PAT_NONE); 12325 12326 /* 12327 * If the user asked for a range check, see if the requested LBA 12328 * range overlaps with this command's LBA range. 
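	 * (Worked example: a descriptor injected for LBAs 0-99 will not
	 * match a WRITE starting at LBA 100, because ctl_extent_check_lba()
	 * reports CTL_ACTION_PASS for disjoint ranges and we bail out with
	 * CTL_LUN_PAT_NONE below.)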
12329 */ 12330 if (filtered_pattern & CTL_LUN_PAT_RANGE) { 12331 uint64_t lba1; 12332 uint32_t len1; 12333 ctl_action action; 12334 int retval; 12335 12336 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); 12337 if (retval != 0) 12338 return (CTL_LUN_PAT_NONE); 12339 12340 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, 12341 desc->lba_range.len); 12342 /* 12343 * A "pass" means that the LBA ranges don't overlap, so 12344 * this doesn't match the user's range criteria. 12345 */ 12346 if (action == CTL_ACTION_PASS) 12347 return (CTL_LUN_PAT_NONE); 12348 } 12349 12350 return (filtered_pattern); 12351} 12352 12353static void 12354ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) 12355{ 12356 struct ctl_error_desc *desc, *desc2; 12357 12358 mtx_assert(&lun->lun_lock, MA_OWNED); 12359 12360 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 12361 ctl_lun_error_pattern pattern; 12362 /* 12363 * Check to see whether this particular command matches 12364 * the pattern in the descriptor. 12365 */ 12366 pattern = ctl_cmd_pattern_match(&io->scsiio, desc); 12367 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) 12368 continue; 12369 12370 switch (desc->lun_error & CTL_LUN_INJ_TYPE) { 12371 case CTL_LUN_INJ_ABORTED: 12372 ctl_set_aborted(&io->scsiio); 12373 break; 12374 case CTL_LUN_INJ_MEDIUM_ERR: 12375 ctl_set_medium_error(&io->scsiio); 12376 break; 12377 case CTL_LUN_INJ_UA: 12378 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET 12379 * OCCURRED */ 12380 ctl_set_ua(&io->scsiio, 0x29, 0x00); 12381 break; 12382 case CTL_LUN_INJ_CUSTOM: 12383 /* 12384 * We're assuming the user knows what he is doing. 12385 * Just copy the sense information without doing 12386 * checks. 12387 */ 12388 bcopy(&desc->custom_sense, &io->scsiio.sense_data, 12389 ctl_min(sizeof(desc->custom_sense), 12390 sizeof(io->scsiio.sense_data))); 12391 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; 12392 io->scsiio.sense_len = SSD_FULL_SIZE; 12393 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 12394 break; 12395 case CTL_LUN_INJ_NONE: 12396 default: 12397 /* 12398 * If this is an error injection type we don't know 12399 * about, clear the continuous flag (if it is set) 12400 * so it will get deleted below. 
12401 */ 12402 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 12403 break; 12404 } 12405 /* 12406 * By default, each error injection action is a one-shot 12407 */ 12408 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 12409 continue; 12410 12411 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 12412 12413 free(desc, M_CTL); 12414 } 12415} 12416 12417#ifdef CTL_IO_DELAY 12418static void 12419ctl_datamove_timer_wakeup(void *arg) 12420{ 12421 union ctl_io *io; 12422 12423 io = (union ctl_io *)arg; 12424 12425 ctl_datamove(io); 12426} 12427#endif /* CTL_IO_DELAY */ 12428 12429void 12430ctl_datamove(union ctl_io *io) 12431{ 12432 void (*fe_datamove)(union ctl_io *io); 12433 12434 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED); 12435 12436 CTL_DEBUG_PRINT(("ctl_datamove\n")); 12437 12438#ifdef CTL_TIME_IO 12439 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12440 char str[256]; 12441 char path_str[64]; 12442 struct sbuf sb; 12443 12444 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12445 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12446 12447 sbuf_cat(&sb, path_str); 12448 switch (io->io_hdr.io_type) { 12449 case CTL_IO_SCSI: 12450 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12451 sbuf_printf(&sb, "\n"); 12452 sbuf_cat(&sb, path_str); 12453 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12454 io->scsiio.tag_num, io->scsiio.tag_type); 12455 break; 12456 case CTL_IO_TASK: 12457 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12458 "Tag Type: %d\n", io->taskio.task_action, 12459 io->taskio.tag_num, io->taskio.tag_type); 12460 break; 12461 default: 12462 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12463 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12464 break; 12465 } 12466 sbuf_cat(&sb, path_str); 12467 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", 12468 (intmax_t)time_uptime - io->io_hdr.start_time); 12469 sbuf_finish(&sb); 12470 printf("%s", sbuf_data(&sb)); 12471 } 12472#endif /* CTL_TIME_IO */ 12473 12474#ifdef CTL_IO_DELAY 12475 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 12476 struct ctl_lun *lun; 12477 12478 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12479 12480 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12481 } else { 12482 struct ctl_lun *lun; 12483 12484 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12485 if ((lun != NULL) 12486 && (lun->delay_info.datamove_delay > 0)) { 12487 struct callout *callout; 12488 12489 callout = (struct callout *)&io->io_hdr.timer_bytes; 12490 callout_init(callout, /*mpsafe*/ 1); 12491 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 12492 callout_reset(callout, 12493 lun->delay_info.datamove_delay * hz, 12494 ctl_datamove_timer_wakeup, io); 12495 if (lun->delay_info.datamove_type == 12496 CTL_DELAY_TYPE_ONESHOT) 12497 lun->delay_info.datamove_delay = 0; 12498 return; 12499 } 12500 } 12501#endif 12502 12503 /* 12504 * This command has been aborted. Set the port status, so we fail 12505 * the data move. 12506 */ 12507 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12508 printf("ctl_datamove: tag 0x%04x on (%ju:%d:%ju:%d) aborted\n", 12509 io->scsiio.tag_num,(uintmax_t)io->io_hdr.nexus.initid.id, 12510 io->io_hdr.nexus.targ_port, 12511 (uintmax_t)io->io_hdr.nexus.targ_target.id, 12512 io->io_hdr.nexus.targ_lun); 12513 io->io_hdr.status = CTL_CMD_ABORTED; 12514 io->io_hdr.port_status = 31337; 12515 /* 12516 * Note that the backend, in this case, will get the 12517 * callback in its context. 
In other cases it may get 12518 * called in the frontend's interrupt thread context. 12519 */ 12520 io->scsiio.be_move_done(io); 12521 return; 12522 } 12523 12524 /* 12525 * If we're in XFER mode and this I/O is from the other shelf 12526 * controller, we need to send the DMA to the other side to 12527 * actually transfer the data to/from the host. In serialize only 12528 * mode the transfer happens below CTL and ctl_datamove() is only 12529 * called on the machine that originally received the I/O. 12530 */ 12531 if ((control_softc->ha_mode == CTL_HA_MODE_XFER) 12532 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 12533 union ctl_ha_msg msg; 12534 uint32_t sg_entries_sent; 12535 int do_sg_copy; 12536 int i; 12537 12538 memset(&msg, 0, sizeof(msg)); 12539 msg.hdr.msg_type = CTL_MSG_DATAMOVE; 12540 msg.hdr.original_sc = io->io_hdr.original_sc; 12541 msg.hdr.serializing_sc = io; 12542 msg.hdr.nexus = io->io_hdr.nexus; 12543 msg.dt.flags = io->io_hdr.flags; 12544 /* 12545 * We convert everything into a S/G list here. We can't 12546 * pass by reference, only by value between controllers. 12547 * So we can't pass a pointer to the S/G list, only as many 12548 * S/G entries as we can fit in here. If it's possible for 12549 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, 12550 * then we need to break this up into multiple transfers. 12551 */ 12552 if (io->scsiio.kern_sg_entries == 0) { 12553 msg.dt.kern_sg_entries = 1; 12554 /* 12555 * If this is in cached memory, flush the cache 12556 * before we send the DMA request to the other 12557 * controller. We want to do this in either the 12558 * read or the write case. The read case is 12559 * straightforward. In the write case, we want to 12560 * make sure nothing is in the local cache that 12561 * could overwrite the DMAed data. 12562 */ 12563 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 12564 /* 12565 * XXX KDM use bus_dmamap_sync() here. 12566 */ 12567 } 12568 12569 /* 12570 * Convert to a physical address if this is a 12571 * virtual address. 12572 */ 12573 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 12574 msg.dt.sg_list[0].addr = 12575 io->scsiio.kern_data_ptr; 12576 } else { 12577 /* 12578 * XXX KDM use busdma here! 12579 */ 12580#if 0 12581 msg.dt.sg_list[0].addr = (void *) 12582 vtophys(io->scsiio.kern_data_ptr); 12583#endif 12584 } 12585 12586 msg.dt.sg_list[0].len = io->scsiio.kern_data_len; 12587 do_sg_copy = 0; 12588 } else { 12589 struct ctl_sg_entry *sgl; 12590 12591 do_sg_copy = 1; 12592 msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; 12593 sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; 12594 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 12595 /* 12596 * XXX KDM use bus_dmamap_sync() here. 12597 */ 12598 } 12599 } 12600 12601 msg.dt.kern_data_len = io->scsiio.kern_data_len; 12602 msg.dt.kern_total_len = io->scsiio.kern_total_len; 12603 msg.dt.kern_data_resid = io->scsiio.kern_data_resid; 12604 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; 12605 msg.dt.sg_sequence = 0; 12606 12607 /* 12608 * Loop until we've sent all of the S/G entries. 
On the
		 * other end, we'll recompose these S/G entries into one
		 * contiguous list to DMA against (see ctl_datamove_remote()).
		 */
		for (sg_entries_sent = 0; sg_entries_sent <
		     msg.dt.kern_sg_entries; msg.dt.sg_sequence++) {
			msg.dt.cur_sg_entries = ctl_min((sizeof(msg.dt.sg_list)/
				sizeof(msg.dt.sg_list[0])),
				msg.dt.kern_sg_entries - sg_entries_sent);

			if (do_sg_copy != 0) {
				struct ctl_sg_entry *sgl;
				int j;

				sgl = (struct ctl_sg_entry *)
					io->scsiio.kern_data_ptr;
				/*
				 * If this is in cached memory, flush the cache
				 * before we send the DMA request to the other
				 * controller.  We want to do this in either
				 * the read or the write case.  The read
				 * case is straightforward.  In the write
				 * case, we want to make sure nothing is
				 * in the local cache that could overwrite
				 * the DMAed data.
				 */

				for (i = sg_entries_sent, j = 0;
				     j < msg.dt.cur_sg_entries; i++, j++) {
					if ((io->io_hdr.flags &
					     CTL_FLAG_NO_DATASYNC) == 0) {
						/*
						 * XXX KDM use bus_dmamap_sync()
						 */
					}
					if ((io->io_hdr.flags &
					     CTL_FLAG_BUS_ADDR) == 0) {
						/*
						 * XXX KDM use busdma.
						 */
#if 0
						msg.dt.sg_list[j].addr =(void *)
						       vtophys(sgl[i].addr);
#endif
					} else {
						msg.dt.sg_list[j].addr =
							sgl[i].addr;
					}
					msg.dt.sg_list[j].len = sgl[i].len;
				}
			}

			sg_entries_sent += msg.dt.cur_sg_entries;
			if (sg_entries_sent >= msg.dt.kern_sg_entries)
				msg.dt.sg_last = 1;
			else
				msg.dt.sg_last = 0;

			/*
			 * XXX KDM drop and reacquire the lock here?
			 */
			if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
			    sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) {
				/*
				 * XXX do something here.
				 */
			}

			msg.dt.sent_sg_entries = sg_entries_sent;
		}
		io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
		if (io->io_hdr.flags & CTL_FLAG_FAILOVER)
			ctl_failover_io(io, /*have_lock*/ 0);

	} else {

		/*
		 * Lookup the fe_datamove() function for this particular
		 * front end.
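		 * (Each frontend port apparently registers its own
		 * fe_datamove handler; ctl_port_idx() just maps the nexus
		 * port number into the ctl_ports[] array.)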
 */
		fe_datamove =
		    control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;

		fe_datamove(io);
	}
}

static void
ctl_send_datamove_done(union ctl_io *io, int have_lock)
{
	union ctl_ha_msg msg;
	int isc_status;

	memset(&msg, 0, sizeof(msg));

	msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
	msg.hdr.original_sc = io;
	msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
	msg.hdr.nexus = io->io_hdr.nexus;
	msg.hdr.status = io->io_hdr.status;
	msg.scsi.tag_num = io->scsiio.tag_num;
	msg.scsi.tag_type = io->scsiio.tag_type;
	msg.scsi.scsi_status = io->scsiio.scsi_status;
	memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
	       sizeof(io->scsiio.sense_data));
	msg.scsi.sense_len = io->scsiio.sense_len;
	msg.scsi.sense_residual = io->scsiio.sense_residual;
	msg.scsi.fetd_status = io->io_hdr.port_status;
	msg.scsi.residual = io->scsiio.residual;
	io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;

	if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
		ctl_failover_io(io, /*have_lock*/ have_lock);
		return;
	}

	isc_status = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0);
	if (isc_status > CTL_HA_STATUS_SUCCESS) {
		/* XXX do something if this fails */
	}

}

/*
 * The DMA to the remote side is done, now we need to tell the other side
 * we're done so it can continue with its data movement.
 */
static void
ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq)
{
	union ctl_io *io;

	io = rq->context;

	if (rq->ret != CTL_HA_STATUS_SUCCESS) {
		printf("%s: ISC DMA write failed with error %d\n", __func__,
		       rq->ret);
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/ rq->ret);
	}

	ctl_dt_req_free(rq);

	/*
	 * In this case, we had to malloc the memory locally.  Free it.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) {
		int i;
		for (i = 0; i < io->scsiio.kern_sg_entries; i++)
			free(io->io_hdr.local_sglist[i].addr, M_CTL);
	}
	/*
	 * The data is in local and remote memory, so now we need to send
	 * status (good or bad) back to the other side.
	 */
	ctl_send_datamove_done(io, /*have_lock*/ 0);
}

/*
 * We've moved the data from the host/controller into local memory.  Now we
 * need to push it over to the remote controller's memory.
 */
static int
ctl_datamove_remote_dm_write_cb(union ctl_io *io)
{
	int retval;

	retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE,
					  ctl_datamove_remote_write_cb);

	return (retval);
}

static void
ctl_datamove_remote_write(union ctl_io *io)
{
	int retval;
	void (*fe_datamove)(union ctl_io *io);

	/*
	 * - Get the data from the host/HBA into local memory.
	 * - DMA memory from the local controller to the remote controller.
	 * - Send status back to the remote controller.
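	 * (Step one is the fe_datamove() call below; step two is kicked off
	 * by ctl_datamove_remote_dm_write_cb() once the data is local; step
	 * three happens in ctl_datamove_remote_write_cb() via
	 * ctl_send_datamove_done().)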
 */
	retval = ctl_datamove_remote_sgl_setup(io);
	if (retval != 0)
		return;

	/* Switch the pointer over so the FETD knows what to do */
	io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist;

	/*
	 * Use a custom move done callback, since we need to send completion
	 * back to the other controller, not to the backend on this side.
	 */
	io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb;

	fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;

	fe_datamove(io);

	return;
}

static int
ctl_datamove_remote_dm_read_cb(union ctl_io *io)
{
#if 0
	char str[256];
	char path_str[64];
	struct sbuf sb;
#endif

	/*
	 * In this case, we had to malloc the memory locally.  Free it.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) {
		int i;
		for (i = 0; i < io->scsiio.kern_sg_entries; i++)
			free(io->io_hdr.local_sglist[i].addr, M_CTL);
	}

#if 0
	ctl_scsi_path_string(io, path_str, sizeof(path_str));
	sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
	sbuf_cat(&sb, path_str);
	ctl_scsi_command_string(&io->scsiio, NULL, &sb);
	sbuf_printf(&sb, "\n");
	sbuf_cat(&sb, path_str);
	sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
		    io->scsiio.tag_num, io->scsiio.tag_type);
	sbuf_cat(&sb, path_str);
	sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__,
		    io->io_hdr.flags, io->io_hdr.status);
	sbuf_finish(&sb);
	printf("%s", sbuf_data(&sb));
#endif

	/*
	 * The read is done, now we need to send status (good or bad) back
	 * to the other side.
	 */
	ctl_send_datamove_done(io, /*have_lock*/ 0);

	return (0);
}

static void
ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq)
{
	union ctl_io *io;
	void (*fe_datamove)(union ctl_io *io);

	io = rq->context;

	if (rq->ret != CTL_HA_STATUS_SUCCESS) {
		printf("%s: ISC DMA read failed with error %d\n", __func__,
		       rq->ret);
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/ rq->ret);
	}

	ctl_dt_req_free(rq);

	/* Switch the pointer over so the FETD knows what to do */
	io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist;

	/*
	 * Use a custom move done callback, since we need to send completion
	 * back to the other controller, not to the backend on this side.
	 */
	io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb;

	/* XXX KDM add checks like the ones in ctl_datamove?
*/ 12889 12890 fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 12891 12892 fe_datamove(io); 12893} 12894 12895static int 12896ctl_datamove_remote_sgl_setup(union ctl_io *io) 12897{ 12898 struct ctl_sg_entry *local_sglist, *remote_sglist; 12899 struct ctl_sg_entry *local_dma_sglist, *remote_dma_sglist; 12900 struct ctl_softc *softc; 12901 int retval; 12902 int i; 12903 12904 retval = 0; 12905 softc = control_softc; 12906 12907 local_sglist = io->io_hdr.local_sglist; 12908 local_dma_sglist = io->io_hdr.local_dma_sglist; 12909 remote_sglist = io->io_hdr.remote_sglist; 12910 remote_dma_sglist = io->io_hdr.remote_dma_sglist; 12911 12912 if (io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) { 12913 for (i = 0; i < io->scsiio.kern_sg_entries; i++) { 12914 local_sglist[i].len = remote_sglist[i].len; 12915 12916 /* 12917 * XXX Detect the situation where the RS-level I/O 12918 * redirector on the other side has already read the 12919 * data off of the AOR RS on this side, and 12920 * transferred it to remote (mirror) memory on the 12921 * other side. Since we already have the data in 12922 * memory here, we just need to use it. 12923 * 12924 * XXX KDM this can probably be removed once we 12925 * get the cache device code in and take the 12926 * current AOR implementation out. 12927 */ 12928#ifdef NEEDTOPORT 12929 if ((remote_sglist[i].addr >= 12930 (void *)vtophys(softc->mirr->addr)) 12931 && (remote_sglist[i].addr < 12932 ((void *)vtophys(softc->mirr->addr) + 12933 CacheMirrorOffset))) { 12934 local_sglist[i].addr = remote_sglist[i].addr - 12935 CacheMirrorOffset; 12936 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 12937 CTL_FLAG_DATA_IN) 12938 io->io_hdr.flags |= CTL_FLAG_REDIR_DONE; 12939 } else { 12940 local_sglist[i].addr = remote_sglist[i].addr + 12941 CacheMirrorOffset; 12942 } 12943#endif 12944#if 0 12945 printf("%s: local %p, remote %p, len %d\n", 12946 __func__, local_sglist[i].addr, 12947 remote_sglist[i].addr, local_sglist[i].len); 12948#endif 12949 } 12950 } else { 12951 uint32_t len_to_go; 12952 12953 /* 12954 * In this case, we don't have automatically allocated 12955 * memory for this I/O on this controller. This typically 12956 * happens with internal CTL I/O -- e.g. inquiry, mode 12957 * sense, etc. Anything coming from RAIDCore will have 12958 * a mirror area available. 12959 */ 12960 len_to_go = io->scsiio.kern_data_len; 12961 12962 /* 12963 * Clear the no datasync flag, we have to use malloced 12964 * buffers. 12965 */ 12966 io->io_hdr.flags &= ~CTL_FLAG_NO_DATASYNC; 12967 12968 /* 12969 * The difficult thing here is that the size of the various 12970 * S/G segments may be different than the size from the 12971 * remote controller. That'll make it harder when DMAing 12972 * the data back to the other side. 
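		 * (Example: a 192KB transfer is chopped into two local
		 * segments of 128KB and 64KB by the loop below, while the
		 * remote list may use different boundaries;
		 * ctl_datamove_remote_xfer() walks both lists in parallel
		 * and clamps each DMA to the shorter remainder.)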
 */
		for (i = 0; (i < sizeof(io->io_hdr.remote_sglist) /
		     sizeof(io->io_hdr.remote_sglist[0])) &&
		     (len_to_go > 0); i++) {
			local_sglist[i].len = ctl_min(len_to_go, 131072);
			CTL_SIZE_8B(local_dma_sglist[i].len,
				    local_sglist[i].len);
			local_sglist[i].addr =
				malloc(local_dma_sglist[i].len, M_CTL, M_WAITOK);

			local_dma_sglist[i].addr = local_sglist[i].addr;

			if (local_sglist[i].addr == NULL) {
				int j;

				printf("malloc failed for %zd bytes!\n",
				       local_dma_sglist[i].len);
				for (j = 0; j < i; j++) {
					free(local_sglist[j].addr, M_CTL);
				}
				ctl_set_internal_failure(&io->scsiio,
							 /*sks_valid*/ 1,
							 /*retry_count*/ 4857);
				retval = 1;
				goto bailout_error;
			}
			/* XXX KDM do we need a sync here? */

			len_to_go -= local_sglist[i].len;
		}
		/*
		 * Reset the number of S/G entries accordingly.  The
		 * original number of S/G entries is available in
		 * rem_sg_entries.
		 */
		io->scsiio.kern_sg_entries = i;

#if 0
		printf("%s: kern_sg_entries = %d\n", __func__,
		       io->scsiio.kern_sg_entries);
		for (i = 0; i < io->scsiio.kern_sg_entries; i++)
			printf("%s: sg[%d] = %p, %d (DMA: %d)\n", __func__, i,
			       local_sglist[i].addr, local_sglist[i].len,
			       local_dma_sglist[i].len);
#endif
	}

	return (retval);

bailout_error:

	ctl_send_datamove_done(io, /*have_lock*/ 0);

	return (retval);
}

static int
ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
			 ctl_ha_dt_cb callback)
{
	struct ctl_ha_dt_req *rq;
	struct ctl_sg_entry *remote_sglist, *local_sglist;
	struct ctl_sg_entry *remote_dma_sglist, *local_dma_sglist;
	uint32_t local_used, remote_used, total_used;
	int retval;
	int i, j;

	retval = 0;

	rq = ctl_dt_req_alloc();

	/*
	 * If we failed to allocate the request, and if the DMA didn't fail
	 * anyway, set busy status.  This is just a resource allocation
	 * failure.
	 */
	if ((rq == NULL)
	 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
		ctl_set_busy(&io->scsiio);

	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) {

		if (rq != NULL)
			ctl_dt_req_free(rq);

		/*
		 * The data move failed.  We need to return status back
		 * to the other controller.  No point in trying to DMA
		 * data to the remote controller.
		 */

		ctl_send_datamove_done(io, /*have_lock*/ 0);

		retval = 1;

		goto bailout;
	}

	local_sglist = io->io_hdr.local_sglist;
	local_dma_sglist = io->io_hdr.local_dma_sglist;
	remote_sglist = io->io_hdr.remote_sglist;
	remote_dma_sglist = io->io_hdr.remote_dma_sglist;
	local_used = 0;
	remote_used = 0;
	total_used = 0;

	if (io->io_hdr.flags & CTL_FLAG_REDIR_DONE) {
		rq->ret = CTL_HA_STATUS_SUCCESS;
		rq->context = io;
		callback(rq);
		goto bailout;
	}

	/*
	 * Pull/push the data over the wire from/to the other controller.
	 * This takes into account the possibility that the local and
	 * remote sglists may not be identical in terms of the size of
	 * the elements and the number of elements.
 *
	 * One fundamental assumption here is that the length allocated for
	 * both the local and remote sglists is identical.  Otherwise, we've
	 * essentially got a coding error of some sort.
	 */
	for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) {
		int isc_ret;
		uint32_t cur_len, dma_length;
		uint8_t *tmp_ptr;

		rq->id = CTL_HA_DATA_CTL;
		rq->command = command;
		rq->context = io;

		/*
		 * Both pointers should be aligned.  But it is possible
		 * that the allocation length is not.  They should both
		 * also have enough slack left over at the end, though,
		 * to round up to the next 8 byte boundary.
		 */
		cur_len = ctl_min(local_sglist[i].len - local_used,
				  remote_sglist[j].len - remote_used);

		/*
		 * In this case, we have a size issue and need to decrease
		 * the size, except in the case where we actually have less
		 * than 8 bytes left.  In that case, we need to increase
		 * the DMA length to get the last bit.
		 */
		if ((cur_len & 0x7) != 0) {
			if (cur_len > 0x7) {
				cur_len = cur_len - (cur_len & 0x7);
				dma_length = cur_len;
			} else {
				CTL_SIZE_8B(dma_length, cur_len);
			}

		} else
			dma_length = cur_len;

		/*
		 * If we had to allocate memory for this I/O, instead of using
		 * the non-cached mirror memory, we'll need to flush the cache
		 * before trying to DMA to the other controller.
		 *
		 * We could end up doing this multiple times for the same
		 * segment if we have a larger local segment than remote
		 * segment.  That shouldn't be an issue.
		 */
		if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
			/*
			 * XXX KDM use bus_dmamap_sync() here.
			 */
		}

		rq->size = dma_length;

		tmp_ptr = (uint8_t *)local_sglist[i].addr;
		tmp_ptr += local_used;

		/* Use physical addresses when talking to ISC hardware */
		if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) {
			/* XXX KDM use busdma */
#if 0
			rq->local = vtophys(tmp_ptr);
#endif
		} else
			rq->local = tmp_ptr;

		tmp_ptr = (uint8_t *)remote_sglist[j].addr;
		tmp_ptr += remote_used;
		rq->remote = tmp_ptr;

		rq->callback = NULL;

		local_used += cur_len;
		if (local_used >= local_sglist[i].len) {
			i++;
			local_used = 0;
		}

		remote_used += cur_len;
		if (remote_used >= remote_sglist[j].len) {
			j++;
			remote_used = 0;
		}
		total_used += cur_len;

		if (total_used >= io->scsiio.kern_data_len)
			rq->callback = callback;

		if ((rq->size & 0x7) != 0) {
			printf("%s: warning: size %d is not on 8b boundary\n",
			       __func__, rq->size);
		}
		if (((uintptr_t)rq->local & 0x7) != 0) {
			printf("%s: warning: local %p not on 8b boundary\n",
			       __func__, rq->local);
		}
		if (((uintptr_t)rq->remote & 0x7) != 0) {
			printf("%s: warning: remote %p not on 8b boundary\n",
			       __func__, rq->remote);
		}
#if 0
		printf("%s: %s: local %#x remote %#x size %d\n", __func__,
		       (command == CTL_HA_DT_CMD_WRITE) ?
"WRITE" : "READ", 13199 rq->local, rq->remote, rq->size); 13200#endif 13201 13202 isc_ret = ctl_dt_single(rq); 13203 if (isc_ret == CTL_HA_STATUS_WAIT) 13204 continue; 13205 13206 if (isc_ret == CTL_HA_STATUS_DISCONNECT) { 13207 rq->ret = CTL_HA_STATUS_SUCCESS; 13208 } else { 13209 rq->ret = isc_ret; 13210 } 13211 callback(rq); 13212 goto bailout; 13213 } 13214 13215bailout: 13216 return (retval); 13217 13218} 13219 13220static void 13221ctl_datamove_remote_read(union ctl_io *io) 13222{ 13223 int retval; 13224 int i; 13225 13226 /* 13227 * This will send an error to the other controller in the case of a 13228 * failure. 13229 */ 13230 retval = ctl_datamove_remote_sgl_setup(io); 13231 if (retval != 0) 13232 return; 13233 13234 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 13235 ctl_datamove_remote_read_cb); 13236 if ((retval != 0) 13237 && ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0)) { 13238 /* 13239 * Make sure we free memory if there was an error.. The 13240 * ctl_datamove_remote_xfer() function will send the 13241 * datamove done message, or call the callback with an 13242 * error if there is a problem. 13243 */ 13244 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 13245 free(io->io_hdr.local_sglist[i].addr, M_CTL); 13246 } 13247 13248 return; 13249} 13250 13251/* 13252 * Process a datamove request from the other controller. This is used for 13253 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 13254 * first. Once that is complete, the data gets DMAed into the remote 13255 * controller's memory. For reads, we DMA from the remote controller's 13256 * memory into our memory first, and then move it out to the FETD. 13257 */ 13258static void 13259ctl_datamove_remote(union ctl_io *io) 13260{ 13261 struct ctl_softc *softc; 13262 13263 softc = control_softc; 13264 13265 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 13266 13267 /* 13268 * Note that we look for an aborted I/O here, but don't do some of 13269 * the other checks that ctl_datamove() normally does. We don't 13270 * need to run the task queue, because this I/O is on the ISC 13271 * queue, which is executed by the work thread after the task queue. 13272 * We don't need to run the datamove delay code, since that should 13273 * have been done if need be on the other controller. 
 */
	if (io->io_hdr.flags & CTL_FLAG_ABORT) {

		printf("%s: tag 0x%04x on (%ju:%d:%ju:%d) aborted\n", __func__,
		       io->scsiio.tag_num,
		       (uintmax_t)io->io_hdr.nexus.initid.id,
		       io->io_hdr.nexus.targ_port,
		       (uintmax_t)io->io_hdr.nexus.targ_target.id,
		       io->io_hdr.nexus.targ_lun);
		io->io_hdr.status = CTL_CMD_ABORTED;
		io->io_hdr.port_status = 31338;

		ctl_send_datamove_done(io, /*have_lock*/ 0);

		return;
	}

	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) {
		ctl_datamove_remote_write(io);
	} else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) {
		ctl_datamove_remote_read(io);
	} else {
		union ctl_ha_msg msg;
		struct scsi_sense_data *sense;
		uint8_t sks[3];
		int retry_count;

		memset(&msg, 0, sizeof(msg));

		msg.hdr.msg_type = CTL_MSG_BAD_JUJU;
		msg.hdr.status = CTL_SCSI_ERROR;
		msg.scsi.scsi_status = SCSI_STATUS_CHECK_COND;

		retry_count = 4243;

		sense = &msg.scsi.sense_data;
		sks[0] = SSD_SCS_VALID;
		sks[1] = (retry_count >> 8) & 0xff;
		sks[2] = retry_count & 0xff;

		/* "Internal target failure" */
		scsi_set_sense_data(sense,
				    /*sense_format*/ SSD_TYPE_NONE,
				    /*current_error*/ 1,
				    /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
				    /*asc*/ 0x44,
				    /*ascq*/ 0x00,
				    /*type*/ SSD_ELEM_SKS,
				    /*size*/ sizeof(sks),
				    /*data*/ sks,
				    SSD_ELEM_NONE);

		io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
		if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
			ctl_failover_io(io, /*have_lock*/ 1);
			return;
		}

		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0) >
		    CTL_HA_STATUS_SUCCESS) {
			/* XXX KDM what to do if this fails?
*/ 13334 } 13335 return; 13336 } 13337 13338} 13339 13340static int 13341ctl_process_done(union ctl_io *io) 13342{ 13343 struct ctl_lun *lun; 13344 struct ctl_softc *ctl_softc; 13345 void (*fe_done)(union ctl_io *io); 13346 uint32_t targ_port = ctl_port_idx(io->io_hdr.nexus.targ_port); 13347 13348 CTL_DEBUG_PRINT(("ctl_process_done\n")); 13349 13350 fe_done = 13351 control_softc->ctl_ports[targ_port]->fe_done; 13352 13353#ifdef CTL_TIME_IO 13354 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 13355 char str[256]; 13356 char path_str[64]; 13357 struct sbuf sb; 13358 13359 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 13360 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 13361 13362 sbuf_cat(&sb, path_str); 13363 switch (io->io_hdr.io_type) { 13364 case CTL_IO_SCSI: 13365 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 13366 sbuf_printf(&sb, "\n"); 13367 sbuf_cat(&sb, path_str); 13368 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 13369 io->scsiio.tag_num, io->scsiio.tag_type); 13370 break; 13371 case CTL_IO_TASK: 13372 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 13373 "Tag Type: %d\n", io->taskio.task_action, 13374 io->taskio.tag_num, io->taskio.tag_type); 13375 break; 13376 default: 13377 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 13378 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 13379 break; 13380 } 13381 sbuf_cat(&sb, path_str); 13382 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", 13383 (intmax_t)time_uptime - io->io_hdr.start_time); 13384 sbuf_finish(&sb); 13385 printf("%s", sbuf_data(&sb)); 13386 } 13387#endif /* CTL_TIME_IO */ 13388 13389 switch (io->io_hdr.io_type) { 13390 case CTL_IO_SCSI: 13391 break; 13392 case CTL_IO_TASK: 13393 if (bootverbose || verbose > 0) 13394 ctl_io_error_print(io, NULL); 13395 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 13396 ctl_free_io(io); 13397 else 13398 fe_done(io); 13399 return (CTL_RETVAL_COMPLETE); 13400 break; 13401 default: 13402 printf("ctl_process_done: invalid io type %d\n", 13403 io->io_hdr.io_type); 13404 panic("ctl_process_done: invalid io type %d\n", 13405 io->io_hdr.io_type); 13406 break; /* NOTREACHED */ 13407 } 13408 13409 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13410 if (lun == NULL) { 13411 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", 13412 io->io_hdr.nexus.targ_mapped_lun)); 13413 fe_done(io); 13414 goto bailout; 13415 } 13416 ctl_softc = lun->ctl_softc; 13417 13418 mtx_lock(&lun->lun_lock); 13419 13420 /* 13421 * Check to see if we have any errors to inject here. We only 13422 * inject errors for commands that don't already have errors set. 13423 */ 13424 if ((STAILQ_FIRST(&lun->error_list) != NULL) 13425 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) 13426 ctl_inject_error(lun, io); 13427 13428 /* 13429 * XXX KDM how do we treat commands that aren't completed 13430 * successfully? 13431 * 13432 * XXX KDM should we also track I/O latency? 
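	 * (As the check below stands, only commands that complete with
	 * CTL_SUCCESS are counted, so failed I/O never shows up in the
	 * per-port statistics.)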
 */
	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS &&
	    io->io_hdr.io_type == CTL_IO_SCSI) {
#ifdef CTL_TIME_IO
		struct bintime cur_bt;
#endif
		int type;

		if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		    CTL_FLAG_DATA_IN)
			type = CTL_STATS_READ;
		else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
			 CTL_FLAG_DATA_OUT)
			type = CTL_STATS_WRITE;
		else
			type = CTL_STATS_NO_IO;

		lun->stats.ports[targ_port].bytes[type] +=
		    io->scsiio.kern_total_len;
		lun->stats.ports[targ_port].operations[type]++;
#ifdef CTL_TIME_IO
		bintime_add(&lun->stats.ports[targ_port].dma_time[type],
			    &io->io_hdr.dma_bt);
		lun->stats.ports[targ_port].num_dmas[type] +=
		    io->io_hdr.num_dmas;
		getbintime(&cur_bt);
		bintime_sub(&cur_bt, &io->io_hdr.start_bt);
		bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt);
#endif
	}

	/*
	 * Remove this from the OOA queue.
	 */
	TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);

	/*
	 * Run through the blocked queue on this LUN and see if anything
	 * has become unblocked, now that this transaction is done.
	 */
	ctl_check_blocked(lun);

	/*
	 * If the LUN has been invalidated, free it if there is nothing
	 * left on its OOA queue.
	 */
	if ((lun->flags & CTL_LUN_INVALID)
	 && TAILQ_EMPTY(&lun->ooa_queue)) {
		mtx_unlock(&lun->lun_lock);
		mtx_lock(&ctl_softc->ctl_lock);
		ctl_free_lun(lun);
		mtx_unlock(&ctl_softc->ctl_lock);
	} else
		mtx_unlock(&lun->lun_lock);

	/*
	 * If this command has been aborted, make sure we set the status
	 * properly.  The FETD is responsible for freeing the I/O and doing
	 * whatever it needs to do to clean up its state.
	 */
	if (io->io_hdr.flags & CTL_FLAG_ABORT)
		io->io_hdr.status = CTL_CMD_ABORTED;

	/*
	 * Status for task management commands is printed in the
	 * CTL_IO_TASK case above, before they return; they should never
	 * even make it down here.  For SCSI commands, we filter out any
	 * unit attention errors, since they happen on every boot and
	 * would clutter up the log.
	 */
	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI: {
		int error_code, sense_key, asc, ascq;

		sense_key = 0;

		if (((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR)
		 && (io->scsiio.scsi_status == SCSI_STATUS_CHECK_COND)) {
			/*
			 * Since this is just for printing, no need to
			 * show errors here.
13514 */ 13515 scsi_extract_sense_len(&io->scsiio.sense_data, 13516 io->scsiio.sense_len, 13517 &error_code, 13518 &sense_key, 13519 &asc, 13520 &ascq, 13521 /*show_errors*/ 0); 13522 } 13523 13524 if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) 13525 && (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SCSI_ERROR) 13526 || (io->scsiio.scsi_status != SCSI_STATUS_CHECK_COND) 13527 || (sense_key != SSD_KEY_UNIT_ATTENTION))) { 13528 13529 if ((time_uptime - ctl_softc->last_print_jiffies) <= 0){ 13530 ctl_softc->skipped_prints++; 13531 } else { 13532 uint32_t skipped_prints; 13533 13534 skipped_prints = ctl_softc->skipped_prints; 13535 13536 ctl_softc->skipped_prints = 0; 13537 ctl_softc->last_print_jiffies = time_uptime; 13538 13539 if (skipped_prints > 0) { 13540#ifdef NEEDTOPORT 13541 csevent_log(CSC_CTL | CSC_SHELF_SW | 13542 CTL_ERROR_REPORT, 13543 csevent_LogType_Trace, 13544 csevent_Severity_Information, 13545 csevent_AlertLevel_Green, 13546 csevent_FRU_Firmware, 13547 csevent_FRU_Unknown, 13548 "High CTL error volume, %d prints " 13549 "skipped", skipped_prints); 13550#endif 13551 } 13552 if (bootverbose || verbose > 0) 13553 ctl_io_error_print(io, NULL); 13554 } 13555 } 13556 break; 13557 } 13558 case CTL_IO_TASK: 13559 if (bootverbose || verbose > 0) 13560 ctl_io_error_print(io, NULL); 13561 break; 13562 default: 13563 break; 13564 } 13565 13566 /* 13567 * Tell the FETD or the other shelf controller we're done with this 13568 * command. Note that only SCSI commands get to this point. Task 13569 * management commands are completed above. 13570 * 13571 * We only send status to the other controller if we're in XFER 13572 * mode. In SER_ONLY mode, the I/O is done on the controller that 13573 * received the I/O (from CTL's perspective), and so the status is 13574 * generated there. 13575 * 13576 * XXX KDM if we hold the lock here, we could cause a deadlock 13577 * if the frontend comes back in in this context to queue 13578 * something. 13579 */ 13580 if ((ctl_softc->ha_mode == CTL_HA_MODE_XFER) 13581 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 13582 union ctl_ha_msg msg; 13583 13584 memset(&msg, 0, sizeof(msg)); 13585 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 13586 msg.hdr.original_sc = io->io_hdr.original_sc; 13587 msg.hdr.nexus = io->io_hdr.nexus; 13588 msg.hdr.status = io->io_hdr.status; 13589 msg.scsi.scsi_status = io->scsiio.scsi_status; 13590 msg.scsi.tag_num = io->scsiio.tag_num; 13591 msg.scsi.tag_type = io->scsiio.tag_type; 13592 msg.scsi.sense_len = io->scsiio.sense_len; 13593 msg.scsi.sense_residual = io->scsiio.sense_residual; 13594 msg.scsi.residual = io->scsiio.residual; 13595 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 13596 sizeof(io->scsiio.sense_data)); 13597 /* 13598 * We copy this whether or not this is an I/O-related 13599 * command. Otherwise, we'd have to go and check to see 13600 * whether it's a read/write command, and it really isn't 13601 * worth it. 13602 */ 13603 memcpy(&msg.scsi.lbalen, 13604 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 13605 sizeof(msg.scsi.lbalen)); 13606 13607 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 13608 sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) { 13609 /* XXX do something here */ 13610 } 13611 13612 ctl_free_io(io); 13613 } else 13614 fe_done(io); 13615 13616bailout: 13617 13618 return (CTL_RETVAL_COMPLETE); 13619} 13620 13621/* 13622 * Front end should call this if it doesn't do autosense. When the request 13623 * sense comes back in from the initiator, we'll dequeue this and send it. 
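 * (The sense data is parked in lun->pending_sense[] for that initiator
 * and the have_ca bit is set; a later REQUEST SENSE from the same
 * initiator is then answered from there.)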
int
ctl_queue_sense(union ctl_io *io)
{
	struct ctl_lun *lun;
	struct ctl_softc *ctl_softc;
	uint32_t initidx, targ_lun;

	ctl_softc = control_softc;

	CTL_DEBUG_PRINT(("ctl_queue_sense\n"));

	/*
	 * LUN lookup will likely move to the ctl_work_thread() once we
	 * have our new queueing infrastructure (that doesn't put things on
	 * a per-LUN queue initially).  That is so that we can handle
	 * things like an INQUIRY to a LUN that we don't have enabled.  We
	 * can't deal with that right now.
	 */
	mtx_lock(&ctl_softc->ctl_lock);

	/*
	 * If we don't have a LUN for this, just toss the sense
	 * information.
	 */
	targ_lun = io->io_hdr.nexus.targ_lun;
	targ_lun = ctl_map_lun(io->io_hdr.nexus.targ_port, targ_lun);
	if ((targ_lun < CTL_MAX_LUNS)
	 && (ctl_softc->ctl_luns[targ_lun] != NULL))
		lun = ctl_softc->ctl_luns[targ_lun];
	else
		goto bailout;

	initidx = ctl_get_initindex(&io->io_hdr.nexus);

	mtx_lock(&lun->lun_lock);
	/*
	 * We already have CA set for this initiator on this LUN; toss the
	 * new sense information.
	 */
	if (ctl_is_set(lun->have_ca, initidx)) {
		mtx_unlock(&lun->lun_lock);
		goto bailout;
	}

	memcpy(&lun->pending_sense[initidx].sense, &io->scsiio.sense_data,
	    ctl_min(sizeof(lun->pending_sense[initidx].sense),
	    sizeof(io->scsiio.sense_data)));
	ctl_set_mask(lun->have_ca, initidx);
	mtx_unlock(&lun->lun_lock);

bailout:
	mtx_unlock(&ctl_softc->ctl_lock);

	ctl_free_io(io);

	return (CTL_RETVAL_COMPLETE);
}

/*
 * Primary command inlet from frontend ports.  All SCSI and task I/O
 * requests must go through this function.
 */
int
ctl_queue(union ctl_io *io)
{
	struct ctl_softc *ctl_softc;

	CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0]));

	ctl_softc = control_softc;

#ifdef CTL_TIME_IO
	io->io_hdr.start_time = time_uptime;
	getbintime(&io->io_hdr.start_bt);
#endif /* CTL_TIME_IO */

	/* Map FE-specific LUN ID into global one. */
	io->io_hdr.nexus.targ_mapped_lun =
	    ctl_map_lun(io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun);

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		ctl_enqueue_incoming(io);
		break;
	case CTL_IO_TASK:
		ctl_run_task(io);
		break;
	default:
		printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type);
		return (EINVAL);
	}

	return (CTL_RETVAL_COMPLETE);
}

#ifdef CTL_IO_DELAY
static void
ctl_done_timer_wakeup(void *arg)
{
	union ctl_io *io;

	io = (union ctl_io *)arg;
	ctl_done(io);
}
#endif /* CTL_IO_DELAY */

void
ctl_done(union ctl_io *io)
{
	struct ctl_softc *ctl_softc;

	ctl_softc = control_softc;

	/*
	 * Enable this to catch duplicate completion issues.
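	 * (Flip the "#if 0" below to "#if 1" to turn the check on.  It tags
	 * each I/O with CTL_FLAG_ALREADY_DONE on its first trip through
	 * here and complains loudly on any second trip.)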
	 */
#if 0
	if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) {
		printf("%s: type %d msg %d cdb %x iptl: "
		       "%d:%d:%d:%d tag 0x%04x "
		       "flag %#x status %x\n",
		       __func__,
		       io->io_hdr.io_type,
		       io->io_hdr.msg_type,
		       io->scsiio.cdb[0],
		       io->io_hdr.nexus.initid.id,
		       io->io_hdr.nexus.targ_port,
		       io->io_hdr.nexus.targ_target.id,
		       io->io_hdr.nexus.targ_lun,
		       (io->io_hdr.io_type == CTL_IO_TASK) ?
		       io->taskio.tag_num : io->scsiio.tag_num,
		       io->io_hdr.flags,
		       io->io_hdr.status);
	} else
		io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE;
#endif

	/*
	 * This is an internal copy of an I/O, and should not go through
	 * the normal done processing logic.
	 */
	if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
		return;

	/*
	 * We need to send a message to the serializing shelf controller,
	 * so that it can finish the I/O as well.  We don't send a finish
	 * message for task management commands; those aren't serialized
	 * in the OOA queue, but rather are simply executed on both shelf
	 * controllers.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)
	 && (io->io_hdr.io_type != CTL_IO_TASK)) {
		union ctl_ha_msg msg_io;

		msg_io.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg_io.hdr.serializing_sc = io->io_hdr.serializing_sc;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_io,
		    sizeof(msg_io), 0) != CTL_HA_STATUS_SUCCESS) {
			/* XXX KDM send failures are currently ignored */
		}
		/* continue on to finish IO */
	}
#ifdef CTL_IO_DELAY
	if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
		io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
	} else {
		struct ctl_lun *lun;

		lun = (struct ctl_lun *)io->io_hdr.ctl_private[
		    CTL_PRIV_LUN].ptr;
		if ((lun != NULL)
		 && (lun->delay_info.done_delay > 0)) {
			struct callout *callout;

			callout = (struct callout *)&io->io_hdr.timer_bytes;
			callout_init(callout, /*mpsafe*/ 1);
			io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
			callout_reset(callout,
			    lun->delay_info.done_delay * hz,
			    ctl_done_timer_wakeup, io);
			if (lun->delay_info.done_type ==
			    CTL_DELAY_TYPE_ONESHOT)
				lun->delay_info.done_delay = 0;
			return;
		}
	}
#endif /* CTL_IO_DELAY */

	ctl_enqueue_done(io);
}

int
ctl_isc(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	int retval;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	CTL_DEBUG_PRINT(("ctl_isc: command: %02x\n", ctsio->cdb[0]));

	CTL_DEBUG_PRINT(("ctl_isc: calling data_submit()\n"));

	retval = lun->backend->data_submit((union ctl_io *)ctsio);

	return (retval);
}

static void
ctl_work_thread(void *arg)
{
	struct ctl_thread *thr = (struct ctl_thread *)arg;
	struct ctl_softc *softc = thr->ctl_softc;
	union ctl_io *io;
	int retval;

	CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));

	for (;;) {
		retval = 0;

		/*
		 * We handle the queues in this order:
		 * - ISC
		 * - done queue (to free up resources, unblock other commands)
		 * - RtR queue
		 * - incoming queue
		 *
		 * If those queues are all empty, we sleep until new work
		 * arrives.
		 */
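		/*
		 * (Each branch below pops one item, drops queue_lock before
		 * doing the actual work, and re-takes the lock on the next
		 * pass via "continue", so the lock is never held across
		 * ctl_handle_isc(), ctl_process_done(), ctl_scsiio() or
		 * ctl_scsiio_precheck().)
		 */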
		mtx_lock(&thr->queue_lock);
		io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->isc_queue, links);
			mtx_unlock(&thr->queue_lock);
			ctl_handle_isc(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->done_queue, links);
			/* clear any blocked commands, call fe_done */
			mtx_unlock(&thr->queue_lock);
			retval = ctl_process_done(io);
			continue;
		}
		if (!ctl_pause_rtr) {
			io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
			if (io != NULL) {
				STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
				mtx_unlock(&thr->queue_lock);
				retval = ctl_scsiio(&io->scsiio);
				if (retval != CTL_RETVAL_COMPLETE)
					CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
				continue;
			}
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->incoming_queue, links);
			mtx_unlock(&thr->queue_lock);
			ctl_scsiio_precheck(softc, &io->scsiio);
			continue;
		}

		/* Sleep until we have something to do. */
		mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0);
	}
}

static void
ctl_lun_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_be_lun *be_lun;

	CTL_DEBUG_PRINT(("ctl_lun_thread starting\n"));

	for (;;) {
		mtx_lock(&softc->ctl_lock);
		be_lun = STAILQ_FIRST(&softc->pending_lun_queue);
		if (be_lun != NULL) {
			STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links);
			mtx_unlock(&softc->ctl_lock);
			ctl_create_lun(be_lun);
			continue;
		}

		/* Sleep until we have something to do. */
		mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock,
		    PDROP | PRIBIO, "-", 0);
	}
}
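/*
 * (All four ctl_enqueue_*() helpers below pick a worker thread with
 * "targ_mapped_lun % worker_threads".  Keying on the mapped LUN keeps every
 * phase of every I/O for a given LUN on the same thread, which preserves
 * per-LUN ordering without any extra locking between phases.)
 */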
static void
ctl_enqueue_incoming(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun %
	    worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_rtr(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun %
	    worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_done(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun %
	    worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

static void
ctl_enqueue_isc(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun %
	    worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/* Initialization and failover */

void
ctl_init_isc_msg(void)
{
	printf("CTL: Still calling this thing\n");
}

/*
 * Init component
 *	Initializes the component into the configuration defined by
 *	bootMode (see hasc-sv.c).
 *	returns ctl_ha_comp_status:
 *		OK
 *		ERROR - fatal error
 */
static ctl_ha_comp_status
ctl_isc_init(struct ctl_ha_component *c)
{
	ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;

	c->status = ret;
	return (ret);
}
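#if 0
/*
 * Illustrative sketch only, deliberately compiled out: how the HASC
 * framework described in the comments here is expected to drive one of
 * these components.  CTL_HA_STATE_SINGLE is an assumed constant name
 * (inferred from the CTL_HA_STATE_IS_SINGLE() macro used below), and
 * nothing in this block is part of the driver proper.
 */
static void
example_ha_component_lifecycle(struct ctl_ha_component *c)
{

	c->init(c);                       /* expect CTL_HA_COMP_STATUS_OK */
	c->start(c, CTL_HA_STATE_SINGLE); /* UNKNOWN->SINGLE bootstrap */
	c->quiesce(c);                    /* clear errors, pause RtR queue */
	c->start(c, CTL_HA_STATE_SINGLE); /* restart after quiesce */
}
#endif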
/*
 * Start component
 *	Starts the component in the state requested.  If the component
 *	starts successfully, it must set its own state to the requested
 *	state.  When the requested state is HASC_STATE_HA, the component
 *	may refine it by adding the _SLAVE or _MASTER flags.
 *	Currently allowed state transitions are:
 *	UNKNOWN->HA	- initial startup
 *	UNKNOWN->SINGLE	- initial startup when no partner is detected
 *	HA->SINGLE	- failover
 *	returns ctl_ha_comp_status:
 *		OK	- component successfully started in requested state
 *		FAILED	- could not start in the requested state; failover
 *			  may be possible
 *		ERROR	- fatal error detected, no future startup possible
 */
static ctl_ha_comp_status
ctl_isc_start(struct ctl_ha_component *c, ctl_ha_state state)
{
	ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;

	printf("%s: go\n", __func__);

	/* UNKNOWN->HA or UNKNOWN->SINGLE (bootstrap) */
	if (c->state == CTL_HA_STATE_UNKNOWN) {
		ctl_is_single = 0;
		if (ctl_ha_msg_create(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
		    != CTL_HA_STATUS_SUCCESS) {
			printf("ctl_isc_start: ctl_ha_msg_create failed.\n");
			ret = CTL_HA_COMP_STATUS_ERROR;
		}
	} else if (CTL_HA_STATE_IS_HA(c->state)
		&& CTL_HA_STATE_IS_SINGLE(state)) {
		/* HA->SINGLE transition */
		ctl_failover();
		ctl_is_single = 1;
	} else {
		printf("ctl_isc_start: Invalid state transition %X->%X\n",
		       c->state, state);
		ret = CTL_HA_COMP_STATUS_ERROR;
	}
	if (CTL_HA_STATE_IS_SINGLE(state))
		ctl_is_single = 1;

	c->state = state;
	c->status = ret;
	return (ret);
}

/*
 * Quiesce component
 *	The component must clear any error conditions (set status to OK)
 *	and prepare itself for another Start call.
 *	returns ctl_ha_comp_status:
 *		OK
 *		ERROR
 */
static ctl_ha_comp_status
ctl_isc_quiesce(struct ctl_ha_component *c)
{
	ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;

	ctl_pause_rtr = 1;
	c->status = ret;
	return (ret);
}

struct ctl_ha_component ctl_ha_component_ctlisc =
{
	.name = "CTL ISC",
	.state = CTL_HA_STATE_UNKNOWN,
	.init = ctl_isc_init,
	.start = ctl_isc_start,
	.quiesce = ctl_isc_quiesce
};

/*
 * vim: ts=8
 */