ctl.c revision 287940
1/*- 2 * Copyright (c) 2003-2009 Silicon Graphics International Corp. 3 * Copyright (c) 2012 The FreeBSD Foundation 4 * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org> 5 * All rights reserved. 6 * 7 * Portions of this software were developed by Edward Tomasz Napierala 8 * under sponsorship from the FreeBSD Foundation. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions, and the following disclaimer, 15 * without modification. 16 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 17 * substantially similar to the "NO WARRANTY" disclaimer below 18 * ("Disclaimer") and any redistribution must be conditioned upon 19 * including a substantially similar Disclaimer requirement for further 20 * binary redistribution. 21 * 22 * NO WARRANTY 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 27 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 32 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 * POSSIBILITY OF SUCH DAMAGES. 34 * 35 * $Id$ 36 */ 37/* 38 * CAM Target Layer, a SCSI device emulation subsystem. 
39 * 40 * Author: Ken Merry <ken@FreeBSD.org> 41 */ 42 43#define _CTL_C 44 45#include <sys/cdefs.h> 46__FBSDID("$FreeBSD: head/sys/cam/ctl/ctl.c 287940 2015-09-17 21:51:11Z mav $"); 47 48#include <sys/param.h> 49#include <sys/systm.h> 50#include <sys/ctype.h> 51#include <sys/kernel.h> 52#include <sys/types.h> 53#include <sys/kthread.h> 54#include <sys/bio.h> 55#include <sys/fcntl.h> 56#include <sys/lock.h> 57#include <sys/module.h> 58#include <sys/mutex.h> 59#include <sys/condvar.h> 60#include <sys/malloc.h> 61#include <sys/conf.h> 62#include <sys/ioccom.h> 63#include <sys/queue.h> 64#include <sys/sbuf.h> 65#include <sys/smp.h> 66#include <sys/endian.h> 67#include <sys/sysctl.h> 68#include <vm/uma.h> 69 70#include <cam/cam.h> 71#include <cam/scsi/scsi_all.h> 72#include <cam/scsi/scsi_da.h> 73#include <cam/ctl/ctl_io.h> 74#include <cam/ctl/ctl.h> 75#include <cam/ctl/ctl_frontend.h> 76#include <cam/ctl/ctl_util.h> 77#include <cam/ctl/ctl_backend.h> 78#include <cam/ctl/ctl_ioctl.h> 79#include <cam/ctl/ctl_ha.h> 80#include <cam/ctl/ctl_private.h> 81#include <cam/ctl/ctl_debug.h> 82#include <cam/ctl/ctl_scsi_all.h> 83#include <cam/ctl/ctl_error.h> 84 85struct ctl_softc *control_softc = NULL; 86 87/* 88 * Template mode pages. 89 */ 90 91/* 92 * Note that these are default values only. The actual values will be 93 * filled in when the user does a mode sense. 
94 */ 95const static struct copan_debugconf_subpage debugconf_page_default = { 96 DBGCNF_PAGE_CODE | SMPH_SPF, /* page_code */ 97 DBGCNF_SUBPAGE_CODE, /* subpage */ 98 {(sizeof(struct copan_debugconf_subpage) - 4) >> 8, 99 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */ 100 DBGCNF_VERSION, /* page_version */ 101 {CTL_TIME_IO_DEFAULT_SECS>>8, 102 CTL_TIME_IO_DEFAULT_SECS>>0}, /* ctl_time_io_secs */ 103}; 104 105const static struct copan_debugconf_subpage debugconf_page_changeable = { 106 DBGCNF_PAGE_CODE | SMPH_SPF, /* page_code */ 107 DBGCNF_SUBPAGE_CODE, /* subpage */ 108 {(sizeof(struct copan_debugconf_subpage) - 4) >> 8, 109 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */ 110 0, /* page_version */ 111 {0xff,0xff}, /* ctl_time_io_secs */ 112}; 113 114const static struct scsi_da_rw_recovery_page rw_er_page_default = { 115 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, 116 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, 117 /*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE, 118 /*read_retry_count*/0, 119 /*correction_span*/0, 120 /*head_offset_count*/0, 121 /*data_strobe_offset_cnt*/0, 122 /*byte8*/SMS_RWER_LBPERE, 123 /*write_retry_count*/0, 124 /*reserved2*/0, 125 /*recovery_time_limit*/{0, 0}, 126}; 127 128const static struct scsi_da_rw_recovery_page rw_er_page_changeable = { 129 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, 130 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, 131 /*byte3*/0, 132 /*read_retry_count*/0, 133 /*correction_span*/0, 134 /*head_offset_count*/0, 135 /*data_strobe_offset_cnt*/0, 136 /*byte8*/0, 137 /*write_retry_count*/0, 138 /*reserved2*/0, 139 /*recovery_time_limit*/{0, 0}, 140}; 141 142const static struct scsi_format_page format_page_default = { 143 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 144 /*page_length*/sizeof(struct scsi_format_page) - 2, 145 /*tracks_per_zone*/ {0, 0}, 146 /*alt_sectors_per_zone*/ {0, 0}, 147 /*alt_tracks_per_zone*/ {0, 0}, 148 /*alt_tracks_per_lun*/ {0, 0}, 149 
/*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff, 150 CTL_DEFAULT_SECTORS_PER_TRACK & 0xff}, 151 /*bytes_per_sector*/ {0, 0}, 152 /*interleave*/ {0, 0}, 153 /*track_skew*/ {0, 0}, 154 /*cylinder_skew*/ {0, 0}, 155 /*flags*/ SFP_HSEC, 156 /*reserved*/ {0, 0, 0} 157}; 158 159const static struct scsi_format_page format_page_changeable = { 160 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 161 /*page_length*/sizeof(struct scsi_format_page) - 2, 162 /*tracks_per_zone*/ {0, 0}, 163 /*alt_sectors_per_zone*/ {0, 0}, 164 /*alt_tracks_per_zone*/ {0, 0}, 165 /*alt_tracks_per_lun*/ {0, 0}, 166 /*sectors_per_track*/ {0, 0}, 167 /*bytes_per_sector*/ {0, 0}, 168 /*interleave*/ {0, 0}, 169 /*track_skew*/ {0, 0}, 170 /*cylinder_skew*/ {0, 0}, 171 /*flags*/ 0, 172 /*reserved*/ {0, 0, 0} 173}; 174 175const static struct scsi_rigid_disk_page rigid_disk_page_default = { 176 /*page_code*/SMS_RIGID_DISK_PAGE, 177 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 178 /*cylinders*/ {0, 0, 0}, 179 /*heads*/ CTL_DEFAULT_HEADS, 180 /*start_write_precomp*/ {0, 0, 0}, 181 /*start_reduced_current*/ {0, 0, 0}, 182 /*step_rate*/ {0, 0}, 183 /*landing_zone_cylinder*/ {0, 0, 0}, 184 /*rpl*/ SRDP_RPL_DISABLED, 185 /*rotational_offset*/ 0, 186 /*reserved1*/ 0, 187 /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff, 188 CTL_DEFAULT_ROTATION_RATE & 0xff}, 189 /*reserved2*/ {0, 0} 190}; 191 192const static struct scsi_rigid_disk_page rigid_disk_page_changeable = { 193 /*page_code*/SMS_RIGID_DISK_PAGE, 194 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 195 /*cylinders*/ {0, 0, 0}, 196 /*heads*/ 0, 197 /*start_write_precomp*/ {0, 0, 0}, 198 /*start_reduced_current*/ {0, 0, 0}, 199 /*step_rate*/ {0, 0}, 200 /*landing_zone_cylinder*/ {0, 0, 0}, 201 /*rpl*/ 0, 202 /*rotational_offset*/ 0, 203 /*reserved1*/ 0, 204 /*rotation_rate*/ {0, 0}, 205 /*reserved2*/ {0, 0} 206}; 207 208const static struct scsi_caching_page caching_page_default = { 209 /*page_code*/SMS_CACHING_PAGE, 210 
/*page_length*/sizeof(struct scsi_caching_page) - 2, 211 /*flags1*/ SCP_DISC | SCP_WCE, 212 /*ret_priority*/ 0, 213 /*disable_pf_transfer_len*/ {0xff, 0xff}, 214 /*min_prefetch*/ {0, 0}, 215 /*max_prefetch*/ {0xff, 0xff}, 216 /*max_pf_ceiling*/ {0xff, 0xff}, 217 /*flags2*/ 0, 218 /*cache_segments*/ 0, 219 /*cache_seg_size*/ {0, 0}, 220 /*reserved*/ 0, 221 /*non_cache_seg_size*/ {0, 0, 0} 222}; 223 224const static struct scsi_caching_page caching_page_changeable = { 225 /*page_code*/SMS_CACHING_PAGE, 226 /*page_length*/sizeof(struct scsi_caching_page) - 2, 227 /*flags1*/ SCP_WCE | SCP_RCD, 228 /*ret_priority*/ 0, 229 /*disable_pf_transfer_len*/ {0, 0}, 230 /*min_prefetch*/ {0, 0}, 231 /*max_prefetch*/ {0, 0}, 232 /*max_pf_ceiling*/ {0, 0}, 233 /*flags2*/ 0, 234 /*cache_segments*/ 0, 235 /*cache_seg_size*/ {0, 0}, 236 /*reserved*/ 0, 237 /*non_cache_seg_size*/ {0, 0, 0} 238}; 239 240const static struct scsi_control_page control_page_default = { 241 /*page_code*/SMS_CONTROL_MODE_PAGE, 242 /*page_length*/sizeof(struct scsi_control_page) - 2, 243 /*rlec*/0, 244 /*queue_flags*/SCP_QUEUE_ALG_RESTRICTED, 245 /*eca_and_aen*/0, 246 /*flags4*/SCP_TAS, 247 /*aen_holdoff_period*/{0, 0}, 248 /*busy_timeout_period*/{0, 0}, 249 /*extended_selftest_completion_time*/{0, 0} 250}; 251 252const static struct scsi_control_page control_page_changeable = { 253 /*page_code*/SMS_CONTROL_MODE_PAGE, 254 /*page_length*/sizeof(struct scsi_control_page) - 2, 255 /*rlec*/SCP_DSENSE, 256 /*queue_flags*/SCP_QUEUE_ALG_MASK, 257 /*eca_and_aen*/SCP_SWP, 258 /*flags4*/0, 259 /*aen_holdoff_period*/{0, 0}, 260 /*busy_timeout_period*/{0, 0}, 261 /*extended_selftest_completion_time*/{0, 0} 262}; 263 264const static struct scsi_info_exceptions_page ie_page_default = { 265 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, 266 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, 267 /*info_flags*/SIEP_FLAGS_DEXCPT, 268 /*mrie*/0, 269 /*interval_timer*/{0, 0, 0, 0}, 270 /*report_count*/{0, 0, 0, 0} 271}; 272 
273const static struct scsi_info_exceptions_page ie_page_changeable = { 274 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, 275 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, 276 /*info_flags*/0, 277 /*mrie*/0, 278 /*interval_timer*/{0, 0, 0, 0}, 279 /*report_count*/{0, 0, 0, 0} 280}; 281 282#define CTL_LBPM_LEN (sizeof(struct ctl_logical_block_provisioning_page) - 4) 283 284const static struct ctl_logical_block_provisioning_page lbp_page_default = {{ 285 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 286 /*subpage_code*/0x02, 287 /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, 288 /*flags*/0, 289 /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 290 /*descr*/{}}, 291 {{/*flags*/0, 292 /*resource*/0x01, 293 /*reserved*/{0, 0}, 294 /*count*/{0, 0, 0, 0}}, 295 {/*flags*/0, 296 /*resource*/0x02, 297 /*reserved*/{0, 0}, 298 /*count*/{0, 0, 0, 0}}, 299 {/*flags*/0, 300 /*resource*/0xf1, 301 /*reserved*/{0, 0}, 302 /*count*/{0, 0, 0, 0}}, 303 {/*flags*/0, 304 /*resource*/0xf2, 305 /*reserved*/{0, 0}, 306 /*count*/{0, 0, 0, 0}} 307 } 308}; 309 310const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{ 311 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 312 /*subpage_code*/0x02, 313 /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, 314 /*flags*/0, 315 /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 316 /*descr*/{}}, 317 {{/*flags*/0, 318 /*resource*/0, 319 /*reserved*/{0, 0}, 320 /*count*/{0, 0, 0, 0}}, 321 {/*flags*/0, 322 /*resource*/0, 323 /*reserved*/{0, 0}, 324 /*count*/{0, 0, 0, 0}}, 325 {/*flags*/0, 326 /*resource*/0, 327 /*reserved*/{0, 0}, 328 /*count*/{0, 0, 0, 0}}, 329 {/*flags*/0, 330 /*resource*/0, 331 /*reserved*/{0, 0}, 332 /*count*/{0, 0, 0, 0}} 333 } 334}; 335 336SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer"); 337static int worker_threads = -1; 338SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN, 339 &worker_threads, 1, "Number of worker threads"); 340static int ctl_debug = 
CTL_DEBUG_NONE; 341SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN, 342 &ctl_debug, 0, "Enabled debug flags"); 343 344/* 345 * Supported pages (0x00), Serial number (0x80), Device ID (0x83), 346 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87), 347 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0), 348 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2) 349 */ 350#define SCSI_EVPD_NUM_SUPPORTED_PAGES 10 351 352static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event, 353 int param); 354static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest); 355static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest); 356static int ctl_init(void); 357void ctl_shutdown(void); 358static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td); 359static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td); 360static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio); 361static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 362 struct ctl_ooa *ooa_hdr, 363 struct ctl_ooa_entry *kern_entries); 364static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 365 struct thread *td); 366static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun, 367 struct ctl_be_lun *be_lun); 368static int ctl_free_lun(struct ctl_lun *lun); 369static void ctl_create_lun(struct ctl_be_lun *be_lun); 370static struct ctl_port * ctl_io_port(struct ctl_io_hdr *io_hdr); 371 372static int ctl_do_mode_select(union ctl_io *io); 373static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, 374 uint64_t res_key, uint64_t sa_res_key, 375 uint8_t type, uint32_t residx, 376 struct ctl_scsiio *ctsio, 377 struct scsi_per_res_out *cdb, 378 struct scsi_per_res_out_parms* param); 379static void ctl_pro_preempt_other(struct ctl_lun *lun, 380 union ctl_ha_msg *msg); 381static void 
ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg); 382static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len); 383static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len); 384static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len); 385static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len); 386static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len); 387static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, 388 int alloc_len); 389static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, 390 int alloc_len); 391static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len); 392static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len); 393static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio); 394static int ctl_inquiry_std(struct ctl_scsiio *ctsio); 395static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len); 396static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2, 397 bool seq); 398static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2); 399static ctl_action ctl_check_for_blockage(struct ctl_lun *lun, 400 union ctl_io *pending_io, union ctl_io *ooa_io); 401static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 402 union ctl_io *starting_io); 403static int ctl_check_blocked(struct ctl_lun *lun); 404static int ctl_scsiio_lun_check(struct ctl_lun *lun, 405 const struct ctl_cmd_entry *entry, 406 struct ctl_scsiio *ctsio); 407static void ctl_failover_lun(struct ctl_lun *lun); 408static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc, 409 struct ctl_scsiio *ctsio); 410static int ctl_scsiio(struct ctl_scsiio *ctsio); 411 412static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io); 413static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io, 414 ctl_ua_type ua_type); 415static int ctl_do_lun_reset(struct ctl_lun *lun, 
union ctl_io *io, 416 ctl_ua_type ua_type); 417static int ctl_lun_reset(struct ctl_softc *ctl_softc, union ctl_io *io); 418static int ctl_abort_task(union ctl_io *io); 419static int ctl_abort_task_set(union ctl_io *io); 420static int ctl_query_task(union ctl_io *io, int task_set); 421static int ctl_i_t_nexus_reset(union ctl_io *io); 422static int ctl_query_async_event(union ctl_io *io); 423static void ctl_run_task(union ctl_io *io); 424#ifdef CTL_IO_DELAY 425static void ctl_datamove_timer_wakeup(void *arg); 426static void ctl_done_timer_wakeup(void *arg); 427#endif /* CTL_IO_DELAY */ 428 429static void ctl_send_datamove_done(union ctl_io *io, int have_lock); 430static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq); 431static int ctl_datamove_remote_dm_write_cb(union ctl_io *io); 432static void ctl_datamove_remote_write(union ctl_io *io); 433static int ctl_datamove_remote_dm_read_cb(union ctl_io *io); 434static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq); 435static int ctl_datamove_remote_sgl_setup(union ctl_io *io); 436static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 437 ctl_ha_dt_cb callback); 438static void ctl_datamove_remote_read(union ctl_io *io); 439static void ctl_datamove_remote(union ctl_io *io); 440static int ctl_process_done(union ctl_io *io); 441static void ctl_lun_thread(void *arg); 442static void ctl_thresh_thread(void *arg); 443static void ctl_work_thread(void *arg); 444static void ctl_enqueue_incoming(union ctl_io *io); 445static void ctl_enqueue_rtr(union ctl_io *io); 446static void ctl_enqueue_done(union ctl_io *io); 447static void ctl_enqueue_isc(union ctl_io *io); 448static const struct ctl_cmd_entry * 449 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa); 450static const struct ctl_cmd_entry * 451 ctl_validate_command(struct ctl_scsiio *ctsio); 452static int ctl_cmd_applicable(uint8_t lun_type, 453 const struct ctl_cmd_entry *entry); 454 455static uint64_t ctl_get_prkey(struct ctl_lun *lun, 
uint32_t residx);
static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key);

/*
 * Load the serialization table.  This isn't very pretty, but is probably
 * the easiest way to do it.
 */
#include "ctl_ser_table.c"

/*
 * We only need to define open, close and ioctl routines for this driver.
 */
static struct cdevsw ctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	ctl_open,
	.d_close =	ctl_close,
	.d_ioctl =	ctl_ioctl,
	.d_name =	"ctl",
};

MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");

static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t ctl_moduledata = {
	"ctl",
	ctl_module_event_handler,
	NULL
};

DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);

/* Placeholder frontend used for ports learned from the HA peer. */
static struct ctl_frontend ha_frontend =
{
	.name = "ha",
};

/*
 * Finish an I/O on the originating SC in XFER mode: copy the completion
 * status and sense data received from the peer back into the original
 * ctl_scsiio and requeue it on the ISC queue for done processing.
 * The ctl_softc argument is unused here.
 */
static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
			    union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.original_sc == NULL) {
		printf("%s: original_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.original_sc->scsiio;
	ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctsio->io_hdr.status = msg_info->hdr.status;
	ctsio->scsi_status = msg_info->scsi.scsi_status;
	ctsio->sense_len = msg_info->scsi.sense_len;
	ctsio->sense_residual = msg_info->scsi.sense_residual;
	ctsio->residual = msg_info->scsi.residual;
	memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
	       msg_info->scsi.sense_len);
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

/*
 * Finish a serialization-only shadow I/O on this SC: mark the
 * serializing ctl_scsiio as finished and requeue it on the ISC queue.
 * No status/sense is copied — the peer only tracked ordering.
 */
static void
ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
				union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.serializing_sc == NULL) {
		printf("%s: serializing_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.serializing_sc->scsiio;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

/*
 * Push this LUN's state (flags, device ID and persistent reservation
 * keys) to the HA peer in a CTL_MSG_LUN_SYNC message.
 */
void
ctl_isc_announce_lun(struct ctl_lun *lun)
{
	struct ctl_softc *softc = lun->ctl_softc;
	union ctl_ha_msg *msg;
	struct ctl_ha_msg_lun_pr_key pr_key;
	int i, k;

	if (softc->ha_link != CTL_HA_LINK_ONLINE)
		return;
	mtx_lock(&lun->lun_lock);
	i = sizeof(msg->lun);
	if (lun->lun_devid)
		i += lun->lun_devid->len;
	i += sizeof(pr_key) * lun->pr_key_count;
alloc:
	/*
	 * The LUN lock must be dropped for the M_WAITOK allocation, so the
	 * required size is recomputed afterwards; if the LUN state grew
	 * while unlocked, the buffer is reallocated.
	 */
	mtx_unlock(&lun->lun_lock);
	msg = malloc(i, M_CTL, M_WAITOK);
	mtx_lock(&lun->lun_lock);
	k = sizeof(msg->lun);
	if (lun->lun_devid)
		k += lun->lun_devid->len;
	k += sizeof(pr_key) * lun->pr_key_count;
	if (i < k) {
		free(msg, M_CTL);
		i = k;
		goto alloc;
	}
	bzero(&msg->lun, sizeof(msg->lun));
	msg->hdr.msg_type = CTL_MSG_LUN_SYNC;
	msg->hdr.nexus.targ_lun = lun->lun;
	msg->hdr.nexus.targ_mapped_lun = lun->lun;
	msg->lun.flags = lun->flags;
	msg->lun.pr_generation = lun->PRGeneration;
msg->lun.pr_res_idx = lun->pr_res_idx; 574 msg->lun.pr_res_type = lun->res_type; 575 msg->lun.pr_key_count = lun->pr_key_count; 576 i = 0; 577 if (lun->lun_devid) { 578 msg->lun.lun_devid_len = lun->lun_devid->len; 579 memcpy(&msg->lun.data[i], lun->lun_devid->data, 580 msg->lun.lun_devid_len); 581 i += msg->lun.lun_devid_len; 582 } 583 for (k = 0; k < CTL_MAX_INITIATORS; k++) { 584 if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0) 585 continue; 586 pr_key.pr_iid = k; 587 memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key)); 588 i += sizeof(pr_key); 589 } 590 mtx_unlock(&lun->lun_lock); 591 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, 592 M_WAITOK); 593 free(msg, M_CTL); 594} 595 596void 597ctl_isc_announce_port(struct ctl_port *port) 598{ 599 struct ctl_softc *softc = control_softc; 600 union ctl_ha_msg *msg; 601 int i; 602 603 if (port->targ_port < softc->port_min || 604 port->targ_port >= softc->port_max || 605 softc->ha_link != CTL_HA_LINK_ONLINE) 606 return; 607 i = sizeof(msg->port) + strlen(port->port_name) + 1; 608 if (port->lun_map) 609 i += sizeof(uint32_t) * CTL_MAX_LUNS; 610 if (port->port_devid) 611 i += port->port_devid->len; 612 if (port->target_devid) 613 i += port->target_devid->len; 614 if (port->init_devid) 615 i += port->init_devid->len; 616 msg = malloc(i, M_CTL, M_WAITOK); 617 bzero(&msg->port, sizeof(msg->port)); 618 msg->hdr.msg_type = CTL_MSG_PORT_SYNC; 619 msg->hdr.nexus.targ_port = port->targ_port; 620 msg->port.port_type = port->port_type; 621 msg->port.physical_port = port->physical_port; 622 msg->port.virtual_port = port->virtual_port; 623 msg->port.status = port->status; 624 i = 0; 625 msg->port.name_len = sprintf(&msg->port.data[i], 626 "%d:%s", softc->ha_id, port->port_name) + 1; 627 i += msg->port.name_len; 628 if (port->lun_map) { 629 msg->port.lun_map_len = sizeof(uint32_t) * CTL_MAX_LUNS; 630 memcpy(&msg->port.data[i], port->lun_map, 631 msg->port.lun_map_len); 632 i += msg->port.lun_map_len; 633 } 634 if 
(port->port_devid) { 635 msg->port.port_devid_len = port->port_devid->len; 636 memcpy(&msg->port.data[i], port->port_devid->data, 637 msg->port.port_devid_len); 638 i += msg->port.port_devid_len; 639 } 640 if (port->target_devid) { 641 msg->port.target_devid_len = port->target_devid->len; 642 memcpy(&msg->port.data[i], port->target_devid->data, 643 msg->port.target_devid_len); 644 i += msg->port.target_devid_len; 645 } 646 if (port->init_devid) { 647 msg->port.init_devid_len = port->init_devid->len; 648 memcpy(&msg->port.data[i], port->init_devid->data, 649 msg->port.init_devid_len); 650 i += msg->port.init_devid_len; 651 } 652 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, 653 M_WAITOK); 654 free(msg, M_CTL); 655} 656 657void 658ctl_isc_announce_iid(struct ctl_port *port, int iid) 659{ 660 struct ctl_softc *softc = control_softc; 661 union ctl_ha_msg *msg; 662 int i, l; 663 664 if (port->targ_port < softc->port_min || 665 port->targ_port >= softc->port_max || 666 softc->ha_link != CTL_HA_LINK_ONLINE) 667 return; 668 mtx_lock(&softc->ctl_lock); 669 i = sizeof(msg->iid); 670 l = 0; 671 if (port->wwpn_iid[iid].name) 672 l = strlen(port->wwpn_iid[iid].name) + 1; 673 i += l; 674 msg = malloc(i, M_CTL, M_NOWAIT); 675 if (msg == NULL) { 676 mtx_unlock(&softc->ctl_lock); 677 return; 678 } 679 bzero(&msg->iid, sizeof(msg->iid)); 680 msg->hdr.msg_type = CTL_MSG_IID_SYNC; 681 msg->hdr.nexus.targ_port = port->targ_port; 682 msg->hdr.nexus.initid = iid; 683 msg->iid.in_use = port->wwpn_iid[iid].in_use; 684 msg->iid.name_len = l; 685 msg->iid.wwpn = port->wwpn_iid[iid].wwpn; 686 if (port->wwpn_iid[iid].name) 687 strlcpy(msg->iid.data, port->wwpn_iid[iid].name, l); 688 mtx_unlock(&softc->ctl_lock); 689 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->iid, i, M_NOWAIT); 690 free(msg, M_CTL); 691} 692 693static void 694ctl_isc_ha_link_up(struct ctl_softc *softc) 695{ 696 struct ctl_port *port; 697 struct ctl_lun *lun; 698 int i; 699 700 STAILQ_FOREACH(port, 
&softc->port_list, links) { 701 ctl_isc_announce_port(port); 702 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 703 if (port->wwpn_iid[i].in_use) 704 ctl_isc_announce_iid(port, i); 705 } 706 } 707 STAILQ_FOREACH(lun, &softc->lun_list, links) 708 ctl_isc_announce_lun(lun); 709} 710 711static void 712ctl_isc_ha_link_down(struct ctl_softc *softc) 713{ 714 struct ctl_port *port; 715 struct ctl_lun *lun; 716 union ctl_io *io; 717 int i; 718 719 mtx_lock(&softc->ctl_lock); 720 STAILQ_FOREACH(lun, &softc->lun_list, links) { 721 mtx_lock(&lun->lun_lock); 722 if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) { 723 lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; 724 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 725 } 726 mtx_unlock(&lun->lun_lock); 727 728 mtx_unlock(&softc->ctl_lock); 729 io = ctl_alloc_io(softc->othersc_pool); 730 mtx_lock(&softc->ctl_lock); 731 ctl_zero_io(io); 732 io->io_hdr.msg_type = CTL_MSG_FAILOVER; 733 io->io_hdr.nexus.targ_mapped_lun = lun->lun; 734 ctl_enqueue_isc(io); 735 } 736 737 STAILQ_FOREACH(port, &softc->port_list, links) { 738 if (port->targ_port >= softc->port_min && 739 port->targ_port < softc->port_max) 740 continue; 741 port->status &= ~CTL_PORT_STATUS_ONLINE; 742 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 743 port->wwpn_iid[i].in_use = 0; 744 free(port->wwpn_iid[i].name, M_CTL); 745 port->wwpn_iid[i].name = NULL; 746 } 747 } 748 mtx_unlock(&softc->ctl_lock); 749} 750 751static void 752ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 753{ 754 struct ctl_lun *lun; 755 uint32_t iid = ctl_get_initindex(&msg->hdr.nexus); 756 757 mtx_lock(&softc->ctl_lock); 758 if (msg->hdr.nexus.targ_lun < CTL_MAX_LUNS && 759 (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) != NULL) { 760 mtx_lock(&lun->lun_lock); 761 mtx_unlock(&softc->ctl_lock); 762 if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES && 763 msg->ua.ua_set) 764 memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8); 765 if (msg->ua.ua_all) { 766 if (msg->ua.ua_set) 767 ctl_est_ua_all(lun, 
iid, msg->ua.ua_type); 768 else 769 ctl_clr_ua_all(lun, iid, msg->ua.ua_type); 770 } else { 771 if (msg->ua.ua_set) 772 ctl_est_ua(lun, iid, msg->ua.ua_type); 773 else 774 ctl_clr_ua(lun, iid, msg->ua.ua_type); 775 } 776 mtx_unlock(&lun->lun_lock); 777 } else 778 mtx_unlock(&softc->ctl_lock); 779} 780 781static void 782ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 783{ 784 struct ctl_lun *lun; 785 struct ctl_ha_msg_lun_pr_key pr_key; 786 int i, k; 787 ctl_lun_flags oflags; 788 uint32_t targ_lun; 789 790 targ_lun = msg->hdr.nexus.targ_mapped_lun; 791 mtx_lock(&softc->ctl_lock); 792 if ((targ_lun >= CTL_MAX_LUNS) || 793 ((lun = softc->ctl_luns[targ_lun]) == NULL)) { 794 mtx_unlock(&softc->ctl_lock); 795 return; 796 } 797 mtx_lock(&lun->lun_lock); 798 mtx_unlock(&softc->ctl_lock); 799 if (lun->flags & CTL_LUN_DISABLED) { 800 mtx_unlock(&lun->lun_lock); 801 return; 802 } 803 i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0; 804 if (msg->lun.lun_devid_len != i || (i > 0 && 805 memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) { 806 mtx_unlock(&lun->lun_lock); 807 printf("%s: Received conflicting HA LUN %d\n", 808 __func__, msg->hdr.nexus.targ_lun); 809 return; 810 } else { 811 /* Record whether peer is primary. 
*/
		oflags = lun->flags;
		if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) &&
		    (msg->lun.flags & CTL_LUN_DISABLED) == 0)
			lun->flags |= CTL_LUN_PEER_SC_PRIMARY;
		else
			lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
		if (oflags != lun->flags)
			ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);

		/* If peer is primary and we are not -- use data */
		if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
		    (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) {
			lun->PRGeneration = msg->lun.pr_generation;
			lun->pr_res_idx = msg->lun.pr_res_idx;
			lun->res_type = msg->lun.pr_res_type;
			lun->pr_key_count = msg->lun.pr_key_count;
			/* Replace all local PR keys with the peer's set;
			 * i currently points just past the devid data. */
			for (k = 0; k < CTL_MAX_INITIATORS; k++)
				ctl_clr_prkey(lun, k);
			for (k = 0; k < msg->lun.pr_key_count; k++) {
				memcpy(&pr_key, &msg->lun.data[i],
				    sizeof(pr_key));
				ctl_alloc_prkey(lun, pr_key.pr_iid);
				ctl_set_prkey(lun, pr_key.pr_iid,
				    pr_key.pr_key);
				i += sizeof(pr_key);
			}
		}

		mtx_unlock(&lun->lun_lock);
		CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n",
		    __func__, msg->hdr.nexus.targ_lun,
		    (msg->lun.flags & CTL_LUN_PRIMARY_SC) ?
		    "primary" : "secondary"));

		/* If we are primary but peer doesn't know -- notify */
		if ((lun->flags & CTL_LUN_PRIMARY_SC) &&
		    (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0)
			ctl_isc_announce_lun(lun);
	}
}

/*
 * Handle a CTL_MSG_PORT_SYNC from the peer: create or update a local
 * shadow port (owned by the "ha" pseudo-frontend) carrying the peer
 * port's identity, name, LUN map and device IDs.
 */
static void
ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	int i, new;

	port = softc->ctl_ports[msg->hdr.nexus.targ_port];
	if (port == NULL) {
		CTL_DEBUG_PRINT(("%s: New port %d\n", __func__,
		    msg->hdr.nexus.targ_port));
		new = 1;
		port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO);
		port->frontend = &ha_frontend;
		port->targ_port = msg->hdr.nexus.targ_port;
	} else if (port->frontend == &ha_frontend) {
		CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__,
		    msg->hdr.nexus.targ_port));
		new = 0;
	} else {
		/* A locally owned port lives at this index — refuse. */
		printf("%s: Received conflicting HA port %d\n",
		    __func__, msg->hdr.nexus.targ_port);
		return;
	}
	port->port_type = msg->port.port_type;
	port->physical_port = msg->port.physical_port;
	port->virtual_port = msg->port.virtual_port;
	port->status = msg->port.status;
	i = 0;
	free(port->port_name, M_CTL);
	port->port_name = strndup(&msg->port.data[i], msg->port.name_len,
	    M_CTL);
	i += msg->port.name_len;
	if (msg->port.lun_map_len != 0) {
		if (port->lun_map == NULL)
			port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS,
			    M_CTL, M_WAITOK);
		memcpy(port->lun_map, &msg->port.data[i],
		    sizeof(uint32_t) * CTL_MAX_LUNS);
		i += msg->port.lun_map_len;
	} else {
		free(port->lun_map, M_CTL);
		port->lun_map = NULL;
	}
	if (msg->port.port_devid_len != 0) {
		/* Reallocate only when the stored devid size changed. */
		if (port->port_devid == NULL ||
		    port->port_devid->len != msg->port.port_devid_len) {
			free(port->port_devid, M_CTL);
			port->port_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.port_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->port_devid->data,
&msg->port.data[i],
		    msg->port.port_devid_len);
		port->port_devid->len = msg->port.port_devid_len;
		i += msg->port.port_devid_len;
	} else {
		free(port->port_devid, M_CTL);
		port->port_devid = NULL;
	}
	if (msg->port.target_devid_len != 0) {
		/* Reallocate only when the stored devid size changed. */
		if (port->target_devid == NULL ||
		    port->target_devid->len != msg->port.target_devid_len) {
			free(port->target_devid, M_CTL);
			port->target_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.target_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->target_devid->data, &msg->port.data[i],
		    msg->port.target_devid_len);
		port->target_devid->len = msg->port.target_devid_len;
		i += msg->port.target_devid_len;
	} else {
		free(port->target_devid, M_CTL);
		port->target_devid = NULL;
	}
	if (msg->port.init_devid_len != 0) {
		if (port->init_devid == NULL ||
		    port->init_devid->len != msg->port.init_devid_len) {
			free(port->init_devid, M_CTL);
			port->init_devid = malloc(sizeof(struct ctl_devid) +
			    msg->port.init_devid_len, M_CTL, M_WAITOK);
		}
		memcpy(port->init_devid->data, &msg->port.data[i],
		    msg->port.init_devid_len);
		port->init_devid->len = msg->port.init_devid_len;
		i += msg->port.init_devid_len;
	} else {
		free(port->init_devid, M_CTL);
		port->init_devid = NULL;
	}
	if (new) {
		if (ctl_port_register(port) != 0) {
			printf("%s: ctl_port_register() failed with error\n",
			    __func__);
		}
	}
	/* Peer port (re)appeared: raise INQUIRY-data-changed UA on every
	 * LUN mapped through it. */
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
			continue;
		mtx_lock(&lun->lun_lock);
		ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
		mtx_unlock(&lun->lun_lock);
	}
	mtx_unlock(&softc->ctl_lock);
}

/*
 * Handle a CTL_MSG_IID_SYNC from the peer: update one initiator slot
 * (in_use, wwpn, name) of the corresponding shadow port.
 */
static void
ctl_isc_iid_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_port *port;
	int iid;

	port = softc->ctl_ports[msg->hdr.nexus.targ_port];
	if (port ==
NULL) { 967 printf("%s: Received IID for unknown port %d\n", 968 __func__, msg->hdr.nexus.targ_port); 969 return; 970 } 971 iid = msg->hdr.nexus.initid; 972 port->wwpn_iid[iid].in_use = msg->iid.in_use; 973 port->wwpn_iid[iid].wwpn = msg->iid.wwpn; 974 free(port->wwpn_iid[iid].name, M_CTL); 975 if (msg->iid.name_len) { 976 port->wwpn_iid[iid].name = strndup(&msg->iid.data[0], 977 msg->iid.name_len, M_CTL); 978 } else 979 port->wwpn_iid[iid].name = NULL; 980} 981 982/* 983 * ISC (Inter Shelf Communication) event handler. Events from the HA 984 * subsystem come in here. 985 */ 986static void 987ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param) 988{ 989 struct ctl_softc *softc; 990 union ctl_io *io; 991 struct ctl_prio *presio; 992 ctl_ha_status isc_status; 993 994 softc = control_softc; 995 CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event)); 996 if (event == CTL_HA_EVT_MSG_RECV) { 997 union ctl_ha_msg *msg, msgbuf; 998 999 if (param > sizeof(msgbuf)) 1000 msg = malloc(param, M_CTL, M_WAITOK); 1001 else 1002 msg = &msgbuf; 1003 isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param, 1004 M_WAITOK); 1005 if (isc_status != CTL_HA_STATUS_SUCCESS) { 1006 printf("%s: Error receiving message: %d\n", 1007 __func__, isc_status); 1008 if (msg != &msgbuf) 1009 free(msg, M_CTL); 1010 return; 1011 } 1012 1013 CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->msg_type)); 1014 switch (msg->hdr.msg_type) { 1015 case CTL_MSG_SERIALIZE: 1016 io = ctl_alloc_io(softc->othersc_pool); 1017 ctl_zero_io(io); 1018 // populate ctsio from msg 1019 io->io_hdr.io_type = CTL_IO_SCSI; 1020 io->io_hdr.msg_type = CTL_MSG_SERIALIZE; 1021 io->io_hdr.original_sc = msg->hdr.original_sc; 1022 io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC | 1023 CTL_FLAG_IO_ACTIVE; 1024 /* 1025 * If we're in serialization-only mode, we don't 1026 * want to go through full done processing. Thus 1027 * the COPY flag. 1028 * 1029 * XXX KDM add another flag that is more specific. 
1030 */ 1031 if (softc->ha_mode != CTL_HA_MODE_XFER) 1032 io->io_hdr.flags |= CTL_FLAG_INT_COPY; 1033 io->io_hdr.nexus = msg->hdr.nexus; 1034#if 0 1035 printf("port %u, iid %u, lun %u\n", 1036 io->io_hdr.nexus.targ_port, 1037 io->io_hdr.nexus.initid, 1038 io->io_hdr.nexus.targ_lun); 1039#endif 1040 io->scsiio.tag_num = msg->scsi.tag_num; 1041 io->scsiio.tag_type = msg->scsi.tag_type; 1042#ifdef CTL_TIME_IO 1043 io->io_hdr.start_time = time_uptime; 1044 getbintime(&io->io_hdr.start_bt); 1045#endif /* CTL_TIME_IO */ 1046 io->scsiio.cdb_len = msg->scsi.cdb_len; 1047 memcpy(io->scsiio.cdb, msg->scsi.cdb, 1048 CTL_MAX_CDBLEN); 1049 if (softc->ha_mode == CTL_HA_MODE_XFER) { 1050 const struct ctl_cmd_entry *entry; 1051 1052 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 1053 io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 1054 io->io_hdr.flags |= 1055 entry->flags & CTL_FLAG_DATA_MASK; 1056 } 1057 ctl_enqueue_isc(io); 1058 break; 1059 1060 /* Performed on the Originating SC, XFER mode only */ 1061 case CTL_MSG_DATAMOVE: { 1062 struct ctl_sg_entry *sgl; 1063 int i, j; 1064 1065 io = msg->hdr.original_sc; 1066 if (io == NULL) { 1067 printf("%s: original_sc == NULL!\n", __func__); 1068 /* XXX KDM do something here */ 1069 break; 1070 } 1071 io->io_hdr.msg_type = CTL_MSG_DATAMOVE; 1072 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1073 /* 1074 * Keep track of this, we need to send it back over 1075 * when the datamove is complete. 
1076 */ 1077 io->io_hdr.serializing_sc = msg->hdr.serializing_sc; 1078 if (msg->hdr.status == CTL_SUCCESS) 1079 io->io_hdr.status = msg->hdr.status; 1080 1081 if (msg->dt.sg_sequence == 0) { 1082 i = msg->dt.kern_sg_entries + 1083 io->scsiio.kern_data_len / 1084 CTL_HA_DATAMOVE_SEGMENT + 1; 1085 sgl = malloc(sizeof(*sgl) * i, M_CTL, 1086 M_WAITOK | M_ZERO); 1087 io->io_hdr.remote_sglist = sgl; 1088 io->io_hdr.local_sglist = 1089 &sgl[msg->dt.kern_sg_entries]; 1090 1091 io->scsiio.kern_data_ptr = (uint8_t *)sgl; 1092 1093 io->scsiio.kern_sg_entries = 1094 msg->dt.kern_sg_entries; 1095 io->scsiio.rem_sg_entries = 1096 msg->dt.kern_sg_entries; 1097 io->scsiio.kern_data_len = 1098 msg->dt.kern_data_len; 1099 io->scsiio.kern_total_len = 1100 msg->dt.kern_total_len; 1101 io->scsiio.kern_data_resid = 1102 msg->dt.kern_data_resid; 1103 io->scsiio.kern_rel_offset = 1104 msg->dt.kern_rel_offset; 1105 io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR; 1106 io->io_hdr.flags |= msg->dt.flags & 1107 CTL_FLAG_BUS_ADDR; 1108 } else 1109 sgl = (struct ctl_sg_entry *) 1110 io->scsiio.kern_data_ptr; 1111 1112 for (i = msg->dt.sent_sg_entries, j = 0; 1113 i < (msg->dt.sent_sg_entries + 1114 msg->dt.cur_sg_entries); i++, j++) { 1115 sgl[i].addr = msg->dt.sg_list[j].addr; 1116 sgl[i].len = msg->dt.sg_list[j].len; 1117 1118#if 0 1119 printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n", 1120 __func__, 1121 msg->dt.sg_list[j].addr, 1122 msg->dt.sg_list[j].len, 1123 sgl[i].addr, sgl[i].len, j, i); 1124#endif 1125 } 1126 1127 /* 1128 * If this is the last piece of the I/O, we've got 1129 * the full S/G list. Queue processing in the thread. 1130 * Otherwise wait for the next piece. 1131 */ 1132 if (msg->dt.sg_last != 0) 1133 ctl_enqueue_isc(io); 1134 break; 1135 } 1136 /* Performed on the Serializing (primary) SC, XFER mode only */ 1137 case CTL_MSG_DATAMOVE_DONE: { 1138 if (msg->hdr.serializing_sc == NULL) { 1139 printf("%s: serializing_sc == NULL!\n", 1140 __func__); 1141 /* XXX KDM now what? 
*/ 1142 break; 1143 } 1144 /* 1145 * We grab the sense information here in case 1146 * there was a failure, so we can return status 1147 * back to the initiator. 1148 */ 1149 io = msg->hdr.serializing_sc; 1150 io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 1151 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1152 io->io_hdr.port_status = msg->scsi.fetd_status; 1153 io->scsiio.residual = msg->scsi.residual; 1154 if (msg->hdr.status != CTL_STATUS_NONE) { 1155 io->io_hdr.status = msg->hdr.status; 1156 io->scsiio.scsi_status = msg->scsi.scsi_status; 1157 io->scsiio.sense_len = msg->scsi.sense_len; 1158 io->scsiio.sense_residual =msg->scsi.sense_residual; 1159 memcpy(&io->scsiio.sense_data, 1160 &msg->scsi.sense_data, 1161 msg->scsi.sense_len); 1162 if (msg->hdr.status == CTL_SUCCESS) 1163 io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; 1164 } 1165 ctl_enqueue_isc(io); 1166 break; 1167 } 1168 1169 /* Preformed on Originating SC, SER_ONLY mode */ 1170 case CTL_MSG_R2R: 1171 io = msg->hdr.original_sc; 1172 if (io == NULL) { 1173 printf("%s: original_sc == NULL!\n", 1174 __func__); 1175 break; 1176 } 1177 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1178 io->io_hdr.msg_type = CTL_MSG_R2R; 1179 io->io_hdr.serializing_sc = msg->hdr.serializing_sc; 1180 ctl_enqueue_isc(io); 1181 break; 1182 1183 /* 1184 * Performed on Serializing(i.e. primary SC) SC in SER_ONLY 1185 * mode. 1186 * Performed on the Originating (i.e. 
secondary) SC in XFER 1187 * mode 1188 */ 1189 case CTL_MSG_FINISH_IO: 1190 if (softc->ha_mode == CTL_HA_MODE_XFER) 1191 ctl_isc_handler_finish_xfer(softc, msg); 1192 else 1193 ctl_isc_handler_finish_ser_only(softc, msg); 1194 break; 1195 1196 /* Preformed on Originating SC */ 1197 case CTL_MSG_BAD_JUJU: 1198 io = msg->hdr.original_sc; 1199 if (io == NULL) { 1200 printf("%s: Bad JUJU!, original_sc is NULL!\n", 1201 __func__); 1202 break; 1203 } 1204 ctl_copy_sense_data(msg, io); 1205 /* 1206 * IO should have already been cleaned up on other 1207 * SC so clear this flag so we won't send a message 1208 * back to finish the IO there. 1209 */ 1210 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 1211 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 1212 1213 /* io = msg->hdr.serializing_sc; */ 1214 io->io_hdr.msg_type = CTL_MSG_BAD_JUJU; 1215 ctl_enqueue_isc(io); 1216 break; 1217 1218 /* Handle resets sent from the other side */ 1219 case CTL_MSG_MANAGE_TASKS: { 1220 struct ctl_taskio *taskio; 1221 taskio = (struct ctl_taskio *)ctl_alloc_io( 1222 softc->othersc_pool); 1223 ctl_zero_io((union ctl_io *)taskio); 1224 taskio->io_hdr.io_type = CTL_IO_TASK; 1225 taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 1226 taskio->io_hdr.nexus = msg->hdr.nexus; 1227 taskio->task_action = msg->task.task_action; 1228 taskio->tag_num = msg->task.tag_num; 1229 taskio->tag_type = msg->task.tag_type; 1230#ifdef CTL_TIME_IO 1231 taskio->io_hdr.start_time = time_uptime; 1232 getbintime(&taskio->io_hdr.start_bt); 1233#endif /* CTL_TIME_IO */ 1234 ctl_run_task((union ctl_io *)taskio); 1235 break; 1236 } 1237 /* Persistent Reserve action which needs attention */ 1238 case CTL_MSG_PERS_ACTION: 1239 presio = (struct ctl_prio *)ctl_alloc_io( 1240 softc->othersc_pool); 1241 ctl_zero_io((union ctl_io *)presio); 1242 presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION; 1243 presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 1244 presio->io_hdr.nexus = msg->hdr.nexus; 1245 presio->pr_msg = msg->pr; 1246 
ctl_enqueue_isc((union ctl_io *)presio); 1247 break; 1248 case CTL_MSG_UA: 1249 ctl_isc_ua(softc, msg, param); 1250 break; 1251 case CTL_MSG_PORT_SYNC: 1252 ctl_isc_port_sync(softc, msg, param); 1253 break; 1254 case CTL_MSG_LUN_SYNC: 1255 ctl_isc_lun_sync(softc, msg, param); 1256 break; 1257 case CTL_MSG_IID_SYNC: 1258 ctl_isc_iid_sync(softc, msg, param); 1259 break; 1260 default: 1261 printf("Received HA message of unknown type %d\n", 1262 msg->hdr.msg_type); 1263 break; 1264 } 1265 if (msg != &msgbuf) 1266 free(msg, M_CTL); 1267 } else if (event == CTL_HA_EVT_LINK_CHANGE) { 1268 printf("CTL: HA link status changed from %d to %d\n", 1269 softc->ha_link, param); 1270 if (param == softc->ha_link) 1271 return; 1272 if (softc->ha_link == CTL_HA_LINK_ONLINE) { 1273 softc->ha_link = param; 1274 ctl_isc_ha_link_down(softc); 1275 } else { 1276 softc->ha_link = param; 1277 if (softc->ha_link == CTL_HA_LINK_ONLINE) 1278 ctl_isc_ha_link_up(softc); 1279 } 1280 return; 1281 } else { 1282 printf("ctl_isc_event_handler: Unknown event %d\n", event); 1283 return; 1284 } 1285} 1286 1287static void 1288ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest) 1289{ 1290 1291 memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data, 1292 src->scsi.sense_len); 1293 dest->scsiio.scsi_status = src->scsi.scsi_status; 1294 dest->scsiio.sense_len = src->scsi.sense_len; 1295 dest->io_hdr.status = src->hdr.status; 1296} 1297 1298static void 1299ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest) 1300{ 1301 1302 memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data, 1303 src->scsiio.sense_len); 1304 dest->scsi.scsi_status = src->scsiio.scsi_status; 1305 dest->scsi.sense_len = src->scsiio.sense_len; 1306 dest->hdr.status = src->io_hdr.status; 1307} 1308 1309void 1310ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 1311{ 1312 struct ctl_softc *softc = lun->ctl_softc; 1313 ctl_ua_type *pu; 1314 1315 if (initidx < softc->init_min || initidx >= 
softc->init_max) 1316 return; 1317 mtx_assert(&lun->lun_lock, MA_OWNED); 1318 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1319 if (pu == NULL) 1320 return; 1321 pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua; 1322} 1323 1324void 1325ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua) 1326{ 1327 int i; 1328 1329 mtx_assert(&lun->lun_lock, MA_OWNED); 1330 if (lun->pending_ua[port] == NULL) 1331 return; 1332 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1333 if (port * CTL_MAX_INIT_PER_PORT + i == except) 1334 continue; 1335 lun->pending_ua[port][i] |= ua; 1336 } 1337} 1338 1339void 1340ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1341{ 1342 struct ctl_softc *softc = lun->ctl_softc; 1343 int i; 1344 1345 mtx_assert(&lun->lun_lock, MA_OWNED); 1346 for (i = softc->port_min; i < softc->port_max; i++) 1347 ctl_est_ua_port(lun, i, except, ua); 1348} 1349 1350void 1351ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 1352{ 1353 struct ctl_softc *softc = lun->ctl_softc; 1354 ctl_ua_type *pu; 1355 1356 if (initidx < softc->init_min || initidx >= softc->init_max) 1357 return; 1358 mtx_assert(&lun->lun_lock, MA_OWNED); 1359 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1360 if (pu == NULL) 1361 return; 1362 pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua; 1363} 1364 1365void 1366ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1367{ 1368 struct ctl_softc *softc = lun->ctl_softc; 1369 int i, j; 1370 1371 mtx_assert(&lun->lun_lock, MA_OWNED); 1372 for (i = softc->port_min; i < softc->port_max; i++) { 1373 if (lun->pending_ua[i] == NULL) 1374 continue; 1375 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 1376 if (i * CTL_MAX_INIT_PER_PORT + j == except) 1377 continue; 1378 lun->pending_ua[i][j] &= ~ua; 1379 } 1380 } 1381} 1382 1383void 1384ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx, 1385 ctl_ua_type ua_type) 1386{ 1387 struct ctl_lun *lun; 1388 1389 
mtx_assert(&ctl_softc->ctl_lock, MA_OWNED); 1390 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) { 1391 mtx_lock(&lun->lun_lock); 1392 ctl_clr_ua(lun, initidx, ua_type); 1393 mtx_unlock(&lun->lun_lock); 1394 } 1395} 1396 1397static int 1398ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS) 1399{ 1400 struct ctl_softc *softc = (struct ctl_softc *)arg1; 1401 struct ctl_lun *lun; 1402 struct ctl_lun_req ireq; 1403 int error, value; 1404 1405 value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 0 : 1; 1406 error = sysctl_handle_int(oidp, &value, 0, req); 1407 if ((error != 0) || (req->newptr == NULL)) 1408 return (error); 1409 1410 mtx_lock(&softc->ctl_lock); 1411 if (value == 0) 1412 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1413 else 1414 softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; 1415 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1416 mtx_unlock(&softc->ctl_lock); 1417 bzero(&ireq, sizeof(ireq)); 1418 ireq.reqtype = CTL_LUNREQ_MODIFY; 1419 ireq.reqdata.modify.lun_id = lun->lun; 1420 lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0, 1421 curthread); 1422 if (ireq.status != CTL_LUN_OK) { 1423 printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n", 1424 __func__, ireq.status, ireq.error_str); 1425 } 1426 mtx_lock(&softc->ctl_lock); 1427 } 1428 mtx_unlock(&softc->ctl_lock); 1429 return (0); 1430} 1431 1432static int 1433ctl_init(void) 1434{ 1435 struct ctl_softc *softc; 1436 void *other_pool; 1437 int i, error, retval; 1438 1439 retval = 0; 1440 control_softc = malloc(sizeof(*control_softc), M_DEVBUF, 1441 M_WAITOK | M_ZERO); 1442 softc = control_softc; 1443 1444 softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, 1445 "cam/ctl"); 1446 1447 softc->dev->si_drv1 = softc; 1448 1449 sysctl_ctx_init(&softc->sysctl_ctx); 1450 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, 1451 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", 1452 CTLFLAG_RD, 0, "CAM Target Layer"); 1453 1454 if (softc->sysctl_tree == NULL) { 1455 printf("%s: unable to allocate sysctl tree\n", 
__func__); 1456 destroy_dev(softc->dev); 1457 free(control_softc, M_DEVBUF); 1458 control_softc = NULL; 1459 return (ENOMEM); 1460 } 1461 1462 mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); 1463 softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), 1464 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 1465 softc->open_count = 0; 1466 1467 /* 1468 * Default to actually sending a SYNCHRONIZE CACHE command down to 1469 * the drive. 1470 */ 1471 softc->flags = CTL_FLAG_REAL_SYNC; 1472 1473 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1474 OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0, 1475 "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)"); 1476 1477 /* 1478 * In Copan's HA scheme, the "master" and "slave" roles are 1479 * figured out through the slot the controller is in. Although it 1480 * is an active/active system, someone has to be in charge. 1481 */ 1482 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1483 OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, 1484 "HA head ID (0 - no HA)"); 1485 if (softc->ha_id == 0 || softc->ha_id > NUM_TARGET_PORT_GROUPS) { 1486 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1487 softc->is_single = 1; 1488 softc->port_cnt = CTL_MAX_PORTS; 1489 softc->port_min = 0; 1490 } else { 1491 softc->port_cnt = CTL_MAX_PORTS / NUM_TARGET_PORT_GROUPS; 1492 softc->port_min = (softc->ha_id - 1) * softc->port_cnt; 1493 } 1494 softc->port_max = softc->port_min + softc->port_cnt; 1495 softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT; 1496 softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT; 1497 1498 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1499 OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0, 1500 "HA link state (0 - offline, 1 - unknown, 2 - online)"); 1501 1502 STAILQ_INIT(&softc->lun_list); 1503 STAILQ_INIT(&softc->pending_lun_queue); 1504 STAILQ_INIT(&softc->fe_list); 1505 STAILQ_INIT(&softc->port_list); 1506 
STAILQ_INIT(&softc->be_list); 1507 ctl_tpc_init(softc); 1508 1509 if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC, 1510 &other_pool) != 0) 1511 { 1512 printf("ctl: can't allocate %d entry other SC pool, " 1513 "exiting\n", CTL_POOL_ENTRIES_OTHER_SC); 1514 return (ENOMEM); 1515 } 1516 softc->othersc_pool = other_pool; 1517 1518 if (worker_threads <= 0) 1519 worker_threads = max(1, mp_ncpus / 4); 1520 if (worker_threads > CTL_MAX_THREADS) 1521 worker_threads = CTL_MAX_THREADS; 1522 1523 for (i = 0; i < worker_threads; i++) { 1524 struct ctl_thread *thr = &softc->threads[i]; 1525 1526 mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF); 1527 thr->ctl_softc = softc; 1528 STAILQ_INIT(&thr->incoming_queue); 1529 STAILQ_INIT(&thr->rtr_queue); 1530 STAILQ_INIT(&thr->done_queue); 1531 STAILQ_INIT(&thr->isc_queue); 1532 1533 error = kproc_kthread_add(ctl_work_thread, thr, 1534 &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i); 1535 if (error != 0) { 1536 printf("error creating CTL work thread!\n"); 1537 ctl_pool_free(other_pool); 1538 return (error); 1539 } 1540 } 1541 error = kproc_kthread_add(ctl_lun_thread, softc, 1542 &softc->ctl_proc, NULL, 0, 0, "ctl", "lun"); 1543 if (error != 0) { 1544 printf("error creating CTL lun thread!\n"); 1545 ctl_pool_free(other_pool); 1546 return (error); 1547 } 1548 error = kproc_kthread_add(ctl_thresh_thread, softc, 1549 &softc->ctl_proc, NULL, 0, 0, "ctl", "thresh"); 1550 if (error != 0) { 1551 printf("error creating CTL threshold thread!\n"); 1552 ctl_pool_free(other_pool); 1553 return (error); 1554 } 1555 1556 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), 1557 OID_AUTO, "ha_role", CTLTYPE_INT | CTLFLAG_RWTUN, 1558 softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head"); 1559 1560 if (softc->is_single == 0) { 1561 ctl_frontend_register(&ha_frontend); 1562 if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) { 1563 printf("ctl_init: ctl_ha_msg_init failed.\n"); 1564 
softc->is_single = 1; 1565 } else 1566 if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler) 1567 != CTL_HA_STATUS_SUCCESS) { 1568 printf("ctl_init: ctl_ha_msg_register failed.\n"); 1569 softc->is_single = 1; 1570 } 1571 } 1572 return (0); 1573} 1574 1575void 1576ctl_shutdown(void) 1577{ 1578 struct ctl_softc *softc; 1579 struct ctl_lun *lun, *next_lun; 1580 1581 softc = (struct ctl_softc *)control_softc; 1582 1583 if (softc->is_single == 0) { 1584 if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) 1585 != CTL_HA_STATUS_SUCCESS) { 1586 printf("ctl_shutdown: ctl_ha_msg_deregister failed.\n"); 1587 } 1588 if (ctl_ha_msg_shutdown(softc) != CTL_HA_STATUS_SUCCESS) { 1589 printf("ctl_shutdown: ctl_ha_msg_shutdown failed.\n"); 1590 } 1591 ctl_frontend_deregister(&ha_frontend); 1592 } 1593 1594 mtx_lock(&softc->ctl_lock); 1595 1596 /* 1597 * Free up each LUN. 1598 */ 1599 for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){ 1600 next_lun = STAILQ_NEXT(lun, links); 1601 ctl_free_lun(lun); 1602 } 1603 1604 mtx_unlock(&softc->ctl_lock); 1605 1606#if 0 1607 ctl_shutdown_thread(softc->work_thread); 1608 mtx_destroy(&softc->queue_lock); 1609#endif 1610 1611 ctl_tpc_shutdown(softc); 1612 uma_zdestroy(softc->io_zone); 1613 mtx_destroy(&softc->ctl_lock); 1614 1615 destroy_dev(softc->dev); 1616 1617 sysctl_ctx_free(&softc->sysctl_ctx); 1618 1619 free(control_softc, M_DEVBUF); 1620 control_softc = NULL; 1621} 1622 1623static int 1624ctl_module_event_handler(module_t mod, int what, void *arg) 1625{ 1626 1627 switch (what) { 1628 case MOD_LOAD: 1629 return (ctl_init()); 1630 case MOD_UNLOAD: 1631 return (EBUSY); 1632 default: 1633 return (EOPNOTSUPP); 1634 } 1635} 1636 1637/* 1638 * XXX KDM should we do some access checks here? Bump a reference count to 1639 * prevent a CTL module from being unloaded while someone has it open? 
1640 */ 1641static int 1642ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td) 1643{ 1644 return (0); 1645} 1646 1647static int 1648ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) 1649{ 1650 return (0); 1651} 1652 1653/* 1654 * Remove an initiator by port number and initiator ID. 1655 * Returns 0 for success, -1 for failure. 1656 */ 1657int 1658ctl_remove_initiator(struct ctl_port *port, int iid) 1659{ 1660 struct ctl_softc *softc = control_softc; 1661 1662 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 1663 1664 if (iid > CTL_MAX_INIT_PER_PORT) { 1665 printf("%s: initiator ID %u > maximun %u!\n", 1666 __func__, iid, CTL_MAX_INIT_PER_PORT); 1667 return (-1); 1668 } 1669 1670 mtx_lock(&softc->ctl_lock); 1671 port->wwpn_iid[iid].in_use--; 1672 port->wwpn_iid[iid].last_use = time_uptime; 1673 mtx_unlock(&softc->ctl_lock); 1674 ctl_isc_announce_iid(port, iid); 1675 1676 return (0); 1677} 1678 1679/* 1680 * Add an initiator to the initiator map. 1681 * Returns iid for success, < 0 for failure. 
1682 */ 1683int 1684ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name) 1685{ 1686 struct ctl_softc *softc = control_softc; 1687 time_t best_time; 1688 int i, best; 1689 1690 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 1691 1692 if (iid >= CTL_MAX_INIT_PER_PORT) { 1693 printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n", 1694 __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT); 1695 free(name, M_CTL); 1696 return (-1); 1697 } 1698 1699 mtx_lock(&softc->ctl_lock); 1700 1701 if (iid < 0 && (wwpn != 0 || name != NULL)) { 1702 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1703 if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) { 1704 iid = i; 1705 break; 1706 } 1707 if (name != NULL && port->wwpn_iid[i].name != NULL && 1708 strcmp(name, port->wwpn_iid[i].name) == 0) { 1709 iid = i; 1710 break; 1711 } 1712 } 1713 } 1714 1715 if (iid < 0) { 1716 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1717 if (port->wwpn_iid[i].in_use == 0 && 1718 port->wwpn_iid[i].wwpn == 0 && 1719 port->wwpn_iid[i].name == NULL) { 1720 iid = i; 1721 break; 1722 } 1723 } 1724 } 1725 1726 if (iid < 0) { 1727 best = -1; 1728 best_time = INT32_MAX; 1729 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1730 if (port->wwpn_iid[i].in_use == 0) { 1731 if (port->wwpn_iid[i].last_use < best_time) { 1732 best = i; 1733 best_time = port->wwpn_iid[i].last_use; 1734 } 1735 } 1736 } 1737 iid = best; 1738 } 1739 1740 if (iid < 0) { 1741 mtx_unlock(&softc->ctl_lock); 1742 free(name, M_CTL); 1743 return (-2); 1744 } 1745 1746 if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) { 1747 /* 1748 * This is not an error yet. 
1749 */ 1750 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { 1751#if 0 1752 printf("%s: port %d iid %u WWPN %#jx arrived" 1753 " again\n", __func__, port->targ_port, 1754 iid, (uintmax_t)wwpn); 1755#endif 1756 goto take; 1757 } 1758 if (name != NULL && port->wwpn_iid[iid].name != NULL && 1759 strcmp(name, port->wwpn_iid[iid].name) == 0) { 1760#if 0 1761 printf("%s: port %d iid %u name '%s' arrived" 1762 " again\n", __func__, port->targ_port, 1763 iid, name); 1764#endif 1765 goto take; 1766 } 1767 1768 /* 1769 * This is an error, but what do we do about it? The 1770 * driver is telling us we have a new WWPN for this 1771 * initiator ID, so we pretty much need to use it. 1772 */ 1773 printf("%s: port %d iid %u WWPN %#jx '%s' arrived," 1774 " but WWPN %#jx '%s' is still at that address\n", 1775 __func__, port->targ_port, iid, wwpn, name, 1776 (uintmax_t)port->wwpn_iid[iid].wwpn, 1777 port->wwpn_iid[iid].name); 1778 1779 /* 1780 * XXX KDM clear have_ca and ua_pending on each LUN for 1781 * this initiator. 
1782 */ 1783 } 1784take: 1785 free(port->wwpn_iid[iid].name, M_CTL); 1786 port->wwpn_iid[iid].name = name; 1787 port->wwpn_iid[iid].wwpn = wwpn; 1788 port->wwpn_iid[iid].in_use++; 1789 mtx_unlock(&softc->ctl_lock); 1790 ctl_isc_announce_iid(port, iid); 1791 1792 return (iid); 1793} 1794 1795static int 1796ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf) 1797{ 1798 int len; 1799 1800 switch (port->port_type) { 1801 case CTL_PORT_FC: 1802 { 1803 struct scsi_transportid_fcp *id = 1804 (struct scsi_transportid_fcp *)buf; 1805 if (port->wwpn_iid[iid].wwpn == 0) 1806 return (0); 1807 memset(id, 0, sizeof(*id)); 1808 id->format_protocol = SCSI_PROTO_FC; 1809 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name); 1810 return (sizeof(*id)); 1811 } 1812 case CTL_PORT_ISCSI: 1813 { 1814 struct scsi_transportid_iscsi_port *id = 1815 (struct scsi_transportid_iscsi_port *)buf; 1816 if (port->wwpn_iid[iid].name == NULL) 1817 return (0); 1818 memset(id, 0, 256); 1819 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT | 1820 SCSI_PROTO_ISCSI; 1821 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1; 1822 len = roundup2(min(len, 252), 4); 1823 scsi_ulto2b(len, id->additional_length); 1824 return (sizeof(*id) + len); 1825 } 1826 case CTL_PORT_SAS: 1827 { 1828 struct scsi_transportid_sas *id = 1829 (struct scsi_transportid_sas *)buf; 1830 if (port->wwpn_iid[iid].wwpn == 0) 1831 return (0); 1832 memset(id, 0, sizeof(*id)); 1833 id->format_protocol = SCSI_PROTO_SAS; 1834 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address); 1835 return (sizeof(*id)); 1836 } 1837 default: 1838 { 1839 struct scsi_transportid_spi *id = 1840 (struct scsi_transportid_spi *)buf; 1841 memset(id, 0, sizeof(*id)); 1842 id->format_protocol = SCSI_PROTO_SPI; 1843 scsi_ulto2b(iid, id->scsi_addr); 1844 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id); 1845 return (sizeof(*id)); 1846 } 1847 } 1848} 1849 1850/* 1851 * Serialize a command that went down the "wrong" side, and so was sent to 
 * this controller for execution.  The logic is a little different than the
 * standard case in ctl_scsiio_precheck().  Errors in this case need to get
 * sent back to the other side, but in the success case, we execute the
 * command on this side (XFER mode) or tell the other side to execute it
 * (SER_ONLY mode).
 */
static int
ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc;
	union ctl_ha_msg msg_info;
	struct ctl_lun *lun;
	const struct ctl_cmd_entry *entry;
	int retval = 0;
	uint32_t targ_lun;

	softc = control_softc;

	targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if ((targ_lun < CTL_MAX_LUNS) &&
	    ((lun = softc->ctl_luns[targ_lun]) != NULL)) {
		/* Lock hand-off: take lun_lock before dropping ctl_lock. */
		mtx_lock(&lun->lun_lock);
		mtx_unlock(&softc->ctl_lock);
		/*
		 * If the LUN is invalid, pretend that it doesn't exist.
		 * It will go away as soon as all pending I/O has been
		 * completed.
		 */
		if (lun->flags & CTL_LUN_DISABLED) {
			mtx_unlock(&lun->lun_lock);
			lun = NULL;
		}
	} else {
		mtx_unlock(&softc->ctl_lock);
		lun = NULL;
	}
	if (lun == NULL) {
		/*
		 * The other node would not send this request to us unless
		 * received announce that we are primary node for this LUN.
		 * If this LUN does not exist now, it is probably result of
		 * a race, so respond to initiator in the most opaque way.
		 */
		ctl_set_busy(ctsio);
		ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.scsi), M_WAITOK);
		return(1);
	}

	/* Reject the command if LUN-level checks (reservations etc.) fail. */
	entry = ctl_get_cmd_entry(ctsio, NULL);
	if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
		mtx_unlock(&lun->lun_lock);
		ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.scsi), M_WAITOK);
		return(1);
	}

	ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
	ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = lun->be_lun;

	/*
	 * Every I/O goes into the OOA queue for a
	 * particular LUN, and stays there until completion.
	 */
#ifdef CTL_TIME_IO
	if (TAILQ_EMPTY(&lun->ooa_queue))
		lun->idle_time += getsbinuptime() - lun->last_busy;
#endif
	TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);

	/* Order this I/O against everything already queued before it. */
	switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
	    (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq,
	    ooa_links))) {
	case CTL_ACTION_BLOCK:
		/* Must wait for an earlier command; park on blocked_queue. */
		ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
		TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
		    blocked_links);
		mtx_unlock(&lun->lun_lock);
		break;
	case CTL_ACTION_PASS:
	case CTL_ACTION_SKIP:
		if (softc->ha_mode == CTL_HA_MODE_XFER) {
			/* XFER mode: we execute the command ourselves. */
			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
			ctl_enqueue_rtr((union ctl_io *)ctsio);
			mtx_unlock(&lun->lun_lock);
		} else {
			/* SER_ONLY mode: tell the peer it may proceed. */
			ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
			mtx_unlock(&lun->lun_lock);

			/* send msg back to other side */
			msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
			msg_info.hdr.serializing_sc = (union ctl_io *)ctsio;
			msg_info.hdr.msg_type = CTL_MSG_R2R;
			ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
			    sizeof(msg_info.hdr), M_WAITOK);
		}
		break;
	case CTL_ACTION_OVERLAP:
		/* Overlapped command: fail it back to the peer. */
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		mtx_unlock(&lun->lun_lock);
		retval = 1;

		ctl_set_overlapped_cmd(ctsio);
		ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.scsi), M_WAITOK);
		break;
	case CTL_ACTION_OVERLAP_TAG:
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		mtx_unlock(&lun->lun_lock);
		retval = 1;
		ctl_set_overlapped_tag(ctsio, ctsio->tag_num);
		ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.scsi), M_WAITOK);
		break;
	case CTL_ACTION_ERROR:
	default:
		/* Serialization failed internally; report back to the peer. */
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		mtx_unlock(&lun->lun_lock);
		retval = 1;

		ctl_set_internal_failure(ctsio, /*sks_valid*/ 0,
		    /*retry_count*/ 0);
		ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.scsi), M_WAITOK);
		break;
	}
	return (retval);
}

/*
 * Returns 0 for success, errno for failure.
 */
static int
ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
    struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries)
{
	union ctl_io *io;
	int retval;	/* NOTE(review): never set non-zero; always returns 0 */

	retval = 0;

	mtx_lock(&lun->lun_lock);
	/* Walk the LUN's OOA queue; *cur_fill_num counts every entry seen,
	 * even those beyond alloc_num that are only counted, not copied. */
	for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL);
	     (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
	     ooa_links)) {
		struct ctl_ooa_entry *entry;

		/*
		 * If we've got more than we can fit, just count the
		 * remaining entries.
		 */
		if (*cur_fill_num >= ooa_hdr->alloc_num)
			continue;

		entry = &kern_entries[*cur_fill_num];

		entry->tag_num = io->scsiio.tag_num;
		entry->lun_num = lun->lun;
#ifdef CTL_TIME_IO
		entry->start_bt = io->io_hdr.start_bt;
#endif
		bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len);
		entry->cdb_len = io->scsiio.cdb_len;
		if (io->io_hdr.flags & CTL_FLAG_BLOCKED)
			entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED;

		if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG)
			entry->cmd_flags |= CTL_OOACMD_FLAG_DMA;

		if (io->io_hdr.flags & CTL_FLAG_ABORT)
			entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT;

		if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR)
			entry->cmd_flags |= CTL_OOACMD_FLAG_RTR;

		if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
			entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED;
	}
	mtx_unlock(&lun->lun_lock);

	return (retval);
}

/*
 * Allocate a kernel buffer of 'len' bytes and copy it in from userland.
 * Returns the buffer, or NULL on copyin failure (with error_str filled
 * in).  Caller frees with free(..., M_CTL).
 */
static void *
ctl_copyin_alloc(void *user_addr, int len, char *error_str,
    size_t error_str_len)
{
	void *kptr;

	kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO);

	if (copyin(user_addr, kptr, len) != 0) {
		snprintf(error_str, error_str_len, "Error copying %d bytes "
		    "from user address %p to kernel address %p", len,
		    user_addr, kptr);
		free(kptr, M_CTL);
		return (NULL);
	}

	return (kptr);
}

/* Free a kernel-side backend-argument array and its kname/kvalue buffers. */
static void
ctl_free_args(int num_args, struct ctl_be_arg *args)
{
	int i;

	if (args == NULL)
		return;

	for (i = 0; i < num_args; i++) {
		free(args[i].kname, M_CTL);
		free(args[i].kvalue, M_CTL);
	}

	free(args, M_CTL);
}

/*
 * Copy a userland ctl_be_arg array into the kernel, including each
 * argument's name and (for readable args) value buffers.  Returns the
 * kernel array, or NULL with error_str set; on failure everything copied
 * so far is freed.
 *
 * NOTE(review): num_args, namelen and vallen come from userland:
 * 'num_args * sizeof(*args)' can overflow int, and namelen/vallen == 0
 * would index [-1] in the NUL-termination checks below.  This ioctl path
 * is presumably privileged-only -- confirm, or add range checks.
 */
static struct ctl_be_arg *
ctl_copyin_args(int num_args, struct ctl_be_arg *uargs,
    char *error_str, size_t error_str_len)
{
	struct ctl_be_arg *args;
	int i;

	args = ctl_copyin_alloc(uargs, num_args * sizeof(*args),
	    error_str, error_str_len);

	if (args == NULL)
		goto bailout;

	/* Clear kernel pointers first so a partial failure frees safely. */
	for (i = 0; i < num_args; i++) {
		args[i].kname = NULL;
		args[i].kvalue = NULL;
	}

	for (i = 0; i < num_args; i++) {
		uint8_t *tmpptr;

		args[i].kname = ctl_copyin_alloc(args[i].name,
		    args[i].namelen, error_str, error_str_len);
		if (args[i].kname == NULL)
			goto bailout;

		if (args[i].kname[args[i].namelen - 1] != '\0') {
			snprintf(error_str, error_str_len, "Argument %d "
			    "name is not NUL-terminated", i);
			goto bailout;
		}

		if (args[i].flags & CTL_BEARG_RD) {
			/* Backend will read this value: copy it in. */
			tmpptr = ctl_copyin_alloc(args[i].value,
			    args[i].vallen, error_str, error_str_len);
			if (tmpptr == NULL)
				goto bailout;
			if ((args[i].flags & CTL_BEARG_ASCII)
			 && (tmpptr[args[i].vallen - 1] != '\0')) {
				snprintf(error_str, error_str_len, "Argument "
				    "%d value is not NUL-terminated", i);
				goto bailout;
			}
			args[i].kvalue = tmpptr;
		} else {
			/* Write-only value: just allocate a zeroed buffer. */
			args[i].kvalue = malloc(args[i].vallen,
			    M_CTL, M_WAITOK | M_ZERO);
		}
	}

	return (args);
bailout:

	ctl_free_args(num_args, args);

	return (NULL);
}

/*
 * Copy writable (CTL_BEARG_WR) argument values back out to userland.
 * NOTE(review): copyout() errors are ignored here -- looks deliberate
 * (best effort), but confirm.
 */
static void
ctl_copyout_args(int num_args, struct ctl_be_arg *args)
{
	int i;

	for (i = 0; i < num_args; i++) {
		if (args[i].flags & CTL_BEARG_WR)
			copyout(args[i].kvalue, args[i].value, args[i].vallen);
	}
}

/*
 * Escape characters that are illegal or not recommended in XML.
2162 */ 2163int 2164ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size) 2165{ 2166 char *end = str + size; 2167 int retval; 2168 2169 retval = 0; 2170 2171 for (; *str && str < end; str++) { 2172 switch (*str) { 2173 case '&': 2174 retval = sbuf_printf(sb, "&"); 2175 break; 2176 case '>': 2177 retval = sbuf_printf(sb, ">"); 2178 break; 2179 case '<': 2180 retval = sbuf_printf(sb, "<"); 2181 break; 2182 default: 2183 retval = sbuf_putc(sb, *str); 2184 break; 2185 } 2186 2187 if (retval != 0) 2188 break; 2189 2190 } 2191 2192 return (retval); 2193} 2194 2195static void 2196ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb) 2197{ 2198 struct scsi_vpd_id_descriptor *desc; 2199 int i; 2200 2201 if (id == NULL || id->len < 4) 2202 return; 2203 desc = (struct scsi_vpd_id_descriptor *)id->data; 2204 switch (desc->id_type & SVPD_ID_TYPE_MASK) { 2205 case SVPD_ID_TYPE_T10: 2206 sbuf_printf(sb, "t10."); 2207 break; 2208 case SVPD_ID_TYPE_EUI64: 2209 sbuf_printf(sb, "eui."); 2210 break; 2211 case SVPD_ID_TYPE_NAA: 2212 sbuf_printf(sb, "naa."); 2213 break; 2214 case SVPD_ID_TYPE_SCSI_NAME: 2215 break; 2216 } 2217 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { 2218 case SVPD_ID_CODESET_BINARY: 2219 for (i = 0; i < desc->length; i++) 2220 sbuf_printf(sb, "%02x", desc->identifier[i]); 2221 break; 2222 case SVPD_ID_CODESET_ASCII: 2223 sbuf_printf(sb, "%.*s", (int)desc->length, 2224 (char *)desc->identifier); 2225 break; 2226 case SVPD_ID_CODESET_UTF8: 2227 sbuf_printf(sb, "%s", (char *)desc->identifier); 2228 break; 2229 } 2230} 2231 2232static int 2233ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 2234 struct thread *td) 2235{ 2236 struct ctl_softc *softc; 2237 struct ctl_lun *lun; 2238 int retval; 2239 2240 softc = control_softc; 2241 2242 retval = 0; 2243 2244 switch (cmd) { 2245 case CTL_IO: 2246 retval = ctl_ioctl_io(dev, cmd, addr, flag, td); 2247 break; 2248 case CTL_ENABLE_PORT: 2249 case CTL_DISABLE_PORT: 2250 case CTL_SET_PORT_WWNS: { 2251 
struct ctl_port *port; 2252 struct ctl_port_entry *entry; 2253 2254 entry = (struct ctl_port_entry *)addr; 2255 2256 mtx_lock(&softc->ctl_lock); 2257 STAILQ_FOREACH(port, &softc->port_list, links) { 2258 int action, done; 2259 2260 if (port->targ_port < softc->port_min || 2261 port->targ_port >= softc->port_max) 2262 continue; 2263 2264 action = 0; 2265 done = 0; 2266 if ((entry->port_type == CTL_PORT_NONE) 2267 && (entry->targ_port == port->targ_port)) { 2268 /* 2269 * If the user only wants to enable or 2270 * disable or set WWNs on a specific port, 2271 * do the operation and we're done. 2272 */ 2273 action = 1; 2274 done = 1; 2275 } else if (entry->port_type & port->port_type) { 2276 /* 2277 * Compare the user's type mask with the 2278 * particular frontend type to see if we 2279 * have a match. 2280 */ 2281 action = 1; 2282 done = 0; 2283 2284 /* 2285 * Make sure the user isn't trying to set 2286 * WWNs on multiple ports at the same time. 2287 */ 2288 if (cmd == CTL_SET_PORT_WWNS) { 2289 printf("%s: Can't set WWNs on " 2290 "multiple ports\n", __func__); 2291 retval = EINVAL; 2292 break; 2293 } 2294 } 2295 if (action == 0) 2296 continue; 2297 2298 /* 2299 * XXX KDM we have to drop the lock here, because 2300 * the online/offline operations can potentially 2301 * block. We need to reference count the frontends 2302 * so they can't go away, 2303 */ 2304 if (cmd == CTL_ENABLE_PORT) { 2305 mtx_unlock(&softc->ctl_lock); 2306 ctl_port_online(port); 2307 mtx_lock(&softc->ctl_lock); 2308 } else if (cmd == CTL_DISABLE_PORT) { 2309 mtx_unlock(&softc->ctl_lock); 2310 ctl_port_offline(port); 2311 mtx_lock(&softc->ctl_lock); 2312 } else if (cmd == CTL_SET_PORT_WWNS) { 2313 ctl_port_set_wwns(port, 2314 (entry->flags & CTL_PORT_WWNN_VALID) ? 2315 1 : 0, entry->wwnn, 2316 (entry->flags & CTL_PORT_WWPN_VALID) ? 
2317 1 : 0, entry->wwpn); 2318 } 2319 if (done != 0) 2320 break; 2321 } 2322 mtx_unlock(&softc->ctl_lock); 2323 break; 2324 } 2325 case CTL_GET_PORT_LIST: { 2326 struct ctl_port *port; 2327 struct ctl_port_list *list; 2328 int i; 2329 2330 list = (struct ctl_port_list *)addr; 2331 2332 if (list->alloc_len != (list->alloc_num * 2333 sizeof(struct ctl_port_entry))) { 2334 printf("%s: CTL_GET_PORT_LIST: alloc_len %u != " 2335 "alloc_num %u * sizeof(struct ctl_port_entry) " 2336 "%zu\n", __func__, list->alloc_len, 2337 list->alloc_num, sizeof(struct ctl_port_entry)); 2338 retval = EINVAL; 2339 break; 2340 } 2341 list->fill_len = 0; 2342 list->fill_num = 0; 2343 list->dropped_num = 0; 2344 i = 0; 2345 mtx_lock(&softc->ctl_lock); 2346 STAILQ_FOREACH(port, &softc->port_list, links) { 2347 struct ctl_port_entry entry, *list_entry; 2348 2349 if (list->fill_num >= list->alloc_num) { 2350 list->dropped_num++; 2351 continue; 2352 } 2353 2354 entry.port_type = port->port_type; 2355 strlcpy(entry.port_name, port->port_name, 2356 sizeof(entry.port_name)); 2357 entry.targ_port = port->targ_port; 2358 entry.physical_port = port->physical_port; 2359 entry.virtual_port = port->virtual_port; 2360 entry.wwnn = port->wwnn; 2361 entry.wwpn = port->wwpn; 2362 if (port->status & CTL_PORT_STATUS_ONLINE) 2363 entry.online = 1; 2364 else 2365 entry.online = 0; 2366 2367 list_entry = &list->entries[i]; 2368 2369 retval = copyout(&entry, list_entry, sizeof(entry)); 2370 if (retval != 0) { 2371 printf("%s: CTL_GET_PORT_LIST: copyout " 2372 "returned %d\n", __func__, retval); 2373 break; 2374 } 2375 i++; 2376 list->fill_num++; 2377 list->fill_len += sizeof(entry); 2378 } 2379 mtx_unlock(&softc->ctl_lock); 2380 2381 /* 2382 * If this is non-zero, we had a copyout fault, so there's 2383 * probably no point in attempting to set the status inside 2384 * the structure. 
2385 */ 2386 if (retval != 0) 2387 break; 2388 2389 if (list->dropped_num > 0) 2390 list->status = CTL_PORT_LIST_NEED_MORE_SPACE; 2391 else 2392 list->status = CTL_PORT_LIST_OK; 2393 break; 2394 } 2395 case CTL_DUMP_OOA: { 2396 union ctl_io *io; 2397 char printbuf[128]; 2398 struct sbuf sb; 2399 2400 mtx_lock(&softc->ctl_lock); 2401 printf("Dumping OOA queues:\n"); 2402 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2403 mtx_lock(&lun->lun_lock); 2404 for (io = (union ctl_io *)TAILQ_FIRST( 2405 &lun->ooa_queue); io != NULL; 2406 io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2407 ooa_links)) { 2408 sbuf_new(&sb, printbuf, sizeof(printbuf), 2409 SBUF_FIXEDLEN); 2410 sbuf_printf(&sb, "LUN %jd tag 0x%04x%s%s%s%s: ", 2411 (intmax_t)lun->lun, 2412 io->scsiio.tag_num, 2413 (io->io_hdr.flags & 2414 CTL_FLAG_BLOCKED) ? "" : " BLOCKED", 2415 (io->io_hdr.flags & 2416 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 2417 (io->io_hdr.flags & 2418 CTL_FLAG_ABORT) ? " ABORT" : "", 2419 (io->io_hdr.flags & 2420 CTL_FLAG_IS_WAS_ON_RTR) ? 
" RTR" : ""); 2421 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 2422 sbuf_finish(&sb); 2423 printf("%s\n", sbuf_data(&sb)); 2424 } 2425 mtx_unlock(&lun->lun_lock); 2426 } 2427 printf("OOA queues dump done\n"); 2428 mtx_unlock(&softc->ctl_lock); 2429 break; 2430 } 2431 case CTL_GET_OOA: { 2432 struct ctl_ooa *ooa_hdr; 2433 struct ctl_ooa_entry *entries; 2434 uint32_t cur_fill_num; 2435 2436 ooa_hdr = (struct ctl_ooa *)addr; 2437 2438 if ((ooa_hdr->alloc_len == 0) 2439 || (ooa_hdr->alloc_num == 0)) { 2440 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2441 "must be non-zero\n", __func__, 2442 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2443 retval = EINVAL; 2444 break; 2445 } 2446 2447 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2448 sizeof(struct ctl_ooa_entry))) { 2449 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2450 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2451 __func__, ooa_hdr->alloc_len, 2452 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2453 retval = EINVAL; 2454 break; 2455 } 2456 2457 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2458 if (entries == NULL) { 2459 printf("%s: could not allocate %d bytes for OOA " 2460 "dump\n", __func__, ooa_hdr->alloc_len); 2461 retval = ENOMEM; 2462 break; 2463 } 2464 2465 mtx_lock(&softc->ctl_lock); 2466 if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0) 2467 && ((ooa_hdr->lun_num >= CTL_MAX_LUNS) 2468 || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) { 2469 mtx_unlock(&softc->ctl_lock); 2470 free(entries, M_CTL); 2471 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2472 __func__, (uintmax_t)ooa_hdr->lun_num); 2473 retval = EINVAL; 2474 break; 2475 } 2476 2477 cur_fill_num = 0; 2478 2479 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2480 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2481 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2482 ooa_hdr, entries); 2483 if (retval != 0) 2484 break; 2485 } 2486 if (retval != 0) { 2487 mtx_unlock(&softc->ctl_lock); 2488 
free(entries, M_CTL); 2489 break; 2490 } 2491 } else { 2492 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2493 2494 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,ooa_hdr, 2495 entries); 2496 } 2497 mtx_unlock(&softc->ctl_lock); 2498 2499 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2500 ooa_hdr->fill_len = ooa_hdr->fill_num * 2501 sizeof(struct ctl_ooa_entry); 2502 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2503 if (retval != 0) { 2504 printf("%s: error copying out %d bytes for OOA dump\n", 2505 __func__, ooa_hdr->fill_len); 2506 } 2507 2508 getbintime(&ooa_hdr->cur_bt); 2509 2510 if (cur_fill_num > ooa_hdr->alloc_num) { 2511 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2512 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2513 } else { 2514 ooa_hdr->dropped_num = 0; 2515 ooa_hdr->status = CTL_OOA_OK; 2516 } 2517 2518 free(entries, M_CTL); 2519 break; 2520 } 2521 case CTL_CHECK_OOA: { 2522 union ctl_io *io; 2523 struct ctl_ooa_info *ooa_info; 2524 2525 2526 ooa_info = (struct ctl_ooa_info *)addr; 2527 2528 if (ooa_info->lun_id >= CTL_MAX_LUNS) { 2529 ooa_info->status = CTL_OOA_INVALID_LUN; 2530 break; 2531 } 2532 mtx_lock(&softc->ctl_lock); 2533 lun = softc->ctl_luns[ooa_info->lun_id]; 2534 if (lun == NULL) { 2535 mtx_unlock(&softc->ctl_lock); 2536 ooa_info->status = CTL_OOA_INVALID_LUN; 2537 break; 2538 } 2539 mtx_lock(&lun->lun_lock); 2540 mtx_unlock(&softc->ctl_lock); 2541 ooa_info->num_entries = 0; 2542 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 2543 io != NULL; io = (union ctl_io *)TAILQ_NEXT( 2544 &io->io_hdr, ooa_links)) { 2545 ooa_info->num_entries++; 2546 } 2547 mtx_unlock(&lun->lun_lock); 2548 2549 ooa_info->status = CTL_OOA_SUCCESS; 2550 2551 break; 2552 } 2553 case CTL_DELAY_IO: { 2554 struct ctl_io_delay_info *delay_info; 2555 2556 delay_info = (struct ctl_io_delay_info *)addr; 2557 2558#ifdef CTL_IO_DELAY 2559 mtx_lock(&softc->ctl_lock); 2560 2561 if ((delay_info->lun_id >= CTL_MAX_LUNS) 2562 || 
(softc->ctl_luns[delay_info->lun_id] == NULL)) { 2563 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2564 } else { 2565 lun = softc->ctl_luns[delay_info->lun_id]; 2566 mtx_lock(&lun->lun_lock); 2567 2568 delay_info->status = CTL_DELAY_STATUS_OK; 2569 2570 switch (delay_info->delay_type) { 2571 case CTL_DELAY_TYPE_CONT: 2572 break; 2573 case CTL_DELAY_TYPE_ONESHOT: 2574 break; 2575 default: 2576 delay_info->status = 2577 CTL_DELAY_STATUS_INVALID_TYPE; 2578 break; 2579 } 2580 2581 switch (delay_info->delay_loc) { 2582 case CTL_DELAY_LOC_DATAMOVE: 2583 lun->delay_info.datamove_type = 2584 delay_info->delay_type; 2585 lun->delay_info.datamove_delay = 2586 delay_info->delay_secs; 2587 break; 2588 case CTL_DELAY_LOC_DONE: 2589 lun->delay_info.done_type = 2590 delay_info->delay_type; 2591 lun->delay_info.done_delay = 2592 delay_info->delay_secs; 2593 break; 2594 default: 2595 delay_info->status = 2596 CTL_DELAY_STATUS_INVALID_LOC; 2597 break; 2598 } 2599 mtx_unlock(&lun->lun_lock); 2600 } 2601 2602 mtx_unlock(&softc->ctl_lock); 2603#else 2604 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2605#endif /* CTL_IO_DELAY */ 2606 break; 2607 } 2608 case CTL_REALSYNC_SET: { 2609 int *syncstate; 2610 2611 syncstate = (int *)addr; 2612 2613 mtx_lock(&softc->ctl_lock); 2614 switch (*syncstate) { 2615 case 0: 2616 softc->flags &= ~CTL_FLAG_REAL_SYNC; 2617 break; 2618 case 1: 2619 softc->flags |= CTL_FLAG_REAL_SYNC; 2620 break; 2621 default: 2622 retval = EINVAL; 2623 break; 2624 } 2625 mtx_unlock(&softc->ctl_lock); 2626 break; 2627 } 2628 case CTL_REALSYNC_GET: { 2629 int *syncstate; 2630 2631 syncstate = (int*)addr; 2632 2633 mtx_lock(&softc->ctl_lock); 2634 if (softc->flags & CTL_FLAG_REAL_SYNC) 2635 *syncstate = 1; 2636 else 2637 *syncstate = 0; 2638 mtx_unlock(&softc->ctl_lock); 2639 2640 break; 2641 } 2642 case CTL_SETSYNC: 2643 case CTL_GETSYNC: { 2644 struct ctl_sync_info *sync_info; 2645 2646 sync_info = (struct ctl_sync_info *)addr; 2647 2648 
mtx_lock(&softc->ctl_lock); 2649 lun = softc->ctl_luns[sync_info->lun_id]; 2650 if (lun == NULL) { 2651 mtx_unlock(&softc->ctl_lock); 2652 sync_info->status = CTL_GS_SYNC_NO_LUN; 2653 break; 2654 } 2655 /* 2656 * Get or set the sync interval. We're not bounds checking 2657 * in the set case, hopefully the user won't do something 2658 * silly. 2659 */ 2660 mtx_lock(&lun->lun_lock); 2661 mtx_unlock(&softc->ctl_lock); 2662 if (cmd == CTL_GETSYNC) 2663 sync_info->sync_interval = lun->sync_interval; 2664 else 2665 lun->sync_interval = sync_info->sync_interval; 2666 mtx_unlock(&lun->lun_lock); 2667 2668 sync_info->status = CTL_GS_SYNC_OK; 2669 2670 break; 2671 } 2672 case CTL_GETSTATS: { 2673 struct ctl_stats *stats; 2674 int i; 2675 2676 stats = (struct ctl_stats *)addr; 2677 2678 if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) > 2679 stats->alloc_len) { 2680 stats->status = CTL_SS_NEED_MORE_SPACE; 2681 stats->num_luns = softc->num_luns; 2682 break; 2683 } 2684 /* 2685 * XXX KDM no locking here. If the LUN list changes, 2686 * things can blow up. 
2687 */ 2688 for (i = 0, lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; 2689 i++, lun = STAILQ_NEXT(lun, links)) { 2690 retval = copyout(&lun->stats, &stats->lun_stats[i], 2691 sizeof(lun->stats)); 2692 if (retval != 0) 2693 break; 2694 } 2695 stats->num_luns = softc->num_luns; 2696 stats->fill_len = sizeof(struct ctl_lun_io_stats) * 2697 softc->num_luns; 2698 stats->status = CTL_SS_OK; 2699#ifdef CTL_TIME_IO 2700 stats->flags = CTL_STATS_FLAG_TIME_VALID; 2701#else 2702 stats->flags = CTL_STATS_FLAG_NONE; 2703#endif 2704 getnanouptime(&stats->timestamp); 2705 break; 2706 } 2707 case CTL_ERROR_INJECT: { 2708 struct ctl_error_desc *err_desc, *new_err_desc; 2709 2710 err_desc = (struct ctl_error_desc *)addr; 2711 2712 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2713 M_WAITOK | M_ZERO); 2714 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 2715 2716 mtx_lock(&softc->ctl_lock); 2717 lun = softc->ctl_luns[err_desc->lun_id]; 2718 if (lun == NULL) { 2719 mtx_unlock(&softc->ctl_lock); 2720 free(new_err_desc, M_CTL); 2721 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 2722 __func__, (uintmax_t)err_desc->lun_id); 2723 retval = EINVAL; 2724 break; 2725 } 2726 mtx_lock(&lun->lun_lock); 2727 mtx_unlock(&softc->ctl_lock); 2728 2729 /* 2730 * We could do some checking here to verify the validity 2731 * of the request, but given the complexity of error 2732 * injection requests, the checking logic would be fairly 2733 * complex. 2734 * 2735 * For now, if the request is invalid, it just won't get 2736 * executed and might get deleted. 2737 */ 2738 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 2739 2740 /* 2741 * XXX KDM check to make sure the serial number is unique, 2742 * in case we somehow manage to wrap. That shouldn't 2743 * happen for a very long time, but it's the right thing to 2744 * do. 
2745 */ 2746 new_err_desc->serial = lun->error_serial; 2747 err_desc->serial = lun->error_serial; 2748 lun->error_serial++; 2749 2750 mtx_unlock(&lun->lun_lock); 2751 break; 2752 } 2753 case CTL_ERROR_INJECT_DELETE: { 2754 struct ctl_error_desc *delete_desc, *desc, *desc2; 2755 int delete_done; 2756 2757 delete_desc = (struct ctl_error_desc *)addr; 2758 delete_done = 0; 2759 2760 mtx_lock(&softc->ctl_lock); 2761 lun = softc->ctl_luns[delete_desc->lun_id]; 2762 if (lun == NULL) { 2763 mtx_unlock(&softc->ctl_lock); 2764 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 2765 __func__, (uintmax_t)delete_desc->lun_id); 2766 retval = EINVAL; 2767 break; 2768 } 2769 mtx_lock(&lun->lun_lock); 2770 mtx_unlock(&softc->ctl_lock); 2771 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 2772 if (desc->serial != delete_desc->serial) 2773 continue; 2774 2775 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 2776 links); 2777 free(desc, M_CTL); 2778 delete_done = 1; 2779 } 2780 mtx_unlock(&lun->lun_lock); 2781 if (delete_done == 0) { 2782 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 2783 "error serial %ju on LUN %u\n", __func__, 2784 delete_desc->serial, delete_desc->lun_id); 2785 retval = EINVAL; 2786 break; 2787 } 2788 break; 2789 } 2790 case CTL_DUMP_STRUCTS: { 2791 int i, j, k; 2792 struct ctl_port *port; 2793 struct ctl_frontend *fe; 2794 2795 mtx_lock(&softc->ctl_lock); 2796 printf("CTL Persistent Reservation information start:\n"); 2797 for (i = 0; i < CTL_MAX_LUNS; i++) { 2798 lun = softc->ctl_luns[i]; 2799 2800 if ((lun == NULL) 2801 || ((lun->flags & CTL_LUN_DISABLED) != 0)) 2802 continue; 2803 2804 for (j = 0; j < CTL_MAX_PORTS; j++) { 2805 if (lun->pr_keys[j] == NULL) 2806 continue; 2807 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2808 if (lun->pr_keys[j][k] == 0) 2809 continue; 2810 printf(" LUN %d port %d iid %d key " 2811 "%#jx\n", i, j, k, 2812 (uintmax_t)lun->pr_keys[j][k]); 2813 } 2814 } 2815 } 2816 printf("CTL Persistent Reservation 
information end\n"); 2817 printf("CTL Ports:\n"); 2818 STAILQ_FOREACH(port, &softc->port_list, links) { 2819 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " 2820 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, 2821 port->frontend->name, port->port_type, 2822 port->physical_port, port->virtual_port, 2823 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); 2824 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 2825 if (port->wwpn_iid[j].in_use == 0 && 2826 port->wwpn_iid[j].wwpn == 0 && 2827 port->wwpn_iid[j].name == NULL) 2828 continue; 2829 2830 printf(" iid %u use %d WWPN %#jx '%s'\n", 2831 j, port->wwpn_iid[j].in_use, 2832 (uintmax_t)port->wwpn_iid[j].wwpn, 2833 port->wwpn_iid[j].name); 2834 } 2835 } 2836 printf("CTL Port information end\n"); 2837 mtx_unlock(&softc->ctl_lock); 2838 /* 2839 * XXX KDM calling this without a lock. We'd likely want 2840 * to drop the lock before calling the frontend's dump 2841 * routine anyway. 2842 */ 2843 printf("CTL Frontends:\n"); 2844 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2845 printf(" Frontend '%s'\n", fe->name); 2846 if (fe->fe_dump != NULL) 2847 fe->fe_dump(); 2848 } 2849 printf("CTL Frontend information end\n"); 2850 break; 2851 } 2852 case CTL_LUN_REQ: { 2853 struct ctl_lun_req *lun_req; 2854 struct ctl_backend_driver *backend; 2855 2856 lun_req = (struct ctl_lun_req *)addr; 2857 2858 backend = ctl_backend_find(lun_req->backend); 2859 if (backend == NULL) { 2860 lun_req->status = CTL_LUN_ERROR; 2861 snprintf(lun_req->error_str, 2862 sizeof(lun_req->error_str), 2863 "Backend \"%s\" not found.", 2864 lun_req->backend); 2865 break; 2866 } 2867 if (lun_req->num_be_args > 0) { 2868 lun_req->kern_be_args = ctl_copyin_args( 2869 lun_req->num_be_args, 2870 lun_req->be_args, 2871 lun_req->error_str, 2872 sizeof(lun_req->error_str)); 2873 if (lun_req->kern_be_args == NULL) { 2874 lun_req->status = CTL_LUN_ERROR; 2875 break; 2876 } 2877 } 2878 2879 retval = backend->ioctl(dev, cmd, addr, flag, td); 2880 2881 if 
(lun_req->num_be_args > 0) { 2882 ctl_copyout_args(lun_req->num_be_args, 2883 lun_req->kern_be_args); 2884 ctl_free_args(lun_req->num_be_args, 2885 lun_req->kern_be_args); 2886 } 2887 break; 2888 } 2889 case CTL_LUN_LIST: { 2890 struct sbuf *sb; 2891 struct ctl_lun_list *list; 2892 struct ctl_option *opt; 2893 2894 list = (struct ctl_lun_list *)addr; 2895 2896 /* 2897 * Allocate a fixed length sbuf here, based on the length 2898 * of the user's buffer. We could allocate an auto-extending 2899 * buffer, and then tell the user how much larger our 2900 * amount of data is than his buffer, but that presents 2901 * some problems: 2902 * 2903 * 1. The sbuf(9) routines use a blocking malloc, and so 2904 * we can't hold a lock while calling them with an 2905 * auto-extending buffer. 2906 * 2907 * 2. There is not currently a LUN reference counting 2908 * mechanism, outside of outstanding transactions on 2909 * the LUN's OOA queue. So a LUN could go away on us 2910 * while we're getting the LUN number, backend-specific 2911 * information, etc. Thus, given the way things 2912 * currently work, we need to hold the CTL lock while 2913 * grabbing LUN information. 2914 * 2915 * So, from the user's standpoint, the best thing to do is 2916 * allocate what he thinks is a reasonable buffer length, 2917 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 2918 * double the buffer length and try again. (And repeat 2919 * that until he succeeds.) 
2920 */ 2921 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 2922 if (sb == NULL) { 2923 list->status = CTL_LUN_LIST_ERROR; 2924 snprintf(list->error_str, sizeof(list->error_str), 2925 "Unable to allocate %d bytes for LUN list", 2926 list->alloc_len); 2927 break; 2928 } 2929 2930 sbuf_printf(sb, "<ctllunlist>\n"); 2931 2932 mtx_lock(&softc->ctl_lock); 2933 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2934 mtx_lock(&lun->lun_lock); 2935 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 2936 (uintmax_t)lun->lun); 2937 2938 /* 2939 * Bail out as soon as we see that we've overfilled 2940 * the buffer. 2941 */ 2942 if (retval != 0) 2943 break; 2944 2945 retval = sbuf_printf(sb, "\t<backend_type>%s" 2946 "</backend_type>\n", 2947 (lun->backend == NULL) ? "none" : 2948 lun->backend->name); 2949 2950 if (retval != 0) 2951 break; 2952 2953 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 2954 lun->be_lun->lun_type); 2955 2956 if (retval != 0) 2957 break; 2958 2959 if (lun->backend == NULL) { 2960 retval = sbuf_printf(sb, "</lun>\n"); 2961 if (retval != 0) 2962 break; 2963 continue; 2964 } 2965 2966 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 2967 (lun->be_lun->maxlba > 0) ? 
2968 lun->be_lun->maxlba + 1 : 0); 2969 2970 if (retval != 0) 2971 break; 2972 2973 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 2974 lun->be_lun->blocksize); 2975 2976 if (retval != 0) 2977 break; 2978 2979 retval = sbuf_printf(sb, "\t<serial_number>"); 2980 2981 if (retval != 0) 2982 break; 2983 2984 retval = ctl_sbuf_printf_esc(sb, 2985 lun->be_lun->serial_num, 2986 sizeof(lun->be_lun->serial_num)); 2987 2988 if (retval != 0) 2989 break; 2990 2991 retval = sbuf_printf(sb, "</serial_number>\n"); 2992 2993 if (retval != 0) 2994 break; 2995 2996 retval = sbuf_printf(sb, "\t<device_id>"); 2997 2998 if (retval != 0) 2999 break; 3000 3001 retval = ctl_sbuf_printf_esc(sb, 3002 lun->be_lun->device_id, 3003 sizeof(lun->be_lun->device_id)); 3004 3005 if (retval != 0) 3006 break; 3007 3008 retval = sbuf_printf(sb, "</device_id>\n"); 3009 3010 if (retval != 0) 3011 break; 3012 3013 if (lun->backend->lun_info != NULL) { 3014 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb); 3015 if (retval != 0) 3016 break; 3017 } 3018 STAILQ_FOREACH(opt, &lun->be_lun->options, links) { 3019 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3020 opt->name, opt->value, opt->name); 3021 if (retval != 0) 3022 break; 3023 } 3024 3025 retval = sbuf_printf(sb, "</lun>\n"); 3026 3027 if (retval != 0) 3028 break; 3029 mtx_unlock(&lun->lun_lock); 3030 } 3031 if (lun != NULL) 3032 mtx_unlock(&lun->lun_lock); 3033 mtx_unlock(&softc->ctl_lock); 3034 3035 if ((retval != 0) 3036 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 3037 retval = 0; 3038 sbuf_delete(sb); 3039 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3040 snprintf(list->error_str, sizeof(list->error_str), 3041 "Out of space, %d bytes is too small", 3042 list->alloc_len); 3043 break; 3044 } 3045 3046 sbuf_finish(sb); 3047 3048 retval = copyout(sbuf_data(sb), list->lun_xml, 3049 sbuf_len(sb) + 1); 3050 3051 list->fill_len = sbuf_len(sb) + 1; 3052 list->status = CTL_LUN_LIST_OK; 3053 sbuf_delete(sb); 3054 break; 3055 } 
3056 case CTL_ISCSI: { 3057 struct ctl_iscsi *ci; 3058 struct ctl_frontend *fe; 3059 3060 ci = (struct ctl_iscsi *)addr; 3061 3062 fe = ctl_frontend_find("iscsi"); 3063 if (fe == NULL) { 3064 ci->status = CTL_ISCSI_ERROR; 3065 snprintf(ci->error_str, sizeof(ci->error_str), 3066 "Frontend \"iscsi\" not found."); 3067 break; 3068 } 3069 3070 retval = fe->ioctl(dev, cmd, addr, flag, td); 3071 break; 3072 } 3073 case CTL_PORT_REQ: { 3074 struct ctl_req *req; 3075 struct ctl_frontend *fe; 3076 3077 req = (struct ctl_req *)addr; 3078 3079 fe = ctl_frontend_find(req->driver); 3080 if (fe == NULL) { 3081 req->status = CTL_LUN_ERROR; 3082 snprintf(req->error_str, sizeof(req->error_str), 3083 "Frontend \"%s\" not found.", req->driver); 3084 break; 3085 } 3086 if (req->num_args > 0) { 3087 req->kern_args = ctl_copyin_args(req->num_args, 3088 req->args, req->error_str, sizeof(req->error_str)); 3089 if (req->kern_args == NULL) { 3090 req->status = CTL_LUN_ERROR; 3091 break; 3092 } 3093 } 3094 3095 if (fe->ioctl) 3096 retval = fe->ioctl(dev, cmd, addr, flag, td); 3097 else 3098 retval = ENODEV; 3099 3100 if (req->num_args > 0) { 3101 ctl_copyout_args(req->num_args, req->kern_args); 3102 ctl_free_args(req->num_args, req->kern_args); 3103 } 3104 break; 3105 } 3106 case CTL_PORT_LIST: { 3107 struct sbuf *sb; 3108 struct ctl_port *port; 3109 struct ctl_lun_list *list; 3110 struct ctl_option *opt; 3111 int j; 3112 uint32_t plun; 3113 3114 list = (struct ctl_lun_list *)addr; 3115 3116 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3117 if (sb == NULL) { 3118 list->status = CTL_LUN_LIST_ERROR; 3119 snprintf(list->error_str, sizeof(list->error_str), 3120 "Unable to allocate %d bytes for LUN list", 3121 list->alloc_len); 3122 break; 3123 } 3124 3125 sbuf_printf(sb, "<ctlportlist>\n"); 3126 3127 mtx_lock(&softc->ctl_lock); 3128 STAILQ_FOREACH(port, &softc->port_list, links) { 3129 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n", 3130 (uintmax_t)port->targ_port); 3131 3132 
/* 3133 * Bail out as soon as we see that we've overfilled 3134 * the buffer. 3135 */ 3136 if (retval != 0) 3137 break; 3138 3139 retval = sbuf_printf(sb, "\t<frontend_type>%s" 3140 "</frontend_type>\n", port->frontend->name); 3141 if (retval != 0) 3142 break; 3143 3144 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n", 3145 port->port_type); 3146 if (retval != 0) 3147 break; 3148 3149 retval = sbuf_printf(sb, "\t<online>%s</online>\n", 3150 (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO"); 3151 if (retval != 0) 3152 break; 3153 3154 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n", 3155 port->port_name); 3156 if (retval != 0) 3157 break; 3158 3159 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n", 3160 port->physical_port); 3161 if (retval != 0) 3162 break; 3163 3164 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n", 3165 port->virtual_port); 3166 if (retval != 0) 3167 break; 3168 3169 if (port->target_devid != NULL) { 3170 sbuf_printf(sb, "\t<target>"); 3171 ctl_id_sbuf(port->target_devid, sb); 3172 sbuf_printf(sb, "</target>\n"); 3173 } 3174 3175 if (port->port_devid != NULL) { 3176 sbuf_printf(sb, "\t<port>"); 3177 ctl_id_sbuf(port->port_devid, sb); 3178 sbuf_printf(sb, "</port>\n"); 3179 } 3180 3181 if (port->port_info != NULL) { 3182 retval = port->port_info(port->onoff_arg, sb); 3183 if (retval != 0) 3184 break; 3185 } 3186 STAILQ_FOREACH(opt, &port->options, links) { 3187 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3188 opt->name, opt->value, opt->name); 3189 if (retval != 0) 3190 break; 3191 } 3192 3193 if (port->lun_map != NULL) { 3194 sbuf_printf(sb, "\t<lun_map>on</lun_map>\n"); 3195 for (j = 0; j < CTL_MAX_LUNS; j++) { 3196 plun = ctl_lun_map_from_port(port, j); 3197 if (plun >= CTL_MAX_LUNS) 3198 continue; 3199 sbuf_printf(sb, 3200 "\t<lun id=\"%u\">%u</lun>\n", 3201 j, plun); 3202 } 3203 } 3204 3205 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3206 if (port->wwpn_iid[j].in_use == 0 || 3207 
(port->wwpn_iid[j].wwpn == 0 && 3208 port->wwpn_iid[j].name == NULL)) 3209 continue; 3210 3211 if (port->wwpn_iid[j].name != NULL) 3212 retval = sbuf_printf(sb, 3213 "\t<initiator id=\"%u\">%s</initiator>\n", 3214 j, port->wwpn_iid[j].name); 3215 else 3216 retval = sbuf_printf(sb, 3217 "\t<initiator id=\"%u\">naa.%08jx</initiator>\n", 3218 j, port->wwpn_iid[j].wwpn); 3219 if (retval != 0) 3220 break; 3221 } 3222 if (retval != 0) 3223 break; 3224 3225 retval = sbuf_printf(sb, "</targ_port>\n"); 3226 if (retval != 0) 3227 break; 3228 } 3229 mtx_unlock(&softc->ctl_lock); 3230 3231 if ((retval != 0) 3232 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) { 3233 retval = 0; 3234 sbuf_delete(sb); 3235 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3236 snprintf(list->error_str, sizeof(list->error_str), 3237 "Out of space, %d bytes is too small", 3238 list->alloc_len); 3239 break; 3240 } 3241 3242 sbuf_finish(sb); 3243 3244 retval = copyout(sbuf_data(sb), list->lun_xml, 3245 sbuf_len(sb) + 1); 3246 3247 list->fill_len = sbuf_len(sb) + 1; 3248 list->status = CTL_LUN_LIST_OK; 3249 sbuf_delete(sb); 3250 break; 3251 } 3252 case CTL_LUN_MAP: { 3253 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; 3254 struct ctl_port *port; 3255 3256 mtx_lock(&softc->ctl_lock); 3257 if (lm->port < softc->port_min || 3258 lm->port >= softc->port_max || 3259 (port = softc->ctl_ports[lm->port]) == NULL) { 3260 mtx_unlock(&softc->ctl_lock); 3261 return (ENXIO); 3262 } 3263 if (port->status & CTL_PORT_STATUS_ONLINE) { 3264 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3265 if (ctl_lun_map_to_port(port, lun->lun) >= 3266 CTL_MAX_LUNS) 3267 continue; 3268 mtx_lock(&lun->lun_lock); 3269 ctl_est_ua_port(lun, lm->port, -1, 3270 CTL_UA_LUN_CHANGE); 3271 mtx_unlock(&lun->lun_lock); 3272 } 3273 } 3274 mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps 3275 if (lm->plun < CTL_MAX_LUNS) { 3276 if (lm->lun == UINT32_MAX) 3277 retval = ctl_lun_map_unset(port, lm->plun); 3278 else if (lm->lun < 
CTL_MAX_LUNS && 3279 softc->ctl_luns[lm->lun] != NULL) 3280 retval = ctl_lun_map_set(port, lm->plun, lm->lun); 3281 else 3282 return (ENXIO); 3283 } else if (lm->plun == UINT32_MAX) { 3284 if (lm->lun == UINT32_MAX) 3285 retval = ctl_lun_map_deinit(port); 3286 else 3287 retval = ctl_lun_map_init(port); 3288 } else 3289 return (ENXIO); 3290 if (port->status & CTL_PORT_STATUS_ONLINE) 3291 ctl_isc_announce_port(port); 3292 break; 3293 } 3294 default: { 3295 /* XXX KDM should we fix this? */ 3296#if 0 3297 struct ctl_backend_driver *backend; 3298 unsigned int type; 3299 int found; 3300 3301 found = 0; 3302 3303 /* 3304 * We encode the backend type as the ioctl type for backend 3305 * ioctls. So parse it out here, and then search for a 3306 * backend of this type. 3307 */ 3308 type = _IOC_TYPE(cmd); 3309 3310 STAILQ_FOREACH(backend, &softc->be_list, links) { 3311 if (backend->type == type) { 3312 found = 1; 3313 break; 3314 } 3315 } 3316 if (found == 0) { 3317 printf("ctl: unknown ioctl command %#lx or backend " 3318 "%d\n", cmd, type); 3319 retval = EINVAL; 3320 break; 3321 } 3322 retval = backend->ioctl(dev, cmd, addr, flag, td); 3323#endif 3324 retval = ENOTTY; 3325 break; 3326 } 3327 } 3328 return (retval); 3329} 3330 3331uint32_t 3332ctl_get_initindex(struct ctl_nexus *nexus) 3333{ 3334 return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3335} 3336 3337int 3338ctl_lun_map_init(struct ctl_port *port) 3339{ 3340 struct ctl_softc *softc = control_softc; 3341 struct ctl_lun *lun; 3342 uint32_t i; 3343 3344 if (port->lun_map == NULL) 3345 port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS, 3346 M_CTL, M_NOWAIT); 3347 if (port->lun_map == NULL) 3348 return (ENOMEM); 3349 for (i = 0; i < CTL_MAX_LUNS; i++) 3350 port->lun_map[i] = UINT32_MAX; 3351 if (port->status & CTL_PORT_STATUS_ONLINE) { 3352 if (port->lun_disable != NULL) { 3353 STAILQ_FOREACH(lun, &softc->lun_list, links) 3354 port->lun_disable(port->targ_lun_arg, lun->lun); 3355 } 3356 
ctl_isc_announce_port(port); 3357 } 3358 return (0); 3359} 3360 3361int 3362ctl_lun_map_deinit(struct ctl_port *port) 3363{ 3364 struct ctl_softc *softc = control_softc; 3365 struct ctl_lun *lun; 3366 3367 if (port->lun_map == NULL) 3368 return (0); 3369 free(port->lun_map, M_CTL); 3370 port->lun_map = NULL; 3371 if (port->status & CTL_PORT_STATUS_ONLINE) { 3372 if (port->lun_enable != NULL) { 3373 STAILQ_FOREACH(lun, &softc->lun_list, links) 3374 port->lun_enable(port->targ_lun_arg, lun->lun); 3375 } 3376 ctl_isc_announce_port(port); 3377 } 3378 return (0); 3379} 3380 3381int 3382ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) 3383{ 3384 int status; 3385 uint32_t old; 3386 3387 if (port->lun_map == NULL) { 3388 status = ctl_lun_map_init(port); 3389 if (status != 0) 3390 return (status); 3391 } 3392 old = port->lun_map[plun]; 3393 port->lun_map[plun] = glun; 3394 if ((port->status & CTL_PORT_STATUS_ONLINE) && old >= CTL_MAX_LUNS) { 3395 if (port->lun_enable != NULL) 3396 port->lun_enable(port->targ_lun_arg, plun); 3397 ctl_isc_announce_port(port); 3398 } 3399 return (0); 3400} 3401 3402int 3403ctl_lun_map_unset(struct ctl_port *port, uint32_t plun) 3404{ 3405 uint32_t old; 3406 3407 if (port->lun_map == NULL) 3408 return (0); 3409 old = port->lun_map[plun]; 3410 port->lun_map[plun] = UINT32_MAX; 3411 if ((port->status & CTL_PORT_STATUS_ONLINE) && old < CTL_MAX_LUNS) { 3412 if (port->lun_disable != NULL) 3413 port->lun_disable(port->targ_lun_arg, plun); 3414 ctl_isc_announce_port(port); 3415 } 3416 return (0); 3417} 3418 3419uint32_t 3420ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id) 3421{ 3422 3423 if (port == NULL) 3424 return (UINT32_MAX); 3425 if (port->lun_map == NULL || lun_id >= CTL_MAX_LUNS) 3426 return (lun_id); 3427 return (port->lun_map[lun_id]); 3428} 3429 3430uint32_t 3431ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id) 3432{ 3433 uint32_t i; 3434 3435 if (port == NULL) 3436 return (UINT32_MAX); 3437 if 
(port->lun_map == NULL) 3438 return (lun_id); 3439 for (i = 0; i < CTL_MAX_LUNS; i++) { 3440 if (port->lun_map[i] == lun_id) 3441 return (i); 3442 } 3443 return (UINT32_MAX); 3444} 3445 3446static struct ctl_port * 3447ctl_io_port(struct ctl_io_hdr *io_hdr) 3448{ 3449 3450 return (control_softc->ctl_ports[io_hdr->nexus.targ_port]); 3451} 3452 3453int 3454ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last) 3455{ 3456 int i; 3457 3458 for (i = first; i < last; i++) { 3459 if ((mask[i / 32] & (1 << (i % 32))) == 0) 3460 return (i); 3461 } 3462 return (-1); 3463} 3464 3465int 3466ctl_set_mask(uint32_t *mask, uint32_t bit) 3467{ 3468 uint32_t chunk, piece; 3469 3470 chunk = bit >> 5; 3471 piece = bit % (sizeof(uint32_t) * 8); 3472 3473 if ((mask[chunk] & (1 << piece)) != 0) 3474 return (-1); 3475 else 3476 mask[chunk] |= (1 << piece); 3477 3478 return (0); 3479} 3480 3481int 3482ctl_clear_mask(uint32_t *mask, uint32_t bit) 3483{ 3484 uint32_t chunk, piece; 3485 3486 chunk = bit >> 5; 3487 piece = bit % (sizeof(uint32_t) * 8); 3488 3489 if ((mask[chunk] & (1 << piece)) == 0) 3490 return (-1); 3491 else 3492 mask[chunk] &= ~(1 << piece); 3493 3494 return (0); 3495} 3496 3497int 3498ctl_is_set(uint32_t *mask, uint32_t bit) 3499{ 3500 uint32_t chunk, piece; 3501 3502 chunk = bit >> 5; 3503 piece = bit % (sizeof(uint32_t) * 8); 3504 3505 if ((mask[chunk] & (1 << piece)) == 0) 3506 return (0); 3507 else 3508 return (1); 3509} 3510 3511static uint64_t 3512ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) 3513{ 3514 uint64_t *t; 3515 3516 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3517 if (t == NULL) 3518 return (0); 3519 return (t[residx % CTL_MAX_INIT_PER_PORT]); 3520} 3521 3522static void 3523ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) 3524{ 3525 uint64_t *t; 3526 3527 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3528 if (t == NULL) 3529 return; 3530 t[residx % CTL_MAX_INIT_PER_PORT] = 0; 3531} 3532 3533static void 3534ctl_alloc_prkey(struct ctl_lun 
*lun, uint32_t residx) 3535{ 3536 uint64_t *p; 3537 u_int i; 3538 3539 i = residx/CTL_MAX_INIT_PER_PORT; 3540 if (lun->pr_keys[i] != NULL) 3541 return; 3542 mtx_unlock(&lun->lun_lock); 3543 p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL, 3544 M_WAITOK | M_ZERO); 3545 mtx_lock(&lun->lun_lock); 3546 if (lun->pr_keys[i] == NULL) 3547 lun->pr_keys[i] = p; 3548 else 3549 free(p, M_CTL); 3550} 3551 3552static void 3553ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key) 3554{ 3555 uint64_t *t; 3556 3557 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3558 KASSERT(t != NULL, ("prkey %d is not allocated", residx)); 3559 t[residx % CTL_MAX_INIT_PER_PORT] = key; 3560} 3561 3562/* 3563 * ctl_softc, pool_name, total_ctl_io are passed in. 3564 * npool is passed out. 3565 */ 3566int 3567ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, 3568 uint32_t total_ctl_io, void **npool) 3569{ 3570#ifdef IO_POOLS 3571 struct ctl_io_pool *pool; 3572 3573 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3574 M_NOWAIT | M_ZERO); 3575 if (pool == NULL) 3576 return (ENOMEM); 3577 3578 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); 3579 pool->ctl_softc = ctl_softc; 3580 pool->zone = uma_zsecond_create(pool->name, NULL, 3581 NULL, NULL, NULL, ctl_softc->io_zone); 3582 /* uma_prealloc(pool->zone, total_ctl_io); */ 3583 3584 *npool = pool; 3585#else 3586 *npool = ctl_softc->io_zone; 3587#endif 3588 return (0); 3589} 3590 3591void 3592ctl_pool_free(struct ctl_io_pool *pool) 3593{ 3594 3595 if (pool == NULL) 3596 return; 3597 3598#ifdef IO_POOLS 3599 uma_zdestroy(pool->zone); 3600 free(pool, M_CTL); 3601#endif 3602} 3603 3604union ctl_io * 3605ctl_alloc_io(void *pool_ref) 3606{ 3607 union ctl_io *io; 3608#ifdef IO_POOLS 3609 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3610 3611 io = uma_zalloc(pool->zone, M_WAITOK); 3612#else 3613 io = uma_zalloc((uma_zone_t)pool_ref, M_WAITOK); 3614#endif 3615 if (io != NULL) 3616 
io->io_hdr.pool = pool_ref; 3617 return (io); 3618} 3619 3620union ctl_io * 3621ctl_alloc_io_nowait(void *pool_ref) 3622{ 3623 union ctl_io *io; 3624#ifdef IO_POOLS 3625 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3626 3627 io = uma_zalloc(pool->zone, M_NOWAIT); 3628#else 3629 io = uma_zalloc((uma_zone_t)pool_ref, M_NOWAIT); 3630#endif 3631 if (io != NULL) 3632 io->io_hdr.pool = pool_ref; 3633 return (io); 3634} 3635 3636void 3637ctl_free_io(union ctl_io *io) 3638{ 3639#ifdef IO_POOLS 3640 struct ctl_io_pool *pool; 3641#endif 3642 3643 if (io == NULL) 3644 return; 3645 3646#ifdef IO_POOLS 3647 pool = (struct ctl_io_pool *)io->io_hdr.pool; 3648 uma_zfree(pool->zone, io); 3649#else 3650 uma_zfree((uma_zone_t)io->io_hdr.pool, io); 3651#endif 3652} 3653 3654void 3655ctl_zero_io(union ctl_io *io) 3656{ 3657 void *pool_ref; 3658 3659 if (io == NULL) 3660 return; 3661 3662 /* 3663 * May need to preserve linked list pointers at some point too. 3664 */ 3665 pool_ref = io->io_hdr.pool; 3666 memset(io, 0, sizeof(*io)); 3667 io->io_hdr.pool = pool_ref; 3668} 3669 3670/* 3671 * This routine is currently used for internal copies of ctl_ios that need 3672 * to persist for some reason after we've already returned status to the 3673 * FETD. (Thus the flag set.) 3674 * 3675 * XXX XXX 3676 * Note that this makes a blind copy of all fields in the ctl_io, except 3677 * for the pool reference. This includes any memory that has been 3678 * allocated! That memory will no longer be valid after done has been 3679 * called, so this would be VERY DANGEROUS for command that actually does 3680 * any reads or writes. Right now (11/7/2005), this is only used for immediate 3681 * start and stop commands, which don't transfer any data, so this is not a 3682 * problem. If it is used for anything else, the caller would also need to 3683 * allocate data buffer space and this routine would need to be modified to 3684 * copy the data buffer(s) as well. 
3685 */ 3686void 3687ctl_copy_io(union ctl_io *src, union ctl_io *dest) 3688{ 3689 void *pool_ref; 3690 3691 if ((src == NULL) 3692 || (dest == NULL)) 3693 return; 3694 3695 /* 3696 * May need to preserve linked list pointers at some point too. 3697 */ 3698 pool_ref = dest->io_hdr.pool; 3699 3700 memcpy(dest, src, MIN(sizeof(*src), sizeof(*dest))); 3701 3702 dest->io_hdr.pool = pool_ref; 3703 /* 3704 * We need to know that this is an internal copy, and doesn't need 3705 * to get passed back to the FETD that allocated it. 3706 */ 3707 dest->io_hdr.flags |= CTL_FLAG_INT_COPY; 3708} 3709 3710int 3711ctl_expand_number(const char *buf, uint64_t *num) 3712{ 3713 char *endptr; 3714 uint64_t number; 3715 unsigned shift; 3716 3717 number = strtoq(buf, &endptr, 0); 3718 3719 switch (tolower((unsigned char)*endptr)) { 3720 case 'e': 3721 shift = 60; 3722 break; 3723 case 'p': 3724 shift = 50; 3725 break; 3726 case 't': 3727 shift = 40; 3728 break; 3729 case 'g': 3730 shift = 30; 3731 break; 3732 case 'm': 3733 shift = 20; 3734 break; 3735 case 'k': 3736 shift = 10; 3737 break; 3738 case 'b': 3739 case '\0': /* No unit. */ 3740 *num = number; 3741 return (0); 3742 default: 3743 /* Unrecognized unit. */ 3744 return (-1); 3745 } 3746 3747 if ((number << shift) >> shift != number) { 3748 /* Overflow */ 3749 return (-1); 3750 } 3751 *num = number << shift; 3752 return (0); 3753} 3754 3755 3756/* 3757 * This routine could be used in the future to load default and/or saved 3758 * mode page parameters for a particuar lun. 3759 */ 3760static int 3761ctl_init_page_index(struct ctl_lun *lun) 3762{ 3763 int i; 3764 struct ctl_page_index *page_index; 3765 const char *value; 3766 uint64_t ival; 3767 3768 memcpy(&lun->mode_pages.index, page_index_template, 3769 sizeof(page_index_template)); 3770 3771 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 3772 3773 page_index = &lun->mode_pages.index[i]; 3774 /* 3775 * If this is a disk-only mode page, there's no point in 3776 * setting it up. 
For some pages, we have to have some 3777 * basic information about the disk in order to calculate the 3778 * mode page data. 3779 */ 3780 if ((lun->be_lun->lun_type != T_DIRECT) 3781 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) 3782 continue; 3783 3784 switch (page_index->page_code & SMPH_PC_MASK) { 3785 case SMS_RW_ERROR_RECOVERY_PAGE: { 3786 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3787 panic("subpage is incorrect!"); 3788 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT], 3789 &rw_er_page_default, 3790 sizeof(rw_er_page_default)); 3791 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE], 3792 &rw_er_page_changeable, 3793 sizeof(rw_er_page_changeable)); 3794 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT], 3795 &rw_er_page_default, 3796 sizeof(rw_er_page_default)); 3797 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED], 3798 &rw_er_page_default, 3799 sizeof(rw_er_page_default)); 3800 page_index->page_data = 3801 (uint8_t *)lun->mode_pages.rw_er_page; 3802 break; 3803 } 3804 case SMS_FORMAT_DEVICE_PAGE: { 3805 struct scsi_format_page *format_page; 3806 3807 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3808 panic("subpage is incorrect!"); 3809 3810 /* 3811 * Sectors per track are set above. Bytes per 3812 * sector need to be set here on a per-LUN basis. 
3813 */ 3814 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], 3815 &format_page_default, 3816 sizeof(format_page_default)); 3817 memcpy(&lun->mode_pages.format_page[ 3818 CTL_PAGE_CHANGEABLE], &format_page_changeable, 3819 sizeof(format_page_changeable)); 3820 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], 3821 &format_page_default, 3822 sizeof(format_page_default)); 3823 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], 3824 &format_page_default, 3825 sizeof(format_page_default)); 3826 3827 format_page = &lun->mode_pages.format_page[ 3828 CTL_PAGE_CURRENT]; 3829 scsi_ulto2b(lun->be_lun->blocksize, 3830 format_page->bytes_per_sector); 3831 3832 format_page = &lun->mode_pages.format_page[ 3833 CTL_PAGE_DEFAULT]; 3834 scsi_ulto2b(lun->be_lun->blocksize, 3835 format_page->bytes_per_sector); 3836 3837 format_page = &lun->mode_pages.format_page[ 3838 CTL_PAGE_SAVED]; 3839 scsi_ulto2b(lun->be_lun->blocksize, 3840 format_page->bytes_per_sector); 3841 3842 page_index->page_data = 3843 (uint8_t *)lun->mode_pages.format_page; 3844 break; 3845 } 3846 case SMS_RIGID_DISK_PAGE: { 3847 struct scsi_rigid_disk_page *rigid_disk_page; 3848 uint32_t sectors_per_cylinder; 3849 uint64_t cylinders; 3850#ifndef __XSCALE__ 3851 int shift; 3852#endif /* !__XSCALE__ */ 3853 3854 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3855 panic("invalid subpage value %d", 3856 page_index->subpage); 3857 3858 /* 3859 * Rotation rate and sectors per track are set 3860 * above. We calculate the cylinders here based on 3861 * capacity. Due to the number of heads and 3862 * sectors per track we're using, smaller arrays 3863 * may turn out to have 0 cylinders. Linux and 3864 * FreeBSD don't pay attention to these mode pages 3865 * to figure out capacity, but Solaris does. It 3866 * seems to deal with 0 cylinders just fine, and 3867 * works out a fake geometry based on the capacity. 
3868 */ 3869 memcpy(&lun->mode_pages.rigid_disk_page[ 3870 CTL_PAGE_DEFAULT], &rigid_disk_page_default, 3871 sizeof(rigid_disk_page_default)); 3872 memcpy(&lun->mode_pages.rigid_disk_page[ 3873 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, 3874 sizeof(rigid_disk_page_changeable)); 3875 3876 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * 3877 CTL_DEFAULT_HEADS; 3878 3879 /* 3880 * The divide method here will be more accurate, 3881 * probably, but results in floating point being 3882 * used in the kernel on i386 (__udivdi3()). On the 3883 * XScale, though, __udivdi3() is implemented in 3884 * software. 3885 * 3886 * The shift method for cylinder calculation is 3887 * accurate if sectors_per_cylinder is a power of 3888 * 2. Otherwise it might be slightly off -- you 3889 * might have a bit of a truncation problem. 3890 */ 3891#ifdef __XSCALE__ 3892 cylinders = (lun->be_lun->maxlba + 1) / 3893 sectors_per_cylinder; 3894#else 3895 for (shift = 31; shift > 0; shift--) { 3896 if (sectors_per_cylinder & (1 << shift)) 3897 break; 3898 } 3899 cylinders = (lun->be_lun->maxlba + 1) >> shift; 3900#endif 3901 3902 /* 3903 * We've basically got 3 bytes, or 24 bits for the 3904 * cylinder size in the mode page. If we're over, 3905 * just round down to 2^24. 
3906 */ 3907 if (cylinders > 0xffffff) 3908 cylinders = 0xffffff; 3909 3910 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 3911 CTL_PAGE_DEFAULT]; 3912 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 3913 3914 if ((value = ctl_get_opt(&lun->be_lun->options, 3915 "rpm")) != NULL) { 3916 scsi_ulto2b(strtol(value, NULL, 0), 3917 rigid_disk_page->rotation_rate); 3918 } 3919 3920 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT], 3921 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 3922 sizeof(rigid_disk_page_default)); 3923 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED], 3924 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 3925 sizeof(rigid_disk_page_default)); 3926 3927 page_index->page_data = 3928 (uint8_t *)lun->mode_pages.rigid_disk_page; 3929 break; 3930 } 3931 case SMS_CACHING_PAGE: { 3932 struct scsi_caching_page *caching_page; 3933 3934 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3935 panic("invalid subpage value %d", 3936 page_index->subpage); 3937 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], 3938 &caching_page_default, 3939 sizeof(caching_page_default)); 3940 memcpy(&lun->mode_pages.caching_page[ 3941 CTL_PAGE_CHANGEABLE], &caching_page_changeable, 3942 sizeof(caching_page_changeable)); 3943 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], 3944 &caching_page_default, 3945 sizeof(caching_page_default)); 3946 caching_page = &lun->mode_pages.caching_page[ 3947 CTL_PAGE_SAVED]; 3948 value = ctl_get_opt(&lun->be_lun->options, "writecache"); 3949 if (value != NULL && strcmp(value, "off") == 0) 3950 caching_page->flags1 &= ~SCP_WCE; 3951 value = ctl_get_opt(&lun->be_lun->options, "readcache"); 3952 if (value != NULL && strcmp(value, "off") == 0) 3953 caching_page->flags1 |= SCP_RCD; 3954 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], 3955 &lun->mode_pages.caching_page[CTL_PAGE_SAVED], 3956 sizeof(caching_page_default)); 3957 page_index->page_data = 3958 (uint8_t *)lun->mode_pages.caching_page; 3959 break; 
3960 } 3961 case SMS_CONTROL_MODE_PAGE: { 3962 struct scsi_control_page *control_page; 3963 3964 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3965 panic("invalid subpage value %d", 3966 page_index->subpage); 3967 3968 memcpy(&lun->mode_pages.control_page[CTL_PAGE_DEFAULT], 3969 &control_page_default, 3970 sizeof(control_page_default)); 3971 memcpy(&lun->mode_pages.control_page[ 3972 CTL_PAGE_CHANGEABLE], &control_page_changeable, 3973 sizeof(control_page_changeable)); 3974 memcpy(&lun->mode_pages.control_page[CTL_PAGE_SAVED], 3975 &control_page_default, 3976 sizeof(control_page_default)); 3977 control_page = &lun->mode_pages.control_page[ 3978 CTL_PAGE_SAVED]; 3979 value = ctl_get_opt(&lun->be_lun->options, "reordering"); 3980 if (value != NULL && strcmp(value, "unrestricted") == 0) { 3981 control_page->queue_flags &= ~SCP_QUEUE_ALG_MASK; 3982 control_page->queue_flags |= SCP_QUEUE_ALG_UNRESTRICTED; 3983 } 3984 memcpy(&lun->mode_pages.control_page[CTL_PAGE_CURRENT], 3985 &lun->mode_pages.control_page[CTL_PAGE_SAVED], 3986 sizeof(control_page_default)); 3987 page_index->page_data = 3988 (uint8_t *)lun->mode_pages.control_page; 3989 break; 3990 3991 } 3992 case SMS_INFO_EXCEPTIONS_PAGE: { 3993 switch (page_index->subpage) { 3994 case SMS_SUBPAGE_PAGE_0: 3995 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], 3996 &ie_page_default, 3997 sizeof(ie_page_default)); 3998 memcpy(&lun->mode_pages.ie_page[ 3999 CTL_PAGE_CHANGEABLE], &ie_page_changeable, 4000 sizeof(ie_page_changeable)); 4001 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], 4002 &ie_page_default, 4003 sizeof(ie_page_default)); 4004 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], 4005 &ie_page_default, 4006 sizeof(ie_page_default)); 4007 page_index->page_data = 4008 (uint8_t *)lun->mode_pages.ie_page; 4009 break; 4010 case 0x02: { 4011 struct ctl_logical_block_provisioning_page *page; 4012 4013 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], 4014 &lbp_page_default, 4015 sizeof(lbp_page_default)); 4016 
memcpy(&lun->mode_pages.lbp_page[ 4017 CTL_PAGE_CHANGEABLE], &lbp_page_changeable, 4018 sizeof(lbp_page_changeable)); 4019 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4020 &lbp_page_default, 4021 sizeof(lbp_page_default)); 4022 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; 4023 value = ctl_get_opt(&lun->be_lun->options, 4024 "avail-threshold"); 4025 if (value != NULL && 4026 ctl_expand_number(value, &ival) == 0) { 4027 page->descr[0].flags |= SLBPPD_ENABLED | 4028 SLBPPD_ARMING_DEC; 4029 if (lun->be_lun->blocksize) 4030 ival /= lun->be_lun->blocksize; 4031 else 4032 ival /= 512; 4033 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4034 page->descr[0].count); 4035 } 4036 value = ctl_get_opt(&lun->be_lun->options, 4037 "used-threshold"); 4038 if (value != NULL && 4039 ctl_expand_number(value, &ival) == 0) { 4040 page->descr[1].flags |= SLBPPD_ENABLED | 4041 SLBPPD_ARMING_INC; 4042 if (lun->be_lun->blocksize) 4043 ival /= lun->be_lun->blocksize; 4044 else 4045 ival /= 512; 4046 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4047 page->descr[1].count); 4048 } 4049 value = ctl_get_opt(&lun->be_lun->options, 4050 "pool-avail-threshold"); 4051 if (value != NULL && 4052 ctl_expand_number(value, &ival) == 0) { 4053 page->descr[2].flags |= SLBPPD_ENABLED | 4054 SLBPPD_ARMING_DEC; 4055 if (lun->be_lun->blocksize) 4056 ival /= lun->be_lun->blocksize; 4057 else 4058 ival /= 512; 4059 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4060 page->descr[2].count); 4061 } 4062 value = ctl_get_opt(&lun->be_lun->options, 4063 "pool-used-threshold"); 4064 if (value != NULL && 4065 ctl_expand_number(value, &ival) == 0) { 4066 page->descr[3].flags |= SLBPPD_ENABLED | 4067 SLBPPD_ARMING_INC; 4068 if (lun->be_lun->blocksize) 4069 ival /= lun->be_lun->blocksize; 4070 else 4071 ival /= 512; 4072 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 4073 page->descr[3].count); 4074 } 4075 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], 4076 &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 4077 sizeof(lbp_page_default)); 4078 
page_index->page_data = 4079 (uint8_t *)lun->mode_pages.lbp_page; 4080 }} 4081 break; 4082 } 4083 case SMS_VENDOR_SPECIFIC_PAGE:{ 4084 switch (page_index->subpage) { 4085 case DBGCNF_SUBPAGE_CODE: { 4086 struct copan_debugconf_subpage *current_page, 4087 *saved_page; 4088 4089 memcpy(&lun->mode_pages.debugconf_subpage[ 4090 CTL_PAGE_CURRENT], 4091 &debugconf_page_default, 4092 sizeof(debugconf_page_default)); 4093 memcpy(&lun->mode_pages.debugconf_subpage[ 4094 CTL_PAGE_CHANGEABLE], 4095 &debugconf_page_changeable, 4096 sizeof(debugconf_page_changeable)); 4097 memcpy(&lun->mode_pages.debugconf_subpage[ 4098 CTL_PAGE_DEFAULT], 4099 &debugconf_page_default, 4100 sizeof(debugconf_page_default)); 4101 memcpy(&lun->mode_pages.debugconf_subpage[ 4102 CTL_PAGE_SAVED], 4103 &debugconf_page_default, 4104 sizeof(debugconf_page_default)); 4105 page_index->page_data = 4106 (uint8_t *)lun->mode_pages.debugconf_subpage; 4107 4108 current_page = (struct copan_debugconf_subpage *) 4109 (page_index->page_data + 4110 (page_index->page_len * 4111 CTL_PAGE_CURRENT)); 4112 saved_page = (struct copan_debugconf_subpage *) 4113 (page_index->page_data + 4114 (page_index->page_len * 4115 CTL_PAGE_SAVED)); 4116 break; 4117 } 4118 default: 4119 panic("invalid subpage value %d", 4120 page_index->subpage); 4121 break; 4122 } 4123 break; 4124 } 4125 default: 4126 panic("invalid page value %d", 4127 page_index->page_code & SMPH_PC_MASK); 4128 break; 4129 } 4130 } 4131 4132 return (CTL_RETVAL_COMPLETE); 4133} 4134 4135static int 4136ctl_init_log_page_index(struct ctl_lun *lun) 4137{ 4138 struct ctl_page_index *page_index; 4139 int i, j, k, prev; 4140 4141 memcpy(&lun->log_pages.index, log_page_index_template, 4142 sizeof(log_page_index_template)); 4143 4144 prev = -1; 4145 for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) { 4146 4147 page_index = &lun->log_pages.index[i]; 4148 /* 4149 * If this is a disk-only mode page, there's no point in 4150 * setting it up. 
For some pages, we have to have some 4151 * basic information about the disk in order to calculate the 4152 * mode page data. 4153 */ 4154 if ((lun->be_lun->lun_type != T_DIRECT) 4155 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) 4156 continue; 4157 4158 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING && 4159 lun->backend->lun_attr == NULL) 4160 continue; 4161 4162 if (page_index->page_code != prev) { 4163 lun->log_pages.pages_page[j] = page_index->page_code; 4164 prev = page_index->page_code; 4165 j++; 4166 } 4167 lun->log_pages.subpages_page[k*2] = page_index->page_code; 4168 lun->log_pages.subpages_page[k*2+1] = page_index->subpage; 4169 k++; 4170 } 4171 lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0]; 4172 lun->log_pages.index[0].page_len = j; 4173 lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0]; 4174 lun->log_pages.index[1].page_len = k * 2; 4175 lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0]; 4176 lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS; 4177 lun->log_pages.index[3].page_data = (uint8_t *)&lun->log_pages.stat_page; 4178 lun->log_pages.index[3].page_len = sizeof(lun->log_pages.stat_page); 4179 4180 return (CTL_RETVAL_COMPLETE); 4181} 4182 4183static int 4184hex2bin(const char *str, uint8_t *buf, int buf_size) 4185{ 4186 int i; 4187 u_char c; 4188 4189 memset(buf, 0, buf_size); 4190 while (isspace(str[0])) 4191 str++; 4192 if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) 4193 str += 2; 4194 buf_size *= 2; 4195 for (i = 0; str[i] != 0 && i < buf_size; i++) { 4196 c = str[i]; 4197 if (isdigit(c)) 4198 c -= '0'; 4199 else if (isalpha(c)) 4200 c -= isupper(c) ? 'A' - 10 : 'a' - 10; 4201 else 4202 break; 4203 if (c >= 16) 4204 break; 4205 if ((i & 1) == 0) 4206 buf[i / 2] |= (c << 4); 4207 else 4208 buf[i / 2] |= c; 4209 } 4210 return ((i + 1) / 2); 4211} 4212 4213/* 4214 * LUN allocation. 
4215 * 4216 * Requirements: 4217 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he 4218 * wants us to allocate the LUN and he can block. 4219 * - ctl_softc is always set 4220 * - be_lun is set if the LUN has a backend (needed for disk LUNs) 4221 * 4222 * Returns 0 for success, non-zero (errno) for failure. 4223 */ 4224static int 4225ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun, 4226 struct ctl_be_lun *const be_lun) 4227{ 4228 struct ctl_lun *nlun, *lun; 4229 struct scsi_vpd_id_descriptor *desc; 4230 struct scsi_vpd_id_t10 *t10id; 4231 const char *eui, *naa, *scsiname, *vendor; 4232 int lun_number, i, lun_malloced; 4233 int devidlen, idlen1, idlen2 = 0, len; 4234 4235 if (be_lun == NULL) 4236 return (EINVAL); 4237 4238 /* 4239 * We currently only support Direct Access or Processor LUN types. 4240 */ 4241 switch (be_lun->lun_type) { 4242 case T_DIRECT: 4243 break; 4244 case T_PROCESSOR: 4245 break; 4246 case T_SEQUENTIAL: 4247 case T_CHANGER: 4248 default: 4249 be_lun->lun_config_status(be_lun->be_lun, 4250 CTL_LUN_CONFIG_FAILURE); 4251 break; 4252 } 4253 if (ctl_lun == NULL) { 4254 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK); 4255 lun_malloced = 1; 4256 } else { 4257 lun_malloced = 0; 4258 lun = ctl_lun; 4259 } 4260 4261 memset(lun, 0, sizeof(*lun)); 4262 if (lun_malloced) 4263 lun->flags = CTL_LUN_MALLOCED; 4264 4265 /* Generate LUN ID. 
*/ 4266 devidlen = max(CTL_DEVID_MIN_LEN, 4267 strnlen(be_lun->device_id, CTL_DEVID_LEN)); 4268 idlen1 = sizeof(*t10id) + devidlen; 4269 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; 4270 scsiname = ctl_get_opt(&be_lun->options, "scsiname"); 4271 if (scsiname != NULL) { 4272 idlen2 = roundup2(strlen(scsiname) + 1, 4); 4273 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; 4274 } 4275 eui = ctl_get_opt(&be_lun->options, "eui"); 4276 if (eui != NULL) { 4277 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4278 } 4279 naa = ctl_get_opt(&be_lun->options, "naa"); 4280 if (naa != NULL) { 4281 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4282 } 4283 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, 4284 M_CTL, M_WAITOK | M_ZERO); 4285 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; 4286 desc->proto_codeset = SVPD_ID_CODESET_ASCII; 4287 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 4288 desc->length = idlen1; 4289 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 4290 memset(t10id->vendor, ' ', sizeof(t10id->vendor)); 4291 if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) { 4292 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 4293 } else { 4294 strncpy(t10id->vendor, vendor, 4295 min(sizeof(t10id->vendor), strlen(vendor))); 4296 } 4297 strncpy((char *)t10id->vendor_spec_id, 4298 (char *)be_lun->device_id, devidlen); 4299 if (scsiname != NULL) { 4300 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4301 desc->length); 4302 desc->proto_codeset = SVPD_ID_CODESET_UTF8; 4303 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4304 SVPD_ID_TYPE_SCSI_NAME; 4305 desc->length = idlen2; 4306 strlcpy(desc->identifier, scsiname, idlen2); 4307 } 4308 if (eui != NULL) { 4309 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4310 desc->length); 4311 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4312 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4313 
SVPD_ID_TYPE_EUI64; 4314 desc->length = hex2bin(eui, desc->identifier, 16); 4315 desc->length = desc->length > 12 ? 16 : 4316 (desc->length > 8 ? 12 : 8); 4317 len -= 16 - desc->length; 4318 } 4319 if (naa != NULL) { 4320 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4321 desc->length); 4322 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4323 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4324 SVPD_ID_TYPE_NAA; 4325 desc->length = hex2bin(naa, desc->identifier, 16); 4326 desc->length = desc->length > 8 ? 16 : 8; 4327 len -= 16 - desc->length; 4328 } 4329 lun->lun_devid->len = len; 4330 4331 mtx_lock(&ctl_softc->ctl_lock); 4332 /* 4333 * See if the caller requested a particular LUN number. If so, see 4334 * if it is available. Otherwise, allocate the first available LUN. 4335 */ 4336 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4337 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) 4338 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4339 mtx_unlock(&ctl_softc->ctl_lock); 4340 if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) { 4341 printf("ctl: requested LUN ID %d is higher " 4342 "than CTL_MAX_LUNS - 1 (%d)\n", 4343 be_lun->req_lun_id, CTL_MAX_LUNS - 1); 4344 } else { 4345 /* 4346 * XXX KDM return an error, or just assign 4347 * another LUN ID in this case?? 
4348 */ 4349 printf("ctl: requested LUN ID %d is already " 4350 "in use\n", be_lun->req_lun_id); 4351 } 4352 if (lun->flags & CTL_LUN_MALLOCED) 4353 free(lun, M_CTL); 4354 be_lun->lun_config_status(be_lun->be_lun, 4355 CTL_LUN_CONFIG_FAILURE); 4356 return (ENOSPC); 4357 } 4358 lun_number = be_lun->req_lun_id; 4359 } else { 4360 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, CTL_MAX_LUNS); 4361 if (lun_number == -1) { 4362 mtx_unlock(&ctl_softc->ctl_lock); 4363 printf("ctl: can't allocate LUN, out of LUNs\n"); 4364 if (lun->flags & CTL_LUN_MALLOCED) 4365 free(lun, M_CTL); 4366 be_lun->lun_config_status(be_lun->be_lun, 4367 CTL_LUN_CONFIG_FAILURE); 4368 return (ENOSPC); 4369 } 4370 } 4371 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4372 4373 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4374 lun->lun = lun_number; 4375 lun->be_lun = be_lun; 4376 /* 4377 * The processor LUN is always enabled. Disk LUNs come on line 4378 * disabled, and must be enabled by the backend. 4379 */ 4380 lun->flags |= CTL_LUN_DISABLED; 4381 lun->backend = be_lun->be; 4382 be_lun->ctl_lun = lun; 4383 be_lun->lun_id = lun_number; 4384 atomic_add_int(&be_lun->be->num_luns, 1); 4385 if (be_lun->flags & CTL_LUN_FLAG_OFFLINE) 4386 lun->flags |= CTL_LUN_OFFLINE; 4387 4388 if (be_lun->flags & CTL_LUN_FLAG_POWERED_OFF) 4389 lun->flags |= CTL_LUN_STOPPED; 4390 4391 if (be_lun->flags & CTL_LUN_FLAG_INOPERABLE) 4392 lun->flags |= CTL_LUN_INOPERABLE; 4393 4394 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4395 lun->flags |= CTL_LUN_PRIMARY_SC; 4396 4397 lun->ctl_softc = ctl_softc; 4398#ifdef CTL_TIME_IO 4399 lun->last_busy = getsbinuptime(); 4400#endif 4401 TAILQ_INIT(&lun->ooa_queue); 4402 TAILQ_INIT(&lun->blocked_queue); 4403 STAILQ_INIT(&lun->error_list); 4404 ctl_tpc_lun_init(lun); 4405 4406 /* 4407 * Initialize the mode and log page index. 
4408 */ 4409 ctl_init_page_index(lun); 4410 ctl_init_log_page_index(lun); 4411 4412 /* 4413 * Now, before we insert this lun on the lun list, set the lun 4414 * inventory changed UA for all other luns. 4415 */ 4416 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4417 mtx_lock(&nlun->lun_lock); 4418 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4419 mtx_unlock(&nlun->lun_lock); 4420 } 4421 4422 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4423 4424 ctl_softc->ctl_luns[lun_number] = lun; 4425 4426 ctl_softc->num_luns++; 4427 4428 /* Setup statistics gathering */ 4429 lun->stats.device_type = be_lun->lun_type; 4430 lun->stats.lun_number = lun_number; 4431 if (lun->stats.device_type == T_DIRECT) 4432 lun->stats.blocksize = be_lun->blocksize; 4433 else 4434 lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE; 4435 for (i = 0;i < CTL_MAX_PORTS;i++) 4436 lun->stats.ports[i].targ_port = i; 4437 4438 mtx_unlock(&ctl_softc->ctl_lock); 4439 4440 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); 4441 return (0); 4442} 4443 4444/* 4445 * Delete a LUN. 4446 * Assumptions: 4447 * - LUN has already been marked invalid and any pending I/O has been taken 4448 * care of. 4449 */ 4450static int 4451ctl_free_lun(struct ctl_lun *lun) 4452{ 4453 struct ctl_softc *softc; 4454 struct ctl_lun *nlun; 4455 int i; 4456 4457 softc = lun->ctl_softc; 4458 4459 mtx_assert(&softc->ctl_lock, MA_OWNED); 4460 4461 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4462 4463 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4464 4465 softc->ctl_luns[lun->lun] = NULL; 4466 4467 if (!TAILQ_EMPTY(&lun->ooa_queue)) 4468 panic("Freeing a LUN %p with outstanding I/O!!\n", lun); 4469 4470 softc->num_luns--; 4471 4472 /* 4473 * Tell the backend to free resources, if this LUN has a backend. 
4474 */ 4475 atomic_subtract_int(&lun->be_lun->be->num_luns, 1); 4476 lun->be_lun->lun_shutdown(lun->be_lun->be_lun); 4477 4478 ctl_tpc_lun_shutdown(lun); 4479 mtx_destroy(&lun->lun_lock); 4480 free(lun->lun_devid, M_CTL); 4481 for (i = 0; i < CTL_MAX_PORTS; i++) 4482 free(lun->pending_ua[i], M_CTL); 4483 for (i = 0; i < CTL_MAX_PORTS; i++) 4484 free(lun->pr_keys[i], M_CTL); 4485 free(lun->write_buffer, M_CTL); 4486 if (lun->flags & CTL_LUN_MALLOCED) 4487 free(lun, M_CTL); 4488 4489 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4490 mtx_lock(&nlun->lun_lock); 4491 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4492 mtx_unlock(&nlun->lun_lock); 4493 } 4494 4495 return (0); 4496} 4497 4498static void 4499ctl_create_lun(struct ctl_be_lun *be_lun) 4500{ 4501 struct ctl_softc *softc; 4502 4503 softc = control_softc; 4504 4505 /* 4506 * ctl_alloc_lun() should handle all potential failure cases. 4507 */ 4508 ctl_alloc_lun(softc, NULL, be_lun); 4509} 4510 4511int 4512ctl_add_lun(struct ctl_be_lun *be_lun) 4513{ 4514 struct ctl_softc *softc = control_softc; 4515 4516 mtx_lock(&softc->ctl_lock); 4517 STAILQ_INSERT_TAIL(&softc->pending_lun_queue, be_lun, links); 4518 mtx_unlock(&softc->ctl_lock); 4519 wakeup(&softc->pending_lun_queue); 4520 4521 return (0); 4522} 4523 4524int 4525ctl_enable_lun(struct ctl_be_lun *be_lun) 4526{ 4527 struct ctl_softc *softc; 4528 struct ctl_port *port, *nport; 4529 struct ctl_lun *lun; 4530 int retval; 4531 4532 lun = (struct ctl_lun *)be_lun->ctl_lun; 4533 softc = lun->ctl_softc; 4534 4535 mtx_lock(&softc->ctl_lock); 4536 mtx_lock(&lun->lun_lock); 4537 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4538 /* 4539 * eh? Why did we get called if the LUN is already 4540 * enabled? 
4541 */ 4542 mtx_unlock(&lun->lun_lock); 4543 mtx_unlock(&softc->ctl_lock); 4544 return (0); 4545 } 4546 lun->flags &= ~CTL_LUN_DISABLED; 4547 mtx_unlock(&lun->lun_lock); 4548 4549 for (port = STAILQ_FIRST(&softc->port_list); port != NULL; port = nport) { 4550 nport = STAILQ_NEXT(port, links); 4551 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4552 port->lun_map != NULL || port->lun_enable == NULL) 4553 continue; 4554 4555 /* 4556 * Drop the lock while we call the FETD's enable routine. 4557 * This can lead to a callback into CTL (at least in the 4558 * case of the internal initiator frontend. 4559 */ 4560 mtx_unlock(&softc->ctl_lock); 4561 retval = port->lun_enable(port->targ_lun_arg, lun->lun); 4562 mtx_lock(&softc->ctl_lock); 4563 if (retval != 0) { 4564 printf("%s: FETD %s port %d returned error " 4565 "%d for lun_enable on lun %jd\n", 4566 __func__, port->port_name, port->targ_port, 4567 retval, (intmax_t)lun->lun); 4568 } 4569 } 4570 4571 mtx_unlock(&softc->ctl_lock); 4572 ctl_isc_announce_lun(lun); 4573 4574 return (0); 4575} 4576 4577int 4578ctl_disable_lun(struct ctl_be_lun *be_lun) 4579{ 4580 struct ctl_softc *softc; 4581 struct ctl_port *port; 4582 struct ctl_lun *lun; 4583 int retval; 4584 4585 lun = (struct ctl_lun *)be_lun->ctl_lun; 4586 softc = lun->ctl_softc; 4587 4588 mtx_lock(&softc->ctl_lock); 4589 mtx_lock(&lun->lun_lock); 4590 if (lun->flags & CTL_LUN_DISABLED) { 4591 mtx_unlock(&lun->lun_lock); 4592 mtx_unlock(&softc->ctl_lock); 4593 return (0); 4594 } 4595 lun->flags |= CTL_LUN_DISABLED; 4596 mtx_unlock(&lun->lun_lock); 4597 4598 STAILQ_FOREACH(port, &softc->port_list, links) { 4599 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || 4600 port->lun_map != NULL || port->lun_disable == NULL) 4601 continue; 4602 4603 /* 4604 * Drop the lock before we call the frontend's disable 4605 * routine, to avoid lock order reversals. 4606 * 4607 * XXX KDM what happens if the frontend list changes while 4608 * we're traversing it? 
It's unlikely, but should be handled. 4609 */ 4610 mtx_unlock(&softc->ctl_lock); 4611 retval = port->lun_disable(port->targ_lun_arg, lun->lun); 4612 mtx_lock(&softc->ctl_lock); 4613 if (retval != 0) { 4614 printf("%s: FETD %s port %d returned error " 4615 "%d for lun_disable on lun %jd\n", 4616 __func__, port->port_name, port->targ_port, 4617 retval, (intmax_t)lun->lun); 4618 } 4619 } 4620 4621 mtx_unlock(&softc->ctl_lock); 4622 ctl_isc_announce_lun(lun); 4623 4624 return (0); 4625} 4626 4627int 4628ctl_start_lun(struct ctl_be_lun *be_lun) 4629{ 4630 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4631 4632 mtx_lock(&lun->lun_lock); 4633 lun->flags &= ~CTL_LUN_STOPPED; 4634 mtx_unlock(&lun->lun_lock); 4635 return (0); 4636} 4637 4638int 4639ctl_stop_lun(struct ctl_be_lun *be_lun) 4640{ 4641 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4642 4643 mtx_lock(&lun->lun_lock); 4644 lun->flags |= CTL_LUN_STOPPED; 4645 mtx_unlock(&lun->lun_lock); 4646 return (0); 4647} 4648 4649int 4650ctl_lun_offline(struct ctl_be_lun *be_lun) 4651{ 4652 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4653 4654 mtx_lock(&lun->lun_lock); 4655 lun->flags |= CTL_LUN_OFFLINE; 4656 mtx_unlock(&lun->lun_lock); 4657 return (0); 4658} 4659 4660int 4661ctl_lun_online(struct ctl_be_lun *be_lun) 4662{ 4663 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4664 4665 mtx_lock(&lun->lun_lock); 4666 lun->flags &= ~CTL_LUN_OFFLINE; 4667 mtx_unlock(&lun->lun_lock); 4668 return (0); 4669} 4670 4671int 4672ctl_lun_primary(struct ctl_be_lun *be_lun) 4673{ 4674 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4675 4676 mtx_lock(&lun->lun_lock); 4677 lun->flags |= CTL_LUN_PRIMARY_SC; 4678 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 4679 mtx_unlock(&lun->lun_lock); 4680 ctl_isc_announce_lun(lun); 4681 return (0); 4682} 4683 4684int 4685ctl_lun_secondary(struct ctl_be_lun *be_lun) 4686{ 4687 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4688 4689 
mtx_lock(&lun->lun_lock); 4690 lun->flags &= ~CTL_LUN_PRIMARY_SC; 4691 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 4692 mtx_unlock(&lun->lun_lock); 4693 ctl_isc_announce_lun(lun); 4694 return (0); 4695} 4696 4697int 4698ctl_invalidate_lun(struct ctl_be_lun *be_lun) 4699{ 4700 struct ctl_softc *softc; 4701 struct ctl_lun *lun; 4702 4703 lun = (struct ctl_lun *)be_lun->ctl_lun; 4704 softc = lun->ctl_softc; 4705 4706 mtx_lock(&lun->lun_lock); 4707 4708 /* 4709 * The LUN needs to be disabled before it can be marked invalid. 4710 */ 4711 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4712 mtx_unlock(&lun->lun_lock); 4713 return (-1); 4714 } 4715 /* 4716 * Mark the LUN invalid. 4717 */ 4718 lun->flags |= CTL_LUN_INVALID; 4719 4720 /* 4721 * If there is nothing in the OOA queue, go ahead and free the LUN. 4722 * If we have something in the OOA queue, we'll free it when the 4723 * last I/O completes. 4724 */ 4725 if (TAILQ_EMPTY(&lun->ooa_queue)) { 4726 mtx_unlock(&lun->lun_lock); 4727 mtx_lock(&softc->ctl_lock); 4728 ctl_free_lun(lun); 4729 mtx_unlock(&softc->ctl_lock); 4730 } else 4731 mtx_unlock(&lun->lun_lock); 4732 4733 return (0); 4734} 4735 4736int 4737ctl_lun_inoperable(struct ctl_be_lun *be_lun) 4738{ 4739 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4740 4741 mtx_lock(&lun->lun_lock); 4742 lun->flags |= CTL_LUN_INOPERABLE; 4743 mtx_unlock(&lun->lun_lock); 4744 return (0); 4745} 4746 4747int 4748ctl_lun_operable(struct ctl_be_lun *be_lun) 4749{ 4750 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4751 4752 mtx_lock(&lun->lun_lock); 4753 lun->flags &= ~CTL_LUN_INOPERABLE; 4754 mtx_unlock(&lun->lun_lock); 4755 return (0); 4756} 4757 4758void 4759ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 4760{ 4761 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4762 union ctl_ha_msg msg; 4763 4764 mtx_lock(&lun->lun_lock); 4765 ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGED); 4766 mtx_unlock(&lun->lun_lock); 4767 if (lun->ctl_softc->ha_mode 
== CTL_HA_MODE_XFER) { 4768 /* Send msg to other side. */ 4769 bzero(&msg.ua, sizeof(msg.ua)); 4770 msg.hdr.msg_type = CTL_MSG_UA; 4771 msg.hdr.nexus.initid = -1; 4772 msg.hdr.nexus.targ_port = -1; 4773 msg.hdr.nexus.targ_lun = lun->lun; 4774 msg.hdr.nexus.targ_mapped_lun = lun->lun; 4775 msg.ua.ua_all = 1; 4776 msg.ua.ua_set = 1; 4777 msg.ua.ua_type = CTL_UA_CAPACITY_CHANGED; 4778 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), 4779 M_WAITOK); 4780 } 4781} 4782 4783/* 4784 * Backend "memory move is complete" callback for requests that never 4785 * make it down to say RAIDCore's configuration code. 4786 */ 4787int 4788ctl_config_move_done(union ctl_io *io) 4789{ 4790 int retval; 4791 4792 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 4793 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 4794 ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type)); 4795 4796 if ((io->io_hdr.port_status != 0) && 4797 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 4798 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 4799 /* 4800 * For hardware error sense keys, the sense key 4801 * specific value is defined to be a retry count, 4802 * but we use it to pass back an internal FETD 4803 * error code. XXX KDM Hopefully the FETD is only 4804 * using 16 bits for an error code, since that's 4805 * all the space we have in the sks field. 4806 */ 4807 ctl_set_internal_failure(&io->scsiio, 4808 /*sks_valid*/ 1, 4809 /*retry_count*/ 4810 io->io_hdr.port_status); 4811 } 4812 4813 if (ctl_debug & CTL_DEBUG_CDB_DATA) 4814 ctl_data_print(io); 4815 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || 4816 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 4817 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || 4818 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 4819 /* 4820 * XXX KDM just assuming a single pointer here, and not a 4821 * S/G list. 
If we start using S/G lists for config data, 4822 * we'll need to know how to clean them up here as well. 4823 */ 4824 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 4825 free(io->scsiio.kern_data_ptr, M_CTL); 4826 ctl_done(io); 4827 retval = CTL_RETVAL_COMPLETE; 4828 } else { 4829 /* 4830 * XXX KDM now we need to continue data movement. Some 4831 * options: 4832 * - call ctl_scsiio() again? We don't do this for data 4833 * writes, because for those at least we know ahead of 4834 * time where the write will go and how long it is. For 4835 * config writes, though, that information is largely 4836 * contained within the write itself, thus we need to 4837 * parse out the data again. 4838 * 4839 * - Call some other function once the data is in? 4840 */ 4841 4842 /* 4843 * XXX KDM call ctl_scsiio() again for now, and check flag 4844 * bits to see whether we're allocated or not. 4845 */ 4846 retval = ctl_scsiio(&io->scsiio); 4847 } 4848 return (retval); 4849} 4850 4851/* 4852 * This gets called by a backend driver when it is done with a 4853 * data_submit method. 4854 */ 4855void 4856ctl_data_submit_done(union ctl_io *io) 4857{ 4858 /* 4859 * If the IO_CONT flag is set, we need to call the supplied 4860 * function to continue processing the I/O, instead of completing 4861 * the I/O just yet. 4862 * 4863 * If there is an error, though, we don't want to keep processing. 4864 * Instead, just send status back to the initiator. 4865 */ 4866 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 4867 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 4868 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 4869 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 4870 io->scsiio.io_cont(io); 4871 return; 4872 } 4873 ctl_done(io); 4874} 4875 4876/* 4877 * This gets called by a backend driver when it is done with a 4878 * configuration write. 
4879 */ 4880void 4881ctl_config_write_done(union ctl_io *io) 4882{ 4883 uint8_t *buf; 4884 4885 /* 4886 * If the IO_CONT flag is set, we need to call the supplied 4887 * function to continue processing the I/O, instead of completing 4888 * the I/O just yet. 4889 * 4890 * If there is an error, though, we don't want to keep processing. 4891 * Instead, just send status back to the initiator. 4892 */ 4893 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 4894 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 4895 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 4896 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 4897 io->scsiio.io_cont(io); 4898 return; 4899 } 4900 /* 4901 * Since a configuration write can be done for commands that actually 4902 * have data allocated, like write buffer, and commands that have 4903 * no data, like start/stop unit, we need to check here. 4904 */ 4905 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 4906 buf = io->scsiio.kern_data_ptr; 4907 else 4908 buf = NULL; 4909 ctl_done(io); 4910 if (buf) 4911 free(buf, M_CTL); 4912} 4913 4914void 4915ctl_config_read_done(union ctl_io *io) 4916{ 4917 uint8_t *buf; 4918 4919 /* 4920 * If there is some error -- we are done, skip data transfer. 4921 */ 4922 if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || 4923 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 4924 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 4925 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 4926 buf = io->scsiio.kern_data_ptr; 4927 else 4928 buf = NULL; 4929 ctl_done(io); 4930 if (buf) 4931 free(buf, M_CTL); 4932 return; 4933 } 4934 4935 /* 4936 * If the IO_CONT flag is set, we need to call the supplied 4937 * function to continue processing the I/O, instead of completing 4938 * the I/O just yet. 4939 */ 4940 if (io->io_hdr.flags & CTL_FLAG_IO_CONT) { 4941 io->scsiio.io_cont(io); 4942 return; 4943 } 4944 4945 ctl_datamove(io); 4946} 4947 4948/* 4949 * SCSI release command. 
4950 */ 4951int 4952ctl_scsi_release(struct ctl_scsiio *ctsio) 4953{ 4954 int length, longid, thirdparty_id, resv_id; 4955 struct ctl_lun *lun; 4956 uint32_t residx; 4957 4958 length = 0; 4959 resv_id = 0; 4960 4961 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 4962 4963 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 4964 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 4965 4966 switch (ctsio->cdb[0]) { 4967 case RELEASE_10: { 4968 struct scsi_release_10 *cdb; 4969 4970 cdb = (struct scsi_release_10 *)ctsio->cdb; 4971 4972 if (cdb->byte2 & SR10_LONGID) 4973 longid = 1; 4974 else 4975 thirdparty_id = cdb->thirdparty_id; 4976 4977 resv_id = cdb->resv_id; 4978 length = scsi_2btoul(cdb->length); 4979 break; 4980 } 4981 } 4982 4983 4984 /* 4985 * XXX KDM right now, we only support LUN reservation. We don't 4986 * support 3rd party reservations, or extent reservations, which 4987 * might actually need the parameter list. If we've gotten this 4988 * far, we've got a LUN reservation. Anything else got kicked out 4989 * above. So, according to SPC, ignore the length. 4990 */ 4991 length = 0; 4992 4993 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 4994 && (length > 0)) { 4995 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 4996 ctsio->kern_data_len = length; 4997 ctsio->kern_total_len = length; 4998 ctsio->kern_data_resid = 0; 4999 ctsio->kern_rel_offset = 0; 5000 ctsio->kern_sg_entries = 0; 5001 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5002 ctsio->be_move_done = ctl_config_move_done; 5003 ctl_datamove((union ctl_io *)ctsio); 5004 5005 return (CTL_RETVAL_COMPLETE); 5006 } 5007 5008 if (length > 0) 5009 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 5010 5011 mtx_lock(&lun->lun_lock); 5012 5013 /* 5014 * According to SPC, it is not an error for an intiator to attempt 5015 * to release a reservation on a LUN that isn't reserved, or that 5016 * is reserved by another initiator. 
The reservation can only be 5017 * released, though, by the initiator who made it or by one of 5018 * several reset type events. 5019 */ 5020 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) 5021 lun->flags &= ~CTL_LUN_RESERVED; 5022 5023 mtx_unlock(&lun->lun_lock); 5024 5025 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5026 free(ctsio->kern_data_ptr, M_CTL); 5027 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5028 } 5029 5030 ctl_set_success(ctsio); 5031 ctl_done((union ctl_io *)ctsio); 5032 return (CTL_RETVAL_COMPLETE); 5033} 5034 5035int 5036ctl_scsi_reserve(struct ctl_scsiio *ctsio) 5037{ 5038 int extent, thirdparty, longid; 5039 int resv_id, length; 5040 uint64_t thirdparty_id; 5041 struct ctl_lun *lun; 5042 uint32_t residx; 5043 5044 extent = 0; 5045 thirdparty = 0; 5046 longid = 0; 5047 resv_id = 0; 5048 length = 0; 5049 thirdparty_id = 0; 5050 5051 CTL_DEBUG_PRINT(("ctl_reserve\n")); 5052 5053 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5054 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5055 5056 switch (ctsio->cdb[0]) { 5057 case RESERVE_10: { 5058 struct scsi_reserve_10 *cdb; 5059 5060 cdb = (struct scsi_reserve_10 *)ctsio->cdb; 5061 5062 if (cdb->byte2 & SR10_LONGID) 5063 longid = 1; 5064 else 5065 thirdparty_id = cdb->thirdparty_id; 5066 5067 resv_id = cdb->resv_id; 5068 length = scsi_2btoul(cdb->length); 5069 break; 5070 } 5071 } 5072 5073 /* 5074 * XXX KDM right now, we only support LUN reservation. We don't 5075 * support 3rd party reservations, or extent reservations, which 5076 * might actually need the parameter list. If we've gotten this 5077 * far, we've got a LUN reservation. Anything else got kicked out 5078 * above. So, according to SPC, ignore the length. 
5079 */ 5080 length = 0; 5081 5082 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5083 && (length > 0)) { 5084 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5085 ctsio->kern_data_len = length; 5086 ctsio->kern_total_len = length; 5087 ctsio->kern_data_resid = 0; 5088 ctsio->kern_rel_offset = 0; 5089 ctsio->kern_sg_entries = 0; 5090 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5091 ctsio->be_move_done = ctl_config_move_done; 5092 ctl_datamove((union ctl_io *)ctsio); 5093 5094 return (CTL_RETVAL_COMPLETE); 5095 } 5096 5097 if (length > 0) 5098 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 5099 5100 mtx_lock(&lun->lun_lock); 5101 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) { 5102 ctl_set_reservation_conflict(ctsio); 5103 goto bailout; 5104 } 5105 5106 lun->flags |= CTL_LUN_RESERVED; 5107 lun->res_idx = residx; 5108 5109 ctl_set_success(ctsio); 5110 5111bailout: 5112 mtx_unlock(&lun->lun_lock); 5113 5114 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5115 free(ctsio->kern_data_ptr, M_CTL); 5116 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5117 } 5118 5119 ctl_done((union ctl_io *)ctsio); 5120 return (CTL_RETVAL_COMPLETE); 5121} 5122 5123int 5124ctl_start_stop(struct ctl_scsiio *ctsio) 5125{ 5126 struct scsi_start_stop_unit *cdb; 5127 struct ctl_lun *lun; 5128 int retval; 5129 5130 CTL_DEBUG_PRINT(("ctl_start_stop\n")); 5131 5132 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5133 retval = 0; 5134 5135 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; 5136 5137 /* 5138 * XXX KDM 5139 * We don't support the immediate bit on a stop unit. In order to 5140 * do that, we would need to code up a way to know that a stop is 5141 * pending, and hold off any new commands until it completes, one 5142 * way or another. Then we could accept or reject those commands 5143 * depending on its status. 
We would almost need to do the reverse 5144 * of what we do below for an immediate start -- return the copy of 5145 * the ctl_io to the FETD with status to send to the host (and to 5146 * free the copy!) and then free the original I/O once the stop 5147 * actually completes. That way, the OOA queue mechanism can work 5148 * to block commands that shouldn't proceed. Another alternative 5149 * would be to put the copy in the queue in place of the original, 5150 * and return the original back to the caller. That could be 5151 * slightly safer.. 5152 */ 5153 if ((cdb->byte2 & SSS_IMMED) 5154 && ((cdb->how & SSS_START) == 0)) { 5155 ctl_set_invalid_field(ctsio, 5156 /*sks_valid*/ 1, 5157 /*command*/ 1, 5158 /*field*/ 1, 5159 /*bit_valid*/ 1, 5160 /*bit*/ 0); 5161 ctl_done((union ctl_io *)ctsio); 5162 return (CTL_RETVAL_COMPLETE); 5163 } 5164 5165 if ((lun->flags & CTL_LUN_PR_RESERVED) 5166 && ((cdb->how & SSS_START)==0)) { 5167 uint32_t residx; 5168 5169 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5170 if (ctl_get_prkey(lun, residx) == 0 5171 || (lun->pr_res_idx!=residx && lun->res_type < 4)) { 5172 5173 ctl_set_reservation_conflict(ctsio); 5174 ctl_done((union ctl_io *)ctsio); 5175 return (CTL_RETVAL_COMPLETE); 5176 } 5177 } 5178 5179 /* 5180 * If there is no backend on this device, we can't start or stop 5181 * it. In theory we shouldn't get any start/stop commands in the 5182 * first place at this level if the LUN doesn't have a backend. 5183 * That should get stopped by the command decode code. 5184 */ 5185 if (lun->backend == NULL) { 5186 ctl_set_invalid_opcode(ctsio); 5187 ctl_done((union ctl_io *)ctsio); 5188 return (CTL_RETVAL_COMPLETE); 5189 } 5190 5191 /* 5192 * XXX KDM Copan-specific offline behavior. 5193 * Figure out a reasonable way to port this? 
5194 */ 5195#ifdef NEEDTOPORT 5196 mtx_lock(&lun->lun_lock); 5197 5198 if (((cdb->byte2 & SSS_ONOFFLINE) == 0) 5199 && (lun->flags & CTL_LUN_OFFLINE)) { 5200 /* 5201 * If the LUN is offline, and the on/offline bit isn't set, 5202 * reject the start or stop. Otherwise, let it through. 5203 */ 5204 mtx_unlock(&lun->lun_lock); 5205 ctl_set_lun_not_ready(ctsio); 5206 ctl_done((union ctl_io *)ctsio); 5207 } else { 5208 mtx_unlock(&lun->lun_lock); 5209#endif /* NEEDTOPORT */ 5210 /* 5211 * This could be a start or a stop when we're online, 5212 * or a stop/offline or start/online. A start or stop when 5213 * we're offline is covered in the case above. 5214 */ 5215 /* 5216 * In the non-immediate case, we send the request to 5217 * the backend and return status to the user when 5218 * it is done. 5219 * 5220 * In the immediate case, we allocate a new ctl_io 5221 * to hold a copy of the request, and send that to 5222 * the backend. We then set good status on the 5223 * user's request and return it immediately. 5224 */ 5225 if (cdb->byte2 & SSS_IMMED) { 5226 union ctl_io *new_io; 5227 5228 new_io = ctl_alloc_io(ctsio->io_hdr.pool); 5229 ctl_copy_io((union ctl_io *)ctsio, new_io); 5230 retval = lun->backend->config_write(new_io); 5231 ctl_set_success(ctsio); 5232 ctl_done((union ctl_io *)ctsio); 5233 } else { 5234 retval = lun->backend->config_write( 5235 (union ctl_io *)ctsio); 5236 } 5237#ifdef NEEDTOPORT 5238 } 5239#endif 5240 return (retval); 5241} 5242 5243/* 5244 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 5245 * we don't really do anything with the LBA and length fields if the user 5246 * passes them in. Instead we'll just flush out the cache for the entire 5247 * LUN. 
5248 */ 5249int 5250ctl_sync_cache(struct ctl_scsiio *ctsio) 5251{ 5252 struct ctl_lun *lun; 5253 struct ctl_softc *softc; 5254 struct ctl_lba_len_flags *lbalen; 5255 uint64_t starting_lba; 5256 uint32_t block_count; 5257 int retval; 5258 uint8_t byte2; 5259 5260 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 5261 5262 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5263 softc = lun->ctl_softc; 5264 retval = 0; 5265 5266 switch (ctsio->cdb[0]) { 5267 case SYNCHRONIZE_CACHE: { 5268 struct scsi_sync_cache *cdb; 5269 cdb = (struct scsi_sync_cache *)ctsio->cdb; 5270 5271 starting_lba = scsi_4btoul(cdb->begin_lba); 5272 block_count = scsi_2btoul(cdb->lb_count); 5273 byte2 = cdb->byte2; 5274 break; 5275 } 5276 case SYNCHRONIZE_CACHE_16: { 5277 struct scsi_sync_cache_16 *cdb; 5278 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5279 5280 starting_lba = scsi_8btou64(cdb->begin_lba); 5281 block_count = scsi_4btoul(cdb->lb_count); 5282 byte2 = cdb->byte2; 5283 break; 5284 } 5285 default: 5286 ctl_set_invalid_opcode(ctsio); 5287 ctl_done((union ctl_io *)ctsio); 5288 goto bailout; 5289 break; /* NOTREACHED */ 5290 } 5291 5292 /* 5293 * We check the LBA and length, but don't do anything with them. 5294 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5295 * get flushed. This check will just help satisfy anyone who wants 5296 * to see an error for an out of range LBA. 5297 */ 5298 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5299 ctl_set_lba_out_of_range(ctsio); 5300 ctl_done((union ctl_io *)ctsio); 5301 goto bailout; 5302 } 5303 5304 /* 5305 * If this LUN has no backend, we can't flush the cache anyway. 
5306 */ 5307 if (lun->backend == NULL) { 5308 ctl_set_invalid_opcode(ctsio); 5309 ctl_done((union ctl_io *)ctsio); 5310 goto bailout; 5311 } 5312 5313 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5314 lbalen->lba = starting_lba; 5315 lbalen->len = block_count; 5316 lbalen->flags = byte2; 5317 5318 /* 5319 * Check to see whether we're configured to send the SYNCHRONIZE 5320 * CACHE command directly to the back end. 5321 */ 5322 mtx_lock(&lun->lun_lock); 5323 if ((softc->flags & CTL_FLAG_REAL_SYNC) 5324 && (++(lun->sync_count) >= lun->sync_interval)) { 5325 lun->sync_count = 0; 5326 mtx_unlock(&lun->lun_lock); 5327 retval = lun->backend->config_write((union ctl_io *)ctsio); 5328 } else { 5329 mtx_unlock(&lun->lun_lock); 5330 ctl_set_success(ctsio); 5331 ctl_done((union ctl_io *)ctsio); 5332 } 5333 5334bailout: 5335 5336 return (retval); 5337} 5338 5339int 5340ctl_format(struct ctl_scsiio *ctsio) 5341{ 5342 struct scsi_format *cdb; 5343 struct ctl_lun *lun; 5344 int length, defect_list_len; 5345 5346 CTL_DEBUG_PRINT(("ctl_format\n")); 5347 5348 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5349 5350 cdb = (struct scsi_format *)ctsio->cdb; 5351 5352 length = 0; 5353 if (cdb->byte2 & SF_FMTDATA) { 5354 if (cdb->byte2 & SF_LONGLIST) 5355 length = sizeof(struct scsi_format_header_long); 5356 else 5357 length = sizeof(struct scsi_format_header_short); 5358 } 5359 5360 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5361 && (length > 0)) { 5362 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5363 ctsio->kern_data_len = length; 5364 ctsio->kern_total_len = length; 5365 ctsio->kern_data_resid = 0; 5366 ctsio->kern_rel_offset = 0; 5367 ctsio->kern_sg_entries = 0; 5368 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5369 ctsio->be_move_done = ctl_config_move_done; 5370 ctl_datamove((union ctl_io *)ctsio); 5371 5372 return (CTL_RETVAL_COMPLETE); 5373 } 5374 5375 defect_list_len = 0; 5376 5377 if (cdb->byte2 & 
SF_FMTDATA) { 5378 if (cdb->byte2 & SF_LONGLIST) { 5379 struct scsi_format_header_long *header; 5380 5381 header = (struct scsi_format_header_long *) 5382 ctsio->kern_data_ptr; 5383 5384 defect_list_len = scsi_4btoul(header->defect_list_len); 5385 if (defect_list_len != 0) { 5386 ctl_set_invalid_field(ctsio, 5387 /*sks_valid*/ 1, 5388 /*command*/ 0, 5389 /*field*/ 2, 5390 /*bit_valid*/ 0, 5391 /*bit*/ 0); 5392 goto bailout; 5393 } 5394 } else { 5395 struct scsi_format_header_short *header; 5396 5397 header = (struct scsi_format_header_short *) 5398 ctsio->kern_data_ptr; 5399 5400 defect_list_len = scsi_2btoul(header->defect_list_len); 5401 if (defect_list_len != 0) { 5402 ctl_set_invalid_field(ctsio, 5403 /*sks_valid*/ 1, 5404 /*command*/ 0, 5405 /*field*/ 2, 5406 /*bit_valid*/ 0, 5407 /*bit*/ 0); 5408 goto bailout; 5409 } 5410 } 5411 } 5412 5413 /* 5414 * The format command will clear out the "Medium format corrupted" 5415 * status if set by the configuration code. That status is really 5416 * just a way to notify the host that we have lost the media, and 5417 * get them to issue a command that will basically make them think 5418 * they're blowing away the media. 
5419 */ 5420 mtx_lock(&lun->lun_lock); 5421 lun->flags &= ~CTL_LUN_INOPERABLE; 5422 mtx_unlock(&lun->lun_lock); 5423 5424 ctl_set_success(ctsio); 5425bailout: 5426 5427 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5428 free(ctsio->kern_data_ptr, M_CTL); 5429 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5430 } 5431 5432 ctl_done((union ctl_io *)ctsio); 5433 return (CTL_RETVAL_COMPLETE); 5434} 5435 5436int 5437ctl_read_buffer(struct ctl_scsiio *ctsio) 5438{ 5439 struct scsi_read_buffer *cdb; 5440 struct ctl_lun *lun; 5441 int buffer_offset, len; 5442 static uint8_t descr[4]; 5443 static uint8_t echo_descr[4] = { 0 }; 5444 5445 CTL_DEBUG_PRINT(("ctl_read_buffer\n")); 5446 5447 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5448 cdb = (struct scsi_read_buffer *)ctsio->cdb; 5449 5450 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA && 5451 (cdb->byte2 & RWB_MODE) != RWB_MODE_ECHO_DESCR && 5452 (cdb->byte2 & RWB_MODE) != RWB_MODE_DESCR) { 5453 ctl_set_invalid_field(ctsio, 5454 /*sks_valid*/ 1, 5455 /*command*/ 1, 5456 /*field*/ 1, 5457 /*bit_valid*/ 1, 5458 /*bit*/ 4); 5459 ctl_done((union ctl_io *)ctsio); 5460 return (CTL_RETVAL_COMPLETE); 5461 } 5462 5463 len = scsi_3btoul(cdb->length); 5464 buffer_offset = scsi_3btoul(cdb->offset); 5465 5466 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5467 ctl_set_invalid_field(ctsio, 5468 /*sks_valid*/ 1, 5469 /*command*/ 1, 5470 /*field*/ 6, 5471 /*bit_valid*/ 0, 5472 /*bit*/ 0); 5473 ctl_done((union ctl_io *)ctsio); 5474 return (CTL_RETVAL_COMPLETE); 5475 } 5476 5477 if ((cdb->byte2 & RWB_MODE) == RWB_MODE_DESCR) { 5478 descr[0] = 0; 5479 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); 5480 ctsio->kern_data_ptr = descr; 5481 len = min(len, sizeof(descr)); 5482 } else if ((cdb->byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { 5483 ctsio->kern_data_ptr = echo_descr; 5484 len = min(len, sizeof(echo_descr)); 5485 } else { 5486 if (lun->write_buffer == NULL) { 5487 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 
5488 M_CTL, M_WAITOK); 5489 } 5490 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5491 } 5492 ctsio->kern_data_len = len; 5493 ctsio->kern_total_len = len; 5494 ctsio->kern_data_resid = 0; 5495 ctsio->kern_rel_offset = 0; 5496 ctsio->kern_sg_entries = 0; 5497 ctl_set_success(ctsio); 5498 ctsio->be_move_done = ctl_config_move_done; 5499 ctl_datamove((union ctl_io *)ctsio); 5500 return (CTL_RETVAL_COMPLETE); 5501} 5502 5503int 5504ctl_write_buffer(struct ctl_scsiio *ctsio) 5505{ 5506 struct scsi_write_buffer *cdb; 5507 struct ctl_lun *lun; 5508 int buffer_offset, len; 5509 5510 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5511 5512 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5513 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5514 5515 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) { 5516 ctl_set_invalid_field(ctsio, 5517 /*sks_valid*/ 1, 5518 /*command*/ 1, 5519 /*field*/ 1, 5520 /*bit_valid*/ 1, 5521 /*bit*/ 4); 5522 ctl_done((union ctl_io *)ctsio); 5523 return (CTL_RETVAL_COMPLETE); 5524 } 5525 5526 len = scsi_3btoul(cdb->length); 5527 buffer_offset = scsi_3btoul(cdb->offset); 5528 5529 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5530 ctl_set_invalid_field(ctsio, 5531 /*sks_valid*/ 1, 5532 /*command*/ 1, 5533 /*field*/ 6, 5534 /*bit_valid*/ 0, 5535 /*bit*/ 0); 5536 ctl_done((union ctl_io *)ctsio); 5537 return (CTL_RETVAL_COMPLETE); 5538 } 5539 5540 /* 5541 * If we've got a kernel request that hasn't been malloced yet, 5542 * malloc it and tell the caller the data buffer is here. 
5543 */ 5544 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5545 if (lun->write_buffer == NULL) { 5546 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5547 M_CTL, M_WAITOK); 5548 } 5549 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5550 ctsio->kern_data_len = len; 5551 ctsio->kern_total_len = len; 5552 ctsio->kern_data_resid = 0; 5553 ctsio->kern_rel_offset = 0; 5554 ctsio->kern_sg_entries = 0; 5555 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5556 ctsio->be_move_done = ctl_config_move_done; 5557 ctl_datamove((union ctl_io *)ctsio); 5558 5559 return (CTL_RETVAL_COMPLETE); 5560 } 5561 5562 ctl_set_success(ctsio); 5563 ctl_done((union ctl_io *)ctsio); 5564 return (CTL_RETVAL_COMPLETE); 5565} 5566 5567int 5568ctl_write_same(struct ctl_scsiio *ctsio) 5569{ 5570 struct ctl_lun *lun; 5571 struct ctl_lba_len_flags *lbalen; 5572 uint64_t lba; 5573 uint32_t num_blocks; 5574 int len, retval; 5575 uint8_t byte2; 5576 5577 retval = CTL_RETVAL_COMPLETE; 5578 5579 CTL_DEBUG_PRINT(("ctl_write_same\n")); 5580 5581 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5582 5583 switch (ctsio->cdb[0]) { 5584 case WRITE_SAME_10: { 5585 struct scsi_write_same_10 *cdb; 5586 5587 cdb = (struct scsi_write_same_10 *)ctsio->cdb; 5588 5589 lba = scsi_4btoul(cdb->addr); 5590 num_blocks = scsi_2btoul(cdb->length); 5591 byte2 = cdb->byte2; 5592 break; 5593 } 5594 case WRITE_SAME_16: { 5595 struct scsi_write_same_16 *cdb; 5596 5597 cdb = (struct scsi_write_same_16 *)ctsio->cdb; 5598 5599 lba = scsi_8btou64(cdb->addr); 5600 num_blocks = scsi_4btoul(cdb->length); 5601 byte2 = cdb->byte2; 5602 break; 5603 } 5604 default: 5605 /* 5606 * We got a command we don't support. This shouldn't 5607 * happen, commands should be filtered out above us. 
5608 */ 5609 ctl_set_invalid_opcode(ctsio); 5610 ctl_done((union ctl_io *)ctsio); 5611 5612 return (CTL_RETVAL_COMPLETE); 5613 break; /* NOTREACHED */ 5614 } 5615 5616 /* NDOB and ANCHOR flags can be used only together with UNMAP */ 5617 if ((byte2 & SWS_UNMAP) == 0 && 5618 (byte2 & (SWS_NDOB | SWS_ANCHOR)) != 0) { 5619 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 5620 /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 5621 ctl_done((union ctl_io *)ctsio); 5622 return (CTL_RETVAL_COMPLETE); 5623 } 5624 5625 /* 5626 * The first check is to make sure we're in bounds, the second 5627 * check is to catch wrap-around problems. If the lba + num blocks 5628 * is less than the lba, then we've wrapped around and the block 5629 * range is invalid anyway. 5630 */ 5631 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5632 || ((lba + num_blocks) < lba)) { 5633 ctl_set_lba_out_of_range(ctsio); 5634 ctl_done((union ctl_io *)ctsio); 5635 return (CTL_RETVAL_COMPLETE); 5636 } 5637 5638 /* Zero number of blocks means "to the last logical block" */ 5639 if (num_blocks == 0) { 5640 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { 5641 ctl_set_invalid_field(ctsio, 5642 /*sks_valid*/ 0, 5643 /*command*/ 1, 5644 /*field*/ 0, 5645 /*bit_valid*/ 0, 5646 /*bit*/ 0); 5647 ctl_done((union ctl_io *)ctsio); 5648 return (CTL_RETVAL_COMPLETE); 5649 } 5650 num_blocks = (lun->be_lun->maxlba + 1) - lba; 5651 } 5652 5653 len = lun->be_lun->blocksize; 5654 5655 /* 5656 * If we've got a kernel request that hasn't been malloced yet, 5657 * malloc it and tell the caller the data buffer is here. 
5658 */ 5659 if ((byte2 & SWS_NDOB) == 0 && 5660 (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5661 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);; 5662 ctsio->kern_data_len = len; 5663 ctsio->kern_total_len = len; 5664 ctsio->kern_data_resid = 0; 5665 ctsio->kern_rel_offset = 0; 5666 ctsio->kern_sg_entries = 0; 5667 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5668 ctsio->be_move_done = ctl_config_move_done; 5669 ctl_datamove((union ctl_io *)ctsio); 5670 5671 return (CTL_RETVAL_COMPLETE); 5672 } 5673 5674 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5675 lbalen->lba = lba; 5676 lbalen->len = num_blocks; 5677 lbalen->flags = byte2; 5678 retval = lun->backend->config_write((union ctl_io *)ctsio); 5679 5680 return (retval); 5681} 5682 5683int 5684ctl_unmap(struct ctl_scsiio *ctsio) 5685{ 5686 struct ctl_lun *lun; 5687 struct scsi_unmap *cdb; 5688 struct ctl_ptr_len_flags *ptrlen; 5689 struct scsi_unmap_header *hdr; 5690 struct scsi_unmap_desc *buf, *end, *endnz, *range; 5691 uint64_t lba; 5692 uint32_t num_blocks; 5693 int len, retval; 5694 uint8_t byte2; 5695 5696 retval = CTL_RETVAL_COMPLETE; 5697 5698 CTL_DEBUG_PRINT(("ctl_unmap\n")); 5699 5700 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5701 cdb = (struct scsi_unmap *)ctsio->cdb; 5702 5703 len = scsi_2btoul(cdb->length); 5704 byte2 = cdb->byte2; 5705 5706 /* 5707 * If we've got a kernel request that hasn't been malloced yet, 5708 * malloc it and tell the caller the data buffer is here. 
5709 */ 5710 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5711 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);; 5712 ctsio->kern_data_len = len; 5713 ctsio->kern_total_len = len; 5714 ctsio->kern_data_resid = 0; 5715 ctsio->kern_rel_offset = 0; 5716 ctsio->kern_sg_entries = 0; 5717 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5718 ctsio->be_move_done = ctl_config_move_done; 5719 ctl_datamove((union ctl_io *)ctsio); 5720 5721 return (CTL_RETVAL_COMPLETE); 5722 } 5723 5724 len = ctsio->kern_total_len - ctsio->kern_data_resid; 5725 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; 5726 if (len < sizeof (*hdr) || 5727 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || 5728 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || 5729 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { 5730 ctl_set_invalid_field(ctsio, 5731 /*sks_valid*/ 0, 5732 /*command*/ 0, 5733 /*field*/ 0, 5734 /*bit_valid*/ 0, 5735 /*bit*/ 0); 5736 goto done; 5737 } 5738 len = scsi_2btoul(hdr->desc_length); 5739 buf = (struct scsi_unmap_desc *)(hdr + 1); 5740 end = buf + len / sizeof(*buf); 5741 5742 endnz = buf; 5743 for (range = buf; range < end; range++) { 5744 lba = scsi_8btou64(range->lba); 5745 num_blocks = scsi_4btoul(range->length); 5746 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5747 || ((lba + num_blocks) < lba)) { 5748 ctl_set_lba_out_of_range(ctsio); 5749 ctl_done((union ctl_io *)ctsio); 5750 return (CTL_RETVAL_COMPLETE); 5751 } 5752 if (num_blocks != 0) 5753 endnz = range + 1; 5754 } 5755 5756 /* 5757 * Block backend can not handle zero last range. 5758 * Filter it out and return if there is nothing left. 
5759 */ 5760 len = (uint8_t *)endnz - (uint8_t *)buf; 5761 if (len == 0) { 5762 ctl_set_success(ctsio); 5763 goto done; 5764 } 5765 5766 mtx_lock(&lun->lun_lock); 5767 ptrlen = (struct ctl_ptr_len_flags *) 5768 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5769 ptrlen->ptr = (void *)buf; 5770 ptrlen->len = len; 5771 ptrlen->flags = byte2; 5772 ctl_check_blocked(lun); 5773 mtx_unlock(&lun->lun_lock); 5774 5775 retval = lun->backend->config_write((union ctl_io *)ctsio); 5776 return (retval); 5777 5778done: 5779 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5780 free(ctsio->kern_data_ptr, M_CTL); 5781 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5782 } 5783 ctl_done((union ctl_io *)ctsio); 5784 return (CTL_RETVAL_COMPLETE); 5785} 5786 5787/* 5788 * Note that this function currently doesn't actually do anything inside 5789 * CTL to enforce things if the DQue bit is turned on. 5790 * 5791 * Also note that this function can't be used in the default case, because 5792 * the DQue bit isn't set in the changeable mask for the control mode page 5793 * anyway. This is just here as an example for how to implement a page 5794 * handler, and a placeholder in case we want to allow the user to turn 5795 * tagged queueing on and off. 5796 * 5797 * The D_SENSE bit handling is functional, however, and will turn 5798 * descriptor sense on and off for a given LUN. 
5799 */ 5800int 5801ctl_control_page_handler(struct ctl_scsiio *ctsio, 5802 struct ctl_page_index *page_index, uint8_t *page_ptr) 5803{ 5804 struct scsi_control_page *current_cp, *saved_cp, *user_cp; 5805 struct ctl_lun *lun; 5806 int set_ua; 5807 uint32_t initidx; 5808 5809 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5810 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5811 set_ua = 0; 5812 5813 user_cp = (struct scsi_control_page *)page_ptr; 5814 current_cp = (struct scsi_control_page *) 5815 (page_index->page_data + (page_index->page_len * 5816 CTL_PAGE_CURRENT)); 5817 saved_cp = (struct scsi_control_page *) 5818 (page_index->page_data + (page_index->page_len * 5819 CTL_PAGE_SAVED)); 5820 5821 mtx_lock(&lun->lun_lock); 5822 if (((current_cp->rlec & SCP_DSENSE) == 0) 5823 && ((user_cp->rlec & SCP_DSENSE) != 0)) { 5824 /* 5825 * Descriptor sense is currently turned off and the user 5826 * wants to turn it on. 5827 */ 5828 current_cp->rlec |= SCP_DSENSE; 5829 saved_cp->rlec |= SCP_DSENSE; 5830 lun->flags |= CTL_LUN_SENSE_DESC; 5831 set_ua = 1; 5832 } else if (((current_cp->rlec & SCP_DSENSE) != 0) 5833 && ((user_cp->rlec & SCP_DSENSE) == 0)) { 5834 /* 5835 * Descriptor sense is currently turned on, and the user 5836 * wants to turn it off. 
5837 */ 5838 current_cp->rlec &= ~SCP_DSENSE; 5839 saved_cp->rlec &= ~SCP_DSENSE; 5840 lun->flags &= ~CTL_LUN_SENSE_DESC; 5841 set_ua = 1; 5842 } 5843 if ((current_cp->queue_flags & SCP_QUEUE_ALG_MASK) != 5844 (user_cp->queue_flags & SCP_QUEUE_ALG_MASK)) { 5845 current_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK; 5846 current_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK; 5847 saved_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK; 5848 saved_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK; 5849 set_ua = 1; 5850 } 5851 if ((current_cp->eca_and_aen & SCP_SWP) != 5852 (user_cp->eca_and_aen & SCP_SWP)) { 5853 current_cp->eca_and_aen &= ~SCP_SWP; 5854 current_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP; 5855 saved_cp->eca_and_aen &= ~SCP_SWP; 5856 saved_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP; 5857 set_ua = 1; 5858 } 5859 if (set_ua != 0) 5860 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 5861 mtx_unlock(&lun->lun_lock); 5862 5863 return (0); 5864} 5865 5866int 5867ctl_caching_sp_handler(struct ctl_scsiio *ctsio, 5868 struct ctl_page_index *page_index, uint8_t *page_ptr) 5869{ 5870 struct scsi_caching_page *current_cp, *saved_cp, *user_cp; 5871 struct ctl_lun *lun; 5872 int set_ua; 5873 uint32_t initidx; 5874 5875 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5876 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5877 set_ua = 0; 5878 5879 user_cp = (struct scsi_caching_page *)page_ptr; 5880 current_cp = (struct scsi_caching_page *) 5881 (page_index->page_data + (page_index->page_len * 5882 CTL_PAGE_CURRENT)); 5883 saved_cp = (struct scsi_caching_page *) 5884 (page_index->page_data + (page_index->page_len * 5885 CTL_PAGE_SAVED)); 5886 5887 mtx_lock(&lun->lun_lock); 5888 if ((current_cp->flags1 & (SCP_WCE | SCP_RCD)) != 5889 (user_cp->flags1 & (SCP_WCE | SCP_RCD))) { 5890 current_cp->flags1 &= ~(SCP_WCE | SCP_RCD); 5891 current_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD); 5892 saved_cp->flags1 &= ~(SCP_WCE 
| SCP_RCD); 5893 saved_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD); 5894 set_ua = 1; 5895 } 5896 if (set_ua != 0) 5897 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 5898 mtx_unlock(&lun->lun_lock); 5899 5900 return (0); 5901} 5902 5903int 5904ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio, 5905 struct ctl_page_index *page_index, 5906 uint8_t *page_ptr) 5907{ 5908 uint8_t *c; 5909 int i; 5910 5911 c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs; 5912 ctl_time_io_secs = 5913 (c[0] << 8) | 5914 (c[1] << 0) | 5915 0; 5916 CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs)); 5917 printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs); 5918 printf("page data:"); 5919 for (i=0; i<8; i++) 5920 printf(" %.2x",page_ptr[i]); 5921 printf("\n"); 5922 return (0); 5923} 5924 5925int 5926ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio, 5927 struct ctl_page_index *page_index, 5928 int pc) 5929{ 5930 struct copan_debugconf_subpage *page; 5931 5932 page = (struct copan_debugconf_subpage *)page_index->page_data + 5933 (page_index->page_len * pc); 5934 5935 switch (pc) { 5936 case SMS_PAGE_CTRL_CHANGEABLE >> 6: 5937 case SMS_PAGE_CTRL_DEFAULT >> 6: 5938 case SMS_PAGE_CTRL_SAVED >> 6: 5939 /* 5940 * We don't update the changable or default bits for this page. 
5941 */ 5942 break; 5943 case SMS_PAGE_CTRL_CURRENT >> 6: 5944 page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8; 5945 page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0; 5946 break; 5947 default: 5948#ifdef NEEDTOPORT 5949 EPRINT(0, "Invalid PC %d!!", pc); 5950#endif /* NEEDTOPORT */ 5951 break; 5952 } 5953 return (0); 5954} 5955 5956 5957static int 5958ctl_do_mode_select(union ctl_io *io) 5959{ 5960 struct scsi_mode_page_header *page_header; 5961 struct ctl_page_index *page_index; 5962 struct ctl_scsiio *ctsio; 5963 int control_dev, page_len; 5964 int page_len_offset, page_len_size; 5965 union ctl_modepage_info *modepage_info; 5966 struct ctl_lun *lun; 5967 int *len_left, *len_used; 5968 int retval, i; 5969 5970 ctsio = &io->scsiio; 5971 page_index = NULL; 5972 page_len = 0; 5973 retval = CTL_RETVAL_COMPLETE; 5974 5975 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5976 5977 if (lun->be_lun->lun_type != T_DIRECT) 5978 control_dev = 1; 5979 else 5980 control_dev = 0; 5981 5982 modepage_info = (union ctl_modepage_info *) 5983 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 5984 len_left = &modepage_info->header.len_left; 5985 len_used = &modepage_info->header.len_used; 5986 5987do_next_page: 5988 5989 page_header = (struct scsi_mode_page_header *) 5990 (ctsio->kern_data_ptr + *len_used); 5991 5992 if (*len_left == 0) { 5993 free(ctsio->kern_data_ptr, M_CTL); 5994 ctl_set_success(ctsio); 5995 ctl_done((union ctl_io *)ctsio); 5996 return (CTL_RETVAL_COMPLETE); 5997 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 5998 5999 free(ctsio->kern_data_ptr, M_CTL); 6000 ctl_set_param_len_error(ctsio); 6001 ctl_done((union ctl_io *)ctsio); 6002 return (CTL_RETVAL_COMPLETE); 6003 6004 } else if ((page_header->page_code & SMPH_SPF) 6005 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 6006 6007 free(ctsio->kern_data_ptr, M_CTL); 6008 ctl_set_param_len_error(ctsio); 6009 ctl_done((union ctl_io *)ctsio); 6010 return 
(CTL_RETVAL_COMPLETE); 6011 } 6012 6013 6014 /* 6015 * XXX KDM should we do something with the block descriptor? 6016 */ 6017 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6018 6019 if ((control_dev != 0) 6020 && (lun->mode_pages.index[i].page_flags & 6021 CTL_PAGE_FLAG_DISK_ONLY)) 6022 continue; 6023 6024 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) != 6025 (page_header->page_code & SMPH_PC_MASK)) 6026 continue; 6027 6028 /* 6029 * If neither page has a subpage code, then we've got a 6030 * match. 6031 */ 6032 if (((lun->mode_pages.index[i].page_code & SMPH_SPF) == 0) 6033 && ((page_header->page_code & SMPH_SPF) == 0)) { 6034 page_index = &lun->mode_pages.index[i]; 6035 page_len = page_header->page_length; 6036 break; 6037 } 6038 6039 /* 6040 * If both pages have subpages, then the subpage numbers 6041 * have to match. 6042 */ 6043 if ((lun->mode_pages.index[i].page_code & SMPH_SPF) 6044 && (page_header->page_code & SMPH_SPF)) { 6045 struct scsi_mode_page_header_sp *sph; 6046 6047 sph = (struct scsi_mode_page_header_sp *)page_header; 6048 6049 if (lun->mode_pages.index[i].subpage == 6050 sph->subpage) { 6051 page_index = &lun->mode_pages.index[i]; 6052 page_len = scsi_2btoul(sph->page_length); 6053 break; 6054 } 6055 } 6056 } 6057 6058 /* 6059 * If we couldn't find the page, or if we don't have a mode select 6060 * handler for it, send back an error to the user. 
6061 */ 6062 if ((page_index == NULL) 6063 || (page_index->select_handler == NULL)) { 6064 ctl_set_invalid_field(ctsio, 6065 /*sks_valid*/ 1, 6066 /*command*/ 0, 6067 /*field*/ *len_used, 6068 /*bit_valid*/ 0, 6069 /*bit*/ 0); 6070 free(ctsio->kern_data_ptr, M_CTL); 6071 ctl_done((union ctl_io *)ctsio); 6072 return (CTL_RETVAL_COMPLETE); 6073 } 6074 6075 if (page_index->page_code & SMPH_SPF) { 6076 page_len_offset = 2; 6077 page_len_size = 2; 6078 } else { 6079 page_len_size = 1; 6080 page_len_offset = 1; 6081 } 6082 6083 /* 6084 * If the length the initiator gives us isn't the one we specify in 6085 * the mode page header, or if they didn't specify enough data in 6086 * the CDB to avoid truncating this page, kick out the request. 6087 */ 6088 if ((page_len != (page_index->page_len - page_len_offset - 6089 page_len_size)) 6090 || (*len_left < page_index->page_len)) { 6091 6092 6093 ctl_set_invalid_field(ctsio, 6094 /*sks_valid*/ 1, 6095 /*command*/ 0, 6096 /*field*/ *len_used + page_len_offset, 6097 /*bit_valid*/ 0, 6098 /*bit*/ 0); 6099 free(ctsio->kern_data_ptr, M_CTL); 6100 ctl_done((union ctl_io *)ctsio); 6101 return (CTL_RETVAL_COMPLETE); 6102 } 6103 6104 /* 6105 * Run through the mode page, checking to make sure that the bits 6106 * the user changed are actually legal for him to change. 6107 */ 6108 for (i = 0; i < page_index->page_len; i++) { 6109 uint8_t *user_byte, *change_mask, *current_byte; 6110 int bad_bit; 6111 int j; 6112 6113 user_byte = (uint8_t *)page_header + i; 6114 change_mask = page_index->page_data + 6115 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; 6116 current_byte = page_index->page_data + 6117 (page_index->page_len * CTL_PAGE_CURRENT) + i; 6118 6119 /* 6120 * Check to see whether the user set any bits in this byte 6121 * that he is not allowed to set. 6122 */ 6123 if ((*user_byte & ~(*change_mask)) == 6124 (*current_byte & ~(*change_mask))) 6125 continue; 6126 6127 /* 6128 * Go through bit by bit to determine which one is illegal. 
6129 */ 6130 bad_bit = 0; 6131 for (j = 7; j >= 0; j--) { 6132 if ((((1 << i) & ~(*change_mask)) & *user_byte) != 6133 (((1 << i) & ~(*change_mask)) & *current_byte)) { 6134 bad_bit = i; 6135 break; 6136 } 6137 } 6138 ctl_set_invalid_field(ctsio, 6139 /*sks_valid*/ 1, 6140 /*command*/ 0, 6141 /*field*/ *len_used + i, 6142 /*bit_valid*/ 1, 6143 /*bit*/ bad_bit); 6144 free(ctsio->kern_data_ptr, M_CTL); 6145 ctl_done((union ctl_io *)ctsio); 6146 return (CTL_RETVAL_COMPLETE); 6147 } 6148 6149 /* 6150 * Decrement these before we call the page handler, since we may 6151 * end up getting called back one way or another before the handler 6152 * returns to this context. 6153 */ 6154 *len_left -= page_index->page_len; 6155 *len_used += page_index->page_len; 6156 6157 retval = page_index->select_handler(ctsio, page_index, 6158 (uint8_t *)page_header); 6159 6160 /* 6161 * If the page handler returns CTL_RETVAL_QUEUED, then we need to 6162 * wait until this queued command completes to finish processing 6163 * the mode page. If it returns anything other than 6164 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have 6165 * already set the sense information, freed the data pointer, and 6166 * completed the io for us. 6167 */ 6168 if (retval != CTL_RETVAL_COMPLETE) 6169 goto bailout_no_done; 6170 6171 /* 6172 * If the initiator sent us more than one page, parse the next one. 
6173 */ 6174 if (*len_left > 0) 6175 goto do_next_page; 6176 6177 ctl_set_success(ctsio); 6178 free(ctsio->kern_data_ptr, M_CTL); 6179 ctl_done((union ctl_io *)ctsio); 6180 6181bailout_no_done: 6182 6183 return (CTL_RETVAL_COMPLETE); 6184 6185} 6186 6187int 6188ctl_mode_select(struct ctl_scsiio *ctsio) 6189{ 6190 int param_len, pf, sp; 6191 int header_size, bd_len; 6192 int len_left, len_used; 6193 struct ctl_page_index *page_index; 6194 struct ctl_lun *lun; 6195 int control_dev, page_len; 6196 union ctl_modepage_info *modepage_info; 6197 int retval; 6198 6199 pf = 0; 6200 sp = 0; 6201 page_len = 0; 6202 len_used = 0; 6203 len_left = 0; 6204 retval = 0; 6205 bd_len = 0; 6206 page_index = NULL; 6207 6208 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6209 6210 if (lun->be_lun->lun_type != T_DIRECT) 6211 control_dev = 1; 6212 else 6213 control_dev = 0; 6214 6215 switch (ctsio->cdb[0]) { 6216 case MODE_SELECT_6: { 6217 struct scsi_mode_select_6 *cdb; 6218 6219 cdb = (struct scsi_mode_select_6 *)ctsio->cdb; 6220 6221 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6222 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6223 6224 param_len = cdb->length; 6225 header_size = sizeof(struct scsi_mode_header_6); 6226 break; 6227 } 6228 case MODE_SELECT_10: { 6229 struct scsi_mode_select_10 *cdb; 6230 6231 cdb = (struct scsi_mode_select_10 *)ctsio->cdb; 6232 6233 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6234 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6235 6236 param_len = scsi_2btoul(cdb->length); 6237 header_size = sizeof(struct scsi_mode_header_10); 6238 break; 6239 } 6240 default: 6241 ctl_set_invalid_opcode(ctsio); 6242 ctl_done((union ctl_io *)ctsio); 6243 return (CTL_RETVAL_COMPLETE); 6244 break; /* NOTREACHED */ 6245 } 6246 6247 /* 6248 * From SPC-3: 6249 * "A parameter list length of zero indicates that the Data-Out Buffer 6250 * shall be empty. This condition shall not be considered as an error." 
6251 */ 6252 if (param_len == 0) { 6253 ctl_set_success(ctsio); 6254 ctl_done((union ctl_io *)ctsio); 6255 return (CTL_RETVAL_COMPLETE); 6256 } 6257 6258 /* 6259 * Since we'll hit this the first time through, prior to 6260 * allocation, we don't need to free a data buffer here. 6261 */ 6262 if (param_len < header_size) { 6263 ctl_set_param_len_error(ctsio); 6264 ctl_done((union ctl_io *)ctsio); 6265 return (CTL_RETVAL_COMPLETE); 6266 } 6267 6268 /* 6269 * Allocate the data buffer and grab the user's data. In theory, 6270 * we shouldn't have to sanity check the parameter list length here 6271 * because the maximum size is 64K. We should be able to malloc 6272 * that much without too many problems. 6273 */ 6274 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6275 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 6276 ctsio->kern_data_len = param_len; 6277 ctsio->kern_total_len = param_len; 6278 ctsio->kern_data_resid = 0; 6279 ctsio->kern_rel_offset = 0; 6280 ctsio->kern_sg_entries = 0; 6281 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6282 ctsio->be_move_done = ctl_config_move_done; 6283 ctl_datamove((union ctl_io *)ctsio); 6284 6285 return (CTL_RETVAL_COMPLETE); 6286 } 6287 6288 switch (ctsio->cdb[0]) { 6289 case MODE_SELECT_6: { 6290 struct scsi_mode_header_6 *mh6; 6291 6292 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; 6293 bd_len = mh6->blk_desc_len; 6294 break; 6295 } 6296 case MODE_SELECT_10: { 6297 struct scsi_mode_header_10 *mh10; 6298 6299 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; 6300 bd_len = scsi_2btoul(mh10->blk_desc_len); 6301 break; 6302 } 6303 default: 6304 panic("Invalid CDB type %#x", ctsio->cdb[0]); 6305 break; 6306 } 6307 6308 if (param_len < (header_size + bd_len)) { 6309 free(ctsio->kern_data_ptr, M_CTL); 6310 ctl_set_param_len_error(ctsio); 6311 ctl_done((union ctl_io *)ctsio); 6312 return (CTL_RETVAL_COMPLETE); 6313 } 6314 6315 /* 6316 * Set the IO_CONT flag, so that if this I/O gets passed to 6317 * 
ctl_config_write_done(), it'll get passed back to 6318 * ctl_do_mode_select() for further processing, or completion if 6319 * we're all done. 6320 */ 6321 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6322 ctsio->io_cont = ctl_do_mode_select; 6323 6324 modepage_info = (union ctl_modepage_info *) 6325 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6326 6327 memset(modepage_info, 0, sizeof(*modepage_info)); 6328 6329 len_left = param_len - header_size - bd_len; 6330 len_used = header_size + bd_len; 6331 6332 modepage_info->header.len_left = len_left; 6333 modepage_info->header.len_used = len_used; 6334 6335 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6336} 6337 6338int 6339ctl_mode_sense(struct ctl_scsiio *ctsio) 6340{ 6341 struct ctl_lun *lun; 6342 int pc, page_code, dbd, llba, subpage; 6343 int alloc_len, page_len, header_len, total_len; 6344 struct scsi_mode_block_descr *block_desc; 6345 struct ctl_page_index *page_index; 6346 int control_dev; 6347 6348 dbd = 0; 6349 llba = 0; 6350 block_desc = NULL; 6351 page_index = NULL; 6352 6353 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6354 6355 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6356 6357 if (lun->be_lun->lun_type != T_DIRECT) 6358 control_dev = 1; 6359 else 6360 control_dev = 0; 6361 6362 switch (ctsio->cdb[0]) { 6363 case MODE_SENSE_6: { 6364 struct scsi_mode_sense_6 *cdb; 6365 6366 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6367 6368 header_len = sizeof(struct scsi_mode_hdr_6); 6369 if (cdb->byte2 & SMS_DBD) 6370 dbd = 1; 6371 else 6372 header_len += sizeof(struct scsi_mode_block_descr); 6373 6374 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6375 page_code = cdb->page & SMS_PAGE_CODE; 6376 subpage = cdb->subpage; 6377 alloc_len = cdb->length; 6378 break; 6379 } 6380 case MODE_SENSE_10: { 6381 struct scsi_mode_sense_10 *cdb; 6382 6383 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6384 6385 header_len = sizeof(struct scsi_mode_hdr_10); 6386 6387 if (cdb->byte2 & SMS_DBD) 6388 dbd = 
1; 6389 else 6390 header_len += sizeof(struct scsi_mode_block_descr); 6391 if (cdb->byte2 & SMS10_LLBAA) 6392 llba = 1; 6393 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6394 page_code = cdb->page & SMS_PAGE_CODE; 6395 subpage = cdb->subpage; 6396 alloc_len = scsi_2btoul(cdb->length); 6397 break; 6398 } 6399 default: 6400 ctl_set_invalid_opcode(ctsio); 6401 ctl_done((union ctl_io *)ctsio); 6402 return (CTL_RETVAL_COMPLETE); 6403 break; /* NOTREACHED */ 6404 } 6405 6406 /* 6407 * We have to make a first pass through to calculate the size of 6408 * the pages that match the user's query. Then we allocate enough 6409 * memory to hold it, and actually copy the data into the buffer. 6410 */ 6411 switch (page_code) { 6412 case SMS_ALL_PAGES_PAGE: { 6413 int i; 6414 6415 page_len = 0; 6416 6417 /* 6418 * At the moment, values other than 0 and 0xff here are 6419 * reserved according to SPC-3. 6420 */ 6421 if ((subpage != SMS_SUBPAGE_PAGE_0) 6422 && (subpage != SMS_SUBPAGE_ALL)) { 6423 ctl_set_invalid_field(ctsio, 6424 /*sks_valid*/ 1, 6425 /*command*/ 1, 6426 /*field*/ 3, 6427 /*bit_valid*/ 0, 6428 /*bit*/ 0); 6429 ctl_done((union ctl_io *)ctsio); 6430 return (CTL_RETVAL_COMPLETE); 6431 } 6432 6433 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6434 if ((control_dev != 0) 6435 && (lun->mode_pages.index[i].page_flags & 6436 CTL_PAGE_FLAG_DISK_ONLY)) 6437 continue; 6438 6439 /* 6440 * We don't use this subpage if the user didn't 6441 * request all subpages. 
6442 */ 6443 if ((lun->mode_pages.index[i].subpage != 0) 6444 && (subpage == SMS_SUBPAGE_PAGE_0)) 6445 continue; 6446 6447#if 0 6448 printf("found page %#x len %d\n", 6449 lun->mode_pages.index[i].page_code & 6450 SMPH_PC_MASK, 6451 lun->mode_pages.index[i].page_len); 6452#endif 6453 page_len += lun->mode_pages.index[i].page_len; 6454 } 6455 break; 6456 } 6457 default: { 6458 int i; 6459 6460 page_len = 0; 6461 6462 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6463 /* Look for the right page code */ 6464 if ((lun->mode_pages.index[i].page_code & 6465 SMPH_PC_MASK) != page_code) 6466 continue; 6467 6468 /* Look for the right subpage or the subpage wildcard*/ 6469 if ((lun->mode_pages.index[i].subpage != subpage) 6470 && (subpage != SMS_SUBPAGE_ALL)) 6471 continue; 6472 6473 /* Make sure the page is supported for this dev type */ 6474 if ((control_dev != 0) 6475 && (lun->mode_pages.index[i].page_flags & 6476 CTL_PAGE_FLAG_DISK_ONLY)) 6477 continue; 6478 6479#if 0 6480 printf("found page %#x len %d\n", 6481 lun->mode_pages.index[i].page_code & 6482 SMPH_PC_MASK, 6483 lun->mode_pages.index[i].page_len); 6484#endif 6485 6486 page_len += lun->mode_pages.index[i].page_len; 6487 } 6488 6489 if (page_len == 0) { 6490 ctl_set_invalid_field(ctsio, 6491 /*sks_valid*/ 1, 6492 /*command*/ 1, 6493 /*field*/ 2, 6494 /*bit_valid*/ 1, 6495 /*bit*/ 5); 6496 ctl_done((union ctl_io *)ctsio); 6497 return (CTL_RETVAL_COMPLETE); 6498 } 6499 break; 6500 } 6501 } 6502 6503 total_len = header_len + page_len; 6504#if 0 6505 printf("header_len = %d, page_len = %d, total_len = %d\n", 6506 header_len, page_len, total_len); 6507#endif 6508 6509 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6510 ctsio->kern_sg_entries = 0; 6511 ctsio->kern_data_resid = 0; 6512 ctsio->kern_rel_offset = 0; 6513 if (total_len < alloc_len) { 6514 ctsio->residual = alloc_len - total_len; 6515 ctsio->kern_data_len = total_len; 6516 ctsio->kern_total_len = total_len; 6517 } else { 6518 ctsio->residual 
= 0; 6519 ctsio->kern_data_len = alloc_len; 6520 ctsio->kern_total_len = alloc_len; 6521 } 6522 6523 switch (ctsio->cdb[0]) { 6524 case MODE_SENSE_6: { 6525 struct scsi_mode_hdr_6 *header; 6526 6527 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6528 6529 header->datalen = MIN(total_len - 1, 254); 6530 if (control_dev == 0) { 6531 header->dev_specific = 0x10; /* DPOFUA */ 6532 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6533 (lun->mode_pages.control_page[CTL_PAGE_CURRENT] 6534 .eca_and_aen & SCP_SWP) != 0) 6535 header->dev_specific |= 0x80; /* WP */ 6536 } 6537 if (dbd) 6538 header->block_descr_len = 0; 6539 else 6540 header->block_descr_len = 6541 sizeof(struct scsi_mode_block_descr); 6542 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6543 break; 6544 } 6545 case MODE_SENSE_10: { 6546 struct scsi_mode_hdr_10 *header; 6547 int datalen; 6548 6549 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6550 6551 datalen = MIN(total_len - 2, 65533); 6552 scsi_ulto2b(datalen, header->datalen); 6553 if (control_dev == 0) { 6554 header->dev_specific = 0x10; /* DPOFUA */ 6555 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6556 (lun->mode_pages.control_page[CTL_PAGE_CURRENT] 6557 .eca_and_aen & SCP_SWP) != 0) 6558 header->dev_specific |= 0x80; /* WP */ 6559 } 6560 if (dbd) 6561 scsi_ulto2b(0, header->block_descr_len); 6562 else 6563 scsi_ulto2b(sizeof(struct scsi_mode_block_descr), 6564 header->block_descr_len); 6565 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6566 break; 6567 } 6568 default: 6569 panic("invalid CDB type %#x", ctsio->cdb[0]); 6570 break; /* NOTREACHED */ 6571 } 6572 6573 /* 6574 * If we've got a disk, use its blocksize in the block 6575 * descriptor. Otherwise, just set it to 0. 
6576 */ 6577 if (dbd == 0) { 6578 if (control_dev == 0) 6579 scsi_ulto3b(lun->be_lun->blocksize, 6580 block_desc->block_len); 6581 else 6582 scsi_ulto3b(0, block_desc->block_len); 6583 } 6584 6585 switch (page_code) { 6586 case SMS_ALL_PAGES_PAGE: { 6587 int i, data_used; 6588 6589 data_used = header_len; 6590 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6591 struct ctl_page_index *page_index; 6592 6593 page_index = &lun->mode_pages.index[i]; 6594 6595 if ((control_dev != 0) 6596 && (page_index->page_flags & 6597 CTL_PAGE_FLAG_DISK_ONLY)) 6598 continue; 6599 6600 /* 6601 * We don't use this subpage if the user didn't 6602 * request all subpages. We already checked (above) 6603 * to make sure the user only specified a subpage 6604 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6605 */ 6606 if ((page_index->subpage != 0) 6607 && (subpage == SMS_SUBPAGE_PAGE_0)) 6608 continue; 6609 6610 /* 6611 * Call the handler, if it exists, to update the 6612 * page to the latest values. 6613 */ 6614 if (page_index->sense_handler != NULL) 6615 page_index->sense_handler(ctsio, page_index,pc); 6616 6617 memcpy(ctsio->kern_data_ptr + data_used, 6618 page_index->page_data + 6619 (page_index->page_len * pc), 6620 page_index->page_len); 6621 data_used += page_index->page_len; 6622 } 6623 break; 6624 } 6625 default: { 6626 int i, data_used; 6627 6628 data_used = header_len; 6629 6630 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6631 struct ctl_page_index *page_index; 6632 6633 page_index = &lun->mode_pages.index[i]; 6634 6635 /* Look for the right page code */ 6636 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6637 continue; 6638 6639 /* Look for the right subpage or the subpage wildcard*/ 6640 if ((page_index->subpage != subpage) 6641 && (subpage != SMS_SUBPAGE_ALL)) 6642 continue; 6643 6644 /* Make sure the page is supported for this dev type */ 6645 if ((control_dev != 0) 6646 && (page_index->page_flags & 6647 CTL_PAGE_FLAG_DISK_ONLY)) 6648 continue; 6649 6650 /* 6651 * Call 
the handler, if it exists, to update the 6652 * page to the latest values. 6653 */ 6654 if (page_index->sense_handler != NULL) 6655 page_index->sense_handler(ctsio, page_index,pc); 6656 6657 memcpy(ctsio->kern_data_ptr + data_used, 6658 page_index->page_data + 6659 (page_index->page_len * pc), 6660 page_index->page_len); 6661 data_used += page_index->page_len; 6662 } 6663 break; 6664 } 6665 } 6666 6667 ctl_set_success(ctsio); 6668 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6669 ctsio->be_move_done = ctl_config_move_done; 6670 ctl_datamove((union ctl_io *)ctsio); 6671 return (CTL_RETVAL_COMPLETE); 6672} 6673 6674int 6675ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, 6676 struct ctl_page_index *page_index, 6677 int pc) 6678{ 6679 struct ctl_lun *lun; 6680 struct scsi_log_param_header *phdr; 6681 uint8_t *data; 6682 uint64_t val; 6683 6684 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6685 data = page_index->page_data; 6686 6687 if (lun->backend->lun_attr != NULL && 6688 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail")) 6689 != UINT64_MAX) { 6690 phdr = (struct scsi_log_param_header *)data; 6691 scsi_ulto2b(0x0001, phdr->param_code); 6692 phdr->param_control = SLP_LBIN | SLP_LP; 6693 phdr->param_len = 8; 6694 data = (uint8_t *)(phdr + 1); 6695 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6696 data[4] = 0x02; /* per-pool */ 6697 data += phdr->param_len; 6698 } 6699 6700 if (lun->backend->lun_attr != NULL && 6701 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused")) 6702 != UINT64_MAX) { 6703 phdr = (struct scsi_log_param_header *)data; 6704 scsi_ulto2b(0x0002, phdr->param_code); 6705 phdr->param_control = SLP_LBIN | SLP_LP; 6706 phdr->param_len = 8; 6707 data = (uint8_t *)(phdr + 1); 6708 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6709 data[4] = 0x01; /* per-LUN */ 6710 data += phdr->param_len; 6711 } 6712 6713 if (lun->backend->lun_attr != NULL && 6714 (val = lun->backend->lun_attr(lun->be_lun->be_lun, 
"poolblocksavail")) 6715 != UINT64_MAX) { 6716 phdr = (struct scsi_log_param_header *)data; 6717 scsi_ulto2b(0x00f1, phdr->param_code); 6718 phdr->param_control = SLP_LBIN | SLP_LP; 6719 phdr->param_len = 8; 6720 data = (uint8_t *)(phdr + 1); 6721 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6722 data[4] = 0x02; /* per-pool */ 6723 data += phdr->param_len; 6724 } 6725 6726 if (lun->backend->lun_attr != NULL && 6727 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused")) 6728 != UINT64_MAX) { 6729 phdr = (struct scsi_log_param_header *)data; 6730 scsi_ulto2b(0x00f2, phdr->param_code); 6731 phdr->param_control = SLP_LBIN | SLP_LP; 6732 phdr->param_len = 8; 6733 data = (uint8_t *)(phdr + 1); 6734 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6735 data[4] = 0x02; /* per-pool */ 6736 data += phdr->param_len; 6737 } 6738 6739 page_index->page_len = data - page_index->page_data; 6740 return (0); 6741} 6742 6743int 6744ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio, 6745 struct ctl_page_index *page_index, 6746 int pc) 6747{ 6748 struct ctl_lun *lun; 6749 struct stat_page *data; 6750 uint64_t rn, wn, rb, wb; 6751 struct bintime rt, wt; 6752 int i; 6753 6754 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6755 data = (struct stat_page *)page_index->page_data; 6756 6757 scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code); 6758 data->sap.hdr.param_control = SLP_LBIN; 6759 data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) - 6760 sizeof(struct scsi_log_param_header); 6761 rn = wn = rb = wb = 0; 6762 bintime_clear(&rt); 6763 bintime_clear(&wt); 6764 for (i = 0; i < CTL_MAX_PORTS; i++) { 6765 rn += lun->stats.ports[i].operations[CTL_STATS_READ]; 6766 wn += lun->stats.ports[i].operations[CTL_STATS_WRITE]; 6767 rb += lun->stats.ports[i].bytes[CTL_STATS_READ]; 6768 wb += lun->stats.ports[i].bytes[CTL_STATS_WRITE]; 6769 bintime_add(&rt, &lun->stats.ports[i].time[CTL_STATS_READ]); 6770 bintime_add(&wt, 
&lun->stats.ports[i].time[CTL_STATS_WRITE]); 6771 } 6772 scsi_u64to8b(rn, data->sap.read_num); 6773 scsi_u64to8b(wn, data->sap.write_num); 6774 if (lun->stats.blocksize > 0) { 6775 scsi_u64to8b(wb / lun->stats.blocksize, 6776 data->sap.recvieved_lba); 6777 scsi_u64to8b(rb / lun->stats.blocksize, 6778 data->sap.transmitted_lba); 6779 } 6780 scsi_u64to8b((uint64_t)rt.sec * 1000 + rt.frac / (UINT64_MAX / 1000), 6781 data->sap.read_int); 6782 scsi_u64to8b((uint64_t)wt.sec * 1000 + wt.frac / (UINT64_MAX / 1000), 6783 data->sap.write_int); 6784 scsi_u64to8b(0, data->sap.weighted_num); 6785 scsi_u64to8b(0, data->sap.weighted_int); 6786 scsi_ulto2b(SLP_IT, data->it.hdr.param_code); 6787 data->it.hdr.param_control = SLP_LBIN; 6788 data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) - 6789 sizeof(struct scsi_log_param_header); 6790#ifdef CTL_TIME_IO 6791 scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int); 6792#endif 6793 scsi_ulto2b(SLP_TI, data->ti.hdr.param_code); 6794 data->it.hdr.param_control = SLP_LBIN; 6795 data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) - 6796 sizeof(struct scsi_log_param_header); 6797 scsi_ulto4b(3, data->ti.exponent); 6798 scsi_ulto4b(1, data->ti.integer); 6799 6800 page_index->page_len = sizeof(*data); 6801 return (0); 6802} 6803 6804int 6805ctl_log_sense(struct ctl_scsiio *ctsio) 6806{ 6807 struct ctl_lun *lun; 6808 int i, pc, page_code, subpage; 6809 int alloc_len, total_len; 6810 struct ctl_page_index *page_index; 6811 struct scsi_log_sense *cdb; 6812 struct scsi_log_header *header; 6813 6814 CTL_DEBUG_PRINT(("ctl_log_sense\n")); 6815 6816 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6817 cdb = (struct scsi_log_sense *)ctsio->cdb; 6818 pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6; 6819 page_code = cdb->page & SLS_PAGE_CODE; 6820 subpage = cdb->subpage; 6821 alloc_len = scsi_2btoul(cdb->length); 6822 6823 page_index = NULL; 6824 for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { 6825 page_index = 
&lun->log_pages.index[i]; 6826 6827 /* Look for the right page code */ 6828 if ((page_index->page_code & SL_PAGE_CODE) != page_code) 6829 continue; 6830 6831 /* Look for the right subpage or the subpage wildcard*/ 6832 if (page_index->subpage != subpage) 6833 continue; 6834 6835 break; 6836 } 6837 if (i >= CTL_NUM_LOG_PAGES) { 6838 ctl_set_invalid_field(ctsio, 6839 /*sks_valid*/ 1, 6840 /*command*/ 1, 6841 /*field*/ 2, 6842 /*bit_valid*/ 0, 6843 /*bit*/ 0); 6844 ctl_done((union ctl_io *)ctsio); 6845 return (CTL_RETVAL_COMPLETE); 6846 } 6847 6848 total_len = sizeof(struct scsi_log_header) + page_index->page_len; 6849 6850 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6851 ctsio->kern_sg_entries = 0; 6852 ctsio->kern_data_resid = 0; 6853 ctsio->kern_rel_offset = 0; 6854 if (total_len < alloc_len) { 6855 ctsio->residual = alloc_len - total_len; 6856 ctsio->kern_data_len = total_len; 6857 ctsio->kern_total_len = total_len; 6858 } else { 6859 ctsio->residual = 0; 6860 ctsio->kern_data_len = alloc_len; 6861 ctsio->kern_total_len = alloc_len; 6862 } 6863 6864 header = (struct scsi_log_header *)ctsio->kern_data_ptr; 6865 header->page = page_index->page_code; 6866 if (page_index->subpage) { 6867 header->page |= SL_SPF; 6868 header->subpage = page_index->subpage; 6869 } 6870 scsi_ulto2b(page_index->page_len, header->datalen); 6871 6872 /* 6873 * Call the handler, if it exists, to update the 6874 * page to the latest values. 
6875 */ 6876 if (page_index->sense_handler != NULL) 6877 page_index->sense_handler(ctsio, page_index, pc); 6878 6879 memcpy(header + 1, page_index->page_data, page_index->page_len); 6880 6881 ctl_set_success(ctsio); 6882 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6883 ctsio->be_move_done = ctl_config_move_done; 6884 ctl_datamove((union ctl_io *)ctsio); 6885 return (CTL_RETVAL_COMPLETE); 6886} 6887 6888int 6889ctl_read_capacity(struct ctl_scsiio *ctsio) 6890{ 6891 struct scsi_read_capacity *cdb; 6892 struct scsi_read_capacity_data *data; 6893 struct ctl_lun *lun; 6894 uint32_t lba; 6895 6896 CTL_DEBUG_PRINT(("ctl_read_capacity\n")); 6897 6898 cdb = (struct scsi_read_capacity *)ctsio->cdb; 6899 6900 lba = scsi_4btoul(cdb->addr); 6901 if (((cdb->pmi & SRC_PMI) == 0) 6902 && (lba != 0)) { 6903 ctl_set_invalid_field(/*ctsio*/ ctsio, 6904 /*sks_valid*/ 1, 6905 /*command*/ 1, 6906 /*field*/ 2, 6907 /*bit_valid*/ 0, 6908 /*bit*/ 0); 6909 ctl_done((union ctl_io *)ctsio); 6910 return (CTL_RETVAL_COMPLETE); 6911 } 6912 6913 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6914 6915 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6916 data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; 6917 ctsio->residual = 0; 6918 ctsio->kern_data_len = sizeof(*data); 6919 ctsio->kern_total_len = sizeof(*data); 6920 ctsio->kern_data_resid = 0; 6921 ctsio->kern_rel_offset = 0; 6922 ctsio->kern_sg_entries = 0; 6923 6924 /* 6925 * If the maximum LBA is greater than 0xfffffffe, the user must 6926 * issue a SERVICE ACTION IN (16) command, with the read capacity 6927 * serivce action set. 6928 */ 6929 if (lun->be_lun->maxlba > 0xfffffffe) 6930 scsi_ulto4b(0xffffffff, data->addr); 6931 else 6932 scsi_ulto4b(lun->be_lun->maxlba, data->addr); 6933 6934 /* 6935 * XXX KDM this may not be 512 bytes... 
6936 */ 6937 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6938 6939 ctl_set_success(ctsio); 6940 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6941 ctsio->be_move_done = ctl_config_move_done; 6942 ctl_datamove((union ctl_io *)ctsio); 6943 return (CTL_RETVAL_COMPLETE); 6944} 6945 6946int 6947ctl_read_capacity_16(struct ctl_scsiio *ctsio) 6948{ 6949 struct scsi_read_capacity_16 *cdb; 6950 struct scsi_read_capacity_data_long *data; 6951 struct ctl_lun *lun; 6952 uint64_t lba; 6953 uint32_t alloc_len; 6954 6955 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 6956 6957 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 6958 6959 alloc_len = scsi_4btoul(cdb->alloc_len); 6960 lba = scsi_8btou64(cdb->addr); 6961 6962 if ((cdb->reladr & SRC16_PMI) 6963 && (lba != 0)) { 6964 ctl_set_invalid_field(/*ctsio*/ ctsio, 6965 /*sks_valid*/ 1, 6966 /*command*/ 1, 6967 /*field*/ 2, 6968 /*bit_valid*/ 0, 6969 /*bit*/ 0); 6970 ctl_done((union ctl_io *)ctsio); 6971 return (CTL_RETVAL_COMPLETE); 6972 } 6973 6974 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6975 6976 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6977 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 6978 6979 if (sizeof(*data) < alloc_len) { 6980 ctsio->residual = alloc_len - sizeof(*data); 6981 ctsio->kern_data_len = sizeof(*data); 6982 ctsio->kern_total_len = sizeof(*data); 6983 } else { 6984 ctsio->residual = 0; 6985 ctsio->kern_data_len = alloc_len; 6986 ctsio->kern_total_len = alloc_len; 6987 } 6988 ctsio->kern_data_resid = 0; 6989 ctsio->kern_rel_offset = 0; 6990 ctsio->kern_sg_entries = 0; 6991 6992 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 6993 /* XXX KDM this may not be 512 bytes... 
*/ 6994 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6995 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 6996 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); 6997 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 6998 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 6999 7000 ctl_set_success(ctsio); 7001 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7002 ctsio->be_move_done = ctl_config_move_done; 7003 ctl_datamove((union ctl_io *)ctsio); 7004 return (CTL_RETVAL_COMPLETE); 7005} 7006 7007int 7008ctl_get_lba_status(struct ctl_scsiio *ctsio) 7009{ 7010 struct scsi_get_lba_status *cdb; 7011 struct scsi_get_lba_status_data *data; 7012 struct ctl_lun *lun; 7013 struct ctl_lba_len_flags *lbalen; 7014 uint64_t lba; 7015 uint32_t alloc_len, total_len; 7016 int retval; 7017 7018 CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); 7019 7020 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7021 cdb = (struct scsi_get_lba_status *)ctsio->cdb; 7022 lba = scsi_8btou64(cdb->addr); 7023 alloc_len = scsi_4btoul(cdb->alloc_len); 7024 7025 if (lba > lun->be_lun->maxlba) { 7026 ctl_set_lba_out_of_range(ctsio); 7027 ctl_done((union ctl_io *)ctsio); 7028 return (CTL_RETVAL_COMPLETE); 7029 } 7030 7031 total_len = sizeof(*data) + sizeof(data->descr[0]); 7032 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7033 data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; 7034 7035 if (total_len < alloc_len) { 7036 ctsio->residual = alloc_len - total_len; 7037 ctsio->kern_data_len = total_len; 7038 ctsio->kern_total_len = total_len; 7039 } else { 7040 ctsio->residual = 0; 7041 ctsio->kern_data_len = alloc_len; 7042 ctsio->kern_total_len = alloc_len; 7043 } 7044 ctsio->kern_data_resid = 0; 7045 ctsio->kern_rel_offset = 0; 7046 ctsio->kern_sg_entries = 0; 7047 7048 /* Fill dummy data in case backend can't tell anything. 
*/ 7049 scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); 7050 scsi_u64to8b(lba, data->descr[0].addr); 7051 scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), 7052 data->descr[0].length); 7053 data->descr[0].status = 0; /* Mapped or unknown. */ 7054 7055 ctl_set_success(ctsio); 7056 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7057 ctsio->be_move_done = ctl_config_move_done; 7058 7059 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 7060 lbalen->lba = lba; 7061 lbalen->len = total_len; 7062 lbalen->flags = 0; 7063 retval = lun->backend->config_read((union ctl_io *)ctsio); 7064 return (CTL_RETVAL_COMPLETE); 7065} 7066 7067int 7068ctl_read_defect(struct ctl_scsiio *ctsio) 7069{ 7070 struct scsi_read_defect_data_10 *ccb10; 7071 struct scsi_read_defect_data_12 *ccb12; 7072 struct scsi_read_defect_data_hdr_10 *data10; 7073 struct scsi_read_defect_data_hdr_12 *data12; 7074 uint32_t alloc_len, data_len; 7075 uint8_t format; 7076 7077 CTL_DEBUG_PRINT(("ctl_read_defect\n")); 7078 7079 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7080 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; 7081 format = ccb10->format; 7082 alloc_len = scsi_2btoul(ccb10->alloc_length); 7083 data_len = sizeof(*data10); 7084 } else { 7085 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; 7086 format = ccb12->format; 7087 alloc_len = scsi_4btoul(ccb12->alloc_length); 7088 data_len = sizeof(*data12); 7089 } 7090 if (alloc_len == 0) { 7091 ctl_set_success(ctsio); 7092 ctl_done((union ctl_io *)ctsio); 7093 return (CTL_RETVAL_COMPLETE); 7094 } 7095 7096 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 7097 if (data_len < alloc_len) { 7098 ctsio->residual = alloc_len - data_len; 7099 ctsio->kern_data_len = data_len; 7100 ctsio->kern_total_len = data_len; 7101 } else { 7102 ctsio->residual = 0; 7103 ctsio->kern_data_len = alloc_len; 7104 ctsio->kern_total_len = alloc_len; 7105 } 7106 ctsio->kern_data_resid = 0; 7107 
ctsio->kern_rel_offset = 0; 7108 ctsio->kern_sg_entries = 0; 7109 7110 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 7111 data10 = (struct scsi_read_defect_data_hdr_10 *) 7112 ctsio->kern_data_ptr; 7113 data10->format = format; 7114 scsi_ulto2b(0, data10->length); 7115 } else { 7116 data12 = (struct scsi_read_defect_data_hdr_12 *) 7117 ctsio->kern_data_ptr; 7118 data12->format = format; 7119 scsi_ulto2b(0, data12->generation); 7120 scsi_ulto4b(0, data12->length); 7121 } 7122 7123 ctl_set_success(ctsio); 7124 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7125 ctsio->be_move_done = ctl_config_move_done; 7126 ctl_datamove((union ctl_io *)ctsio); 7127 return (CTL_RETVAL_COMPLETE); 7128} 7129 7130int 7131ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) 7132{ 7133 struct scsi_maintenance_in *cdb; 7134 int retval; 7135 int alloc_len, ext, total_len = 0, g, pc, pg, gs, os; 7136 int num_target_port_groups, num_target_ports; 7137 struct ctl_lun *lun; 7138 struct ctl_softc *softc; 7139 struct ctl_port *port; 7140 struct scsi_target_group_data *rtg_ptr; 7141 struct scsi_target_group_data_extended *rtg_ext_ptr; 7142 struct scsi_target_port_group_descriptor *tpg_desc; 7143 7144 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); 7145 7146 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 7147 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7148 softc = lun->ctl_softc; 7149 7150 retval = CTL_RETVAL_COMPLETE; 7151 7152 switch (cdb->byte2 & STG_PDF_MASK) { 7153 case STG_PDF_LENGTH: 7154 ext = 0; 7155 break; 7156 case STG_PDF_EXTENDED: 7157 ext = 1; 7158 break; 7159 default: 7160 ctl_set_invalid_field(/*ctsio*/ ctsio, 7161 /*sks_valid*/ 1, 7162 /*command*/ 1, 7163 /*field*/ 2, 7164 /*bit_valid*/ 1, 7165 /*bit*/ 5); 7166 ctl_done((union ctl_io *)ctsio); 7167 return(retval); 7168 } 7169 7170 if (softc->is_single) 7171 num_target_port_groups = 1; 7172 else 7173 num_target_port_groups = NUM_TARGET_PORT_GROUPS; 7174 num_target_ports = 0; 7175 
mtx_lock(&softc->ctl_lock); 7176 STAILQ_FOREACH(port, &softc->port_list, links) { 7177 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7178 continue; 7179 if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 7180 continue; 7181 num_target_ports++; 7182 } 7183 mtx_unlock(&softc->ctl_lock); 7184 7185 if (ext) 7186 total_len = sizeof(struct scsi_target_group_data_extended); 7187 else 7188 total_len = sizeof(struct scsi_target_group_data); 7189 total_len += sizeof(struct scsi_target_port_group_descriptor) * 7190 num_target_port_groups + 7191 sizeof(struct scsi_target_port_descriptor) * num_target_ports; 7192 7193 alloc_len = scsi_4btoul(cdb->length); 7194 7195 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7196 7197 ctsio->kern_sg_entries = 0; 7198 7199 if (total_len < alloc_len) { 7200 ctsio->residual = alloc_len - total_len; 7201 ctsio->kern_data_len = total_len; 7202 ctsio->kern_total_len = total_len; 7203 } else { 7204 ctsio->residual = 0; 7205 ctsio->kern_data_len = alloc_len; 7206 ctsio->kern_total_len = alloc_len; 7207 } 7208 ctsio->kern_data_resid = 0; 7209 ctsio->kern_rel_offset = 0; 7210 7211 if (ext) { 7212 rtg_ext_ptr = (struct scsi_target_group_data_extended *) 7213 ctsio->kern_data_ptr; 7214 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); 7215 rtg_ext_ptr->format_type = 0x10; 7216 rtg_ext_ptr->implicit_transition_time = 0; 7217 tpg_desc = &rtg_ext_ptr->groups[0]; 7218 } else { 7219 rtg_ptr = (struct scsi_target_group_data *) 7220 ctsio->kern_data_ptr; 7221 scsi_ulto4b(total_len - 4, rtg_ptr->length); 7222 tpg_desc = &rtg_ptr->groups[0]; 7223 } 7224 7225 mtx_lock(&softc->ctl_lock); 7226 pg = softc->port_min / softc->port_cnt; 7227 if (softc->ha_link == CTL_HA_LINK_OFFLINE) 7228 gs = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; 7229 else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) 7230 gs = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; 7231 else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) 7232 gs = TPG_ASYMMETRIC_ACCESS_STANDBY; 7233 else 7234 gs = 
TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 7235 if (lun->flags & CTL_LUN_PRIMARY_SC) { 7236 os = gs; 7237 gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7238 } else 7239 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 7240 for (g = 0; g < num_target_port_groups; g++) { 7241 tpg_desc->pref_state = (g == pg) ? gs : os; 7242 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | 7243 TPG_U_SUP | TPG_T_SUP; 7244 scsi_ulto2b(g + 1, tpg_desc->target_port_group); 7245 tpg_desc->status = TPG_IMPLICIT; 7246 pc = 0; 7247 STAILQ_FOREACH(port, &softc->port_list, links) { 7248 if (port->targ_port < g * softc->port_cnt || 7249 port->targ_port >= (g + 1) * softc->port_cnt) 7250 continue; 7251 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 7252 continue; 7253 if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 7254 continue; 7255 scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 7256 relative_target_port_identifier); 7257 pc++; 7258 } 7259 tpg_desc->target_port_count = pc; 7260 tpg_desc = (struct scsi_target_port_group_descriptor *) 7261 &tpg_desc->descriptors[pc]; 7262 } 7263 mtx_unlock(&softc->ctl_lock); 7264 7265 ctl_set_success(ctsio); 7266 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7267 ctsio->be_move_done = ctl_config_move_done; 7268 ctl_datamove((union ctl_io *)ctsio); 7269 return(retval); 7270} 7271 7272int 7273ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) 7274{ 7275 struct ctl_lun *lun; 7276 struct scsi_report_supported_opcodes *cdb; 7277 const struct ctl_cmd_entry *entry, *sentry; 7278 struct scsi_report_supported_opcodes_all *all; 7279 struct scsi_report_supported_opcodes_descr *descr; 7280 struct scsi_report_supported_opcodes_one *one; 7281 int retval; 7282 int alloc_len, total_len; 7283 int opcode, service_action, i, j, num; 7284 7285 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); 7286 7287 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; 7288 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7289 7290 retval = CTL_RETVAL_COMPLETE; 
7291 7292 opcode = cdb->requested_opcode; 7293 service_action = scsi_2btoul(cdb->requested_service_action); 7294 switch (cdb->options & RSO_OPTIONS_MASK) { 7295 case RSO_OPTIONS_ALL: 7296 num = 0; 7297 for (i = 0; i < 256; i++) { 7298 entry = &ctl_cmd_table[i]; 7299 if (entry->flags & CTL_CMD_FLAG_SA5) { 7300 for (j = 0; j < 32; j++) { 7301 sentry = &((const struct ctl_cmd_entry *) 7302 entry->execute)[j]; 7303 if (ctl_cmd_applicable( 7304 lun->be_lun->lun_type, sentry)) 7305 num++; 7306 } 7307 } else { 7308 if (ctl_cmd_applicable(lun->be_lun->lun_type, 7309 entry)) 7310 num++; 7311 } 7312 } 7313 total_len = sizeof(struct scsi_report_supported_opcodes_all) + 7314 num * sizeof(struct scsi_report_supported_opcodes_descr); 7315 break; 7316 case RSO_OPTIONS_OC: 7317 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { 7318 ctl_set_invalid_field(/*ctsio*/ ctsio, 7319 /*sks_valid*/ 1, 7320 /*command*/ 1, 7321 /*field*/ 2, 7322 /*bit_valid*/ 1, 7323 /*bit*/ 2); 7324 ctl_done((union ctl_io *)ctsio); 7325 return (CTL_RETVAL_COMPLETE); 7326 } 7327 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7328 break; 7329 case RSO_OPTIONS_OC_SA: 7330 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || 7331 service_action >= 32) { 7332 ctl_set_invalid_field(/*ctsio*/ ctsio, 7333 /*sks_valid*/ 1, 7334 /*command*/ 1, 7335 /*field*/ 2, 7336 /*bit_valid*/ 1, 7337 /*bit*/ 2); 7338 ctl_done((union ctl_io *)ctsio); 7339 return (CTL_RETVAL_COMPLETE); 7340 } 7341 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7342 break; 7343 default: 7344 ctl_set_invalid_field(/*ctsio*/ ctsio, 7345 /*sks_valid*/ 1, 7346 /*command*/ 1, 7347 /*field*/ 2, 7348 /*bit_valid*/ 1, 7349 /*bit*/ 2); 7350 ctl_done((union ctl_io *)ctsio); 7351 return (CTL_RETVAL_COMPLETE); 7352 } 7353 7354 alloc_len = scsi_4btoul(cdb->length); 7355 7356 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7357 7358 ctsio->kern_sg_entries = 0; 7359 7360 if (total_len < 
alloc_len) { 7361 ctsio->residual = alloc_len - total_len; 7362 ctsio->kern_data_len = total_len; 7363 ctsio->kern_total_len = total_len; 7364 } else { 7365 ctsio->residual = 0; 7366 ctsio->kern_data_len = alloc_len; 7367 ctsio->kern_total_len = alloc_len; 7368 } 7369 ctsio->kern_data_resid = 0; 7370 ctsio->kern_rel_offset = 0; 7371 7372 switch (cdb->options & RSO_OPTIONS_MASK) { 7373 case RSO_OPTIONS_ALL: 7374 all = (struct scsi_report_supported_opcodes_all *) 7375 ctsio->kern_data_ptr; 7376 num = 0; 7377 for (i = 0; i < 256; i++) { 7378 entry = &ctl_cmd_table[i]; 7379 if (entry->flags & CTL_CMD_FLAG_SA5) { 7380 for (j = 0; j < 32; j++) { 7381 sentry = &((const struct ctl_cmd_entry *) 7382 entry->execute)[j]; 7383 if (!ctl_cmd_applicable( 7384 lun->be_lun->lun_type, sentry)) 7385 continue; 7386 descr = &all->descr[num++]; 7387 descr->opcode = i; 7388 scsi_ulto2b(j, descr->service_action); 7389 descr->flags = RSO_SERVACTV; 7390 scsi_ulto2b(sentry->length, 7391 descr->cdb_length); 7392 } 7393 } else { 7394 if (!ctl_cmd_applicable(lun->be_lun->lun_type, 7395 entry)) 7396 continue; 7397 descr = &all->descr[num++]; 7398 descr->opcode = i; 7399 scsi_ulto2b(0, descr->service_action); 7400 descr->flags = 0; 7401 scsi_ulto2b(entry->length, descr->cdb_length); 7402 } 7403 } 7404 scsi_ulto4b( 7405 num * sizeof(struct scsi_report_supported_opcodes_descr), 7406 all->length); 7407 break; 7408 case RSO_OPTIONS_OC: 7409 one = (struct scsi_report_supported_opcodes_one *) 7410 ctsio->kern_data_ptr; 7411 entry = &ctl_cmd_table[opcode]; 7412 goto fill_one; 7413 case RSO_OPTIONS_OC_SA: 7414 one = (struct scsi_report_supported_opcodes_one *) 7415 ctsio->kern_data_ptr; 7416 entry = &ctl_cmd_table[opcode]; 7417 entry = &((const struct ctl_cmd_entry *) 7418 entry->execute)[service_action]; 7419fill_one: 7420 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 7421 one->support = 3; 7422 scsi_ulto2b(entry->length, one->cdb_length); 7423 one->cdb_usage[0] = opcode; 7424 
memcpy(&one->cdb_usage[1], entry->usage, 7425 entry->length - 1); 7426 } else 7427 one->support = 1; 7428 break; 7429 } 7430 7431 ctl_set_success(ctsio); 7432 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7433 ctsio->be_move_done = ctl_config_move_done; 7434 ctl_datamove((union ctl_io *)ctsio); 7435 return(retval); 7436} 7437 7438int 7439ctl_report_supported_tmf(struct ctl_scsiio *ctsio) 7440{ 7441 struct scsi_report_supported_tmf *cdb; 7442 struct scsi_report_supported_tmf_data *data; 7443 int retval; 7444 int alloc_len, total_len; 7445 7446 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); 7447 7448 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; 7449 7450 retval = CTL_RETVAL_COMPLETE; 7451 7452 total_len = sizeof(struct scsi_report_supported_tmf_data); 7453 alloc_len = scsi_4btoul(cdb->length); 7454 7455 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7456 7457 ctsio->kern_sg_entries = 0; 7458 7459 if (total_len < alloc_len) { 7460 ctsio->residual = alloc_len - total_len; 7461 ctsio->kern_data_len = total_len; 7462 ctsio->kern_total_len = total_len; 7463 } else { 7464 ctsio->residual = 0; 7465 ctsio->kern_data_len = alloc_len; 7466 ctsio->kern_total_len = alloc_len; 7467 } 7468 ctsio->kern_data_resid = 0; 7469 ctsio->kern_rel_offset = 0; 7470 7471 data = (struct scsi_report_supported_tmf_data *)ctsio->kern_data_ptr; 7472 data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS | 7473 RST_TRS; 7474 data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS; 7475 7476 ctl_set_success(ctsio); 7477 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7478 ctsio->be_move_done = ctl_config_move_done; 7479 ctl_datamove((union ctl_io *)ctsio); 7480 return (retval); 7481} 7482 7483int 7484ctl_report_timestamp(struct ctl_scsiio *ctsio) 7485{ 7486 struct scsi_report_timestamp *cdb; 7487 struct scsi_report_timestamp_data *data; 7488 struct timeval tv; 7489 int64_t timestamp; 7490 int retval; 7491 int alloc_len, total_len; 7492 7493 
CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); 7494 7495 cdb = (struct scsi_report_timestamp *)ctsio->cdb; 7496 7497 retval = CTL_RETVAL_COMPLETE; 7498 7499 total_len = sizeof(struct scsi_report_timestamp_data); 7500 alloc_len = scsi_4btoul(cdb->length); 7501 7502 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7503 7504 ctsio->kern_sg_entries = 0; 7505 7506 if (total_len < alloc_len) { 7507 ctsio->residual = alloc_len - total_len; 7508 ctsio->kern_data_len = total_len; 7509 ctsio->kern_total_len = total_len; 7510 } else { 7511 ctsio->residual = 0; 7512 ctsio->kern_data_len = alloc_len; 7513 ctsio->kern_total_len = alloc_len; 7514 } 7515 ctsio->kern_data_resid = 0; 7516 ctsio->kern_rel_offset = 0; 7517 7518 data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; 7519 scsi_ulto2b(sizeof(*data) - 2, data->length); 7520 data->origin = RTS_ORIG_OUTSIDE; 7521 getmicrotime(&tv); 7522 timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; 7523 scsi_ulto4b(timestamp >> 16, data->timestamp); 7524 scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); 7525 7526 ctl_set_success(ctsio); 7527 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7528 ctsio->be_move_done = ctl_config_move_done; 7529 ctl_datamove((union ctl_io *)ctsio); 7530 return (retval); 7531} 7532 7533int 7534ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) 7535{ 7536 struct scsi_per_res_in *cdb; 7537 int alloc_len, total_len = 0; 7538 /* struct scsi_per_res_in_rsrv in_data; */ 7539 struct ctl_lun *lun; 7540 struct ctl_softc *softc; 7541 uint64_t key; 7542 7543 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); 7544 7545 cdb = (struct scsi_per_res_in *)ctsio->cdb; 7546 7547 alloc_len = scsi_2btoul(cdb->length); 7548 7549 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7550 softc = lun->ctl_softc; 7551 7552retry: 7553 mtx_lock(&lun->lun_lock); 7554 switch (cdb->action) { 7555 case SPRI_RK: /* read keys */ 7556 total_len = sizeof(struct scsi_per_res_in_keys) + 7557 
lun->pr_key_count * 7558 sizeof(struct scsi_per_res_key); 7559 break; 7560 case SPRI_RR: /* read reservation */ 7561 if (lun->flags & CTL_LUN_PR_RESERVED) 7562 total_len = sizeof(struct scsi_per_res_in_rsrv); 7563 else 7564 total_len = sizeof(struct scsi_per_res_in_header); 7565 break; 7566 case SPRI_RC: /* report capabilities */ 7567 total_len = sizeof(struct scsi_per_res_cap); 7568 break; 7569 case SPRI_RS: /* read full status */ 7570 total_len = sizeof(struct scsi_per_res_in_header) + 7571 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7572 lun->pr_key_count; 7573 break; 7574 default: 7575 panic("Invalid PR type %x", cdb->action); 7576 } 7577 mtx_unlock(&lun->lun_lock); 7578 7579 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7580 7581 if (total_len < alloc_len) { 7582 ctsio->residual = alloc_len - total_len; 7583 ctsio->kern_data_len = total_len; 7584 ctsio->kern_total_len = total_len; 7585 } else { 7586 ctsio->residual = 0; 7587 ctsio->kern_data_len = alloc_len; 7588 ctsio->kern_total_len = alloc_len; 7589 } 7590 7591 ctsio->kern_data_resid = 0; 7592 ctsio->kern_rel_offset = 0; 7593 ctsio->kern_sg_entries = 0; 7594 7595 mtx_lock(&lun->lun_lock); 7596 switch (cdb->action) { 7597 case SPRI_RK: { // read keys 7598 struct scsi_per_res_in_keys *res_keys; 7599 int i, key_count; 7600 7601 res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7602 7603 /* 7604 * We had to drop the lock to allocate our buffer, which 7605 * leaves time for someone to come in with another 7606 * persistent reservation. (That is unlikely, though, 7607 * since this should be the only persistent reservation 7608 * command active right now.) 
7609 */ 7610 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7611 (lun->pr_key_count * 7612 sizeof(struct scsi_per_res_key)))){ 7613 mtx_unlock(&lun->lun_lock); 7614 free(ctsio->kern_data_ptr, M_CTL); 7615 printf("%s: reservation length changed, retrying\n", 7616 __func__); 7617 goto retry; 7618 } 7619 7620 scsi_ulto4b(lun->PRGeneration, res_keys->header.generation); 7621 7622 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7623 lun->pr_key_count, res_keys->header.length); 7624 7625 for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) { 7626 if ((key = ctl_get_prkey(lun, i)) == 0) 7627 continue; 7628 7629 /* 7630 * We used lun->pr_key_count to calculate the 7631 * size to allocate. If it turns out the number of 7632 * initiators with the registered flag set is 7633 * larger than that (i.e. they haven't been kept in 7634 * sync), we've got a problem. 7635 */ 7636 if (key_count >= lun->pr_key_count) { 7637#ifdef NEEDTOPORT 7638 csevent_log(CSC_CTL | CSC_SHELF_SW | 7639 CTL_PR_ERROR, 7640 csevent_LogType_Fault, 7641 csevent_AlertLevel_Yellow, 7642 csevent_FRU_ShelfController, 7643 csevent_FRU_Firmware, 7644 csevent_FRU_Unknown, 7645 "registered keys %d >= key " 7646 "count %d", key_count, 7647 lun->pr_key_count); 7648#endif 7649 key_count++; 7650 continue; 7651 } 7652 scsi_u64to8b(key, res_keys->keys[key_count].key); 7653 key_count++; 7654 } 7655 break; 7656 } 7657 case SPRI_RR: { // read reservation 7658 struct scsi_per_res_in_rsrv *res; 7659 int tmp_len, header_only; 7660 7661 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7662 7663 scsi_ulto4b(lun->PRGeneration, res->header.generation); 7664 7665 if (lun->flags & CTL_LUN_PR_RESERVED) 7666 { 7667 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7668 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7669 res->header.length); 7670 header_only = 0; 7671 } else { 7672 tmp_len = sizeof(struct scsi_per_res_in_header); 7673 scsi_ulto4b(0, res->header.length); 7674 header_only = 1; 7675 } 7676 7677 /* 
7678 * We had to drop the lock to allocate our buffer, which 7679 * leaves time for someone to come in with another 7680 * persistent reservation. (That is unlikely, though, 7681 * since this should be the only persistent reservation 7682 * command active right now.) 7683 */ 7684 if (tmp_len != total_len) { 7685 mtx_unlock(&lun->lun_lock); 7686 free(ctsio->kern_data_ptr, M_CTL); 7687 printf("%s: reservation status changed, retrying\n", 7688 __func__); 7689 goto retry; 7690 } 7691 7692 /* 7693 * No reservation held, so we're done. 7694 */ 7695 if (header_only != 0) 7696 break; 7697 7698 /* 7699 * If the registration is an All Registrants type, the key 7700 * is 0, since it doesn't really matter. 7701 */ 7702 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7703 scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), 7704 res->data.reservation); 7705 } 7706 res->data.scopetype = lun->res_type; 7707 break; 7708 } 7709 case SPRI_RC: //report capabilities 7710 { 7711 struct scsi_per_res_cap *res_cap; 7712 uint16_t type_mask; 7713 7714 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7715 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7716 res_cap->flags2 |= SPRI_TMV | SPRI_ALLOW_5; 7717 type_mask = SPRI_TM_WR_EX_AR | 7718 SPRI_TM_EX_AC_RO | 7719 SPRI_TM_WR_EX_RO | 7720 SPRI_TM_EX_AC | 7721 SPRI_TM_WR_EX | 7722 SPRI_TM_EX_AC_AR; 7723 scsi_ulto2b(type_mask, res_cap->type_mask); 7724 break; 7725 } 7726 case SPRI_RS: { // read full status 7727 struct scsi_per_res_in_full *res_status; 7728 struct scsi_per_res_in_full_desc *res_desc; 7729 struct ctl_port *port; 7730 int i, len; 7731 7732 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; 7733 7734 /* 7735 * We had to drop the lock to allocate our buffer, which 7736 * leaves time for someone to come in with another 7737 * persistent reservation. (That is unlikely, though, 7738 * since this should be the only persistent reservation 7739 * command active right now.) 
7740 */ 7741 if (total_len < (sizeof(struct scsi_per_res_in_header) + 7742 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7743 lun->pr_key_count)){ 7744 mtx_unlock(&lun->lun_lock); 7745 free(ctsio->kern_data_ptr, M_CTL); 7746 printf("%s: reservation length changed, retrying\n", 7747 __func__); 7748 goto retry; 7749 } 7750 7751 scsi_ulto4b(lun->PRGeneration, res_status->header.generation); 7752 7753 res_desc = &res_status->desc[0]; 7754 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7755 if ((key = ctl_get_prkey(lun, i)) == 0) 7756 continue; 7757 7758 scsi_u64to8b(key, res_desc->res_key.key); 7759 if ((lun->flags & CTL_LUN_PR_RESERVED) && 7760 (lun->pr_res_idx == i || 7761 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { 7762 res_desc->flags = SPRI_FULL_R_HOLDER; 7763 res_desc->scopetype = lun->res_type; 7764 } 7765 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, 7766 res_desc->rel_trgt_port_id); 7767 len = 0; 7768 port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT]; 7769 if (port != NULL) 7770 len = ctl_create_iid(port, 7771 i % CTL_MAX_INIT_PER_PORT, 7772 res_desc->transport_id); 7773 scsi_ulto4b(len, res_desc->additional_length); 7774 res_desc = (struct scsi_per_res_in_full_desc *) 7775 &res_desc->transport_id[len]; 7776 } 7777 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], 7778 res_status->header.length); 7779 break; 7780 } 7781 default: 7782 /* 7783 * This is a bug, because we just checked for this above, 7784 * and should have returned an error. 7785 */ 7786 panic("Invalid PR type %x", cdb->action); 7787 break; /* NOTREACHED */ 7788 } 7789 mtx_unlock(&lun->lun_lock); 7790 7791 ctl_set_success(ctsio); 7792 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7793 ctsio->be_move_done = ctl_config_move_done; 7794 ctl_datamove((union ctl_io *)ctsio); 7795 return (CTL_RETVAL_COMPLETE); 7796} 7797 7798/* 7799 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7800 * it should return. 
7801 */ 7802static int 7803ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7804 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7805 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7806 struct scsi_per_res_out_parms* param) 7807{ 7808 union ctl_ha_msg persis_io; 7809 int i; 7810 7811 mtx_lock(&lun->lun_lock); 7812 if (sa_res_key == 0) { 7813 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7814 /* validate scope and type */ 7815 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7816 SPR_LU_SCOPE) { 7817 mtx_unlock(&lun->lun_lock); 7818 ctl_set_invalid_field(/*ctsio*/ ctsio, 7819 /*sks_valid*/ 1, 7820 /*command*/ 1, 7821 /*field*/ 2, 7822 /*bit_valid*/ 1, 7823 /*bit*/ 4); 7824 ctl_done((union ctl_io *)ctsio); 7825 return (1); 7826 } 7827 7828 if (type>8 || type==2 || type==4 || type==0) { 7829 mtx_unlock(&lun->lun_lock); 7830 ctl_set_invalid_field(/*ctsio*/ ctsio, 7831 /*sks_valid*/ 1, 7832 /*command*/ 1, 7833 /*field*/ 2, 7834 /*bit_valid*/ 1, 7835 /*bit*/ 0); 7836 ctl_done((union ctl_io *)ctsio); 7837 return (1); 7838 } 7839 7840 /* 7841 * Unregister everybody else and build UA for 7842 * them 7843 */ 7844 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7845 if (i == residx || ctl_get_prkey(lun, i) == 0) 7846 continue; 7847 7848 ctl_clr_prkey(lun, i); 7849 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7850 } 7851 lun->pr_key_count = 1; 7852 lun->res_type = type; 7853 if (lun->res_type != SPR_TYPE_WR_EX_AR 7854 && lun->res_type != SPR_TYPE_EX_AC_AR) 7855 lun->pr_res_idx = residx; 7856 lun->PRGeneration++; 7857 mtx_unlock(&lun->lun_lock); 7858 7859 /* send msg to other side */ 7860 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7861 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7862 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7863 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7864 persis_io.pr.pr_info.res_type = type; 7865 memcpy(persis_io.pr.pr_info.sa_res_key, 7866 param->serv_act_res_key, 7867 sizeof(param->serv_act_res_key)); 7868 
ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7869 sizeof(persis_io.pr), M_WAITOK); 7870 } else { 7871 /* not all registrants */ 7872 mtx_unlock(&lun->lun_lock); 7873 free(ctsio->kern_data_ptr, M_CTL); 7874 ctl_set_invalid_field(ctsio, 7875 /*sks_valid*/ 1, 7876 /*command*/ 0, 7877 /*field*/ 8, 7878 /*bit_valid*/ 0, 7879 /*bit*/ 0); 7880 ctl_done((union ctl_io *)ctsio); 7881 return (1); 7882 } 7883 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7884 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 7885 int found = 0; 7886 7887 if (res_key == sa_res_key) { 7888 /* special case */ 7889 /* 7890 * The spec implies this is not good but doesn't 7891 * say what to do. There are two choices either 7892 * generate a res conflict or check condition 7893 * with illegal field in parameter data. Since 7894 * that is what is done when the sa_res_key is 7895 * zero I'll take that approach since this has 7896 * to do with the sa_res_key. 7897 */ 7898 mtx_unlock(&lun->lun_lock); 7899 free(ctsio->kern_data_ptr, M_CTL); 7900 ctl_set_invalid_field(ctsio, 7901 /*sks_valid*/ 1, 7902 /*command*/ 0, 7903 /*field*/ 8, 7904 /*bit_valid*/ 0, 7905 /*bit*/ 0); 7906 ctl_done((union ctl_io *)ctsio); 7907 return (1); 7908 } 7909 7910 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7911 if (ctl_get_prkey(lun, i) != sa_res_key) 7912 continue; 7913 7914 found = 1; 7915 ctl_clr_prkey(lun, i); 7916 lun->pr_key_count--; 7917 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7918 } 7919 if (!found) { 7920 mtx_unlock(&lun->lun_lock); 7921 free(ctsio->kern_data_ptr, M_CTL); 7922 ctl_set_reservation_conflict(ctsio); 7923 ctl_done((union ctl_io *)ctsio); 7924 return (CTL_RETVAL_COMPLETE); 7925 } 7926 lun->PRGeneration++; 7927 mtx_unlock(&lun->lun_lock); 7928 7929 /* send msg to other side */ 7930 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7931 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7932 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7933 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7934 persis_io.pr.pr_info.res_type 
= type; 7935 memcpy(persis_io.pr.pr_info.sa_res_key, 7936 param->serv_act_res_key, 7937 sizeof(param->serv_act_res_key)); 7938 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7939 sizeof(persis_io.pr), M_WAITOK); 7940 } else { 7941 /* Reserved but not all registrants */ 7942 /* sa_res_key is res holder */ 7943 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 7944 /* validate scope and type */ 7945 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7946 SPR_LU_SCOPE) { 7947 mtx_unlock(&lun->lun_lock); 7948 ctl_set_invalid_field(/*ctsio*/ ctsio, 7949 /*sks_valid*/ 1, 7950 /*command*/ 1, 7951 /*field*/ 2, 7952 /*bit_valid*/ 1, 7953 /*bit*/ 4); 7954 ctl_done((union ctl_io *)ctsio); 7955 return (1); 7956 } 7957 7958 if (type>8 || type==2 || type==4 || type==0) { 7959 mtx_unlock(&lun->lun_lock); 7960 ctl_set_invalid_field(/*ctsio*/ ctsio, 7961 /*sks_valid*/ 1, 7962 /*command*/ 1, 7963 /*field*/ 2, 7964 /*bit_valid*/ 1, 7965 /*bit*/ 0); 7966 ctl_done((union ctl_io *)ctsio); 7967 return (1); 7968 } 7969 7970 /* 7971 * Do the following: 7972 * if sa_res_key != res_key remove all 7973 * registrants w/sa_res_key and generate UA 7974 * for these registrants(Registrations 7975 * Preempted) if it wasn't an exclusive 7976 * reservation generate UA(Reservations 7977 * Preempted) for all other registered nexuses 7978 * if the type has changed. Establish the new 7979 * reservation and holder. If res_key and 7980 * sa_res_key are the same do the above 7981 * except don't unregister the res holder. 
7982 */ 7983 7984 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7985 if (i == residx || ctl_get_prkey(lun, i) == 0) 7986 continue; 7987 7988 if (sa_res_key == ctl_get_prkey(lun, i)) { 7989 ctl_clr_prkey(lun, i); 7990 lun->pr_key_count--; 7991 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7992 } else if (type != lun->res_type 7993 && (lun->res_type == SPR_TYPE_WR_EX_RO 7994 || lun->res_type ==SPR_TYPE_EX_AC_RO)){ 7995 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 7996 } 7997 } 7998 lun->res_type = type; 7999 if (lun->res_type != SPR_TYPE_WR_EX_AR 8000 && lun->res_type != SPR_TYPE_EX_AC_AR) 8001 lun->pr_res_idx = residx; 8002 else 8003 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8004 lun->PRGeneration++; 8005 mtx_unlock(&lun->lun_lock); 8006 8007 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8008 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8009 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8010 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8011 persis_io.pr.pr_info.res_type = type; 8012 memcpy(persis_io.pr.pr_info.sa_res_key, 8013 param->serv_act_res_key, 8014 sizeof(param->serv_act_res_key)); 8015 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8016 sizeof(persis_io.pr), M_WAITOK); 8017 } else { 8018 /* 8019 * sa_res_key is not the res holder just 8020 * remove registrants 8021 */ 8022 int found=0; 8023 8024 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8025 if (sa_res_key != ctl_get_prkey(lun, i)) 8026 continue; 8027 8028 found = 1; 8029 ctl_clr_prkey(lun, i); 8030 lun->pr_key_count--; 8031 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8032 } 8033 8034 if (!found) { 8035 mtx_unlock(&lun->lun_lock); 8036 free(ctsio->kern_data_ptr, M_CTL); 8037 ctl_set_reservation_conflict(ctsio); 8038 ctl_done((union ctl_io *)ctsio); 8039 return (1); 8040 } 8041 lun->PRGeneration++; 8042 mtx_unlock(&lun->lun_lock); 8043 8044 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8045 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8046 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8047 persis_io.pr.pr_info.residx = 
lun->pr_res_idx; 8048 persis_io.pr.pr_info.res_type = type; 8049 memcpy(persis_io.pr.pr_info.sa_res_key, 8050 param->serv_act_res_key, 8051 sizeof(param->serv_act_res_key)); 8052 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8053 sizeof(persis_io.pr), M_WAITOK); 8054 } 8055 } 8056 return (0); 8057} 8058 8059static void 8060ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 8061{ 8062 uint64_t sa_res_key; 8063 int i; 8064 8065 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 8066 8067 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8068 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 8069 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 8070 if (sa_res_key == 0) { 8071 /* 8072 * Unregister everybody else and build UA for 8073 * them 8074 */ 8075 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 8076 if (i == msg->pr.pr_info.residx || 8077 ctl_get_prkey(lun, i) == 0) 8078 continue; 8079 8080 ctl_clr_prkey(lun, i); 8081 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8082 } 8083 8084 lun->pr_key_count = 1; 8085 lun->res_type = msg->pr.pr_info.res_type; 8086 if (lun->res_type != SPR_TYPE_WR_EX_AR 8087 && lun->res_type != SPR_TYPE_EX_AC_AR) 8088 lun->pr_res_idx = msg->pr.pr_info.residx; 8089 } else { 8090 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8091 if (sa_res_key == ctl_get_prkey(lun, i)) 8092 continue; 8093 8094 ctl_clr_prkey(lun, i); 8095 lun->pr_key_count--; 8096 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8097 } 8098 } 8099 } else { 8100 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8101 if (i == msg->pr.pr_info.residx || 8102 ctl_get_prkey(lun, i) == 0) 8103 continue; 8104 8105 if (sa_res_key == ctl_get_prkey(lun, i)) { 8106 ctl_clr_prkey(lun, i); 8107 lun->pr_key_count--; 8108 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8109 } else if (msg->pr.pr_info.res_type != lun->res_type 8110 && (lun->res_type == SPR_TYPE_WR_EX_RO 8111 || lun->res_type == SPR_TYPE_EX_AC_RO)) { 8112 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8113 } 8114 } 8115 lun->res_type = 
msg->pr.pr_info.res_type; 8116 if (lun->res_type != SPR_TYPE_WR_EX_AR 8117 && lun->res_type != SPR_TYPE_EX_AC_AR) 8118 lun->pr_res_idx = msg->pr.pr_info.residx; 8119 else 8120 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8121 } 8122 lun->PRGeneration++; 8123 8124} 8125 8126 8127int 8128ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 8129{ 8130 int retval; 8131 u_int32_t param_len; 8132 struct scsi_per_res_out *cdb; 8133 struct ctl_lun *lun; 8134 struct scsi_per_res_out_parms* param; 8135 struct ctl_softc *softc; 8136 uint32_t residx; 8137 uint64_t res_key, sa_res_key, key; 8138 uint8_t type; 8139 union ctl_ha_msg persis_io; 8140 int i; 8141 8142 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 8143 8144 retval = CTL_RETVAL_COMPLETE; 8145 8146 cdb = (struct scsi_per_res_out *)ctsio->cdb; 8147 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8148 softc = lun->ctl_softc; 8149 8150 /* 8151 * We only support whole-LUN scope. The scope & type are ignored for 8152 * register, register and ignore existing key and clear. 8153 * We sometimes ignore scope and type on preempts too!! 8154 * Verify reservation type here as well. 
8155 */ 8156 type = cdb->scope_type & SPR_TYPE_MASK; 8157 if ((cdb->action == SPRO_RESERVE) 8158 || (cdb->action == SPRO_RELEASE)) { 8159 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 8160 ctl_set_invalid_field(/*ctsio*/ ctsio, 8161 /*sks_valid*/ 1, 8162 /*command*/ 1, 8163 /*field*/ 2, 8164 /*bit_valid*/ 1, 8165 /*bit*/ 4); 8166 ctl_done((union ctl_io *)ctsio); 8167 return (CTL_RETVAL_COMPLETE); 8168 } 8169 8170 if (type>8 || type==2 || type==4 || type==0) { 8171 ctl_set_invalid_field(/*ctsio*/ ctsio, 8172 /*sks_valid*/ 1, 8173 /*command*/ 1, 8174 /*field*/ 2, 8175 /*bit_valid*/ 1, 8176 /*bit*/ 0); 8177 ctl_done((union ctl_io *)ctsio); 8178 return (CTL_RETVAL_COMPLETE); 8179 } 8180 } 8181 8182 param_len = scsi_4btoul(cdb->length); 8183 8184 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 8185 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 8186 ctsio->kern_data_len = param_len; 8187 ctsio->kern_total_len = param_len; 8188 ctsio->kern_data_resid = 0; 8189 ctsio->kern_rel_offset = 0; 8190 ctsio->kern_sg_entries = 0; 8191 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 8192 ctsio->be_move_done = ctl_config_move_done; 8193 ctl_datamove((union ctl_io *)ctsio); 8194 8195 return (CTL_RETVAL_COMPLETE); 8196 } 8197 8198 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 8199 8200 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 8201 res_key = scsi_8btou64(param->res_key.key); 8202 sa_res_key = scsi_8btou64(param->serv_act_res_key); 8203 8204 /* 8205 * Validate the reservation key here except for SPRO_REG_IGNO 8206 * This must be done for all other service actions 8207 */ 8208 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 8209 mtx_lock(&lun->lun_lock); 8210 if ((key = ctl_get_prkey(lun, residx)) != 0) { 8211 if (res_key != key) { 8212 /* 8213 * The current key passed in doesn't match 8214 * the one the initiator previously 8215 * registered. 
8216 */ 8217 mtx_unlock(&lun->lun_lock); 8218 free(ctsio->kern_data_ptr, M_CTL); 8219 ctl_set_reservation_conflict(ctsio); 8220 ctl_done((union ctl_io *)ctsio); 8221 return (CTL_RETVAL_COMPLETE); 8222 } 8223 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 8224 /* 8225 * We are not registered 8226 */ 8227 mtx_unlock(&lun->lun_lock); 8228 free(ctsio->kern_data_ptr, M_CTL); 8229 ctl_set_reservation_conflict(ctsio); 8230 ctl_done((union ctl_io *)ctsio); 8231 return (CTL_RETVAL_COMPLETE); 8232 } else if (res_key != 0) { 8233 /* 8234 * We are not registered and trying to register but 8235 * the register key isn't zero. 8236 */ 8237 mtx_unlock(&lun->lun_lock); 8238 free(ctsio->kern_data_ptr, M_CTL); 8239 ctl_set_reservation_conflict(ctsio); 8240 ctl_done((union ctl_io *)ctsio); 8241 return (CTL_RETVAL_COMPLETE); 8242 } 8243 mtx_unlock(&lun->lun_lock); 8244 } 8245 8246 switch (cdb->action & SPRO_ACTION_MASK) { 8247 case SPRO_REGISTER: 8248 case SPRO_REG_IGNO: { 8249 8250#if 0 8251 printf("Registration received\n"); 8252#endif 8253 8254 /* 8255 * We don't support any of these options, as we report in 8256 * the read capabilities request (see 8257 * ctl_persistent_reserve_in(), above). 8258 */ 8259 if ((param->flags & SPR_SPEC_I_PT) 8260 || (param->flags & SPR_ALL_TG_PT) 8261 || (param->flags & SPR_APTPL)) { 8262 int bit_ptr; 8263 8264 if (param->flags & SPR_APTPL) 8265 bit_ptr = 0; 8266 else if (param->flags & SPR_ALL_TG_PT) 8267 bit_ptr = 2; 8268 else /* SPR_SPEC_I_PT */ 8269 bit_ptr = 3; 8270 8271 free(ctsio->kern_data_ptr, M_CTL); 8272 ctl_set_invalid_field(ctsio, 8273 /*sks_valid*/ 1, 8274 /*command*/ 0, 8275 /*field*/ 20, 8276 /*bit_valid*/ 1, 8277 /*bit*/ bit_ptr); 8278 ctl_done((union ctl_io *)ctsio); 8279 return (CTL_RETVAL_COMPLETE); 8280 } 8281 8282 mtx_lock(&lun->lun_lock); 8283 8284 /* 8285 * The initiator wants to clear the 8286 * key/unregister. 
8287 */ 8288 if (sa_res_key == 0) { 8289 if ((res_key == 0 8290 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 8291 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 8292 && ctl_get_prkey(lun, residx) == 0)) { 8293 mtx_unlock(&lun->lun_lock); 8294 goto done; 8295 } 8296 8297 ctl_clr_prkey(lun, residx); 8298 lun->pr_key_count--; 8299 8300 if (residx == lun->pr_res_idx) { 8301 lun->flags &= ~CTL_LUN_PR_RESERVED; 8302 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8303 8304 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8305 || lun->res_type == SPR_TYPE_EX_AC_RO) 8306 && lun->pr_key_count) { 8307 /* 8308 * If the reservation is a registrants 8309 * only type we need to generate a UA 8310 * for other registered inits. The 8311 * sense code should be RESERVATIONS 8312 * RELEASED 8313 */ 8314 8315 for (i = softc->init_min; i < softc->init_max; i++){ 8316 if (ctl_get_prkey(lun, i) == 0) 8317 continue; 8318 ctl_est_ua(lun, i, 8319 CTL_UA_RES_RELEASE); 8320 } 8321 } 8322 lun->res_type = 0; 8323 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8324 if (lun->pr_key_count==0) { 8325 lun->flags &= ~CTL_LUN_PR_RESERVED; 8326 lun->res_type = 0; 8327 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8328 } 8329 } 8330 lun->PRGeneration++; 8331 mtx_unlock(&lun->lun_lock); 8332 8333 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8334 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8335 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8336 persis_io.pr.pr_info.residx = residx; 8337 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8338 sizeof(persis_io.pr), M_WAITOK); 8339 } else /* sa_res_key != 0 */ { 8340 8341 /* 8342 * If we aren't registered currently then increment 8343 * the key count and set the registered flag. 
8344 */ 8345 ctl_alloc_prkey(lun, residx); 8346 if (ctl_get_prkey(lun, residx) == 0) 8347 lun->pr_key_count++; 8348 ctl_set_prkey(lun, residx, sa_res_key); 8349 lun->PRGeneration++; 8350 mtx_unlock(&lun->lun_lock); 8351 8352 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8353 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8354 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8355 persis_io.pr.pr_info.residx = residx; 8356 memcpy(persis_io.pr.pr_info.sa_res_key, 8357 param->serv_act_res_key, 8358 sizeof(param->serv_act_res_key)); 8359 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8360 sizeof(persis_io.pr), M_WAITOK); 8361 } 8362 8363 break; 8364 } 8365 case SPRO_RESERVE: 8366#if 0 8367 printf("Reserve executed type %d\n", type); 8368#endif 8369 mtx_lock(&lun->lun_lock); 8370 if (lun->flags & CTL_LUN_PR_RESERVED) { 8371 /* 8372 * if this isn't the reservation holder and it's 8373 * not a "all registrants" type or if the type is 8374 * different then we have a conflict 8375 */ 8376 if ((lun->pr_res_idx != residx 8377 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) 8378 || lun->res_type != type) { 8379 mtx_unlock(&lun->lun_lock); 8380 free(ctsio->kern_data_ptr, M_CTL); 8381 ctl_set_reservation_conflict(ctsio); 8382 ctl_done((union ctl_io *)ctsio); 8383 return (CTL_RETVAL_COMPLETE); 8384 } 8385 mtx_unlock(&lun->lun_lock); 8386 } else /* create a reservation */ { 8387 /* 8388 * If it's not an "all registrants" type record 8389 * reservation holder 8390 */ 8391 if (type != SPR_TYPE_WR_EX_AR 8392 && type != SPR_TYPE_EX_AC_AR) 8393 lun->pr_res_idx = residx; /* Res holder */ 8394 else 8395 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8396 8397 lun->flags |= CTL_LUN_PR_RESERVED; 8398 lun->res_type = type; 8399 8400 mtx_unlock(&lun->lun_lock); 8401 8402 /* send msg to other side */ 8403 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8404 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8405 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8406 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8407 
persis_io.pr.pr_info.res_type = type; 8408 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8409 sizeof(persis_io.pr), M_WAITOK); 8410 } 8411 break; 8412 8413 case SPRO_RELEASE: 8414 mtx_lock(&lun->lun_lock); 8415 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8416 /* No reservation exists return good status */ 8417 mtx_unlock(&lun->lun_lock); 8418 goto done; 8419 } 8420 /* 8421 * Is this nexus a reservation holder? 8422 */ 8423 if (lun->pr_res_idx != residx 8424 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 8425 /* 8426 * not a res holder return good status but 8427 * do nothing 8428 */ 8429 mtx_unlock(&lun->lun_lock); 8430 goto done; 8431 } 8432 8433 if (lun->res_type != type) { 8434 mtx_unlock(&lun->lun_lock); 8435 free(ctsio->kern_data_ptr, M_CTL); 8436 ctl_set_illegal_pr_release(ctsio); 8437 ctl_done((union ctl_io *)ctsio); 8438 return (CTL_RETVAL_COMPLETE); 8439 } 8440 8441 /* okay to release */ 8442 lun->flags &= ~CTL_LUN_PR_RESERVED; 8443 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8444 lun->res_type = 0; 8445 8446 /* 8447 * if this isn't an exclusive access 8448 * res generate UA for all other 8449 * registrants. 
8450 */ 8451 if (type != SPR_TYPE_EX_AC 8452 && type != SPR_TYPE_WR_EX) { 8453 for (i = softc->init_min; i < softc->init_max; i++) { 8454 if (i == residx || ctl_get_prkey(lun, i) == 0) 8455 continue; 8456 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8457 } 8458 } 8459 mtx_unlock(&lun->lun_lock); 8460 8461 /* Send msg to other side */ 8462 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8463 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8464 persis_io.pr.pr_info.action = CTL_PR_RELEASE; 8465 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8466 sizeof(persis_io.pr), M_WAITOK); 8467 break; 8468 8469 case SPRO_CLEAR: 8470 /* send msg to other side */ 8471 8472 mtx_lock(&lun->lun_lock); 8473 lun->flags &= ~CTL_LUN_PR_RESERVED; 8474 lun->res_type = 0; 8475 lun->pr_key_count = 0; 8476 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8477 8478 ctl_clr_prkey(lun, residx); 8479 for (i = 0; i < CTL_MAX_INITIATORS; i++) 8480 if (ctl_get_prkey(lun, i) != 0) { 8481 ctl_clr_prkey(lun, i); 8482 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8483 } 8484 lun->PRGeneration++; 8485 mtx_unlock(&lun->lun_lock); 8486 8487 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8488 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8489 persis_io.pr.pr_info.action = CTL_PR_CLEAR; 8490 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8491 sizeof(persis_io.pr), M_WAITOK); 8492 break; 8493 8494 case SPRO_PREEMPT: 8495 case SPRO_PRE_ABO: { 8496 int nretval; 8497 8498 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 8499 residx, ctsio, cdb, param); 8500 if (nretval != 0) 8501 return (CTL_RETVAL_COMPLETE); 8502 break; 8503 } 8504 default: 8505 panic("Invalid PR type %x", cdb->action); 8506 } 8507 8508done: 8509 free(ctsio->kern_data_ptr, M_CTL); 8510 ctl_set_success(ctsio); 8511 ctl_done((union ctl_io *)ctsio); 8512 8513 return (retval); 8514} 8515 8516/* 8517 * This routine is for handling a message from the other SC pertaining to 8518 * persistent reserve out. 
All the error checking will have been done 8519 * so only perorming the action need be done here to keep the two 8520 * in sync. 8521 */ 8522static void 8523ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg) 8524{ 8525 struct ctl_lun *lun; 8526 struct ctl_softc *softc; 8527 int i; 8528 uint32_t residx, targ_lun; 8529 8530 softc = control_softc; 8531 targ_lun = msg->hdr.nexus.targ_mapped_lun; 8532 mtx_lock(&softc->ctl_lock); 8533 if ((targ_lun >= CTL_MAX_LUNS) || 8534 ((lun = softc->ctl_luns[targ_lun]) == NULL)) { 8535 mtx_unlock(&softc->ctl_lock); 8536 return; 8537 } 8538 mtx_lock(&lun->lun_lock); 8539 mtx_unlock(&softc->ctl_lock); 8540 if (lun->flags & CTL_LUN_DISABLED) { 8541 mtx_unlock(&lun->lun_lock); 8542 return; 8543 } 8544 residx = ctl_get_initindex(&msg->hdr.nexus); 8545 switch(msg->pr.pr_info.action) { 8546 case CTL_PR_REG_KEY: 8547 ctl_alloc_prkey(lun, msg->pr.pr_info.residx); 8548 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) 8549 lun->pr_key_count++; 8550 ctl_set_prkey(lun, msg->pr.pr_info.residx, 8551 scsi_8btou64(msg->pr.pr_info.sa_res_key)); 8552 lun->PRGeneration++; 8553 break; 8554 8555 case CTL_PR_UNREG_KEY: 8556 ctl_clr_prkey(lun, msg->pr.pr_info.residx); 8557 lun->pr_key_count--; 8558 8559 /* XXX Need to see if the reservation has been released */ 8560 /* if so do we need to generate UA? */ 8561 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8562 lun->flags &= ~CTL_LUN_PR_RESERVED; 8563 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8564 8565 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8566 || lun->res_type == SPR_TYPE_EX_AC_RO) 8567 && lun->pr_key_count) { 8568 /* 8569 * If the reservation is a registrants 8570 * only type we need to generate a UA 8571 * for other registered inits. 
The 8572 * sense code should be RESERVATIONS 8573 * RELEASED 8574 */ 8575 8576 for (i = softc->init_min; i < softc->init_max; i++) { 8577 if (ctl_get_prkey(lun, i) == 0) 8578 continue; 8579 8580 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8581 } 8582 } 8583 lun->res_type = 0; 8584 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8585 if (lun->pr_key_count==0) { 8586 lun->flags &= ~CTL_LUN_PR_RESERVED; 8587 lun->res_type = 0; 8588 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8589 } 8590 } 8591 lun->PRGeneration++; 8592 break; 8593 8594 case CTL_PR_RESERVE: 8595 lun->flags |= CTL_LUN_PR_RESERVED; 8596 lun->res_type = msg->pr.pr_info.res_type; 8597 lun->pr_res_idx = msg->pr.pr_info.residx; 8598 8599 break; 8600 8601 case CTL_PR_RELEASE: 8602 /* 8603 * if this isn't an exclusive access res generate UA for all 8604 * other registrants. 8605 */ 8606 if (lun->res_type != SPR_TYPE_EX_AC 8607 && lun->res_type != SPR_TYPE_WR_EX) { 8608 for (i = softc->init_min; i < softc->init_max; i++) 8609 if (i == residx || ctl_get_prkey(lun, i) == 0) 8610 continue; 8611 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8612 } 8613 8614 lun->flags &= ~CTL_LUN_PR_RESERVED; 8615 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8616 lun->res_type = 0; 8617 break; 8618 8619 case CTL_PR_PREEMPT: 8620 ctl_pro_preempt_other(lun, msg); 8621 break; 8622 case CTL_PR_CLEAR: 8623 lun->flags &= ~CTL_LUN_PR_RESERVED; 8624 lun->res_type = 0; 8625 lun->pr_key_count = 0; 8626 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8627 8628 for (i=0; i < CTL_MAX_INITIATORS; i++) { 8629 if (ctl_get_prkey(lun, i) == 0) 8630 continue; 8631 ctl_clr_prkey(lun, i); 8632 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8633 } 8634 lun->PRGeneration++; 8635 break; 8636 } 8637 8638 mtx_unlock(&lun->lun_lock); 8639} 8640 8641int 8642ctl_read_write(struct ctl_scsiio *ctsio) 8643{ 8644 struct ctl_lun *lun; 8645 struct ctl_lba_len_flags *lbalen; 8646 uint64_t lba; 8647 uint32_t num_blocks; 8648 int flags, retval; 8649 int isread; 8650 8651 lun = (struct 
ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8652 8653 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); 8654 8655 flags = 0; 8656 retval = CTL_RETVAL_COMPLETE; 8657 8658 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 8659 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; 8660 switch (ctsio->cdb[0]) { 8661 case READ_6: 8662 case WRITE_6: { 8663 struct scsi_rw_6 *cdb; 8664 8665 cdb = (struct scsi_rw_6 *)ctsio->cdb; 8666 8667 lba = scsi_3btoul(cdb->addr); 8668 /* only 5 bits are valid in the most significant address byte */ 8669 lba &= 0x1fffff; 8670 num_blocks = cdb->length; 8671 /* 8672 * This is correct according to SBC-2. 8673 */ 8674 if (num_blocks == 0) 8675 num_blocks = 256; 8676 break; 8677 } 8678 case READ_10: 8679 case WRITE_10: { 8680 struct scsi_rw_10 *cdb; 8681 8682 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8683 if (cdb->byte2 & SRW10_FUA) 8684 flags |= CTL_LLF_FUA; 8685 if (cdb->byte2 & SRW10_DPO) 8686 flags |= CTL_LLF_DPO; 8687 lba = scsi_4btoul(cdb->addr); 8688 num_blocks = scsi_2btoul(cdb->length); 8689 break; 8690 } 8691 case WRITE_VERIFY_10: { 8692 struct scsi_write_verify_10 *cdb; 8693 8694 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 8695 flags |= CTL_LLF_FUA; 8696 if (cdb->byte2 & SWV_DPO) 8697 flags |= CTL_LLF_DPO; 8698 lba = scsi_4btoul(cdb->addr); 8699 num_blocks = scsi_2btoul(cdb->length); 8700 break; 8701 } 8702 case READ_12: 8703 case WRITE_12: { 8704 struct scsi_rw_12 *cdb; 8705 8706 cdb = (struct scsi_rw_12 *)ctsio->cdb; 8707 if (cdb->byte2 & SRW12_FUA) 8708 flags |= CTL_LLF_FUA; 8709 if (cdb->byte2 & SRW12_DPO) 8710 flags |= CTL_LLF_DPO; 8711 lba = scsi_4btoul(cdb->addr); 8712 num_blocks = scsi_4btoul(cdb->length); 8713 break; 8714 } 8715 case WRITE_VERIFY_12: { 8716 struct scsi_write_verify_12 *cdb; 8717 8718 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 8719 flags |= CTL_LLF_FUA; 8720 if (cdb->byte2 & SWV_DPO) 8721 flags |= CTL_LLF_DPO; 8722 lba = scsi_4btoul(cdb->addr); 8723 
num_blocks = scsi_4btoul(cdb->length); 8724 break; 8725 } 8726 case READ_16: 8727 case WRITE_16: { 8728 struct scsi_rw_16 *cdb; 8729 8730 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8731 if (cdb->byte2 & SRW12_FUA) 8732 flags |= CTL_LLF_FUA; 8733 if (cdb->byte2 & SRW12_DPO) 8734 flags |= CTL_LLF_DPO; 8735 lba = scsi_8btou64(cdb->addr); 8736 num_blocks = scsi_4btoul(cdb->length); 8737 break; 8738 } 8739 case WRITE_ATOMIC_16: { 8740 struct scsi_rw_16 *cdb; 8741 8742 if (lun->be_lun->atomicblock == 0) { 8743 ctl_set_invalid_opcode(ctsio); 8744 ctl_done((union ctl_io *)ctsio); 8745 return (CTL_RETVAL_COMPLETE); 8746 } 8747 8748 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8749 if (cdb->byte2 & SRW12_FUA) 8750 flags |= CTL_LLF_FUA; 8751 if (cdb->byte2 & SRW12_DPO) 8752 flags |= CTL_LLF_DPO; 8753 lba = scsi_8btou64(cdb->addr); 8754 num_blocks = scsi_4btoul(cdb->length); 8755 if (num_blocks > lun->be_lun->atomicblock) { 8756 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 8757 /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, 8758 /*bit*/ 0); 8759 ctl_done((union ctl_io *)ctsio); 8760 return (CTL_RETVAL_COMPLETE); 8761 } 8762 break; 8763 } 8764 case WRITE_VERIFY_16: { 8765 struct scsi_write_verify_16 *cdb; 8766 8767 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 8768 flags |= CTL_LLF_FUA; 8769 if (cdb->byte2 & SWV_DPO) 8770 flags |= CTL_LLF_DPO; 8771 lba = scsi_8btou64(cdb->addr); 8772 num_blocks = scsi_4btoul(cdb->length); 8773 break; 8774 } 8775 default: 8776 /* 8777 * We got a command we don't support. This shouldn't 8778 * happen, commands should be filtered out above us. 8779 */ 8780 ctl_set_invalid_opcode(ctsio); 8781 ctl_done((union ctl_io *)ctsio); 8782 8783 return (CTL_RETVAL_COMPLETE); 8784 break; /* NOTREACHED */ 8785 } 8786 8787 /* 8788 * The first check is to make sure we're in bounds, the second 8789 * check is to catch wrap-around problems. If the lba + num blocks 8790 * is less than the lba, then we've wrapped around and the block 8791 * range is invalid anyway. 
8792 */ 8793 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8794 || ((lba + num_blocks) < lba)) { 8795 ctl_set_lba_out_of_range(ctsio); 8796 ctl_done((union ctl_io *)ctsio); 8797 return (CTL_RETVAL_COMPLETE); 8798 } 8799 8800 /* 8801 * According to SBC-3, a transfer length of 0 is not an error. 8802 * Note that this cannot happen with WRITE(6) or READ(6), since 0 8803 * translates to 256 blocks for those commands. 8804 */ 8805 if (num_blocks == 0) { 8806 ctl_set_success(ctsio); 8807 ctl_done((union ctl_io *)ctsio); 8808 return (CTL_RETVAL_COMPLETE); 8809 } 8810 8811 /* Set FUA and/or DPO if caches are disabled. */ 8812 if (isread) { 8813 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 8814 SCP_RCD) != 0) 8815 flags |= CTL_LLF_FUA | CTL_LLF_DPO; 8816 } else { 8817 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 8818 SCP_WCE) == 0) 8819 flags |= CTL_LLF_FUA; 8820 } 8821 8822 lbalen = (struct ctl_lba_len_flags *) 8823 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8824 lbalen->lba = lba; 8825 lbalen->len = num_blocks; 8826 lbalen->flags = (isread ? 
CTL_LLF_READ : CTL_LLF_WRITE) | flags; 8827 8828 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8829 ctsio->kern_rel_offset = 0; 8830 8831 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 8832 8833 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8834 8835 return (retval); 8836} 8837 8838static int 8839ctl_cnw_cont(union ctl_io *io) 8840{ 8841 struct ctl_scsiio *ctsio; 8842 struct ctl_lun *lun; 8843 struct ctl_lba_len_flags *lbalen; 8844 int retval; 8845 8846 ctsio = &io->scsiio; 8847 ctsio->io_hdr.status = CTL_STATUS_NONE; 8848 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 8849 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8850 lbalen = (struct ctl_lba_len_flags *) 8851 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8852 lbalen->flags &= ~CTL_LLF_COMPARE; 8853 lbalen->flags |= CTL_LLF_WRITE; 8854 8855 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); 8856 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8857 return (retval); 8858} 8859 8860int 8861ctl_cnw(struct ctl_scsiio *ctsio) 8862{ 8863 struct ctl_lun *lun; 8864 struct ctl_lba_len_flags *lbalen; 8865 uint64_t lba; 8866 uint32_t num_blocks; 8867 int flags, retval; 8868 8869 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8870 8871 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); 8872 8873 flags = 0; 8874 retval = CTL_RETVAL_COMPLETE; 8875 8876 switch (ctsio->cdb[0]) { 8877 case COMPARE_AND_WRITE: { 8878 struct scsi_compare_and_write *cdb; 8879 8880 cdb = (struct scsi_compare_and_write *)ctsio->cdb; 8881 if (cdb->byte2 & SRW10_FUA) 8882 flags |= CTL_LLF_FUA; 8883 if (cdb->byte2 & SRW10_DPO) 8884 flags |= CTL_LLF_DPO; 8885 lba = scsi_8btou64(cdb->addr); 8886 num_blocks = cdb->length; 8887 break; 8888 } 8889 default: 8890 /* 8891 * We got a command we don't support. This shouldn't 8892 * happen, commands should be filtered out above us. 
8893 */ 8894 ctl_set_invalid_opcode(ctsio); 8895 ctl_done((union ctl_io *)ctsio); 8896 8897 return (CTL_RETVAL_COMPLETE); 8898 break; /* NOTREACHED */ 8899 } 8900 8901 /* 8902 * The first check is to make sure we're in bounds, the second 8903 * check is to catch wrap-around problems. If the lba + num blocks 8904 * is less than the lba, then we've wrapped around and the block 8905 * range is invalid anyway. 8906 */ 8907 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8908 || ((lba + num_blocks) < lba)) { 8909 ctl_set_lba_out_of_range(ctsio); 8910 ctl_done((union ctl_io *)ctsio); 8911 return (CTL_RETVAL_COMPLETE); 8912 } 8913 8914 /* 8915 * According to SBC-3, a transfer length of 0 is not an error. 8916 */ 8917 if (num_blocks == 0) { 8918 ctl_set_success(ctsio); 8919 ctl_done((union ctl_io *)ctsio); 8920 return (CTL_RETVAL_COMPLETE); 8921 } 8922 8923 /* Set FUA if write cache is disabled. */ 8924 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 8925 SCP_WCE) == 0) 8926 flags |= CTL_LLF_FUA; 8927 8928 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; 8929 ctsio->kern_rel_offset = 0; 8930 8931 /* 8932 * Set the IO_CONT flag, so that if this I/O gets passed to 8933 * ctl_data_submit_done(), it'll get passed back to 8934 * ctl_ctl_cnw_cont() for further processing. 
8935 */ 8936 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 8937 ctsio->io_cont = ctl_cnw_cont; 8938 8939 lbalen = (struct ctl_lba_len_flags *) 8940 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8941 lbalen->lba = lba; 8942 lbalen->len = num_blocks; 8943 lbalen->flags = CTL_LLF_COMPARE | flags; 8944 8945 CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); 8946 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8947 return (retval); 8948} 8949 8950int 8951ctl_verify(struct ctl_scsiio *ctsio) 8952{ 8953 struct ctl_lun *lun; 8954 struct ctl_lba_len_flags *lbalen; 8955 uint64_t lba; 8956 uint32_t num_blocks; 8957 int bytchk, flags; 8958 int retval; 8959 8960 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8961 8962 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); 8963 8964 bytchk = 0; 8965 flags = CTL_LLF_FUA; 8966 retval = CTL_RETVAL_COMPLETE; 8967 8968 switch (ctsio->cdb[0]) { 8969 case VERIFY_10: { 8970 struct scsi_verify_10 *cdb; 8971 8972 cdb = (struct scsi_verify_10 *)ctsio->cdb; 8973 if (cdb->byte2 & SVFY_BYTCHK) 8974 bytchk = 1; 8975 if (cdb->byte2 & SVFY_DPO) 8976 flags |= CTL_LLF_DPO; 8977 lba = scsi_4btoul(cdb->addr); 8978 num_blocks = scsi_2btoul(cdb->length); 8979 break; 8980 } 8981 case VERIFY_12: { 8982 struct scsi_verify_12 *cdb; 8983 8984 cdb = (struct scsi_verify_12 *)ctsio->cdb; 8985 if (cdb->byte2 & SVFY_BYTCHK) 8986 bytchk = 1; 8987 if (cdb->byte2 & SVFY_DPO) 8988 flags |= CTL_LLF_DPO; 8989 lba = scsi_4btoul(cdb->addr); 8990 num_blocks = scsi_4btoul(cdb->length); 8991 break; 8992 } 8993 case VERIFY_16: { 8994 struct scsi_rw_16 *cdb; 8995 8996 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8997 if (cdb->byte2 & SVFY_BYTCHK) 8998 bytchk = 1; 8999 if (cdb->byte2 & SVFY_DPO) 9000 flags |= CTL_LLF_DPO; 9001 lba = scsi_8btou64(cdb->addr); 9002 num_blocks = scsi_4btoul(cdb->length); 9003 break; 9004 } 9005 default: 9006 /* 9007 * We got a command we don't support. 
This shouldn't 9008 * happen, commands should be filtered out above us. 9009 */ 9010 ctl_set_invalid_opcode(ctsio); 9011 ctl_done((union ctl_io *)ctsio); 9012 return (CTL_RETVAL_COMPLETE); 9013 } 9014 9015 /* 9016 * The first check is to make sure we're in bounds, the second 9017 * check is to catch wrap-around problems. If the lba + num blocks 9018 * is less than the lba, then we've wrapped around and the block 9019 * range is invalid anyway. 9020 */ 9021 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 9022 || ((lba + num_blocks) < lba)) { 9023 ctl_set_lba_out_of_range(ctsio); 9024 ctl_done((union ctl_io *)ctsio); 9025 return (CTL_RETVAL_COMPLETE); 9026 } 9027 9028 /* 9029 * According to SBC-3, a transfer length of 0 is not an error. 9030 */ 9031 if (num_blocks == 0) { 9032 ctl_set_success(ctsio); 9033 ctl_done((union ctl_io *)ctsio); 9034 return (CTL_RETVAL_COMPLETE); 9035 } 9036 9037 lbalen = (struct ctl_lba_len_flags *) 9038 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9039 lbalen->lba = lba; 9040 lbalen->len = num_blocks; 9041 if (bytchk) { 9042 lbalen->flags = CTL_LLF_COMPARE | flags; 9043 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 9044 } else { 9045 lbalen->flags = CTL_LLF_VERIFY | flags; 9046 ctsio->kern_total_len = 0; 9047 } 9048 ctsio->kern_rel_offset = 0; 9049 9050 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); 9051 retval = lun->backend->data_submit((union ctl_io *)ctsio); 9052 return (retval); 9053} 9054 9055int 9056ctl_report_luns(struct ctl_scsiio *ctsio) 9057{ 9058 struct ctl_softc *softc = control_softc; 9059 struct scsi_report_luns *cdb; 9060 struct scsi_report_luns_data *lun_data; 9061 struct ctl_lun *lun, *request_lun; 9062 struct ctl_port *port; 9063 int num_luns, retval; 9064 uint32_t alloc_len, lun_datalen; 9065 int num_filled, well_known; 9066 uint32_t initidx, targ_lun_id, lun_id; 9067 9068 retval = CTL_RETVAL_COMPLETE; 9069 well_known = 0; 9070 9071 cdb = (struct scsi_report_luns *)ctsio->cdb; 9072 port = 
ctl_io_port(&ctsio->io_hdr); 9073 9074 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 9075 9076 mtx_lock(&softc->ctl_lock); 9077 num_luns = 0; 9078 for (targ_lun_id = 0; targ_lun_id < CTL_MAX_LUNS; targ_lun_id++) { 9079 if (ctl_lun_map_from_port(port, targ_lun_id) < CTL_MAX_LUNS) 9080 num_luns++; 9081 } 9082 mtx_unlock(&softc->ctl_lock); 9083 9084 switch (cdb->select_report) { 9085 case RPL_REPORT_DEFAULT: 9086 case RPL_REPORT_ALL: 9087 break; 9088 case RPL_REPORT_WELLKNOWN: 9089 well_known = 1; 9090 num_luns = 0; 9091 break; 9092 default: 9093 ctl_set_invalid_field(ctsio, 9094 /*sks_valid*/ 1, 9095 /*command*/ 1, 9096 /*field*/ 2, 9097 /*bit_valid*/ 0, 9098 /*bit*/ 0); 9099 ctl_done((union ctl_io *)ctsio); 9100 return (retval); 9101 break; /* NOTREACHED */ 9102 } 9103 9104 alloc_len = scsi_4btoul(cdb->length); 9105 /* 9106 * The initiator has to allocate at least 16 bytes for this request, 9107 * so he can at least get the header and the first LUN. Otherwise 9108 * we reject the request (per SPC-3 rev 14, section 6.21). 
9109 */ 9110 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 9111 sizeof(struct scsi_report_luns_lundata))) { 9112 ctl_set_invalid_field(ctsio, 9113 /*sks_valid*/ 1, 9114 /*command*/ 1, 9115 /*field*/ 6, 9116 /*bit_valid*/ 0, 9117 /*bit*/ 0); 9118 ctl_done((union ctl_io *)ctsio); 9119 return (retval); 9120 } 9121 9122 request_lun = (struct ctl_lun *) 9123 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9124 9125 lun_datalen = sizeof(*lun_data) + 9126 (num_luns * sizeof(struct scsi_report_luns_lundata)); 9127 9128 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 9129 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 9130 ctsio->kern_sg_entries = 0; 9131 9132 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9133 9134 mtx_lock(&softc->ctl_lock); 9135 for (targ_lun_id = 0, num_filled = 0; targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns; targ_lun_id++) { 9136 lun_id = ctl_lun_map_from_port(port, targ_lun_id); 9137 if (lun_id >= CTL_MAX_LUNS) 9138 continue; 9139 lun = softc->ctl_luns[lun_id]; 9140 if (lun == NULL) 9141 continue; 9142 9143 if (targ_lun_id <= 0xff) { 9144 /* 9145 * Peripheral addressing method, bus number 0. 9146 */ 9147 lun_data->luns[num_filled].lundata[0] = 9148 RPL_LUNDATA_ATYP_PERIPH; 9149 lun_data->luns[num_filled].lundata[1] = targ_lun_id; 9150 num_filled++; 9151 } else if (targ_lun_id <= 0x3fff) { 9152 /* 9153 * Flat addressing method. 9154 */ 9155 lun_data->luns[num_filled].lundata[0] = 9156 RPL_LUNDATA_ATYP_FLAT | (targ_lun_id >> 8); 9157 lun_data->luns[num_filled].lundata[1] = 9158 (targ_lun_id & 0xff); 9159 num_filled++; 9160 } else if (targ_lun_id <= 0xffffff) { 9161 /* 9162 * Extended flat addressing method. 
9163 */ 9164 lun_data->luns[num_filled].lundata[0] = 9165 RPL_LUNDATA_ATYP_EXTLUN | 0x12; 9166 scsi_ulto3b(targ_lun_id, 9167 &lun_data->luns[num_filled].lundata[1]); 9168 num_filled++; 9169 } else { 9170 printf("ctl_report_luns: bogus LUN number %jd, " 9171 "skipping\n", (intmax_t)targ_lun_id); 9172 } 9173 /* 9174 * According to SPC-3, rev 14 section 6.21: 9175 * 9176 * "The execution of a REPORT LUNS command to any valid and 9177 * installed logical unit shall clear the REPORTED LUNS DATA 9178 * HAS CHANGED unit attention condition for all logical 9179 * units of that target with respect to the requesting 9180 * initiator. A valid and installed logical unit is one 9181 * having a PERIPHERAL QUALIFIER of 000b in the standard 9182 * INQUIRY data (see 6.4.2)." 9183 * 9184 * If request_lun is NULL, the LUN this report luns command 9185 * was issued to is either disabled or doesn't exist. In that 9186 * case, we shouldn't clear any pending lun change unit 9187 * attention. 9188 */ 9189 if (request_lun != NULL) { 9190 mtx_lock(&lun->lun_lock); 9191 ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE); 9192 mtx_unlock(&lun->lun_lock); 9193 } 9194 } 9195 mtx_unlock(&softc->ctl_lock); 9196 9197 /* 9198 * It's quite possible that we've returned fewer LUNs than we allocated 9199 * space for. Trim it. 9200 */ 9201 lun_datalen = sizeof(*lun_data) + 9202 (num_filled * sizeof(struct scsi_report_luns_lundata)); 9203 9204 if (lun_datalen < alloc_len) { 9205 ctsio->residual = alloc_len - lun_datalen; 9206 ctsio->kern_data_len = lun_datalen; 9207 ctsio->kern_total_len = lun_datalen; 9208 } else { 9209 ctsio->residual = 0; 9210 ctsio->kern_data_len = alloc_len; 9211 ctsio->kern_total_len = alloc_len; 9212 } 9213 ctsio->kern_data_resid = 0; 9214 ctsio->kern_rel_offset = 0; 9215 ctsio->kern_sg_entries = 0; 9216 9217 /* 9218 * We set this to the actual data length, regardless of how much 9219 * space we actually have to return results. 
If the user looks at 9220 * this value, he'll know whether or not he allocated enough space 9221 * and reissue the command if necessary. We don't support well 9222 * known logical units, so if the user asks for that, return none. 9223 */ 9224 scsi_ulto4b(lun_datalen - 8, lun_data->length); 9225 9226 /* 9227 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 9228 * this request. 9229 */ 9230 ctl_set_success(ctsio); 9231 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9232 ctsio->be_move_done = ctl_config_move_done; 9233 ctl_datamove((union ctl_io *)ctsio); 9234 return (retval); 9235} 9236 9237int 9238ctl_request_sense(struct ctl_scsiio *ctsio) 9239{ 9240 struct scsi_request_sense *cdb; 9241 struct scsi_sense_data *sense_ptr; 9242 struct ctl_softc *ctl_softc; 9243 struct ctl_lun *lun; 9244 uint32_t initidx; 9245 int have_error; 9246 scsi_sense_data_type sense_format; 9247 ctl_ua_type ua_type; 9248 9249 cdb = (struct scsi_request_sense *)ctsio->cdb; 9250 9251 ctl_softc = control_softc; 9252 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9253 9254 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 9255 9256 /* 9257 * Determine which sense format the user wants. 9258 */ 9259 if (cdb->byte2 & SRS_DESC) 9260 sense_format = SSD_TYPE_DESC; 9261 else 9262 sense_format = SSD_TYPE_FIXED; 9263 9264 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 9265 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 9266 ctsio->kern_sg_entries = 0; 9267 9268 /* 9269 * struct scsi_sense_data, which is currently set to 256 bytes, is 9270 * larger than the largest allowed value for the length field in the 9271 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 
9272 */ 9273 ctsio->residual = 0; 9274 ctsio->kern_data_len = cdb->length; 9275 ctsio->kern_total_len = cdb->length; 9276 9277 ctsio->kern_data_resid = 0; 9278 ctsio->kern_rel_offset = 0; 9279 ctsio->kern_sg_entries = 0; 9280 9281 /* 9282 * If we don't have a LUN, we don't have any pending sense. 9283 */ 9284 if (lun == NULL) 9285 goto no_sense; 9286 9287 have_error = 0; 9288 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9289 /* 9290 * Check for pending sense, and then for pending unit attentions. 9291 * Pending sense gets returned first, then pending unit attentions. 9292 */ 9293 mtx_lock(&lun->lun_lock); 9294#ifdef CTL_WITH_CA 9295 if (ctl_is_set(lun->have_ca, initidx)) { 9296 scsi_sense_data_type stored_format; 9297 9298 /* 9299 * Check to see which sense format was used for the stored 9300 * sense data. 9301 */ 9302 stored_format = scsi_sense_type(&lun->pending_sense[initidx]); 9303 9304 /* 9305 * If the user requested a different sense format than the 9306 * one we stored, then we need to convert it to the other 9307 * format. If we're going from descriptor to fixed format 9308 * sense data, we may lose things in translation, depending 9309 * on what options were used. 9310 * 9311 * If the stored format is SSD_TYPE_NONE (i.e. invalid), 9312 * for some reason we'll just copy it out as-is. 
9313 */ 9314 if ((stored_format == SSD_TYPE_FIXED) 9315 && (sense_format == SSD_TYPE_DESC)) 9316 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 9317 &lun->pending_sense[initidx], 9318 (struct scsi_sense_data_desc *)sense_ptr); 9319 else if ((stored_format == SSD_TYPE_DESC) 9320 && (sense_format == SSD_TYPE_FIXED)) 9321 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 9322 &lun->pending_sense[initidx], 9323 (struct scsi_sense_data_fixed *)sense_ptr); 9324 else 9325 memcpy(sense_ptr, &lun->pending_sense[initidx], 9326 MIN(sizeof(*sense_ptr), 9327 sizeof(lun->pending_sense[initidx]))); 9328 9329 ctl_clear_mask(lun->have_ca, initidx); 9330 have_error = 1; 9331 } else 9332#endif 9333 { 9334 ua_type = ctl_build_ua(lun, initidx, sense_ptr, sense_format); 9335 if (ua_type != CTL_UA_NONE) 9336 have_error = 1; 9337 if (ua_type == CTL_UA_LUN_CHANGE) { 9338 mtx_unlock(&lun->lun_lock); 9339 mtx_lock(&ctl_softc->ctl_lock); 9340 ctl_clr_ua_allluns(ctl_softc, initidx, ua_type); 9341 mtx_unlock(&ctl_softc->ctl_lock); 9342 mtx_lock(&lun->lun_lock); 9343 } 9344 9345 } 9346 mtx_unlock(&lun->lun_lock); 9347 9348 /* 9349 * We already have a pending error, return it. 9350 */ 9351 if (have_error != 0) { 9352 /* 9353 * We report the SCSI status as OK, since the status of the 9354 * request sense command itself is OK. 9355 * We report 0 for the sense length, because we aren't doing 9356 * autosense in this case. We're reporting sense as 9357 * parameter data. 9358 */ 9359 ctl_set_success(ctsio); 9360 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9361 ctsio->be_move_done = ctl_config_move_done; 9362 ctl_datamove((union ctl_io *)ctsio); 9363 return (CTL_RETVAL_COMPLETE); 9364 } 9365 9366no_sense: 9367 9368 /* 9369 * No sense information to report, so we report that everything is 9370 * okay. 
9371 */ 9372 ctl_set_sense_data(sense_ptr, 9373 lun, 9374 sense_format, 9375 /*current_error*/ 1, 9376 /*sense_key*/ SSD_KEY_NO_SENSE, 9377 /*asc*/ 0x00, 9378 /*ascq*/ 0x00, 9379 SSD_ELEM_NONE); 9380 9381 /* 9382 * We report 0 for the sense length, because we aren't doing 9383 * autosense in this case. We're reporting sense as parameter data. 9384 */ 9385 ctl_set_success(ctsio); 9386 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9387 ctsio->be_move_done = ctl_config_move_done; 9388 ctl_datamove((union ctl_io *)ctsio); 9389 return (CTL_RETVAL_COMPLETE); 9390} 9391 9392int 9393ctl_tur(struct ctl_scsiio *ctsio) 9394{ 9395 9396 CTL_DEBUG_PRINT(("ctl_tur\n")); 9397 9398 ctl_set_success(ctsio); 9399 ctl_done((union ctl_io *)ctsio); 9400 9401 return (CTL_RETVAL_COMPLETE); 9402} 9403 9404/* 9405 * SCSI VPD page 0x00, the Supported VPD Pages page. 9406 */ 9407static int 9408ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9409{ 9410 struct scsi_vpd_supported_pages *pages; 9411 int sup_page_size; 9412 struct ctl_lun *lun; 9413 int p; 9414 9415 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9416 9417 sup_page_size = sizeof(struct scsi_vpd_supported_pages) * 9418 SCSI_EVPD_NUM_SUPPORTED_PAGES; 9419 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 9420 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 9421 ctsio->kern_sg_entries = 0; 9422 9423 if (sup_page_size < alloc_len) { 9424 ctsio->residual = alloc_len - sup_page_size; 9425 ctsio->kern_data_len = sup_page_size; 9426 ctsio->kern_total_len = sup_page_size; 9427 } else { 9428 ctsio->residual = 0; 9429 ctsio->kern_data_len = alloc_len; 9430 ctsio->kern_total_len = alloc_len; 9431 } 9432 ctsio->kern_data_resid = 0; 9433 ctsio->kern_rel_offset = 0; 9434 ctsio->kern_sg_entries = 0; 9435 9436 /* 9437 * The control device is always connected. The disk device, on the 9438 * other hand, may not be online all the time. 
Need to change this 9439 * to figure out whether the disk device is actually online or not. 9440 */ 9441 if (lun != NULL) 9442 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 9443 lun->be_lun->lun_type; 9444 else 9445 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9446 9447 p = 0; 9448 /* Supported VPD pages */ 9449 pages->page_list[p++] = SVPD_SUPPORTED_PAGES; 9450 /* Serial Number */ 9451 pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; 9452 /* Device Identification */ 9453 pages->page_list[p++] = SVPD_DEVICE_ID; 9454 /* Extended INQUIRY Data */ 9455 pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; 9456 /* Mode Page Policy */ 9457 pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; 9458 /* SCSI Ports */ 9459 pages->page_list[p++] = SVPD_SCSI_PORTS; 9460 /* Third-party Copy */ 9461 pages->page_list[p++] = SVPD_SCSI_TPC; 9462 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { 9463 /* Block limits */ 9464 pages->page_list[p++] = SVPD_BLOCK_LIMITS; 9465 /* Block Device Characteristics */ 9466 pages->page_list[p++] = SVPD_BDC; 9467 /* Logical Block Provisioning */ 9468 pages->page_list[p++] = SVPD_LBP; 9469 } 9470 pages->length = p; 9471 9472 ctl_set_success(ctsio); 9473 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9474 ctsio->be_move_done = ctl_config_move_done; 9475 ctl_datamove((union ctl_io *)ctsio); 9476 return (CTL_RETVAL_COMPLETE); 9477} 9478 9479/* 9480 * SCSI VPD page 0x80, the Unit Serial Number page. 
9481 */ 9482static int 9483ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 9484{ 9485 struct scsi_vpd_unit_serial_number *sn_ptr; 9486 struct ctl_lun *lun; 9487 int data_len; 9488 9489 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9490 9491 data_len = 4 + CTL_SN_LEN; 9492 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9493 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9494 if (data_len < alloc_len) { 9495 ctsio->residual = alloc_len - data_len; 9496 ctsio->kern_data_len = data_len; 9497 ctsio->kern_total_len = data_len; 9498 } else { 9499 ctsio->residual = 0; 9500 ctsio->kern_data_len = alloc_len; 9501 ctsio->kern_total_len = alloc_len; 9502 } 9503 ctsio->kern_data_resid = 0; 9504 ctsio->kern_rel_offset = 0; 9505 ctsio->kern_sg_entries = 0; 9506 9507 /* 9508 * The control device is always connected. The disk device, on the 9509 * other hand, may not be online all the time. Need to change this 9510 * to figure out whether the disk device is actually online or not. 9511 */ 9512 if (lun != NULL) 9513 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9514 lun->be_lun->lun_type; 9515 else 9516 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9517 9518 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9519 sn_ptr->length = CTL_SN_LEN; 9520 /* 9521 * If we don't have a LUN, we just leave the serial number as 9522 * all spaces. 9523 */ 9524 if (lun != NULL) { 9525 strncpy((char *)sn_ptr->serial_num, 9526 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9527 } else 9528 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); 9529 9530 ctl_set_success(ctsio); 9531 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9532 ctsio->be_move_done = ctl_config_move_done; 9533 ctl_datamove((union ctl_io *)ctsio); 9534 return (CTL_RETVAL_COMPLETE); 9535} 9536 9537 9538/* 9539 * SCSI VPD page 0x86, the Extended INQUIRY Data page. 
9540 */ 9541static int 9542ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) 9543{ 9544 struct scsi_vpd_extended_inquiry_data *eid_ptr; 9545 struct ctl_lun *lun; 9546 int data_len; 9547 9548 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9549 9550 data_len = sizeof(struct scsi_vpd_extended_inquiry_data); 9551 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9552 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; 9553 ctsio->kern_sg_entries = 0; 9554 9555 if (data_len < alloc_len) { 9556 ctsio->residual = alloc_len - data_len; 9557 ctsio->kern_data_len = data_len; 9558 ctsio->kern_total_len = data_len; 9559 } else { 9560 ctsio->residual = 0; 9561 ctsio->kern_data_len = alloc_len; 9562 ctsio->kern_total_len = alloc_len; 9563 } 9564 ctsio->kern_data_resid = 0; 9565 ctsio->kern_rel_offset = 0; 9566 ctsio->kern_sg_entries = 0; 9567 9568 /* 9569 * The control device is always connected. The disk device, on the 9570 * other hand, may not be online all the time. 9571 */ 9572 if (lun != NULL) 9573 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9574 lun->be_lun->lun_type; 9575 else 9576 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9577 eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; 9578 scsi_ulto2b(data_len - 4, eid_ptr->page_length); 9579 /* 9580 * We support head of queue, ordered and simple tags. 9581 */ 9582 eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; 9583 /* 9584 * Volatile cache supported. 9585 */ 9586 eid_ptr->flags3 = SVPD_EID_V_SUP; 9587 9588 /* 9589 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit 9590 * attention for a particular IT nexus on all LUNs once we report 9591 * it to that nexus once. This bit is required as of SPC-4. 
9592 */ 9593 eid_ptr->flags4 = SVPD_EID_LUICLT; 9594 9595 /* 9596 * XXX KDM in order to correctly answer this, we would need 9597 * information from the SIM to determine how much sense data it 9598 * can send. So this would really be a path inquiry field, most 9599 * likely. This can be set to a maximum of 252 according to SPC-4, 9600 * but the hardware may or may not be able to support that much. 9601 * 0 just means that the maximum sense data length is not reported. 9602 */ 9603 eid_ptr->max_sense_length = 0; 9604 9605 ctl_set_success(ctsio); 9606 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9607 ctsio->be_move_done = ctl_config_move_done; 9608 ctl_datamove((union ctl_io *)ctsio); 9609 return (CTL_RETVAL_COMPLETE); 9610} 9611 9612static int 9613ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) 9614{ 9615 struct scsi_vpd_mode_page_policy *mpp_ptr; 9616 struct ctl_lun *lun; 9617 int data_len; 9618 9619 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9620 9621 data_len = sizeof(struct scsi_vpd_mode_page_policy) + 9622 sizeof(struct scsi_vpd_mode_page_policy_descr); 9623 9624 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9625 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; 9626 ctsio->kern_sg_entries = 0; 9627 9628 if (data_len < alloc_len) { 9629 ctsio->residual = alloc_len - data_len; 9630 ctsio->kern_data_len = data_len; 9631 ctsio->kern_total_len = data_len; 9632 } else { 9633 ctsio->residual = 0; 9634 ctsio->kern_data_len = alloc_len; 9635 ctsio->kern_total_len = alloc_len; 9636 } 9637 ctsio->kern_data_resid = 0; 9638 ctsio->kern_rel_offset = 0; 9639 ctsio->kern_sg_entries = 0; 9640 9641 /* 9642 * The control device is always connected. The disk device, on the 9643 * other hand, may not be online all the time. 
9644 */ 9645 if (lun != NULL) 9646 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9647 lun->be_lun->lun_type; 9648 else 9649 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9650 mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; 9651 scsi_ulto2b(data_len - 4, mpp_ptr->page_length); 9652 mpp_ptr->descr[0].page_code = 0x3f; 9653 mpp_ptr->descr[0].subpage_code = 0xff; 9654 mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; 9655 9656 ctl_set_success(ctsio); 9657 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9658 ctsio->be_move_done = ctl_config_move_done; 9659 ctl_datamove((union ctl_io *)ctsio); 9660 return (CTL_RETVAL_COMPLETE); 9661} 9662 9663/* 9664 * SCSI VPD page 0x83, the Device Identification page. 9665 */ 9666static int 9667ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9668{ 9669 struct scsi_vpd_device_id *devid_ptr; 9670 struct scsi_vpd_id_descriptor *desc; 9671 struct ctl_softc *softc; 9672 struct ctl_lun *lun; 9673 struct ctl_port *port; 9674 int data_len; 9675 uint8_t proto; 9676 9677 softc = control_softc; 9678 9679 port = ctl_io_port(&ctsio->io_hdr); 9680 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9681 9682 data_len = sizeof(struct scsi_vpd_device_id) + 9683 sizeof(struct scsi_vpd_id_descriptor) + 9684 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9685 sizeof(struct scsi_vpd_id_descriptor) + 9686 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9687 if (lun && lun->lun_devid) 9688 data_len += lun->lun_devid->len; 9689 if (port && port->port_devid) 9690 data_len += port->port_devid->len; 9691 if (port && port->target_devid) 9692 data_len += port->target_devid->len; 9693 9694 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9695 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9696 ctsio->kern_sg_entries = 0; 9697 9698 if (data_len < alloc_len) { 9699 ctsio->residual = alloc_len - data_len; 9700 ctsio->kern_data_len = data_len; 9701 ctsio->kern_total_len = data_len; 9702 } else { 9703 
ctsio->residual = 0; 9704 ctsio->kern_data_len = alloc_len; 9705 ctsio->kern_total_len = alloc_len; 9706 } 9707 ctsio->kern_data_resid = 0; 9708 ctsio->kern_rel_offset = 0; 9709 ctsio->kern_sg_entries = 0; 9710 9711 /* 9712 * The control device is always connected. The disk device, on the 9713 * other hand, may not be online all the time. 9714 */ 9715 if (lun != NULL) 9716 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9717 lun->be_lun->lun_type; 9718 else 9719 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9720 devid_ptr->page_code = SVPD_DEVICE_ID; 9721 scsi_ulto2b(data_len - 4, devid_ptr->length); 9722 9723 if (port && port->port_type == CTL_PORT_FC) 9724 proto = SCSI_PROTO_FC << 4; 9725 else if (port && port->port_type == CTL_PORT_ISCSI) 9726 proto = SCSI_PROTO_ISCSI << 4; 9727 else 9728 proto = SCSI_PROTO_SPI << 4; 9729 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9730 9731 /* 9732 * We're using a LUN association here. i.e., this device ID is a 9733 * per-LUN identifier. 9734 */ 9735 if (lun && lun->lun_devid) { 9736 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); 9737 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9738 lun->lun_devid->len); 9739 } 9740 9741 /* 9742 * This is for the WWPN which is a port association. 
9743 */ 9744 if (port && port->port_devid) { 9745 memcpy(desc, port->port_devid->data, port->port_devid->len); 9746 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9747 port->port_devid->len); 9748 } 9749 9750 /* 9751 * This is for the Relative Target Port(type 4h) identifier 9752 */ 9753 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9754 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9755 SVPD_ID_TYPE_RELTARG; 9756 desc->length = 4; 9757 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); 9758 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9759 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 9760 9761 /* 9762 * This is for the Target Port Group(type 5h) identifier 9763 */ 9764 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9765 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9766 SVPD_ID_TYPE_TPORTGRP; 9767 desc->length = 4; 9768 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port / softc->port_cnt + 1, 9769 &desc->identifier[2]); 9770 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9771 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 9772 9773 /* 9774 * This is for the Target identifier 9775 */ 9776 if (port && port->target_devid) { 9777 memcpy(desc, port->target_devid->data, port->target_devid->len); 9778 } 9779 9780 ctl_set_success(ctsio); 9781 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9782 ctsio->be_move_done = ctl_config_move_done; 9783 ctl_datamove((union ctl_io *)ctsio); 9784 return (CTL_RETVAL_COMPLETE); 9785} 9786 9787static int 9788ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 9789{ 9790 struct ctl_softc *softc = control_softc; 9791 struct scsi_vpd_scsi_ports *sp; 9792 struct scsi_vpd_port_designation *pd; 9793 struct scsi_vpd_port_designation_cont *pdc; 9794 struct ctl_lun *lun; 9795 struct ctl_port *port; 9796 int data_len, num_target_ports, iid_len, id_len; 9797 9798 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9799 9800 num_target_ports = 
0; 9801 iid_len = 0; 9802 id_len = 0; 9803 mtx_lock(&softc->ctl_lock); 9804 STAILQ_FOREACH(port, &softc->port_list, links) { 9805 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9806 continue; 9807 if (lun != NULL && 9808 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 9809 continue; 9810 num_target_ports++; 9811 if (port->init_devid) 9812 iid_len += port->init_devid->len; 9813 if (port->port_devid) 9814 id_len += port->port_devid->len; 9815 } 9816 mtx_unlock(&softc->ctl_lock); 9817 9818 data_len = sizeof(struct scsi_vpd_scsi_ports) + 9819 num_target_ports * (sizeof(struct scsi_vpd_port_designation) + 9820 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; 9821 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9822 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; 9823 ctsio->kern_sg_entries = 0; 9824 9825 if (data_len < alloc_len) { 9826 ctsio->residual = alloc_len - data_len; 9827 ctsio->kern_data_len = data_len; 9828 ctsio->kern_total_len = data_len; 9829 } else { 9830 ctsio->residual = 0; 9831 ctsio->kern_data_len = alloc_len; 9832 ctsio->kern_total_len = alloc_len; 9833 } 9834 ctsio->kern_data_resid = 0; 9835 ctsio->kern_rel_offset = 0; 9836 ctsio->kern_sg_entries = 0; 9837 9838 /* 9839 * The control device is always connected. The disk device, on the 9840 * other hand, may not be online all the time. Need to change this 9841 * to figure out whether the disk device is actually online or not. 
9842 */ 9843 if (lun != NULL) 9844 sp->device = (SID_QUAL_LU_CONNECTED << 5) | 9845 lun->be_lun->lun_type; 9846 else 9847 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9848 9849 sp->page_code = SVPD_SCSI_PORTS; 9850 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 9851 sp->page_length); 9852 pd = &sp->design[0]; 9853 9854 mtx_lock(&softc->ctl_lock); 9855 STAILQ_FOREACH(port, &softc->port_list, links) { 9856 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9857 continue; 9858 if (lun != NULL && 9859 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 9860 continue; 9861 scsi_ulto2b(port->targ_port, pd->relative_port_id); 9862 if (port->init_devid) { 9863 iid_len = port->init_devid->len; 9864 memcpy(pd->initiator_transportid, 9865 port->init_devid->data, port->init_devid->len); 9866 } else 9867 iid_len = 0; 9868 scsi_ulto2b(iid_len, pd->initiator_transportid_length); 9869 pdc = (struct scsi_vpd_port_designation_cont *) 9870 (&pd->initiator_transportid[iid_len]); 9871 if (port->port_devid) { 9872 id_len = port->port_devid->len; 9873 memcpy(pdc->target_port_descriptors, 9874 port->port_devid->data, port->port_devid->len); 9875 } else 9876 id_len = 0; 9877 scsi_ulto2b(id_len, pdc->target_port_descriptors_length); 9878 pd = (struct scsi_vpd_port_designation *) 9879 ((uint8_t *)pdc->target_port_descriptors + id_len); 9880 } 9881 mtx_unlock(&softc->ctl_lock); 9882 9883 ctl_set_success(ctsio); 9884 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9885 ctsio->be_move_done = ctl_config_move_done; 9886 ctl_datamove((union ctl_io *)ctsio); 9887 return (CTL_RETVAL_COMPLETE); 9888} 9889 9890static int 9891ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) 9892{ 9893 struct scsi_vpd_block_limits *bl_ptr; 9894 struct ctl_lun *lun; 9895 int bs; 9896 9897 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9898 9899 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); 9900 bl_ptr = (struct scsi_vpd_block_limits 
*)ctsio->kern_data_ptr; 9901 ctsio->kern_sg_entries = 0; 9902 9903 if (sizeof(*bl_ptr) < alloc_len) { 9904 ctsio->residual = alloc_len - sizeof(*bl_ptr); 9905 ctsio->kern_data_len = sizeof(*bl_ptr); 9906 ctsio->kern_total_len = sizeof(*bl_ptr); 9907 } else { 9908 ctsio->residual = 0; 9909 ctsio->kern_data_len = alloc_len; 9910 ctsio->kern_total_len = alloc_len; 9911 } 9912 ctsio->kern_data_resid = 0; 9913 ctsio->kern_rel_offset = 0; 9914 ctsio->kern_sg_entries = 0; 9915 9916 /* 9917 * The control device is always connected. The disk device, on the 9918 * other hand, may not be online all the time. Need to change this 9919 * to figure out whether the disk device is actually online or not. 9920 */ 9921 if (lun != NULL) 9922 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9923 lun->be_lun->lun_type; 9924 else 9925 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9926 9927 bl_ptr->page_code = SVPD_BLOCK_LIMITS; 9928 scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); 9929 bl_ptr->max_cmp_write_len = 0xff; 9930 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); 9931 if (lun != NULL) { 9932 bs = lun->be_lun->blocksize; 9933 scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); 9934 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9935 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_lba_cnt); 9936 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_blk_cnt); 9937 if (lun->be_lun->ublockexp != 0) { 9938 scsi_ulto4b((1 << lun->be_lun->ublockexp), 9939 bl_ptr->opt_unmap_grain); 9940 scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, 9941 bl_ptr->unmap_grain_align); 9942 } 9943 } 9944 scsi_ulto4b(lun->be_lun->atomicblock, 9945 bl_ptr->max_atomic_transfer_length); 9946 scsi_ulto4b(0, bl_ptr->atomic_alignment); 9947 scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); 9948 } 9949 scsi_u64to8b(UINT64_MAX, bl_ptr->max_write_same_length); 9950 9951 ctl_set_success(ctsio); 9952 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9953 ctsio->be_move_done = ctl_config_move_done; 9954 
ctl_datamove((union ctl_io *)ctsio); 9955 return (CTL_RETVAL_COMPLETE); 9956} 9957 9958static int 9959ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) 9960{ 9961 struct scsi_vpd_block_device_characteristics *bdc_ptr; 9962 struct ctl_lun *lun; 9963 const char *value; 9964 u_int i; 9965 9966 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9967 9968 ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); 9969 bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; 9970 ctsio->kern_sg_entries = 0; 9971 9972 if (sizeof(*bdc_ptr) < alloc_len) { 9973 ctsio->residual = alloc_len - sizeof(*bdc_ptr); 9974 ctsio->kern_data_len = sizeof(*bdc_ptr); 9975 ctsio->kern_total_len = sizeof(*bdc_ptr); 9976 } else { 9977 ctsio->residual = 0; 9978 ctsio->kern_data_len = alloc_len; 9979 ctsio->kern_total_len = alloc_len; 9980 } 9981 ctsio->kern_data_resid = 0; 9982 ctsio->kern_rel_offset = 0; 9983 ctsio->kern_sg_entries = 0; 9984 9985 /* 9986 * The control device is always connected. The disk device, on the 9987 * other hand, may not be online all the time. Need to change this 9988 * to figure out whether the disk device is actually online or not. 
9989 */ 9990 if (lun != NULL) 9991 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9992 lun->be_lun->lun_type; 9993 else 9994 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9995 bdc_ptr->page_code = SVPD_BDC; 9996 scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); 9997 if (lun != NULL && 9998 (value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL) 9999 i = strtol(value, NULL, 0); 10000 else 10001 i = CTL_DEFAULT_ROTATION_RATE; 10002 scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); 10003 if (lun != NULL && 10004 (value = ctl_get_opt(&lun->be_lun->options, "formfactor")) != NULL) 10005 i = strtol(value, NULL, 0); 10006 else 10007 i = 0; 10008 bdc_ptr->wab_wac_ff = (i & 0x0f); 10009 bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS; 10010 10011 ctl_set_success(ctsio); 10012 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10013 ctsio->be_move_done = ctl_config_move_done; 10014 ctl_datamove((union ctl_io *)ctsio); 10015 return (CTL_RETVAL_COMPLETE); 10016} 10017 10018static int 10019ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) 10020{ 10021 struct scsi_vpd_logical_block_prov *lbp_ptr; 10022 struct ctl_lun *lun; 10023 10024 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10025 10026 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); 10027 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; 10028 ctsio->kern_sg_entries = 0; 10029 10030 if (sizeof(*lbp_ptr) < alloc_len) { 10031 ctsio->residual = alloc_len - sizeof(*lbp_ptr); 10032 ctsio->kern_data_len = sizeof(*lbp_ptr); 10033 ctsio->kern_total_len = sizeof(*lbp_ptr); 10034 } else { 10035 ctsio->residual = 0; 10036 ctsio->kern_data_len = alloc_len; 10037 ctsio->kern_total_len = alloc_len; 10038 } 10039 ctsio->kern_data_resid = 0; 10040 ctsio->kern_rel_offset = 0; 10041 ctsio->kern_sg_entries = 0; 10042 10043 /* 10044 * The control device is always connected. The disk device, on the 10045 * other hand, may not be online all the time. 
Need to change this 10046 * to figure out whether the disk device is actually online or not. 10047 */ 10048 if (lun != NULL) 10049 lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10050 lun->be_lun->lun_type; 10051 else 10052 lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10053 10054 lbp_ptr->page_code = SVPD_LBP; 10055 scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); 10056 lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT; 10057 if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 10058 lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | 10059 SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP; 10060 lbp_ptr->prov_type = SVPD_LBP_THIN; 10061 } 10062 10063 ctl_set_success(ctsio); 10064 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10065 ctsio->be_move_done = ctl_config_move_done; 10066 ctl_datamove((union ctl_io *)ctsio); 10067 return (CTL_RETVAL_COMPLETE); 10068} 10069 10070/* 10071 * INQUIRY with the EVPD bit set. 10072 */ 10073static int 10074ctl_inquiry_evpd(struct ctl_scsiio *ctsio) 10075{ 10076 struct ctl_lun *lun; 10077 struct scsi_inquiry *cdb; 10078 int alloc_len, retval; 10079 10080 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10081 cdb = (struct scsi_inquiry *)ctsio->cdb; 10082 alloc_len = scsi_2btoul(cdb->length); 10083 10084 switch (cdb->page_code) { 10085 case SVPD_SUPPORTED_PAGES: 10086 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); 10087 break; 10088 case SVPD_UNIT_SERIAL_NUMBER: 10089 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); 10090 break; 10091 case SVPD_DEVICE_ID: 10092 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); 10093 break; 10094 case SVPD_EXTENDED_INQUIRY_DATA: 10095 retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); 10096 break; 10097 case SVPD_MODE_PAGE_POLICY: 10098 retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len); 10099 break; 10100 case SVPD_SCSI_PORTS: 10101 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); 10102 break; 10103 case SVPD_SCSI_TPC: 10104 retval = 
ctl_inquiry_evpd_tpc(ctsio, alloc_len); 10105 break; 10106 case SVPD_BLOCK_LIMITS: 10107 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10108 goto err; 10109 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); 10110 break; 10111 case SVPD_BDC: 10112 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10113 goto err; 10114 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); 10115 break; 10116 case SVPD_LBP: 10117 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10118 goto err; 10119 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); 10120 break; 10121 default: 10122err: 10123 ctl_set_invalid_field(ctsio, 10124 /*sks_valid*/ 1, 10125 /*command*/ 1, 10126 /*field*/ 2, 10127 /*bit_valid*/ 0, 10128 /*bit*/ 0); 10129 ctl_done((union ctl_io *)ctsio); 10130 retval = CTL_RETVAL_COMPLETE; 10131 break; 10132 } 10133 10134 return (retval); 10135} 10136 10137/* 10138 * Standard INQUIRY data. 10139 */ 10140static int 10141ctl_inquiry_std(struct ctl_scsiio *ctsio) 10142{ 10143 struct scsi_inquiry_data *inq_ptr; 10144 struct scsi_inquiry *cdb; 10145 struct ctl_softc *softc; 10146 struct ctl_port *port; 10147 struct ctl_lun *lun; 10148 char *val; 10149 uint32_t alloc_len, data_len; 10150 ctl_port_type port_type; 10151 10152 softc = control_softc; 10153 10154 /* 10155 * Figure out whether we're talking to a Fibre Channel port or not. 10156 * We treat the ioctl front end, and any SCSI adapters, as packetized 10157 * SCSI front ends. 10158 */ 10159 port = ctl_io_port(&ctsio->io_hdr); 10160 if (port != NULL) 10161 port_type = port->port_type; 10162 else 10163 port_type = CTL_PORT_SCSI; 10164 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) 10165 port_type = CTL_PORT_SCSI; 10166 10167 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10168 cdb = (struct scsi_inquiry *)ctsio->cdb; 10169 alloc_len = scsi_2btoul(cdb->length); 10170 10171 /* 10172 * We malloc the full inquiry data size here and fill it 10173 * in. 
If the user only asks for less, we'll give him 10174 * that much. 10175 */ 10176 data_len = offsetof(struct scsi_inquiry_data, vendor_specific1); 10177 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 10178 inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; 10179 ctsio->kern_sg_entries = 0; 10180 ctsio->kern_data_resid = 0; 10181 ctsio->kern_rel_offset = 0; 10182 10183 if (data_len < alloc_len) { 10184 ctsio->residual = alloc_len - data_len; 10185 ctsio->kern_data_len = data_len; 10186 ctsio->kern_total_len = data_len; 10187 } else { 10188 ctsio->residual = 0; 10189 ctsio->kern_data_len = alloc_len; 10190 ctsio->kern_total_len = alloc_len; 10191 } 10192 10193 if (lun != NULL) { 10194 if ((lun->flags & CTL_LUN_PRIMARY_SC) || 10195 softc->ha_link >= CTL_HA_LINK_UNKNOWN) { 10196 inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10197 lun->be_lun->lun_type; 10198 } else { 10199 inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | 10200 lun->be_lun->lun_type; 10201 } 10202 } else 10203 inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; 10204 10205 /* RMB in byte 2 is 0 */ 10206 inq_ptr->version = SCSI_REV_SPC4; 10207 10208 /* 10209 * According to SAM-3, even if a device only supports a single 10210 * level of LUN addressing, it should still set the HISUP bit: 10211 * 10212 * 4.9.1 Logical unit numbers overview 10213 * 10214 * All logical unit number formats described in this standard are 10215 * hierarchical in structure even when only a single level in that 10216 * hierarchy is used. The HISUP bit shall be set to one in the 10217 * standard INQUIRY data (see SPC-2) when any logical unit number 10218 * format described in this standard is used. Non-hierarchical 10219 * formats are outside the scope of this standard. 10220 * 10221 * Therefore we set the HiSup bit here. 10222 * 10223 * The reponse format is 2, per SPC-3. 
10224 */ 10225 inq_ptr->response_format = SID_HiSup | 2; 10226 10227 inq_ptr->additional_length = data_len - 10228 (offsetof(struct scsi_inquiry_data, additional_length) + 1); 10229 CTL_DEBUG_PRINT(("additional_length = %d\n", 10230 inq_ptr->additional_length)); 10231 10232 inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT; 10233 /* 16 bit addressing */ 10234 if (port_type == CTL_PORT_SCSI) 10235 inq_ptr->spc2_flags = SPC2_SID_ADDR16; 10236 /* XXX set the SID_MultiP bit here if we're actually going to 10237 respond on multiple ports */ 10238 inq_ptr->spc2_flags |= SPC2_SID_MultiP; 10239 10240 /* 16 bit data bus, synchronous transfers */ 10241 if (port_type == CTL_PORT_SCSI) 10242 inq_ptr->flags = SID_WBus16 | SID_Sync; 10243 /* 10244 * XXX KDM do we want to support tagged queueing on the control 10245 * device at all? 10246 */ 10247 if ((lun == NULL) 10248 || (lun->be_lun->lun_type != T_PROCESSOR)) 10249 inq_ptr->flags |= SID_CmdQue; 10250 /* 10251 * Per SPC-3, unused bytes in ASCII strings are filled with spaces. 10252 * We have 8 bytes for the vendor name, and 16 bytes for the device 10253 * name and 4 bytes for the revision. 
10254 */ 10255 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10256 "vendor")) == NULL) { 10257 strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); 10258 } else { 10259 memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); 10260 strncpy(inq_ptr->vendor, val, 10261 min(sizeof(inq_ptr->vendor), strlen(val))); 10262 } 10263 if (lun == NULL) { 10264 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10265 sizeof(inq_ptr->product)); 10266 } else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) { 10267 switch (lun->be_lun->lun_type) { 10268 case T_DIRECT: 10269 strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, 10270 sizeof(inq_ptr->product)); 10271 break; 10272 case T_PROCESSOR: 10273 strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT, 10274 sizeof(inq_ptr->product)); 10275 break; 10276 default: 10277 strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT, 10278 sizeof(inq_ptr->product)); 10279 break; 10280 } 10281 } else { 10282 memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); 10283 strncpy(inq_ptr->product, val, 10284 min(sizeof(inq_ptr->product), strlen(val))); 10285 } 10286 10287 /* 10288 * XXX make this a macro somewhere so it automatically gets 10289 * incremented when we make changes. 10290 */ 10291 if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, 10292 "revision")) == NULL) { 10293 strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); 10294 } else { 10295 memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); 10296 strncpy(inq_ptr->revision, val, 10297 min(sizeof(inq_ptr->revision), strlen(val))); 10298 } 10299 10300 /* 10301 * For parallel SCSI, we support double transition and single 10302 * transition clocking. We also support QAS (Quick Arbitration 10303 * and Selection) and Information Unit transfers on both the 10304 * control and array devices. 
10305 */ 10306 if (port_type == CTL_PORT_SCSI) 10307 inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | 10308 SID_SPI_IUS; 10309 10310 /* SAM-5 (no version claimed) */ 10311 scsi_ulto2b(0x00A0, inq_ptr->version1); 10312 /* SPC-4 (no version claimed) */ 10313 scsi_ulto2b(0x0460, inq_ptr->version2); 10314 if (port_type == CTL_PORT_FC) { 10315 /* FCP-2 ANSI INCITS.350:2003 */ 10316 scsi_ulto2b(0x0917, inq_ptr->version3); 10317 } else if (port_type == CTL_PORT_SCSI) { 10318 /* SPI-4 ANSI INCITS.362:200x */ 10319 scsi_ulto2b(0x0B56, inq_ptr->version3); 10320 } else if (port_type == CTL_PORT_ISCSI) { 10321 /* iSCSI (no version claimed) */ 10322 scsi_ulto2b(0x0960, inq_ptr->version3); 10323 } else if (port_type == CTL_PORT_SAS) { 10324 /* SAS (no version claimed) */ 10325 scsi_ulto2b(0x0BE0, inq_ptr->version3); 10326 } 10327 10328 if (lun == NULL) { 10329 /* SBC-4 (no version claimed) */ 10330 scsi_ulto2b(0x0600, inq_ptr->version4); 10331 } else { 10332 switch (lun->be_lun->lun_type) { 10333 case T_DIRECT: 10334 /* SBC-4 (no version claimed) */ 10335 scsi_ulto2b(0x0600, inq_ptr->version4); 10336 break; 10337 case T_PROCESSOR: 10338 default: 10339 break; 10340 } 10341 } 10342 10343 ctl_set_success(ctsio); 10344 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10345 ctsio->be_move_done = ctl_config_move_done; 10346 ctl_datamove((union ctl_io *)ctsio); 10347 return (CTL_RETVAL_COMPLETE); 10348} 10349 10350int 10351ctl_inquiry(struct ctl_scsiio *ctsio) 10352{ 10353 struct scsi_inquiry *cdb; 10354 int retval; 10355 10356 CTL_DEBUG_PRINT(("ctl_inquiry\n")); 10357 10358 cdb = (struct scsi_inquiry *)ctsio->cdb; 10359 if (cdb->byte2 & SI_EVPD) 10360 retval = ctl_inquiry_evpd(ctsio); 10361 else if (cdb->page_code == 0) 10362 retval = ctl_inquiry_std(ctsio); 10363 else { 10364 ctl_set_invalid_field(ctsio, 10365 /*sks_valid*/ 1, 10366 /*command*/ 1, 10367 /*field*/ 2, 10368 /*bit_valid*/ 0, 10369 /*bit*/ 0); 10370 ctl_done((union ctl_io *)ctsio); 10371 return (CTL_RETVAL_COMPLETE); 
10372 } 10373 10374 return (retval); 10375} 10376 10377/* 10378 * For known CDB types, parse the LBA and length. 10379 */ 10380static int 10381ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len) 10382{ 10383 if (io->io_hdr.io_type != CTL_IO_SCSI) 10384 return (1); 10385 10386 switch (io->scsiio.cdb[0]) { 10387 case COMPARE_AND_WRITE: { 10388 struct scsi_compare_and_write *cdb; 10389 10390 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; 10391 10392 *lba = scsi_8btou64(cdb->addr); 10393 *len = cdb->length; 10394 break; 10395 } 10396 case READ_6: 10397 case WRITE_6: { 10398 struct scsi_rw_6 *cdb; 10399 10400 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; 10401 10402 *lba = scsi_3btoul(cdb->addr); 10403 /* only 5 bits are valid in the most significant address byte */ 10404 *lba &= 0x1fffff; 10405 *len = cdb->length; 10406 break; 10407 } 10408 case READ_10: 10409 case WRITE_10: { 10410 struct scsi_rw_10 *cdb; 10411 10412 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; 10413 10414 *lba = scsi_4btoul(cdb->addr); 10415 *len = scsi_2btoul(cdb->length); 10416 break; 10417 } 10418 case WRITE_VERIFY_10: { 10419 struct scsi_write_verify_10 *cdb; 10420 10421 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; 10422 10423 *lba = scsi_4btoul(cdb->addr); 10424 *len = scsi_2btoul(cdb->length); 10425 break; 10426 } 10427 case READ_12: 10428 case WRITE_12: { 10429 struct scsi_rw_12 *cdb; 10430 10431 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; 10432 10433 *lba = scsi_4btoul(cdb->addr); 10434 *len = scsi_4btoul(cdb->length); 10435 break; 10436 } 10437 case WRITE_VERIFY_12: { 10438 struct scsi_write_verify_12 *cdb; 10439 10440 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; 10441 10442 *lba = scsi_4btoul(cdb->addr); 10443 *len = scsi_4btoul(cdb->length); 10444 break; 10445 } 10446 case READ_16: 10447 case WRITE_16: 10448 case WRITE_ATOMIC_16: { 10449 struct scsi_rw_16 *cdb; 10450 10451 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; 10452 10453 *lba = scsi_8btou64(cdb->addr); 
10454 *len = scsi_4btoul(cdb->length); 10455 break; 10456 } 10457 case WRITE_VERIFY_16: { 10458 struct scsi_write_verify_16 *cdb; 10459 10460 cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; 10461 10462 *lba = scsi_8btou64(cdb->addr); 10463 *len = scsi_4btoul(cdb->length); 10464 break; 10465 } 10466 case WRITE_SAME_10: { 10467 struct scsi_write_same_10 *cdb; 10468 10469 cdb = (struct scsi_write_same_10 *)io->scsiio.cdb; 10470 10471 *lba = scsi_4btoul(cdb->addr); 10472 *len = scsi_2btoul(cdb->length); 10473 break; 10474 } 10475 case WRITE_SAME_16: { 10476 struct scsi_write_same_16 *cdb; 10477 10478 cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; 10479 10480 *lba = scsi_8btou64(cdb->addr); 10481 *len = scsi_4btoul(cdb->length); 10482 break; 10483 } 10484 case VERIFY_10: { 10485 struct scsi_verify_10 *cdb; 10486 10487 cdb = (struct scsi_verify_10 *)io->scsiio.cdb; 10488 10489 *lba = scsi_4btoul(cdb->addr); 10490 *len = scsi_2btoul(cdb->length); 10491 break; 10492 } 10493 case VERIFY_12: { 10494 struct scsi_verify_12 *cdb; 10495 10496 cdb = (struct scsi_verify_12 *)io->scsiio.cdb; 10497 10498 *lba = scsi_4btoul(cdb->addr); 10499 *len = scsi_4btoul(cdb->length); 10500 break; 10501 } 10502 case VERIFY_16: { 10503 struct scsi_verify_16 *cdb; 10504 10505 cdb = (struct scsi_verify_16 *)io->scsiio.cdb; 10506 10507 *lba = scsi_8btou64(cdb->addr); 10508 *len = scsi_4btoul(cdb->length); 10509 break; 10510 } 10511 case UNMAP: { 10512 *lba = 0; 10513 *len = UINT64_MAX; 10514 break; 10515 } 10516 case SERVICE_ACTION_IN: { /* GET LBA STATUS */ 10517 struct scsi_get_lba_status *cdb; 10518 10519 cdb = (struct scsi_get_lba_status *)io->scsiio.cdb; 10520 *lba = scsi_8btou64(cdb->addr); 10521 *len = UINT32_MAX; 10522 break; 10523 } 10524 default: 10525 return (1); 10526 break; /* NOTREACHED */ 10527 } 10528 10529 return (0); 10530} 10531 10532static ctl_action 10533ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2, 10534 bool seq) 10535{ 10536 
uint64_t endlba1, endlba2; 10537 10538 endlba1 = lba1 + len1 - (seq ? 0 : 1); 10539 endlba2 = lba2 + len2 - 1; 10540 10541 if ((endlba1 < lba2) || (endlba2 < lba1)) 10542 return (CTL_ACTION_PASS); 10543 else 10544 return (CTL_ACTION_BLOCK); 10545} 10546 10547static int 10548ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2) 10549{ 10550 struct ctl_ptr_len_flags *ptrlen; 10551 struct scsi_unmap_desc *buf, *end, *range; 10552 uint64_t lba; 10553 uint32_t len; 10554 10555 /* If not UNMAP -- go other way. */ 10556 if (io->io_hdr.io_type != CTL_IO_SCSI || 10557 io->scsiio.cdb[0] != UNMAP) 10558 return (CTL_ACTION_ERROR); 10559 10560 /* If UNMAP without data -- block and wait for data. */ 10561 ptrlen = (struct ctl_ptr_len_flags *) 10562 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 10563 if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || 10564 ptrlen->ptr == NULL) 10565 return (CTL_ACTION_BLOCK); 10566 10567 /* UNMAP with data -- check for collision. */ 10568 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 10569 end = buf + ptrlen->len / sizeof(*buf); 10570 for (range = buf; range < end; range++) { 10571 lba = scsi_8btou64(range->lba); 10572 len = scsi_4btoul(range->length); 10573 if ((lba < lba2 + len2) && (lba + len > lba2)) 10574 return (CTL_ACTION_BLOCK); 10575 } 10576 return (CTL_ACTION_PASS); 10577} 10578 10579static ctl_action 10580ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq) 10581{ 10582 uint64_t lba1, lba2; 10583 uint64_t len1, len2; 10584 int retval; 10585 10586 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10587 return (CTL_ACTION_ERROR); 10588 10589 retval = ctl_extent_check_unmap(io1, lba2, len2); 10590 if (retval != CTL_ACTION_ERROR) 10591 return (retval); 10592 10593 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10594 return (CTL_ACTION_ERROR); 10595 10596 return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq)); 10597} 10598 10599static ctl_action 10600ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2) 10601{ 
10602 uint64_t lba1, lba2; 10603 uint64_t len1, len2; 10604 10605 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10606 return (CTL_ACTION_ERROR); 10607 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10608 return (CTL_ACTION_ERROR); 10609 10610 if (lba1 + len1 == lba2) 10611 return (CTL_ACTION_BLOCK); 10612 return (CTL_ACTION_PASS); 10613} 10614 10615static ctl_action 10616ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, 10617 union ctl_io *ooa_io) 10618{ 10619 const struct ctl_cmd_entry *pending_entry, *ooa_entry; 10620 ctl_serialize_action *serialize_row; 10621 10622 /* 10623 * The initiator attempted multiple untagged commands at the same 10624 * time. Can't do that. 10625 */ 10626 if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10627 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10628 && ((pending_io->io_hdr.nexus.targ_port == 10629 ooa_io->io_hdr.nexus.targ_port) 10630 && (pending_io->io_hdr.nexus.initid == 10631 ooa_io->io_hdr.nexus.initid)) 10632 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10633 CTL_FLAG_STATUS_SENT)) == 0)) 10634 return (CTL_ACTION_OVERLAP); 10635 10636 /* 10637 * The initiator attempted to send multiple tagged commands with 10638 * the same ID. (It's fine if different initiators have the same 10639 * tag ID.) 10640 * 10641 * Even if all of those conditions are true, we don't kill the I/O 10642 * if the command ahead of us has been aborted. We won't end up 10643 * sending it to the FETD, and it's perfectly legal to resend a 10644 * command with the same tag number as long as the previous 10645 * instance of this tag number has been aborted somehow. 
10646 */ 10647 if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10648 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) 10649 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) 10650 && ((pending_io->io_hdr.nexus.targ_port == 10651 ooa_io->io_hdr.nexus.targ_port) 10652 && (pending_io->io_hdr.nexus.initid == 10653 ooa_io->io_hdr.nexus.initid)) 10654 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | 10655 CTL_FLAG_STATUS_SENT)) == 0)) 10656 return (CTL_ACTION_OVERLAP_TAG); 10657 10658 /* 10659 * If we get a head of queue tag, SAM-3 says that we should 10660 * immediately execute it. 10661 * 10662 * What happens if this command would normally block for some other 10663 * reason? e.g. a request sense with a head of queue tag 10664 * immediately after a write. Normally that would block, but this 10665 * will result in its getting executed immediately... 10666 * 10667 * We currently return "pass" instead of "skip", so we'll end up 10668 * going through the rest of the queue to check for overlapped tags. 10669 * 10670 * XXX KDM check for other types of blockage first?? 10671 */ 10672 if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10673 return (CTL_ACTION_PASS); 10674 10675 /* 10676 * Ordered tags have to block until all items ahead of them 10677 * have completed. If we get called with an ordered tag, we always 10678 * block, if something else is ahead of us in the queue. 10679 */ 10680 if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED) 10681 return (CTL_ACTION_BLOCK); 10682 10683 /* 10684 * Simple tags get blocked until all head of queue and ordered tags 10685 * ahead of them have completed. I'm lumping untagged commands in 10686 * with simple tags here. XXX KDM is that the right thing to do? 
10687 */ 10688 if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) 10689 || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE)) 10690 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) 10691 || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED))) 10692 return (CTL_ACTION_BLOCK); 10693 10694 pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL); 10695 ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL); 10696 10697 serialize_row = ctl_serialize_table[ooa_entry->seridx]; 10698 10699 switch (serialize_row[pending_entry->seridx]) { 10700 case CTL_SER_BLOCK: 10701 return (CTL_ACTION_BLOCK); 10702 case CTL_SER_EXTENT: 10703 return (ctl_extent_check(ooa_io, pending_io, 10704 (lun->be_lun && lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 10705 case CTL_SER_EXTENTOPT: 10706 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags 10707 & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED) 10708 return (ctl_extent_check(ooa_io, pending_io, 10709 (lun->be_lun && 10710 lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); 10711 return (CTL_ACTION_PASS); 10712 case CTL_SER_EXTENTSEQ: 10713 if (lun->be_lun && lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF) 10714 return (ctl_extent_check_seq(ooa_io, pending_io)); 10715 return (CTL_ACTION_PASS); 10716 case CTL_SER_PASS: 10717 return (CTL_ACTION_PASS); 10718 case CTL_SER_BLOCKOPT: 10719 if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags 10720 & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED) 10721 return (CTL_ACTION_BLOCK); 10722 return (CTL_ACTION_PASS); 10723 case CTL_SER_SKIP: 10724 return (CTL_ACTION_SKIP); 10725 default: 10726 panic("invalid serialization value %d", 10727 serialize_row[pending_entry->seridx]); 10728 } 10729 10730 return (CTL_ACTION_ERROR); 10731} 10732 10733/* 10734 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. 
 * Assumptions:
 * - pending_io is generally either incoming, or on the blocked queue
 * - starting I/O is the I/O we want to start the check with.
 *
 * Returns the first non-PASS action reported by ctl_check_for_blockage()
 * for any I/O ahead of pending_io, or CTL_ACTION_PASS if nothing blocks it.
 */
static ctl_action
ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
	      union ctl_io *starting_io)
{
	union ctl_io *ooa_io;
	ctl_action action;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	/*
	 * Run back along the OOA queue, starting with the current
	 * blocked I/O and going through every I/O before it on the
	 * queue.  If starting_io is NULL, we'll just end up returning
	 * CTL_ACTION_PASS.
	 */
	for (ooa_io = starting_io; ooa_io != NULL;
	     ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq,
	     ooa_links)){

		/*
		 * This routine just checks to see whether
		 * cur_blocked (i.e. this function's pending_io) is
		 * blocked by ooa_io, which is ahead
		 * of it in the queue.  It doesn't queue/dequeue
		 * cur_blocked.
		 */
		action = ctl_check_for_blockage(lun, pending_io, ooa_io);
		switch (action) {
		case CTL_ACTION_BLOCK:
		case CTL_ACTION_OVERLAP:
		case CTL_ACTION_OVERLAP_TAG:
		case CTL_ACTION_SKIP:
		case CTL_ACTION_ERROR:
			/* First non-PASS verdict wins; stop scanning. */
			return (action);
			break; /* NOTREACHED */
		case CTL_ACTION_PASS:
			break;
		default:
			panic("invalid action %d", action);
			break; /* NOTREACHED */
		}
	}

	return (CTL_ACTION_PASS);
}

/*
 * Assumptions:
 * - An I/O has just completed, and has been removed from the per-LUN OOA
 *   queue, so some items on the blocked queue may now be unblocked.
 *
 * Re-evaluates every entry on the LUN's blocked queue; entries that are no
 * longer blocked are either handed back to the peer controller (non-XFER HA
 * modes), queued to the run-to-router queue, or completed with an error from
 * ctl_scsiio_lun_check().  Always returns CTL_RETVAL_COMPLETE.
 */
static int
ctl_check_blocked(struct ctl_lun *lun)
{
	struct ctl_softc *softc = lun->ctl_softc;
	union ctl_io *cur_blocked, *next_blocked;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	/*
	 * Run forward from the head of the blocked queue, checking each
	 * entry against the I/Os prior to it on the OOA queue to see if
	 * there is still any blockage.
	 *
	 * We cannot use the TAILQ_FOREACH() macro, because it can't deal
	 * with our removing a variable on it while it is traversing the
	 * list.
	 */
	for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue);
	     cur_blocked != NULL; cur_blocked = next_blocked) {
		union ctl_io *prev_ooa;
		ctl_action action;

		/* Save the next entry now, in case cur_blocked is removed. */
		next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr,
							  blocked_links);

		prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr,
						      ctl_ooaq, ooa_links);

		/*
		 * If cur_blocked happens to be the first item in the OOA
		 * queue now, prev_ooa will be NULL, and the action
		 * returned will just be CTL_ACTION_PASS.
		 */
		action = ctl_check_ooa(lun, cur_blocked, prev_ooa);

		switch (action) {
		case CTL_ACTION_BLOCK:
			/* Nothing to do here, still blocked */
			break;
		case CTL_ACTION_OVERLAP:
		case CTL_ACTION_OVERLAP_TAG:
			/*
			 * This shouldn't happen!  In theory we've already
			 * checked this command for overlap...
			 */
			break;
		case CTL_ACTION_PASS:
		case CTL_ACTION_SKIP: {
			const struct ctl_cmd_entry *entry;

			/*
			 * The skip case shouldn't happen, this transaction
			 * should have never made it onto the blocked queue.
			 */
			/*
			 * This I/O is no longer blocked, we can remove it
			 * from the blocked queue.  Since this is a TAILQ
			 * (doubly linked list), we can do O(1) removals
			 * from any place on the list.
			 */
			TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr,
				     blocked_links);
			cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED;

			if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
			    (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)){
				/*
				 * Need to send IO back to original side to
				 * run.  Note only msg_info.hdr is sent
				 * (sizeof(msg_info.hdr) below); the rest of
				 * the union is intentionally left unset.
				 */
				union ctl_ha_msg msg_info;

				cur_blocked->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
				msg_info.hdr.original_sc =
					cur_blocked->io_hdr.original_sc;
				msg_info.hdr.serializing_sc = cur_blocked;
				msg_info.hdr.msg_type = CTL_MSG_R2R;
				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
				    sizeof(msg_info.hdr), M_NOWAIT);
				break;
			}
			entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL);

			/*
			 * Check this I/O for LUN state changes that may
			 * have happened while this command was blocked.
			 * The LUN state may have been changed by a command
			 * ahead of us in the queue, so we need to re-check
			 * for any states that can be caused by SCSI
			 * commands.
			 */
			if (ctl_scsiio_lun_check(lun, entry,
			    &cur_blocked->scsiio) == 0) {
				cur_blocked->io_hdr.flags |=
				    CTL_FLAG_IS_WAS_ON_RTR;
				ctl_enqueue_rtr(cur_blocked);
			} else
				ctl_done(cur_blocked);
			break;
		}
		default:
			/*
			 * This probably shouldn't happen -- we shouldn't
			 * get CTL_ACTION_ERROR, or anything else.
			 */
			break;
		}
	}

	return (CTL_RETVAL_COMPLETE);
}

/*
 * This routine (with one exception) checks LUN flags that can be set by
 * commands ahead of us in the OOA queue.  These flags have to be checked
 * when a command initially comes in, and when we pull a command off the
 * blocked queue and are preparing to execute it.
The reason we have to
 * check these flags for commands on the blocked queue is that the LUN
 * state may have been changed by a command ahead of us while we're on the
 * blocked queue.
 *
 * Ordering is somewhat important with these checks, so please pay
 * careful attention to the placement of any new checks.
 *
 * Returns 0 if the command may proceed; returns 1 after having set the
 * appropriate sense/status on ctsio (the caller is then expected to
 * complete the I/O).  Called with the LUN lock held.
 */
static int
ctl_scsiio_lun_check(struct ctl_lun *lun,
    const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = lun->ctl_softc;
	int retval;
	uint32_t residx;

	retval = 0;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	/*
	 * If this shelf is a secondary shelf controller, we may have to
	 * reject some commands disallowed by HA mode and link state.
	 */
	if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) {
		if (softc->ha_link == CTL_HA_LINK_OFFLINE &&
		    (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) {
			ctl_set_lun_unavail(ctsio);
			retval = 1;
			goto bailout;
		}
		if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 &&
		    (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) {
			ctl_set_lun_transit(ctsio);
			retval = 1;
			goto bailout;
		}
		if (softc->ha_mode == CTL_HA_MODE_ACT_STBY &&
		    (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) {
			ctl_set_lun_standby(ctsio);
			retval = 1;
			goto bailout;
		}

		/* The rest of checks are only done on executing side */
		if (softc->ha_mode == CTL_HA_MODE_XFER)
			goto bailout;
	}

	if (entry->pattern & CTL_LUN_PAT_WRITE) {
		/* Hardware (backend) write protection. */
		if (lun->be_lun &&
		    lun->be_lun->flags & CTL_LUN_FLAG_READONLY) {
			ctl_set_hw_write_protected(ctsio);
			retval = 1;
			goto bailout;
		}
		/* Software write protect (SWP bit in the control mode page). */
		if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT]
		    .eca_and_aen & SCP_SWP) != 0) {
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_DATA_PROTECT,
			    /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE);
			retval = 1;
			goto bailout;
		}
	}

	/*
	 * Check for a reservation conflict.  If this command isn't allowed
	 * even on reserved LUNs, and if this initiator isn't the one who
	 * reserved us, reject the command with a reservation conflict.
	 */
	residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	if ((lun->flags & CTL_LUN_RESERVED)
	 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) {
		if (lun->res_idx != residx) {
			ctl_set_reservation_conflict(ctsio);
			retval = 1;
			goto bailout;
		}
	}

	/* Persistent reservation (PERSISTENT RESERVE OUT) checks. */
	if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 ||
	    (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) {
		/* No reservation or command is allowed. */;
	} else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) &&
	    (lun->res_type == SPR_TYPE_WR_EX ||
	     lun->res_type == SPR_TYPE_WR_EX_RO ||
	     lun->res_type == SPR_TYPE_WR_EX_AR)) {
		/* The command is allowed for Write Exclusive resv. */;
	} else {
		/*
		 * if we aren't registered or it's a res holder type
		 * reservation and this isn't the res holder then set a
		 * conflict.
		 * NOTE(review): "res_type < 4" appears to select the
		 * single-holder SPR_TYPE_* values by numeric range —
		 * confirm against the SPR_TYPE_* definitions.
		 */
		if (ctl_get_prkey(lun, residx) == 0
		 || (residx != lun->pr_res_idx && lun->res_type < 4)) {
			ctl_set_reservation_conflict(ctsio);
			retval = 1;
			goto bailout;
		}
	}

	if ((lun->flags & CTL_LUN_OFFLINE)
	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0)) {
		ctl_set_lun_not_ready(ctsio);
		retval = 1;
		goto bailout;
	}

	if ((lun->flags & CTL_LUN_STOPPED)
	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) {
		/* "Logical unit not ready, initializing cmd. required" */
		ctl_set_lun_stopped(ctsio);
		retval = 1;
		goto bailout;
	}

	if ((lun->flags & CTL_LUN_INOPERABLE)
	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) {
		/* "Medium format corrupted" */
		ctl_set_medium_format_corrupted(ctsio);
		retval = 1;
		goto bailout;
	}

bailout:
	return (retval);
}

/*
 * Fail an I/O with BUSY status during failover.
 * NOTE(review): the have_lock parameter is currently unused here.
 */
static void
ctl_failover_io(union ctl_io *io, int have_lock)
{
	ctl_set_busy(&io->scsiio);
	ctl_done(io);
}

/*
 * Clean up a LUN's queues when the HA peer controller goes away:
 * abort, bounce back, or free I/Os depending on HA mode and on which
 * side (master/slave) each I/O originated.
 */
static void
ctl_failover_lun(struct ctl_lun *lun)
{
	struct ctl_softc *softc = lun->ctl_softc;
	struct ctl_io_hdr *io, *next_io;

	CTL_DEBUG_PRINT(("FAILOVER for lun %ju\n", lun->lun));
	if (softc->ha_mode == CTL_HA_MODE_XFER) {
		TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) {
			/* We are master */
			if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
				if (io->flags & CTL_FLAG_IO_ACTIVE) {
					io->flags |= CTL_FLAG_ABORT;
					io->flags |= CTL_FLAG_FAILOVER;
				} else { /* This can be only due to DATAMOVE */
					io->msg_type = CTL_MSG_DATAMOVE_DONE;
					io->flags |= CTL_FLAG_IO_ACTIVE;
					/* Sentinel port_status marking failover. */
					io->port_status = 31340;
					ctl_enqueue_isc((union ctl_io *)io);
				}
			}
			/* We are slave */
			if (io->flags & CTL_FLAG_SENT_2OTHER_SC) {
				io->flags &= ~CTL_FLAG_SENT_2OTHER_SC;
				if (io->flags & CTL_FLAG_IO_ACTIVE) {
					io->flags |= CTL_FLAG_FAILOVER;
				} else {
					ctl_set_busy(&((union ctl_io *)io)->
					    scsiio);
					ctl_done((union ctl_io *)io);
				}
			}
		}
	} else { /* SERIALIZE modes */
		TAILQ_FOREACH_SAFE(io, &lun->blocked_queue, blocked_links,
		    next_io) {
			/* We are master */
			if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
				/* Drop the peer's I/O from both queues. */
				TAILQ_REMOVE(&lun->blocked_queue, io,
				    blocked_links);
				io->flags &= ~CTL_FLAG_BLOCKED;
				TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links);
				ctl_free_io((union ctl_io *)io);
			}
		}
		TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) {
			/* We are master */
			if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
				TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links);
				ctl_free_io((union ctl_io *)io);
			}
			/* We are slave */
			if (io->flags & CTL_FLAG_SENT_2OTHER_SC) {
				io->flags &= ~CTL_FLAG_SENT_2OTHER_SC;
				if (!(io->flags & CTL_FLAG_IO_ACTIVE)) {
					ctl_set_busy(&((union ctl_io *)io)->
					    scsiio);
					ctl_done((union ctl_io *)io);
				}
			}
		}
		/* Removing peer I/Os may have unblocked local ones. */
		ctl_check_blocked(lun);
	}
}

/*
 * First-stage processing of an incoming SCSI I/O: look up the LUN, insert
 * the I/O on the LUN's OOA queue, validate the CDB, report pending unit
 * attentions, run the LUN state checks, hand the I/O to the HA peer when
 * this side is secondary, and finally serialize it against the OOA queue
 * (run, block, or fail).  Always returns 0 (retval is never changed);
 * completion status is delivered through ctl_done() on the error paths.
 */
static int
ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	const struct ctl_cmd_entry *entry;
	uint32_t initidx, targ_lun;
	int retval;

	retval = 0;

	lun = NULL;

	targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
	if ((targ_lun < CTL_MAX_LUNS)
	 && ((lun = softc->ctl_luns[targ_lun]) != NULL)) {
		/*
		 * If the LUN is invalid, pretend that it doesn't exist.
		 * It will go away as soon as all pending I/O has been
		 * completed.
		 */
		mtx_lock(&lun->lun_lock);
		if (lun->flags & CTL_LUN_DISABLED) {
			mtx_unlock(&lun->lun_lock);
			lun = NULL;
			ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
			ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
		} else {
			ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
			ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr =
				lun->be_lun;

			/*
			 * Every I/O goes into the OOA queue for a
			 * particular LUN, and stays there until completion.
			 */
#ifdef CTL_TIME_IO
			if (TAILQ_EMPTY(&lun->ooa_queue)) {
				lun->idle_time += getsbinuptime() -
				    lun->last_busy;
			}
#endif
			TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr,
			    ooa_links);
		}
	} else {
		ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
		ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
	}

	/* Get command entry and return error if it is unsupported. */
	entry = ctl_validate_command(ctsio);
	if (entry == NULL) {
		/* ctl_validate_command() already completed the I/O. */
		if (lun)
			mtx_unlock(&lun->lun_lock);
		return (retval);
	}

	ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
	ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK;

	/*
	 * Check to see whether we can send this command to LUNs that don't
	 * exist.  This should pretty much only be the case for inquiry
	 * and request sense.  Further checks, below, really require having
	 * a LUN, so we can't really check the command anymore.  Just put
	 * it on the rtr queue.
	 */
	if (lun == NULL) {
		if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) {
			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
			ctl_enqueue_rtr((union ctl_io *)ctsio);
			return (retval);
		}

		ctl_set_unsupported_lun(ctsio);
		ctl_done((union ctl_io *)ctsio);
		CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n"));
		return (retval);
	} else {
		/*
		 * Make sure we support this particular command on this LUN.
		 * e.g., we don't support writes to the control LUN.
		 */
		if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
			mtx_unlock(&lun->lun_lock);
			ctl_set_invalid_opcode(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (retval);
		}
	}

	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);

#ifdef CTL_WITH_CA
	/*
	 * If we've got a request sense, it'll clear the contingent
	 * allegiance condition.  Otherwise, if we have a CA condition for
	 * this initiator, clear it, because it sent down a command other
	 * than request sense.
	 */
	if ((ctsio->cdb[0] != REQUEST_SENSE)
	 && (ctl_is_set(lun->have_ca, initidx)))
		ctl_clear_mask(lun->have_ca, initidx);
#endif

	/*
	 * If the command has this flag set, it handles its own unit
	 * attention reporting, we shouldn't do anything.  Otherwise we
	 * check for any pending unit attentions, and send them back to the
	 * initiator.  We only do this when a command initially comes in,
	 * not when we pull it off the blocked queue.
	 *
	 * According to SAM-3, section 5.3.2, the order that things get
	 * presented back to the host is basically unit attentions caused
	 * by some sort of reset event, busy status, reservation conflicts
	 * or task set full, and finally any other status.
	 *
	 * One issue here is that some of the unit attentions we report
	 * don't fall into the "reset" category (e.g. "reported luns data
	 * has changed").  So reporting it here, before the reservation
	 * check, may be technically wrong.  I guess the only thing to do
	 * would be to check for and report the reset events here, and then
	 * check for the other unit attention types after we check for a
	 * reservation conflict.
	 *
	 * XXX KDM need to fix this
	 */
	if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) {
		ctl_ua_type ua_type;

		ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data,
		    SSD_TYPE_NONE);
		if (ua_type != CTL_UA_NONE) {
			/* Deliver the unit attention as CHECK CONDITION. */
			mtx_unlock(&lun->lun_lock);
			ctsio->scsi_status = SCSI_STATUS_CHECK_COND;
			ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
			ctsio->sense_len = SSD_FULL_SIZE;
			ctl_done((union ctl_io *)ctsio);
			return (retval);
		}
	}


	if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
		/* The check routine already set the sense/status. */
		mtx_unlock(&lun->lun_lock);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}

	/*
	 * XXX CHD this is where we want to send IO to other side if
	 * this LUN is secondary on this SC. We will need to make a copy
	 * of the IO and flag the IO on this side as SENT_2OTHER and the flag
	 * the copy we send as FROM_OTHER.
	 * We also need to stuff the address of the original IO so we can
	 * find it easily. Something similar will need be done on the other
	 * side so when we are done we can find the copy.
	 */
	if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
	    (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0) {
		union ctl_ha_msg msg_info;
		int isc_retval;

		ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
		ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
		mtx_unlock(&lun->lun_lock);

		msg_info.hdr.msg_type = CTL_MSG_SERIALIZE;
		msg_info.hdr.original_sc = (union ctl_io *)ctsio;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.nexus = ctsio->io_hdr.nexus;
		msg_info.scsi.tag_num = ctsio->tag_num;
		msg_info.scsi.tag_type = ctsio->tag_type;
		msg_info.scsi.cdb_len = ctsio->cdb_len;
		memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN);

		/* Send everything except the (unused) sense data portion. */
		if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data),
		    M_WAITOK)) > CTL_HA_STATUS_SUCCESS) {
			ctl_set_busy(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (retval);
		}
		return (retval);
	}

	/* Serialize against everything ahead of us on the OOA queue. */
	switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
	    (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr,
	    ctl_ooaq, ooa_links))) {
	case CTL_ACTION_BLOCK:
		ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
		TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
		    blocked_links);
		mtx_unlock(&lun->lun_lock);
		return (retval);
	case CTL_ACTION_PASS:
	case CTL_ACTION_SKIP:
		ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
		mtx_unlock(&lun->lun_lock);
		ctl_enqueue_rtr((union ctl_io *)ctsio);
		break;
	case CTL_ACTION_OVERLAP:
		mtx_unlock(&lun->lun_lock);
		ctl_set_overlapped_cmd(ctsio);
		ctl_done((union ctl_io *)ctsio);
		break;
	case CTL_ACTION_OVERLAP_TAG:
		mtx_unlock(&lun->lun_lock);
		ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff);
		ctl_done((union ctl_io *)ctsio);
		break;
	case CTL_ACTION_ERROR:
	default:
		mtx_unlock(&lun->lun_lock);
		ctl_set_internal_failure(ctsio,
		    /*sks_valid*/ 0,
		    /*retry_count*/ 0);
		ctl_done((union ctl_io *)ctsio);
		break;
	}
	return (retval);
}

/*
 * Look up the command table entry for this CDB; for service-action-in
 * style opcodes (CTL_CMD_FLAG_SA5) descend into the per-service-action
 * sub-table.  If sa is non-NULL it is set to whether the opcode uses a
 * service action.  Never returns NULL (unknown opcodes map to entries
 * with a NULL execute handler).
 */
const struct ctl_cmd_entry *
ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa)
{
	const struct ctl_cmd_entry *entry;
	int service_action;

	entry = &ctl_cmd_table[ctsio->cdb[0]];
	if (sa)
		*sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0);
	if (entry->flags & CTL_CMD_FLAG_SA5) {
		service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK;
		/* For SA5 entries, execute points at the sub-table. */
		entry = &((const struct ctl_cmd_entry *)
		    entry->execute)[service_action];
	}
	return (entry);
}

/*
 * Validate the CDB: reject unimplemented opcodes/service actions and any
 * CDB bytes with bits set outside the entry's usage mask.  On failure the
 * I/O is completed with the appropriate sense and NULL is returned; on
 * success the command entry is returned.
 */
const struct ctl_cmd_entry *
ctl_validate_command(struct ctl_scsiio *ctsio)
{
	const struct ctl_cmd_entry *entry;
	int i, sa;
	uint8_t diff;

	entry = ctl_get_cmd_entry(ctsio, &sa);
	if (entry->execute == NULL) {
		if (sa)
			ctl_set_invalid_field(ctsio,
			    /*sks_valid*/ 1,
			    /*command*/ 1,
			    /*field*/ 1,
			    /*bit_valid*/ 1,
			    /*bit*/ 4);
		else
			ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (NULL);
	}
	KASSERT(entry->length > 0,
	    ("Not defined length for command 0x%02x/0x%02x",
	     ctsio->cdb[0], ctsio->cdb[1]));
	for (i = 1; i < entry->length; i++) {
		/* Any bit not permitted by the usage mask is invalid. */
		diff = ctsio->cdb[i] & ~entry->usage[i - 1];
		if (diff == 0)
			continue;
		ctl_set_invalid_field(ctsio,
		    /*sks_valid*/ 1,
		    /*command*/ 1,
		    /*field*/ i,
		    /*bit_valid*/ 1,
		    /*bit*/ fls(diff) - 1);
		ctl_done((union ctl_io *)ctsio);
		return (NULL);
	}
	return (entry);
}

/*
 * Return non-zero if the command described by entry is applicable to the
 * given LUN type (processor or direct-access); 0 otherwise.
 */
static int
ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry)
{

	switch (lun_type) {
	case T_PROCESSOR:
		if (((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) &&
		    ((entry->flags &
		    CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0))
			return (0);
		break;
	case T_DIRECT:
		if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0) &&
		    ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0))
			return (0);
		break;
	default:
		/* Unknown LUN types accept nothing. */
		return (0);
	}
	return (1);
}

/*
 * Second-stage SCSI I/O execution: dispatch an already-prechecked I/O to
 * its command handler, unless it has been aborted in the meantime.
 * Returns the handler's return value, or CTL_RETVAL_COMPLETE for aborted
 * I/Os.
 */
static int
ctl_scsiio(struct ctl_scsiio *ctsio)
{
	int retval;
	const struct ctl_cmd_entry *entry;

	retval = CTL_RETVAL_COMPLETE;

	CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0]));

	entry = ctl_get_cmd_entry(ctsio, NULL);

	/*
	 * If this I/O has been aborted, just send it straight to
	 * ctl_done() without executing it.
	 */
	if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) {
		ctl_done((union ctl_io *)ctsio);
		goto bailout;
	}

	/*
	 * All the checks should have been handled by ctl_scsiio_precheck().
	 * We should be clear now to just execute the I/O.
	 */
	retval = entry->execute(ctsio);

bailout:
	return (retval);
}

/*
 * Since we only implement one target right now, a bus reset simply resets
 * our single target.
 */
static int
ctl_bus_reset(struct ctl_softc *softc, union ctl_io *io)
{
	return(ctl_target_reset(softc, io, CTL_UA_BUS_RESET));
}

/*
 * Reset every LUN visible through the I/O's target port (or all LUNs if
 * the port is unknown), establishing the given unit attention.  The reset
 * is also forwarded to the HA peer unless this request itself came from
 * the peer.  Returns the sum of the per-LUN reset results (0 on success).
 */
static int
ctl_target_reset(struct ctl_softc *softc, union ctl_io *io,
    ctl_ua_type ua_type)
{
	struct ctl_port *port;
	struct ctl_lun *lun;
	int retval;

	if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
		union ctl_ha_msg msg_info;

		msg_info.hdr.nexus = io->io_hdr.nexus;
		if (ua_type==CTL_UA_TARG_RESET)
			msg_info.task.task_action = CTL_TASK_TARGET_RESET;
		else
			msg_info.task.task_action = CTL_TASK_BUS_RESET;
		msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
		msg_info.hdr.original_sc = NULL;
		msg_info.hdr.serializing_sc = NULL;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.task), M_WAITOK);
	}
	retval = 0;

	mtx_lock(&softc->ctl_lock);
	port = softc->ctl_ports[io->io_hdr.nexus.targ_port];
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		/* Skip LUNs not mapped to this port. */
		if (port != NULL &&
		    ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
			continue;
		retval += ctl_do_lun_reset(lun, io, ua_type);
	}
	mtx_unlock(&softc->ctl_lock);
	io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
	return (retval);
}

/*
 * The LUN should always be set.  The I/O is optional, and is used to
 * distinguish between I/Os sent by this initiator, and by other
 * initiators.  We set unit attention for initiators other than this one.
 * SAM-3 is vague on this point.  It does say that a unit attention should
 * be established for other initiators when a LUN is reset (see section
 * 5.7.3), but it doesn't specifically say that the unit attention should
 * be established for this particular initiator when a LUN is reset.
Here
 * is the relevant text, from SAM-3 rev 8:
 *
 * 5.7.2 When a SCSI initiator port aborts its own tasks
 *
 * When a SCSI initiator port causes its own task(s) to be aborted, no
 * notification that the task(s) have been aborted shall be returned to
 * the SCSI initiator port other than the completion response for the
 * command or task management function action that caused the task(s) to
 * be aborted and notification(s) associated with related effects of the
 * action (e.g., a reset unit attention condition).
 *
 * XXX KDM for now, we're setting unit attention for all initiators.
 */
static int
ctl_do_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
{
	union ctl_io *xio;
#if 0
	uint32_t initidx;
#endif
#ifdef CTL_WITH_CA
	int i;
#endif

	mtx_lock(&lun->lun_lock);
	/*
	 * Run through the OOA queue and abort each I/O.
	 */
	for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
	     xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
		xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS;
	}

	/*
	 * This version sets unit attention for every initiator
	 * (initidx -1 means "all" to ctl_est_ua_all).
	 */
#if 0
	initidx = ctl_get_initindex(&io->io_hdr.nexus);
	ctl_est_ua_all(lun, initidx, ua_type);
#else
	ctl_est_ua_all(lun, -1, ua_type);
#endif

	/*
	 * A reset (any kind, really) clears reservations established with
	 * RESERVE/RELEASE.  It does not clear reservations established
	 * with PERSISTENT RESERVE OUT, but we don't support that at the
	 * moment anyway.  See SPC-2, section 5.6.  SPC-3 doesn't address
	 * reservations made with the RESERVE/RELEASE commands, because
	 * those commands are obsolete in SPC-3.
	 */
	lun->flags &= ~CTL_LUN_RESERVED;

#ifdef CTL_WITH_CA
	for (i = 0; i < CTL_MAX_INITIATORS; i++)
		ctl_clear_mask(lun->have_ca, i);
#endif
	mtx_unlock(&lun->lun_lock);

	return (0);
}

/*
 * Handle a LUN RESET task management function: look up the LUN, reset it,
 * and forward the reset to the HA peer unless the request came from the
 * peer.  Returns 1 (and sets LUN_DOES_NOT_EXIST status) for unknown LUNs.
 */
static int
ctl_lun_reset(struct ctl_softc *softc, union ctl_io *io)
{
	struct ctl_lun *lun;
	uint32_t targ_lun;
	int retval;

	targ_lun = io->io_hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if ((targ_lun >= CTL_MAX_LUNS) ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
		return (1);
	}
	retval = ctl_do_lun_reset(lun, io, CTL_UA_LUN_RESET);
	mtx_unlock(&softc->ctl_lock);
	io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;

	if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) {
		union ctl_ha_msg msg_info;

		msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
		msg_info.hdr.nexus = io->io_hdr.nexus;
		msg_info.task.task_action = CTL_TASK_LUN_RESET;
		msg_info.hdr.original_sc = NULL;
		msg_info.hdr.serializing_sc = NULL;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.task), M_WAITOK);
	}
	return (retval);
}

/*
 * Abort every I/O on this LUN that matches the given target port and
 * initiator ID (UINT32_MAX acts as a wildcard for either).  I/Os from a
 * *different* nexus than the requester additionally get ABORT_STATUS so
 * they complete with a status rather than silently.  Unless other_sc is
 * set (request already came from the peer) and this side is not primary,
 * each abort is also forwarded to the HA peer.  Caller holds lun_lock.
 */
static void
ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id,
    int other_sc)
{
	union ctl_io *xio;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	/*
	 * Run through the OOA queue and attempt to find the given I/O.
	 * The target port, initiator ID, tag type and tag number have to
	 * match the values that we got from the initiator.  If we have an
	 * untagged command to abort, simply abort the first untagged command
	 * we come to.  We only allow one untagged command at a time of course.
	 */
	for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
	     xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {

		if ((targ_port == UINT32_MAX ||
		     targ_port == xio->io_hdr.nexus.targ_port) &&
		    (init_id == UINT32_MAX ||
		     init_id == xio->io_hdr.nexus.initid)) {
			if (targ_port != xio->io_hdr.nexus.targ_port ||
			    init_id != xio->io_hdr.nexus.initid)
				xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS;
			xio->io_hdr.flags |= CTL_FLAG_ABORT;
			if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) {
				union ctl_ha_msg msg_info;

				msg_info.hdr.nexus = xio->io_hdr.nexus;
				msg_info.task.task_action = CTL_TASK_ABORT_TASK;
				msg_info.task.tag_num = xio->scsiio.tag_num;
				msg_info.task.tag_type = xio->scsiio.tag_type;
				msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
				msg_info.hdr.original_sc = NULL;
				msg_info.hdr.serializing_sc = NULL;
				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
				    sizeof(msg_info.task), M_NOWAIT);
			}
		}
	}
}

/*
 * Handle ABORT TASK SET / CLEAR TASK SET: abort either this initiator's
 * tasks or (for CLEAR TASK SET) all tasks on the LUN.  Returns 1 for an
 * unknown LUN, 0 otherwise.
 */
static int
ctl_abort_task_set(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_lun *lun;
	uint32_t targ_lun;

	/*
	 * Look up the LUN.
	 */
	targ_lun = io->io_hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if ((targ_lun >= CTL_MAX_LUNS) ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
		return (1);
	}

	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) {
		ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
		    io->io_hdr.nexus.initid,
		    (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
	} else { /* CTL_TASK_CLEAR_TASK_SET */
		ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX,
		    (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
	}
	mtx_unlock(&lun->lun_lock);
	io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
	return (0);
}

/*
 * Handle I_T NEXUS RESET: on every LUN, abort this nexus' tasks, drop its
 * RESERVE/RELEASE reservation if it holds one, and establish an
 * I_T NEXUS LOSS unit attention for it.  Forwarded to the HA peer unless
 * the request came from the peer.  Always returns 0.
 */
static int
ctl_i_t_nexus_reset(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_lun *lun;
	uint32_t initidx;

	if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
		union ctl_ha_msg msg_info;

		msg_info.hdr.nexus = io->io_hdr.nexus;
		msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET;
		msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
		msg_info.hdr.original_sc = NULL;
		msg_info.hdr.serializing_sc = NULL;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.task), M_WAITOK);
	}

	initidx = ctl_get_initindex(&io->io_hdr.nexus);
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		mtx_lock(&lun->lun_lock);
		ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
		    io->io_hdr.nexus.initid, 1);
#ifdef CTL_WITH_CA
		ctl_clear_mask(lun->have_ca, initidx);
#endif
		if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx))
			lun->flags &= ~CTL_LUN_RESERVED;
		ctl_est_ua(lun, initidx,
		    CTL_UA_I_T_NEXUS_LOSS);
		mtx_unlock(&lun->lun_lock);
	}
	mtx_unlock(&softc->ctl_lock);
	io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
	return (0);
}

/*
 * Handle ABORT TASK: find the matching I/O on the LUN's OOA queue (by
 * nexus and tag number only -- see the XXX note below about tag types)
 * and mark it aborted, forwarding the abort to the HA peer when this
 * side is not primary.  Not finding the task is not treated as an error;
 * returns 1 only for an unknown LUN.
 */
static int
ctl_abort_task(union ctl_io *io)
{
	union ctl_io *xio;
	struct ctl_lun *lun;
	struct ctl_softc *softc;
#if 0
	struct sbuf sb;
	char printbuf[128];
#endif
	int found;
	uint32_t targ_lun;

	softc = control_softc;
	found = 0;

	/*
	 * Look up the LUN.
	 */
	targ_lun = io->io_hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if ((targ_lun >= CTL_MAX_LUNS) ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
		return (1);
	}

#if 0
	printf("ctl_abort_task: called for lun %lld, tag %d type %d\n",
	       lun->lun, io->taskio.tag_num, io->taskio.tag_type);
#endif

	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	/*
	 * Run through the OOA queue and attempt to find the given I/O.
	 * The target port, initiator ID, tag type and tag number have to
	 * match the values that we got from the initiator.  If we have an
	 * untagged command to abort, simply abort the first untagged command
	 * we come to.  We only allow one untagged command at a time of course.
	 */
	for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
	     xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
#if 0
		sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN);

		sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ",
			    lun->lun, xio->scsiio.tag_num,
			    xio->scsiio.tag_type,
			    (xio->io_hdr.blocked_links.tqe_prev
			     == NULL) ? "" : " BLOCKED",
			    (xio->io_hdr.flags &
			    CTL_FLAG_DMA_INPROG) ? " DMA" : "",
			    (xio->io_hdr.flags &
			    CTL_FLAG_ABORT) ? " ABORT" : "",
			    (xio->io_hdr.flags &
			    CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : ""));
		ctl_scsi_command_string(&xio->scsiio, NULL, &sb);
		sbuf_finish(&sb);
		printf("%s\n", sbuf_data(&sb));
#endif

		/* Skip I/Os from other nexuses or already aborted ones. */
		if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port)
		 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid)
		 || (xio->io_hdr.flags & CTL_FLAG_ABORT))
			continue;

		/*
		 * If the abort says that the task is untagged, the
		 * task in the queue must be untagged.  Otherwise,
		 * we just check to see whether the tag numbers
		 * match.  This is because the QLogic firmware
		 * doesn't pass back the tag type in an abort
		 * request.
		 */
#if 0
		if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED)
		  && (io->taskio.tag_type == CTL_TAG_UNTAGGED))
		 || (xio->scsiio.tag_num == io->taskio.tag_num))
#endif
		/*
		 * XXX KDM we've got problems with FC, because it
		 * doesn't send down a tag type with aborts.  So we
		 * can only really go by the tag number...
		 * This may cause problems with parallel SCSI.
		 * Need to figure that out!!
		 */
		if (xio->scsiio.tag_num == io->taskio.tag_num) {
			xio->io_hdr.flags |= CTL_FLAG_ABORT;
			found = 1;
			if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 &&
			    !(lun->flags & CTL_LUN_PRIMARY_SC)) {
				union ctl_ha_msg msg_info;

				msg_info.hdr.nexus = io->io_hdr.nexus;
				msg_info.task.task_action = CTL_TASK_ABORT_TASK;
				msg_info.task.tag_num = io->taskio.tag_num;
				msg_info.task.tag_type = io->taskio.tag_type;
				msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
				msg_info.hdr.original_sc = NULL;
				msg_info.hdr.serializing_sc = NULL;
#if 0
				printf("Sent Abort to other side\n");
#endif
				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
				    sizeof(msg_info.task), M_NOWAIT);
			}
#if 0
			printf("ctl_abort_task: found I/O to abort\n");
#endif
		}
	}
	mtx_unlock(&lun->lun_lock);

	if (found == 0) {
		/*
		 * This isn't really an error.  It's entirely possible for
		 * the abort and command completion to cross on the wire.
		 * This is more of an informative/diagnostic error.
		 */
#if 0
		printf("ctl_abort_task: ABORT sent for nonexistent I/O: "
		       "%u:%u:%u tag %d type %d\n",
		       io->io_hdr.nexus.initid,
		       io->io_hdr.nexus.targ_port,
		       io->io_hdr.nexus.targ_lun, io->taskio.tag_num,
		       io->taskio.tag_type);
#endif
	}
	io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
	return (0);
}

/*
 * Handle QUERY TASK / QUERY TASK SET: report FUNCTION SUCCEEDED if any
 * matching, not-yet-aborted task from this nexus is present on the LUN's
 * OOA queue (any task for task_set != 0, or the one matching tag_num
 * otherwise); FUNCTION COMPLETE if none.  Returns 1 for an unknown LUN.
 */
static int
ctl_query_task(union ctl_io *io, int task_set)
{
	union ctl_io *xio;
	struct ctl_lun *lun;
	struct ctl_softc *softc;
	int found = 0;
	uint32_t targ_lun;

	softc = control_softc;
	targ_lun = io->io_hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if ((targ_lun >= CTL_MAX_LUNS) ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
		return (1);
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
	     xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {

		if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port)
		 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid)
		 || (xio->io_hdr.flags & CTL_FLAG_ABORT))
			continue;

		if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) {
			found = 1;
			break;
		}
	}
	mtx_unlock(&lun->lun_lock);
	if (found)
		io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED;
	else
		io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
	return (0);
}

/*
 * Handle QUERY ASYNCHRONOUS EVENT: report FUNCTION SUCCEEDED if a unit
 * attention is pending for this initiator on the LUN, FUNCTION COMPLETE
 * otherwise.  Returns 1 for an unknown LUN.
 */
static int
ctl_query_async_event(union ctl_io *io)
{
	struct ctl_lun *lun;
	struct ctl_softc *softc;
	ctl_ua_type ua;
	uint32_t targ_lun, initidx;

	softc = control_softc;
	targ_lun = io->io_hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if ((targ_lun >= CTL_MAX_LUNS) ||
11891 (lun = softc->ctl_luns[targ_lun]) == NULL) { 11892 mtx_unlock(&softc->ctl_lock); 11893 io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; 11894 return (1); 11895 } 11896 mtx_lock(&lun->lun_lock); 11897 mtx_unlock(&softc->ctl_lock); 11898 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11899 ua = ctl_build_qae(lun, initidx, io->taskio.task_resp); 11900 mtx_unlock(&lun->lun_lock); 11901 if (ua != CTL_UA_NONE) 11902 io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; 11903 else 11904 io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; 11905 return (0); 11906} 11907 11908static void 11909ctl_run_task(union ctl_io *io) 11910{ 11911 struct ctl_softc *softc = control_softc; 11912 int retval = 1; 11913 11914 CTL_DEBUG_PRINT(("ctl_run_task\n")); 11915 KASSERT(io->io_hdr.io_type == CTL_IO_TASK, 11916 ("ctl_run_task: Unextected io_type %d\n", io->io_hdr.io_type)); 11917 io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED; 11918 bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp)); 11919 switch (io->taskio.task_action) { 11920 case CTL_TASK_ABORT_TASK: 11921 retval = ctl_abort_task(io); 11922 break; 11923 case CTL_TASK_ABORT_TASK_SET: 11924 case CTL_TASK_CLEAR_TASK_SET: 11925 retval = ctl_abort_task_set(io); 11926 break; 11927 case CTL_TASK_CLEAR_ACA: 11928 break; 11929 case CTL_TASK_I_T_NEXUS_RESET: 11930 retval = ctl_i_t_nexus_reset(io); 11931 break; 11932 case CTL_TASK_LUN_RESET: 11933 retval = ctl_lun_reset(softc, io); 11934 break; 11935 case CTL_TASK_TARGET_RESET: 11936 retval = ctl_target_reset(softc, io, CTL_UA_TARG_RESET); 11937 break; 11938 case CTL_TASK_BUS_RESET: 11939 retval = ctl_bus_reset(softc, io); 11940 break; 11941 case CTL_TASK_PORT_LOGIN: 11942 break; 11943 case CTL_TASK_PORT_LOGOUT: 11944 break; 11945 case CTL_TASK_QUERY_TASK: 11946 retval = ctl_query_task(io, 0); 11947 break; 11948 case CTL_TASK_QUERY_TASK_SET: 11949 retval = ctl_query_task(io, 1); 11950 break; 11951 case CTL_TASK_QUERY_ASYNC_EVENT: 11952 retval = 
ctl_query_async_event(io); 11953 break; 11954 default: 11955 printf("%s: got unknown task management event %d\n", 11956 __func__, io->taskio.task_action); 11957 break; 11958 } 11959 if (retval == 0) 11960 io->io_hdr.status = CTL_SUCCESS; 11961 else 11962 io->io_hdr.status = CTL_ERROR; 11963 ctl_done(io); 11964} 11965 11966/* 11967 * For HA operation. Handle commands that come in from the other 11968 * controller. 11969 */ 11970static void 11971ctl_handle_isc(union ctl_io *io) 11972{ 11973 int free_io; 11974 struct ctl_lun *lun; 11975 struct ctl_softc *softc; 11976 uint32_t targ_lun; 11977 11978 softc = control_softc; 11979 11980 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11981 lun = softc->ctl_luns[targ_lun]; 11982 11983 switch (io->io_hdr.msg_type) { 11984 case CTL_MSG_SERIALIZE: 11985 free_io = ctl_serialize_other_sc_cmd(&io->scsiio); 11986 break; 11987 case CTL_MSG_R2R: { 11988 const struct ctl_cmd_entry *entry; 11989 11990 /* 11991 * This is only used in SER_ONLY mode. 11992 */ 11993 free_io = 0; 11994 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 11995 mtx_lock(&lun->lun_lock); 11996 if (ctl_scsiio_lun_check(lun, 11997 entry, (struct ctl_scsiio *)io) != 0) { 11998 mtx_unlock(&lun->lun_lock); 11999 ctl_done(io); 12000 break; 12001 } 12002 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 12003 mtx_unlock(&lun->lun_lock); 12004 ctl_enqueue_rtr(io); 12005 break; 12006 } 12007 case CTL_MSG_FINISH_IO: 12008 if (softc->ha_mode == CTL_HA_MODE_XFER) { 12009 free_io = 0; 12010 ctl_done(io); 12011 } else { 12012 free_io = 1; 12013 mtx_lock(&lun->lun_lock); 12014 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, 12015 ooa_links); 12016 ctl_check_blocked(lun); 12017 mtx_unlock(&lun->lun_lock); 12018 } 12019 break; 12020 case CTL_MSG_PERS_ACTION: 12021 ctl_hndl_per_res_out_on_other_sc( 12022 (union ctl_ha_msg *)&io->presio.pr_msg); 12023 free_io = 1; 12024 break; 12025 case CTL_MSG_BAD_JUJU: 12026 free_io = 0; 12027 ctl_done(io); 12028 break; 12029 case CTL_MSG_DATAMOVE: 12030 /* 
Only used in XFER mode */ 12031 free_io = 0; 12032 ctl_datamove_remote(io); 12033 break; 12034 case CTL_MSG_DATAMOVE_DONE: 12035 /* Only used in XFER mode */ 12036 free_io = 0; 12037 io->scsiio.be_move_done(io); 12038 break; 12039 case CTL_MSG_FAILOVER: 12040 mtx_lock(&lun->lun_lock); 12041 ctl_failover_lun(lun); 12042 mtx_unlock(&lun->lun_lock); 12043 free_io = 1; 12044 break; 12045 default: 12046 free_io = 1; 12047 printf("%s: Invalid message type %d\n", 12048 __func__, io->io_hdr.msg_type); 12049 break; 12050 } 12051 if (free_io) 12052 ctl_free_io(io); 12053 12054} 12055 12056 12057/* 12058 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 12059 * there is no match. 12060 */ 12061static ctl_lun_error_pattern 12062ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) 12063{ 12064 const struct ctl_cmd_entry *entry; 12065 ctl_lun_error_pattern filtered_pattern, pattern; 12066 12067 pattern = desc->error_pattern; 12068 12069 /* 12070 * XXX KDM we need more data passed into this function to match a 12071 * custom pattern, and we actually need to implement custom pattern 12072 * matching. 12073 */ 12074 if (pattern & CTL_LUN_PAT_CMD) 12075 return (CTL_LUN_PAT_CMD); 12076 12077 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) 12078 return (CTL_LUN_PAT_ANY); 12079 12080 entry = ctl_get_cmd_entry(ctsio, NULL); 12081 12082 filtered_pattern = entry->pattern & pattern; 12083 12084 /* 12085 * If the user requested specific flags in the pattern (e.g. 12086 * CTL_LUN_PAT_RANGE), make sure the command supports all of those 12087 * flags. 12088 * 12089 * If the user did not specify any flags, it doesn't matter whether 12090 * or not the command supports the flags. 12091 */ 12092 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != 12093 (pattern & ~CTL_LUN_PAT_MASK)) 12094 return (CTL_LUN_PAT_NONE); 12095 12096 /* 12097 * If the user asked for a range check, see if the requested LBA 12098 * range overlaps with this command's LBA range. 
12099 */ 12100 if (filtered_pattern & CTL_LUN_PAT_RANGE) { 12101 uint64_t lba1; 12102 uint64_t len1; 12103 ctl_action action; 12104 int retval; 12105 12106 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); 12107 if (retval != 0) 12108 return (CTL_LUN_PAT_NONE); 12109 12110 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, 12111 desc->lba_range.len, FALSE); 12112 /* 12113 * A "pass" means that the LBA ranges don't overlap, so 12114 * this doesn't match the user's range criteria. 12115 */ 12116 if (action == CTL_ACTION_PASS) 12117 return (CTL_LUN_PAT_NONE); 12118 } 12119 12120 return (filtered_pattern); 12121} 12122 12123static void 12124ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) 12125{ 12126 struct ctl_error_desc *desc, *desc2; 12127 12128 mtx_assert(&lun->lun_lock, MA_OWNED); 12129 12130 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 12131 ctl_lun_error_pattern pattern; 12132 /* 12133 * Check to see whether this particular command matches 12134 * the pattern in the descriptor. 12135 */ 12136 pattern = ctl_cmd_pattern_match(&io->scsiio, desc); 12137 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) 12138 continue; 12139 12140 switch (desc->lun_error & CTL_LUN_INJ_TYPE) { 12141 case CTL_LUN_INJ_ABORTED: 12142 ctl_set_aborted(&io->scsiio); 12143 break; 12144 case CTL_LUN_INJ_MEDIUM_ERR: 12145 ctl_set_medium_error(&io->scsiio, 12146 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) != 12147 CTL_FLAG_DATA_OUT); 12148 break; 12149 case CTL_LUN_INJ_UA: 12150 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET 12151 * OCCURRED */ 12152 ctl_set_ua(&io->scsiio, 0x29, 0x00); 12153 break; 12154 case CTL_LUN_INJ_CUSTOM: 12155 /* 12156 * We're assuming the user knows what he is doing. 12157 * Just copy the sense information without doing 12158 * checks. 
12159 */ 12160 bcopy(&desc->custom_sense, &io->scsiio.sense_data, 12161 MIN(sizeof(desc->custom_sense), 12162 sizeof(io->scsiio.sense_data))); 12163 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; 12164 io->scsiio.sense_len = SSD_FULL_SIZE; 12165 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 12166 break; 12167 case CTL_LUN_INJ_NONE: 12168 default: 12169 /* 12170 * If this is an error injection type we don't know 12171 * about, clear the continuous flag (if it is set) 12172 * so it will get deleted below. 12173 */ 12174 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 12175 break; 12176 } 12177 /* 12178 * By default, each error injection action is a one-shot 12179 */ 12180 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 12181 continue; 12182 12183 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 12184 12185 free(desc, M_CTL); 12186 } 12187} 12188 12189#ifdef CTL_IO_DELAY 12190static void 12191ctl_datamove_timer_wakeup(void *arg) 12192{ 12193 union ctl_io *io; 12194 12195 io = (union ctl_io *)arg; 12196 12197 ctl_datamove(io); 12198} 12199#endif /* CTL_IO_DELAY */ 12200 12201void 12202ctl_datamove(union ctl_io *io) 12203{ 12204 struct ctl_lun *lun; 12205 void (*fe_datamove)(union ctl_io *io); 12206 12207 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED); 12208 12209 CTL_DEBUG_PRINT(("ctl_datamove\n")); 12210 12211 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12212#ifdef CTL_TIME_IO 12213 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12214 char str[256]; 12215 char path_str[64]; 12216 struct sbuf sb; 12217 12218 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12219 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12220 12221 sbuf_cat(&sb, path_str); 12222 switch (io->io_hdr.io_type) { 12223 case CTL_IO_SCSI: 12224 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12225 sbuf_printf(&sb, "\n"); 12226 sbuf_cat(&sb, path_str); 12227 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12228 io->scsiio.tag_num, 
io->scsiio.tag_type); 12229 break; 12230 case CTL_IO_TASK: 12231 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12232 "Tag Type: %d\n", io->taskio.task_action, 12233 io->taskio.tag_num, io->taskio.tag_type); 12234 break; 12235 default: 12236 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12237 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12238 break; 12239 } 12240 sbuf_cat(&sb, path_str); 12241 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", 12242 (intmax_t)time_uptime - io->io_hdr.start_time); 12243 sbuf_finish(&sb); 12244 printf("%s", sbuf_data(&sb)); 12245 } 12246#endif /* CTL_TIME_IO */ 12247 12248#ifdef CTL_IO_DELAY 12249 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 12250 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12251 } else { 12252 if ((lun != NULL) 12253 && (lun->delay_info.datamove_delay > 0)) { 12254 12255 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); 12256 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 12257 callout_reset(&io->io_hdr.delay_callout, 12258 lun->delay_info.datamove_delay * hz, 12259 ctl_datamove_timer_wakeup, io); 12260 if (lun->delay_info.datamove_type == 12261 CTL_DELAY_TYPE_ONESHOT) 12262 lun->delay_info.datamove_delay = 0; 12263 return; 12264 } 12265 } 12266#endif 12267 12268 /* 12269 * This command has been aborted. Set the port status, so we fail 12270 * the data move. 12271 */ 12272 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12273 printf("ctl_datamove: tag 0x%04x on (%u:%u:%u) aborted\n", 12274 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12275 io->io_hdr.nexus.targ_port, 12276 io->io_hdr.nexus.targ_lun); 12277 io->io_hdr.port_status = 31337; 12278 /* 12279 * Note that the backend, in this case, will get the 12280 * callback in its context. In other cases it may get 12281 * called in the frontend's interrupt thread context. 12282 */ 12283 io->scsiio.be_move_done(io); 12284 return; 12285 } 12286 12287 /* Don't confuse frontend with zero length data move. 
*/ 12288 if (io->scsiio.kern_data_len == 0) { 12289 io->scsiio.be_move_done(io); 12290 return; 12291 } 12292 12293 /* 12294 * If we're in XFER mode and this I/O is from the other shelf 12295 * controller, we need to send the DMA to the other side to 12296 * actually transfer the data to/from the host. In serialize only 12297 * mode the transfer happens below CTL and ctl_datamove() is only 12298 * called on the machine that originally received the I/O. 12299 */ 12300 if ((control_softc->ha_mode == CTL_HA_MODE_XFER) 12301 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 12302 union ctl_ha_msg msg; 12303 uint32_t sg_entries_sent; 12304 int do_sg_copy; 12305 int i; 12306 12307 memset(&msg, 0, sizeof(msg)); 12308 msg.hdr.msg_type = CTL_MSG_DATAMOVE; 12309 msg.hdr.original_sc = io->io_hdr.original_sc; 12310 msg.hdr.serializing_sc = io; 12311 msg.hdr.nexus = io->io_hdr.nexus; 12312 msg.hdr.status = io->io_hdr.status; 12313 msg.dt.flags = io->io_hdr.flags; 12314 /* 12315 * We convert everything into a S/G list here. We can't 12316 * pass by reference, only by value between controllers. 12317 * So we can't pass a pointer to the S/G list, only as many 12318 * S/G entries as we can fit in here. If it's possible for 12319 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, 12320 * then we need to break this up into multiple transfers. 12321 */ 12322 if (io->scsiio.kern_sg_entries == 0) { 12323 msg.dt.kern_sg_entries = 1; 12324#if 0 12325 /* 12326 * Convert to a physical address if this is a 12327 * virtual address. 12328 */ 12329 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 12330 msg.dt.sg_list[0].addr = 12331 io->scsiio.kern_data_ptr; 12332 } else { 12333 /* 12334 * XXX KDM use busdma here! 
12335 */ 12336 msg.dt.sg_list[0].addr = (void *) 12337 vtophys(io->scsiio.kern_data_ptr); 12338 } 12339#else 12340 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 12341 ("HA does not support BUS_ADDR")); 12342 msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; 12343#endif 12344 12345 msg.dt.sg_list[0].len = io->scsiio.kern_data_len; 12346 do_sg_copy = 0; 12347 } else { 12348 msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; 12349 do_sg_copy = 1; 12350 } 12351 12352 msg.dt.kern_data_len = io->scsiio.kern_data_len; 12353 msg.dt.kern_total_len = io->scsiio.kern_total_len; 12354 msg.dt.kern_data_resid = io->scsiio.kern_data_resid; 12355 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; 12356 msg.dt.sg_sequence = 0; 12357 12358 /* 12359 * Loop until we've sent all of the S/G entries. On the 12360 * other end, we'll recompose these S/G entries into one 12361 * contiguous list before passing it to the 12362 */ 12363 for (sg_entries_sent = 0; sg_entries_sent < 12364 msg.dt.kern_sg_entries; msg.dt.sg_sequence++) { 12365 msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list)/ 12366 sizeof(msg.dt.sg_list[0])), 12367 msg.dt.kern_sg_entries - sg_entries_sent); 12368 12369 if (do_sg_copy != 0) { 12370 struct ctl_sg_entry *sgl; 12371 int j; 12372 12373 sgl = (struct ctl_sg_entry *) 12374 io->scsiio.kern_data_ptr; 12375 /* 12376 * If this is in cached memory, flush the cache 12377 * before we send the DMA request to the other 12378 * controller. We want to do this in either 12379 * the * read or the write case. The read 12380 * case is straightforward. In the write 12381 * case, we want to make sure nothing is 12382 * in the local cache that could overwrite 12383 * the DMAed data. 12384 */ 12385 12386 for (i = sg_entries_sent, j = 0; 12387 i < msg.dt.cur_sg_entries; i++, j++) { 12388#if 0 12389 if ((io->io_hdr.flags & 12390 CTL_FLAG_BUS_ADDR) == 0) { 12391 /* 12392 * XXX KDM use busdma. 
12393 */ 12394 msg.dt.sg_list[j].addr =(void *) 12395 vtophys(sgl[i].addr); 12396 } else { 12397 msg.dt.sg_list[j].addr = 12398 sgl[i].addr; 12399 } 12400#else 12401 KASSERT((io->io_hdr.flags & 12402 CTL_FLAG_BUS_ADDR) == 0, 12403 ("HA does not support BUS_ADDR")); 12404 msg.dt.sg_list[j].addr = sgl[i].addr; 12405#endif 12406 msg.dt.sg_list[j].len = sgl[i].len; 12407 } 12408 } 12409 12410 sg_entries_sent += msg.dt.cur_sg_entries; 12411 if (sg_entries_sent >= msg.dt.kern_sg_entries) 12412 msg.dt.sg_last = 1; 12413 else 12414 msg.dt.sg_last = 0; 12415 12416 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12417 sizeof(msg.dt) - sizeof(msg.dt.sg_list) + 12418 sizeof(struct ctl_sg_entry)*msg.dt.cur_sg_entries, 12419 M_WAITOK) > CTL_HA_STATUS_SUCCESS) { 12420 io->io_hdr.port_status = 31341; 12421 io->scsiio.be_move_done(io); 12422 return; 12423 } 12424 12425 msg.dt.sent_sg_entries = sg_entries_sent; 12426 } 12427 12428 /* 12429 * Officially handover the request from us to peer. 12430 * If failover has just happened, then we must return error. 12431 * If failover happen just after, then it is not our problem. 12432 */ 12433 if (lun) 12434 mtx_lock(&lun->lun_lock); 12435 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12436 if (lun) 12437 mtx_unlock(&lun->lun_lock); 12438 io->io_hdr.port_status = 31342; 12439 io->scsiio.be_move_done(io); 12440 return; 12441 } 12442 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12443 if (lun) 12444 mtx_unlock(&lun->lun_lock); 12445 } else { 12446 12447 /* 12448 * Lookup the fe_datamove() function for this particular 12449 * front end. 
12450 */ 12451 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove; 12452 12453 fe_datamove(io); 12454 } 12455} 12456 12457static void 12458ctl_send_datamove_done(union ctl_io *io, int have_lock) 12459{ 12460 union ctl_ha_msg msg; 12461 12462 memset(&msg, 0, sizeof(msg)); 12463 12464 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 12465 msg.hdr.original_sc = io; 12466 msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 12467 msg.hdr.nexus = io->io_hdr.nexus; 12468 msg.hdr.status = io->io_hdr.status; 12469 msg.scsi.tag_num = io->scsiio.tag_num; 12470 msg.scsi.tag_type = io->scsiio.tag_type; 12471 msg.scsi.scsi_status = io->scsiio.scsi_status; 12472 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 12473 io->scsiio.sense_len); 12474 msg.scsi.sense_len = io->scsiio.sense_len; 12475 msg.scsi.sense_residual = io->scsiio.sense_residual; 12476 msg.scsi.fetd_status = io->io_hdr.port_status; 12477 msg.scsi.residual = io->scsiio.residual; 12478 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12479 12480 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12481 ctl_failover_io(io, /*have_lock*/ have_lock); 12482 return; 12483 } 12484 12485 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12486 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 12487 msg.scsi.sense_len, M_WAITOK); 12488} 12489 12490/* 12491 * The DMA to the remote side is done, now we need to tell the other side 12492 * we're done so it can continue with its data movement. 
12493 */ 12494static void 12495ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) 12496{ 12497 union ctl_io *io; 12498 int i; 12499 12500 io = rq->context; 12501 12502 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12503 printf("%s: ISC DMA write failed with error %d", __func__, 12504 rq->ret); 12505 ctl_set_internal_failure(&io->scsiio, 12506 /*sks_valid*/ 1, 12507 /*retry_count*/ rq->ret); 12508 } 12509 12510 ctl_dt_req_free(rq); 12511 12512 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12513 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12514 free(io->io_hdr.remote_sglist, M_CTL); 12515 io->io_hdr.remote_sglist = NULL; 12516 io->io_hdr.local_sglist = NULL; 12517 12518 /* 12519 * The data is in local and remote memory, so now we need to send 12520 * status (good or back) back to the other side. 12521 */ 12522 ctl_send_datamove_done(io, /*have_lock*/ 0); 12523} 12524 12525/* 12526 * We've moved the data from the host/controller into local memory. Now we 12527 * need to push it over to the remote controller's memory. 12528 */ 12529static int 12530ctl_datamove_remote_dm_write_cb(union ctl_io *io) 12531{ 12532 int retval; 12533 12534 retval = 0; 12535 12536 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, 12537 ctl_datamove_remote_write_cb); 12538 12539 return (retval); 12540} 12541 12542static void 12543ctl_datamove_remote_write(union ctl_io *io) 12544{ 12545 int retval; 12546 void (*fe_datamove)(union ctl_io *io); 12547 12548 /* 12549 * - Get the data from the host/HBA into local memory. 12550 * - DMA memory from the local controller to the remote controller. 12551 * - Send status back to the remote controller. 
12552 */ 12553 12554 retval = ctl_datamove_remote_sgl_setup(io); 12555 if (retval != 0) 12556 return; 12557 12558 /* Switch the pointer over so the FETD knows what to do */ 12559 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12560 12561 /* 12562 * Use a custom move done callback, since we need to send completion 12563 * back to the other controller, not to the backend on this side. 12564 */ 12565 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 12566 12567 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove; 12568 12569 fe_datamove(io); 12570 12571 return; 12572 12573} 12574 12575static int 12576ctl_datamove_remote_dm_read_cb(union ctl_io *io) 12577{ 12578#if 0 12579 char str[256]; 12580 char path_str[64]; 12581 struct sbuf sb; 12582#endif 12583 int i; 12584 12585 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12586 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12587 free(io->io_hdr.remote_sglist, M_CTL); 12588 io->io_hdr.remote_sglist = NULL; 12589 io->io_hdr.local_sglist = NULL; 12590 12591#if 0 12592 scsi_path_string(io, path_str, sizeof(path_str)); 12593 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12594 sbuf_cat(&sb, path_str); 12595 scsi_command_string(&io->scsiio, NULL, &sb); 12596 sbuf_printf(&sb, "\n"); 12597 sbuf_cat(&sb, path_str); 12598 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12599 io->scsiio.tag_num, io->scsiio.tag_type); 12600 sbuf_cat(&sb, path_str); 12601 sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__, 12602 io->io_hdr.flags, io->io_hdr.status); 12603 sbuf_finish(&sb); 12604 printk("%s", sbuf_data(&sb)); 12605#endif 12606 12607 12608 /* 12609 * The read is done, now we need to send status (good or bad) back 12610 * to the other side. 
12611 */ 12612 ctl_send_datamove_done(io, /*have_lock*/ 0); 12613 12614 return (0); 12615} 12616 12617static void 12618ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 12619{ 12620 union ctl_io *io; 12621 void (*fe_datamove)(union ctl_io *io); 12622 12623 io = rq->context; 12624 12625 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12626 printf("%s: ISC DMA read failed with error %d\n", __func__, 12627 rq->ret); 12628 ctl_set_internal_failure(&io->scsiio, 12629 /*sks_valid*/ 1, 12630 /*retry_count*/ rq->ret); 12631 } 12632 12633 ctl_dt_req_free(rq); 12634 12635 /* Switch the pointer over so the FETD knows what to do */ 12636 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12637 12638 /* 12639 * Use a custom move done callback, since we need to send completion 12640 * back to the other controller, not to the backend on this side. 12641 */ 12642 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 12643 12644 /* XXX KDM add checks like the ones in ctl_datamove? */ 12645 12646 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove; 12647 12648 fe_datamove(io); 12649} 12650 12651static int 12652ctl_datamove_remote_sgl_setup(union ctl_io *io) 12653{ 12654 struct ctl_sg_entry *local_sglist, *remote_sglist; 12655 struct ctl_softc *softc; 12656 uint32_t len_to_go; 12657 int retval; 12658 int i; 12659 12660 retval = 0; 12661 softc = control_softc; 12662 local_sglist = io->io_hdr.local_sglist; 12663 remote_sglist = io->io_hdr.remote_sglist; 12664 len_to_go = io->scsiio.kern_data_len; 12665 12666 /* 12667 * The difficult thing here is that the size of the various 12668 * S/G segments may be different than the size from the 12669 * remote controller. That'll make it harder when DMAing 12670 * the data back to the other side. 
12671 */ 12672 for (i = 0; len_to_go > 0; i++) { 12673 local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT); 12674 local_sglist[i].addr = 12675 malloc(local_sglist[i].len, M_CTL, M_WAITOK); 12676 12677 len_to_go -= local_sglist[i].len; 12678 } 12679 /* 12680 * Reset the number of S/G entries accordingly. The original 12681 * number of S/G entries is available in rem_sg_entries. 12682 */ 12683 io->scsiio.kern_sg_entries = i; 12684 12685#if 0 12686 printf("%s: kern_sg_entries = %d\n", __func__, 12687 io->scsiio.kern_sg_entries); 12688 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12689 printf("%s: sg[%d] = %p, %d\n", __func__, i, 12690 local_sglist[i].addr, local_sglist[i].len); 12691#endif 12692 12693 return (retval); 12694} 12695 12696static int 12697ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 12698 ctl_ha_dt_cb callback) 12699{ 12700 struct ctl_ha_dt_req *rq; 12701 struct ctl_sg_entry *remote_sglist, *local_sglist; 12702 uint32_t local_used, remote_used, total_used; 12703 int i, j, isc_ret; 12704 12705 rq = ctl_dt_req_alloc(); 12706 12707 /* 12708 * If we failed to allocate the request, and if the DMA didn't fail 12709 * anyway, set busy status. This is just a resource allocation 12710 * failure. 12711 */ 12712 if ((rq == NULL) 12713 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12714 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) 12715 ctl_set_busy(&io->scsiio); 12716 12717 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 12718 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) { 12719 12720 if (rq != NULL) 12721 ctl_dt_req_free(rq); 12722 12723 /* 12724 * The data move failed. We need to return status back 12725 * to the other controller. No point in trying to DMA 12726 * data to the remote controller. 
12727 */ 12728 12729 ctl_send_datamove_done(io, /*have_lock*/ 0); 12730 12731 return (1); 12732 } 12733 12734 local_sglist = io->io_hdr.local_sglist; 12735 remote_sglist = io->io_hdr.remote_sglist; 12736 local_used = 0; 12737 remote_used = 0; 12738 total_used = 0; 12739 12740 /* 12741 * Pull/push the data over the wire from/to the other controller. 12742 * This takes into account the possibility that the local and 12743 * remote sglists may not be identical in terms of the size of 12744 * the elements and the number of elements. 12745 * 12746 * One fundamental assumption here is that the length allocated for 12747 * both the local and remote sglists is identical. Otherwise, we've 12748 * essentially got a coding error of some sort. 12749 */ 12750 isc_ret = CTL_HA_STATUS_SUCCESS; 12751 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { 12752 uint32_t cur_len; 12753 uint8_t *tmp_ptr; 12754 12755 rq->command = command; 12756 rq->context = io; 12757 12758 /* 12759 * Both pointers should be aligned. But it is possible 12760 * that the allocation length is not. They should both 12761 * also have enough slack left over at the end, though, 12762 * to round up to the next 8 byte boundary. 
12763 */ 12764 cur_len = MIN(local_sglist[i].len - local_used, 12765 remote_sglist[j].len - remote_used); 12766 rq->size = cur_len; 12767 12768 tmp_ptr = (uint8_t *)local_sglist[i].addr; 12769 tmp_ptr += local_used; 12770 12771#if 0 12772 /* Use physical addresses when talking to ISC hardware */ 12773 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 12774 /* XXX KDM use busdma */ 12775 rq->local = vtophys(tmp_ptr); 12776 } else 12777 rq->local = tmp_ptr; 12778#else 12779 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 12780 ("HA does not support BUS_ADDR")); 12781 rq->local = tmp_ptr; 12782#endif 12783 12784 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 12785 tmp_ptr += remote_used; 12786 rq->remote = tmp_ptr; 12787 12788 rq->callback = NULL; 12789 12790 local_used += cur_len; 12791 if (local_used >= local_sglist[i].len) { 12792 i++; 12793 local_used = 0; 12794 } 12795 12796 remote_used += cur_len; 12797 if (remote_used >= remote_sglist[j].len) { 12798 j++; 12799 remote_used = 0; 12800 } 12801 total_used += cur_len; 12802 12803 if (total_used >= io->scsiio.kern_data_len) 12804 rq->callback = callback; 12805 12806#if 0 12807 printf("%s: %s: local %#x remote %#x size %d\n", __func__, 12808 (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ", 12809 rq->local, rq->remote, rq->size); 12810#endif 12811 12812 isc_ret = ctl_dt_single(rq); 12813 if (isc_ret > CTL_HA_STATUS_SUCCESS) 12814 break; 12815 } 12816 if (isc_ret != CTL_HA_STATUS_WAIT) { 12817 rq->ret = isc_ret; 12818 callback(rq); 12819 } 12820 12821 return (0); 12822} 12823 12824static void 12825ctl_datamove_remote_read(union ctl_io *io) 12826{ 12827 int retval; 12828 int i; 12829 12830 /* 12831 * This will send an error to the other controller in the case of a 12832 * failure. 
12833 */ 12834 retval = ctl_datamove_remote_sgl_setup(io); 12835 if (retval != 0) 12836 return; 12837 12838 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 12839 ctl_datamove_remote_read_cb); 12840 if (retval != 0) { 12841 /* 12842 * Make sure we free memory if there was an error.. The 12843 * ctl_datamove_remote_xfer() function will send the 12844 * datamove done message, or call the callback with an 12845 * error if there is a problem. 12846 */ 12847 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12848 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12849 free(io->io_hdr.remote_sglist, M_CTL); 12850 io->io_hdr.remote_sglist = NULL; 12851 io->io_hdr.local_sglist = NULL; 12852 } 12853 12854 return; 12855} 12856 12857/* 12858 * Process a datamove request from the other controller. This is used for 12859 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 12860 * first. Once that is complete, the data gets DMAed into the remote 12861 * controller's memory. For reads, we DMA from the remote controller's 12862 * memory into our memory first, and then move it out to the FETD. 12863 */ 12864static void 12865ctl_datamove_remote(union ctl_io *io) 12866{ 12867 12868 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED); 12869 12870 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12871 ctl_failover_io(io, /*have_lock*/ 0); 12872 return; 12873 } 12874 12875 /* 12876 * Note that we look for an aborted I/O here, but don't do some of 12877 * the other checks that ctl_datamove() normally does. 12878 * We don't need to run the datamove delay code, since that should 12879 * have been done if need be on the other controller. 
12880 */ 12881 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12882 printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__, 12883 io->scsiio.tag_num, io->io_hdr.nexus.initid, 12884 io->io_hdr.nexus.targ_port, 12885 io->io_hdr.nexus.targ_lun); 12886 io->io_hdr.port_status = 31338; 12887 ctl_send_datamove_done(io, /*have_lock*/ 0); 12888 return; 12889 } 12890 12891 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) 12892 ctl_datamove_remote_write(io); 12893 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) 12894 ctl_datamove_remote_read(io); 12895 else { 12896 io->io_hdr.port_status = 31339; 12897 ctl_send_datamove_done(io, /*have_lock*/ 0); 12898 } 12899} 12900 12901static int 12902ctl_process_done(union ctl_io *io) 12903{ 12904 struct ctl_lun *lun; 12905 struct ctl_softc *softc = control_softc; 12906 void (*fe_done)(union ctl_io *io); 12907 union ctl_ha_msg msg; 12908 uint32_t targ_port = io->io_hdr.nexus.targ_port; 12909 12910 CTL_DEBUG_PRINT(("ctl_process_done\n")); 12911 12912 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) 12913 fe_done = softc->ctl_ports[targ_port]->fe_done; 12914 else 12915 fe_done = NULL; 12916 12917#ifdef CTL_TIME_IO 12918 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12919 char str[256]; 12920 char path_str[64]; 12921 struct sbuf sb; 12922 12923 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12924 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12925 12926 sbuf_cat(&sb, path_str); 12927 switch (io->io_hdr.io_type) { 12928 case CTL_IO_SCSI: 12929 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12930 sbuf_printf(&sb, "\n"); 12931 sbuf_cat(&sb, path_str); 12932 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12933 io->scsiio.tag_num, io->scsiio.tag_type); 12934 break; 12935 case CTL_IO_TASK: 12936 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12937 "Tag Type: %d\n", io->taskio.task_action, 12938 io->taskio.tag_num, io->taskio.tag_type); 12939 break; 12940 default: 12941 
printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12942 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12943 break; 12944 } 12945 sbuf_cat(&sb, path_str); 12946 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", 12947 (intmax_t)time_uptime - io->io_hdr.start_time); 12948 sbuf_finish(&sb); 12949 printf("%s", sbuf_data(&sb)); 12950 } 12951#endif /* CTL_TIME_IO */ 12952 12953 switch (io->io_hdr.io_type) { 12954 case CTL_IO_SCSI: 12955 break; 12956 case CTL_IO_TASK: 12957 if (ctl_debug & CTL_DEBUG_INFO) 12958 ctl_io_error_print(io, NULL); 12959 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 12960 ctl_free_io(io); 12961 else 12962 fe_done(io); 12963 return (CTL_RETVAL_COMPLETE); 12964 default: 12965 panic("ctl_process_done: invalid io type %d\n", 12966 io->io_hdr.io_type); 12967 break; /* NOTREACHED */ 12968 } 12969 12970 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12971 if (lun == NULL) { 12972 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", 12973 io->io_hdr.nexus.targ_mapped_lun)); 12974 goto bailout; 12975 } 12976 12977 mtx_lock(&lun->lun_lock); 12978 12979 /* 12980 * Check to see if we have any errors to inject here. We only 12981 * inject errors for commands that don't already have errors set. 12982 */ 12983 if ((STAILQ_FIRST(&lun->error_list) != NULL) && 12984 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) && 12985 ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0)) 12986 ctl_inject_error(lun, io); 12987 12988 /* 12989 * XXX KDM how do we treat commands that aren't completed 12990 * successfully? 12991 * 12992 * XXX KDM should we also track I/O latency? 
12993 */ 12994 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS && 12995 io->io_hdr.io_type == CTL_IO_SCSI) { 12996#ifdef CTL_TIME_IO 12997 struct bintime cur_bt; 12998#endif 12999 int type; 13000 13001 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 13002 CTL_FLAG_DATA_IN) 13003 type = CTL_STATS_READ; 13004 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 13005 CTL_FLAG_DATA_OUT) 13006 type = CTL_STATS_WRITE; 13007 else 13008 type = CTL_STATS_NO_IO; 13009 13010 lun->stats.ports[targ_port].bytes[type] += 13011 io->scsiio.kern_total_len; 13012 lun->stats.ports[targ_port].operations[type]++; 13013#ifdef CTL_TIME_IO 13014 bintime_add(&lun->stats.ports[targ_port].dma_time[type], 13015 &io->io_hdr.dma_bt); 13016 lun->stats.ports[targ_port].num_dmas[type] += 13017 io->io_hdr.num_dmas; 13018 getbintime(&cur_bt); 13019 bintime_sub(&cur_bt, &io->io_hdr.start_bt); 13020 bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt); 13021#endif 13022 } 13023 13024 /* 13025 * Remove this from the OOA queue. 13026 */ 13027 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 13028#ifdef CTL_TIME_IO 13029 if (TAILQ_EMPTY(&lun->ooa_queue)) 13030 lun->last_busy = getsbinuptime(); 13031#endif 13032 13033 /* 13034 * Run through the blocked queue on this LUN and see if anything 13035 * has become unblocked, now that this transaction is done. 13036 */ 13037 ctl_check_blocked(lun); 13038 13039 /* 13040 * If the LUN has been invalidated, free it if there is nothing 13041 * left on its OOA queue. 13042 */ 13043 if ((lun->flags & CTL_LUN_INVALID) 13044 && TAILQ_EMPTY(&lun->ooa_queue)) { 13045 mtx_unlock(&lun->lun_lock); 13046 mtx_lock(&softc->ctl_lock); 13047 ctl_free_lun(lun); 13048 mtx_unlock(&softc->ctl_lock); 13049 } else 13050 mtx_unlock(&lun->lun_lock); 13051 13052bailout: 13053 13054 /* 13055 * If this command has been aborted, make sure we set the status 13056 * properly. 
The FETD is responsible for freeing the I/O and doing 13057 * whatever it needs to do to clean up its state. 13058 */ 13059 if (io->io_hdr.flags & CTL_FLAG_ABORT) 13060 ctl_set_task_aborted(&io->scsiio); 13061 13062 /* 13063 * If enabled, print command error status. 13064 */ 13065 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS && 13066 (ctl_debug & CTL_DEBUG_INFO) != 0) 13067 ctl_io_error_print(io, NULL); 13068 13069 /* 13070 * Tell the FETD or the other shelf controller we're done with this 13071 * command. Note that only SCSI commands get to this point. Task 13072 * management commands are completed above. 13073 */ 13074 if ((softc->ha_mode != CTL_HA_MODE_XFER) && 13075 (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) { 13076 memset(&msg, 0, sizeof(msg)); 13077 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 13078 msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 13079 msg.hdr.nexus = io->io_hdr.nexus; 13080 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 13081 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data), 13082 M_WAITOK); 13083 } 13084 if ((softc->ha_mode == CTL_HA_MODE_XFER) 13085 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 13086 memset(&msg, 0, sizeof(msg)); 13087 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 13088 msg.hdr.original_sc = io->io_hdr.original_sc; 13089 msg.hdr.nexus = io->io_hdr.nexus; 13090 msg.hdr.status = io->io_hdr.status; 13091 msg.scsi.scsi_status = io->scsiio.scsi_status; 13092 msg.scsi.tag_num = io->scsiio.tag_num; 13093 msg.scsi.tag_type = io->scsiio.tag_type; 13094 msg.scsi.sense_len = io->scsiio.sense_len; 13095 msg.scsi.sense_residual = io->scsiio.sense_residual; 13096 msg.scsi.residual = io->scsiio.residual; 13097 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 13098 io->scsiio.sense_len); 13099 13100 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 13101 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 13102 msg.scsi.sense_len, M_WAITOK); 13103 ctl_free_io(io); 13104 } else 13105 fe_done(io); 13106 13107 return (CTL_RETVAL_COMPLETE); 13108} 13109 
13110#ifdef CTL_WITH_CA 13111/* 13112 * Front end should call this if it doesn't do autosense. When the request 13113 * sense comes back in from the initiator, we'll dequeue this and send it. 13114 */ 13115int 13116ctl_queue_sense(union ctl_io *io) 13117{ 13118 struct ctl_lun *lun; 13119 struct ctl_port *port; 13120 struct ctl_softc *softc; 13121 uint32_t initidx, targ_lun; 13122 13123 softc = control_softc; 13124 13125 CTL_DEBUG_PRINT(("ctl_queue_sense\n")); 13126 13127 /* 13128 * LUN lookup will likely move to the ctl_work_thread() once we 13129 * have our new queueing infrastructure (that doesn't put things on 13130 * a per-LUN queue initially). That is so that we can handle 13131 * things like an INQUIRY to a LUN that we don't have enabled. We 13132 * can't deal with that right now. 13133 */ 13134 mtx_lock(&softc->ctl_lock); 13135 13136 /* 13137 * If we don't have a LUN for this, just toss the sense 13138 * information. 13139 */ 13140 port = ctl_io_port(&ctsio->io_hdr); 13141 targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); 13142 if ((targ_lun < CTL_MAX_LUNS) 13143 && (softc->ctl_luns[targ_lun] != NULL)) 13144 lun = softc->ctl_luns[targ_lun]; 13145 else 13146 goto bailout; 13147 13148 initidx = ctl_get_initindex(&io->io_hdr.nexus); 13149 13150 mtx_lock(&lun->lun_lock); 13151 /* 13152 * Already have CA set for this LUN...toss the sense information. 13153 */ 13154 if (ctl_is_set(lun->have_ca, initidx)) { 13155 mtx_unlock(&lun->lun_lock); 13156 goto bailout; 13157 } 13158 13159 memcpy(&lun->pending_sense[initidx], &io->scsiio.sense_data, 13160 MIN(sizeof(lun->pending_sense[initidx]), 13161 sizeof(io->scsiio.sense_data))); 13162 ctl_set_mask(lun->have_ca, initidx); 13163 mtx_unlock(&lun->lun_lock); 13164 13165bailout: 13166 mtx_unlock(&softc->ctl_lock); 13167 13168 ctl_free_io(io); 13169 13170 return (CTL_RETVAL_COMPLETE); 13171} 13172#endif 13173 13174/* 13175 * Primary command inlet from frontend ports. 
All SCSI and task I/O 13176 * requests must go through this function. 13177 */ 13178int 13179ctl_queue(union ctl_io *io) 13180{ 13181 struct ctl_port *port; 13182 13183 CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0])); 13184 13185#ifdef CTL_TIME_IO 13186 io->io_hdr.start_time = time_uptime; 13187 getbintime(&io->io_hdr.start_bt); 13188#endif /* CTL_TIME_IO */ 13189 13190 /* Map FE-specific LUN ID into global one. */ 13191 port = ctl_io_port(&io->io_hdr); 13192 io->io_hdr.nexus.targ_mapped_lun = 13193 ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); 13194 13195 switch (io->io_hdr.io_type) { 13196 case CTL_IO_SCSI: 13197 case CTL_IO_TASK: 13198 if (ctl_debug & CTL_DEBUG_CDB) 13199 ctl_io_print(io); 13200 ctl_enqueue_incoming(io); 13201 break; 13202 default: 13203 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type); 13204 return (EINVAL); 13205 } 13206 13207 return (CTL_RETVAL_COMPLETE); 13208} 13209 13210#ifdef CTL_IO_DELAY 13211static void 13212ctl_done_timer_wakeup(void *arg) 13213{ 13214 union ctl_io *io; 13215 13216 io = (union ctl_io *)arg; 13217 ctl_done(io); 13218} 13219#endif /* CTL_IO_DELAY */ 13220 13221void 13222ctl_done(union ctl_io *io) 13223{ 13224 13225 /* 13226 * Enable this to catch duplicate completion issues. 13227 */ 13228#if 0 13229 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) { 13230 printf("%s: type %d msg %d cdb %x iptl: " 13231 "%u:%u:%u tag 0x%04x " 13232 "flag %#x status %x\n", 13233 __func__, 13234 io->io_hdr.io_type, 13235 io->io_hdr.msg_type, 13236 io->scsiio.cdb[0], 13237 io->io_hdr.nexus.initid, 13238 io->io_hdr.nexus.targ_port, 13239 io->io_hdr.nexus.targ_lun, 13240 (io->io_hdr.io_type == 13241 CTL_IO_TASK) ? 
13242 io->taskio.tag_num : 13243 io->scsiio.tag_num, 13244 io->io_hdr.flags, 13245 io->io_hdr.status); 13246 } else 13247 io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE; 13248#endif 13249 13250 /* 13251 * This is an internal copy of an I/O, and should not go through 13252 * the normal done processing logic. 13253 */ 13254 if (io->io_hdr.flags & CTL_FLAG_INT_COPY) 13255 return; 13256 13257#ifdef CTL_IO_DELAY 13258 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 13259 struct ctl_lun *lun; 13260 13261 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13262 13263 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 13264 } else { 13265 struct ctl_lun *lun; 13266 13267 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13268 13269 if ((lun != NULL) 13270 && (lun->delay_info.done_delay > 0)) { 13271 13272 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); 13273 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 13274 callout_reset(&io->io_hdr.delay_callout, 13275 lun->delay_info.done_delay * hz, 13276 ctl_done_timer_wakeup, io); 13277 if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT) 13278 lun->delay_info.done_delay = 0; 13279 return; 13280 } 13281 } 13282#endif /* CTL_IO_DELAY */ 13283 13284 ctl_enqueue_done(io); 13285} 13286 13287static void 13288ctl_work_thread(void *arg) 13289{ 13290 struct ctl_thread *thr = (struct ctl_thread *)arg; 13291 struct ctl_softc *softc = thr->ctl_softc; 13292 union ctl_io *io; 13293 int retval; 13294 13295 CTL_DEBUG_PRINT(("ctl_work_thread starting\n")); 13296 13297 for (;;) { 13298 retval = 0; 13299 13300 /* 13301 * We handle the queues in this order: 13302 * - ISC 13303 * - done queue (to free up resources, unblock other commands) 13304 * - RtR queue 13305 * - incoming queue 13306 * 13307 * If those queues are empty, we break out of the loop and 13308 * go to sleep. 
13309 */ 13310 mtx_lock(&thr->queue_lock); 13311 io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue); 13312 if (io != NULL) { 13313 STAILQ_REMOVE_HEAD(&thr->isc_queue, links); 13314 mtx_unlock(&thr->queue_lock); 13315 ctl_handle_isc(io); 13316 continue; 13317 } 13318 io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue); 13319 if (io != NULL) { 13320 STAILQ_REMOVE_HEAD(&thr->done_queue, links); 13321 /* clear any blocked commands, call fe_done */ 13322 mtx_unlock(&thr->queue_lock); 13323 retval = ctl_process_done(io); 13324 continue; 13325 } 13326 io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue); 13327 if (io != NULL) { 13328 STAILQ_REMOVE_HEAD(&thr->incoming_queue, links); 13329 mtx_unlock(&thr->queue_lock); 13330 if (io->io_hdr.io_type == CTL_IO_TASK) 13331 ctl_run_task(io); 13332 else 13333 ctl_scsiio_precheck(softc, &io->scsiio); 13334 continue; 13335 } 13336 io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue); 13337 if (io != NULL) { 13338 STAILQ_REMOVE_HEAD(&thr->rtr_queue, links); 13339 mtx_unlock(&thr->queue_lock); 13340 retval = ctl_scsiio(&io->scsiio); 13341 if (retval != CTL_RETVAL_COMPLETE) 13342 CTL_DEBUG_PRINT(("ctl_scsiio failed\n")); 13343 continue; 13344 } 13345 13346 /* Sleep until we have something to do. */ 13347 mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0); 13348 } 13349} 13350 13351static void 13352ctl_lun_thread(void *arg) 13353{ 13354 struct ctl_softc *softc = (struct ctl_softc *)arg; 13355 struct ctl_be_lun *be_lun; 13356 int retval; 13357 13358 CTL_DEBUG_PRINT(("ctl_lun_thread starting\n")); 13359 13360 for (;;) { 13361 retval = 0; 13362 mtx_lock(&softc->ctl_lock); 13363 be_lun = STAILQ_FIRST(&softc->pending_lun_queue); 13364 if (be_lun != NULL) { 13365 STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links); 13366 mtx_unlock(&softc->ctl_lock); 13367 ctl_create_lun(be_lun); 13368 continue; 13369 } 13370 13371 /* Sleep until we have something to do. 
*/ 13372 mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock, 13373 PDROP | PRIBIO, "-", 0); 13374 } 13375} 13376 13377static void 13378ctl_thresh_thread(void *arg) 13379{ 13380 struct ctl_softc *softc = (struct ctl_softc *)arg; 13381 struct ctl_lun *lun; 13382 struct ctl_be_lun *be_lun; 13383 struct scsi_da_rw_recovery_page *rwpage; 13384 struct ctl_logical_block_provisioning_page *page; 13385 const char *attr; 13386 union ctl_ha_msg msg; 13387 uint64_t thres, val; 13388 int i, e, set; 13389 13390 CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n")); 13391 13392 for (;;) { 13393 mtx_lock(&softc->ctl_lock); 13394 STAILQ_FOREACH(lun, &softc->lun_list, links) { 13395 be_lun = lun->be_lun; 13396 if ((lun->flags & CTL_LUN_DISABLED) || 13397 (lun->flags & CTL_LUN_OFFLINE) || 13398 lun->backend->lun_attr == NULL) 13399 continue; 13400 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 13401 softc->ha_mode == CTL_HA_MODE_XFER) 13402 continue; 13403 rwpage = &lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT]; 13404 if ((rwpage->byte8 & SMS_RWER_LBPERE) == 0) 13405 continue; 13406 e = 0; 13407 page = &lun->mode_pages.lbp_page[CTL_PAGE_CURRENT]; 13408 for (i = 0; i < CTL_NUM_LBP_THRESH; i++) { 13409 if ((page->descr[i].flags & SLBPPD_ENABLED) == 0) 13410 continue; 13411 thres = scsi_4btoul(page->descr[i].count); 13412 thres <<= CTL_LBP_EXPONENT; 13413 switch (page->descr[i].resource) { 13414 case 0x01: 13415 attr = "blocksavail"; 13416 break; 13417 case 0x02: 13418 attr = "blocksused"; 13419 break; 13420 case 0xf1: 13421 attr = "poolblocksavail"; 13422 break; 13423 case 0xf2: 13424 attr = "poolblocksused"; 13425 break; 13426 default: 13427 continue; 13428 } 13429 mtx_unlock(&softc->ctl_lock); // XXX 13430 val = lun->backend->lun_attr( 13431 lun->be_lun->be_lun, attr); 13432 mtx_lock(&softc->ctl_lock); 13433 if (val == UINT64_MAX) 13434 continue; 13435 if ((page->descr[i].flags & SLBPPD_ARMING_MASK) 13436 == SLBPPD_ARMING_INC) 13437 e = (val >= thres); 13438 else 13439 e = (val <= 
thres); 13440 if (e) 13441 break; 13442 } 13443 mtx_lock(&lun->lun_lock); 13444 if (e) { 13445 scsi_u64to8b((uint8_t *)&page->descr[i] - 13446 (uint8_t *)page, lun->ua_tpt_info); 13447 if (lun->lasttpt == 0 || 13448 time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) { 13449 lun->lasttpt = time_uptime; 13450 ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); 13451 set = 1; 13452 } else 13453 set = 0; 13454 } else { 13455 lun->lasttpt = 0; 13456 ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); 13457 set = -1; 13458 } 13459 mtx_unlock(&lun->lun_lock); 13460 if (set != 0 && 13461 lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 13462 /* Send msg to other side. */ 13463 bzero(&msg.ua, sizeof(msg.ua)); 13464 msg.hdr.msg_type = CTL_MSG_UA; 13465 msg.hdr.nexus.initid = -1; 13466 msg.hdr.nexus.targ_port = -1; 13467 msg.hdr.nexus.targ_lun = lun->lun; 13468 msg.hdr.nexus.targ_mapped_lun = lun->lun; 13469 msg.ua.ua_all = 1; 13470 msg.ua.ua_set = (set > 0); 13471 msg.ua.ua_type = CTL_UA_THIN_PROV_THRES; 13472 memcpy(msg.ua.ua_info, lun->ua_tpt_info, 8); 13473 mtx_unlock(&softc->ctl_lock); // XXX 13474 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 13475 sizeof(msg.ua), M_WAITOK); 13476 mtx_lock(&softc->ctl_lock); 13477 } 13478 } 13479 mtx_unlock(&softc->ctl_lock); 13480 pause("-", CTL_LBP_PERIOD * hz); 13481 } 13482} 13483 13484static void 13485ctl_enqueue_incoming(union ctl_io *io) 13486{ 13487 struct ctl_softc *softc = control_softc; 13488 struct ctl_thread *thr; 13489 u_int idx; 13490 13491 idx = (io->io_hdr.nexus.targ_port * 127 + 13492 io->io_hdr.nexus.initid) % worker_threads; 13493 thr = &softc->threads[idx]; 13494 mtx_lock(&thr->queue_lock); 13495 STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links); 13496 mtx_unlock(&thr->queue_lock); 13497 wakeup(thr); 13498} 13499 13500static void 13501ctl_enqueue_rtr(union ctl_io *io) 13502{ 13503 struct ctl_softc *softc = control_softc; 13504 struct ctl_thread *thr; 13505 13506 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % 
worker_threads]; 13507 mtx_lock(&thr->queue_lock); 13508 STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links); 13509 mtx_unlock(&thr->queue_lock); 13510 wakeup(thr); 13511} 13512 13513static void 13514ctl_enqueue_done(union ctl_io *io) 13515{ 13516 struct ctl_softc *softc = control_softc; 13517 struct ctl_thread *thr; 13518 13519 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 13520 mtx_lock(&thr->queue_lock); 13521 STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links); 13522 mtx_unlock(&thr->queue_lock); 13523 wakeup(thr); 13524} 13525 13526static void 13527ctl_enqueue_isc(union ctl_io *io) 13528{ 13529 struct ctl_softc *softc = control_softc; 13530 struct ctl_thread *thr; 13531 13532 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 13533 mtx_lock(&thr->queue_lock); 13534 STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links); 13535 mtx_unlock(&thr->queue_lock); 13536 wakeup(thr); 13537} 13538 13539/* 13540 * vim: ts=8 13541 */ 13542