ctl.c revision 288725
1/*- 2 * Copyright (c) 2003-2009 Silicon Graphics International Corp. 3 * Copyright (c) 2012 The FreeBSD Foundation 4 * All rights reserved. 5 * 6 * Portions of this software were developed by Edward Tomasz Napierala 7 * under sponsorship from the FreeBSD Foundation. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions, and the following disclaimer, 14 * without modification. 15 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 16 * substantially similar to the "NO WARRANTY" disclaimer below 17 * ("Disclaimer") and any redistribution must be conditioned upon 18 * including a substantially similar Disclaimer requirement for further 19 * binary redistribution. 20 * 21 * NO WARRANTY 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 26 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 30 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 31 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 * POSSIBILITY OF SUCH DAMAGES. 33 * 34 * $Id$ 35 */ 36/* 37 * CAM Target Layer, a SCSI device emulation subsystem. 
38 * 39 * Author: Ken Merry <ken@FreeBSD.org> 40 */ 41 42#define _CTL_C 43 44#include <sys/cdefs.h> 45__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl.c 288725 2015-10-05 08:48:47Z mav $"); 46 47#include <sys/param.h> 48#include <sys/systm.h> 49#include <sys/ctype.h> 50#include <sys/kernel.h> 51#include <sys/types.h> 52#include <sys/kthread.h> 53#include <sys/bio.h> 54#include <sys/fcntl.h> 55#include <sys/lock.h> 56#include <sys/module.h> 57#include <sys/mutex.h> 58#include <sys/condvar.h> 59#include <sys/malloc.h> 60#include <sys/conf.h> 61#include <sys/ioccom.h> 62#include <sys/queue.h> 63#include <sys/sbuf.h> 64#include <sys/smp.h> 65#include <sys/endian.h> 66#include <sys/sysctl.h> 67#include <vm/uma.h> 68 69#include <cam/cam.h> 70#include <cam/scsi/scsi_all.h> 71#include <cam/scsi/scsi_da.h> 72#include <cam/ctl/ctl_io.h> 73#include <cam/ctl/ctl.h> 74#include <cam/ctl/ctl_frontend.h> 75#include <cam/ctl/ctl_util.h> 76#include <cam/ctl/ctl_backend.h> 77#include <cam/ctl/ctl_ioctl.h> 78#include <cam/ctl/ctl_ha.h> 79#include <cam/ctl/ctl_private.h> 80#include <cam/ctl/ctl_debug.h> 81#include <cam/ctl/ctl_scsi_all.h> 82#include <cam/ctl/ctl_error.h> 83 84struct ctl_softc *control_softc = NULL; 85 86/* 87 * Size and alignment macros needed for Copan-specific HA hardware. These 88 * can go away when the HA code is re-written, and uses busdma for any 89 * hardware. 90 */ 91#define CTL_ALIGN_8B(target, source, type) \ 92 if (((uint32_t)source & 0x7) != 0) \ 93 target = (type)(source + (0x8 - ((uint32_t)source & 0x7)));\ 94 else \ 95 target = (type)source; 96 97#define CTL_SIZE_8B(target, size) \ 98 if ((size & 0x7) != 0) \ 99 target = size + (0x8 - (size & 0x7)); \ 100 else \ 101 target = size; 102 103#define CTL_ALIGN_8B_MARGIN 16 104 105/* 106 * Template mode pages. 107 */ 108 109/* 110 * Note that these are default values only. The actual values will be 111 * filled in when the user does a mode sense. 
112 */ 113const static struct copan_debugconf_subpage debugconf_page_default = { 114 DBGCNF_PAGE_CODE | SMPH_SPF, /* page_code */ 115 DBGCNF_SUBPAGE_CODE, /* subpage */ 116 {(sizeof(struct copan_debugconf_subpage) - 4) >> 8, 117 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */ 118 DBGCNF_VERSION, /* page_version */ 119 {CTL_TIME_IO_DEFAULT_SECS>>8, 120 CTL_TIME_IO_DEFAULT_SECS>>0}, /* ctl_time_io_secs */ 121}; 122 123const static struct copan_debugconf_subpage debugconf_page_changeable = { 124 DBGCNF_PAGE_CODE | SMPH_SPF, /* page_code */ 125 DBGCNF_SUBPAGE_CODE, /* subpage */ 126 {(sizeof(struct copan_debugconf_subpage) - 4) >> 8, 127 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */ 128 0, /* page_version */ 129 {0xff,0xff}, /* ctl_time_io_secs */ 130}; 131 132const static struct scsi_da_rw_recovery_page rw_er_page_default = { 133 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, 134 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, 135 /*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE, 136 /*read_retry_count*/0, 137 /*correction_span*/0, 138 /*head_offset_count*/0, 139 /*data_strobe_offset_cnt*/0, 140 /*byte8*/SMS_RWER_LBPERE, 141 /*write_retry_count*/0, 142 /*reserved2*/0, 143 /*recovery_time_limit*/{0, 0}, 144}; 145 146const static struct scsi_da_rw_recovery_page rw_er_page_changeable = { 147 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, 148 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, 149 /*byte3*/0, 150 /*read_retry_count*/0, 151 /*correction_span*/0, 152 /*head_offset_count*/0, 153 /*data_strobe_offset_cnt*/0, 154 /*byte8*/0, 155 /*write_retry_count*/0, 156 /*reserved2*/0, 157 /*recovery_time_limit*/{0, 0}, 158}; 159 160const static struct scsi_format_page format_page_default = { 161 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 162 /*page_length*/sizeof(struct scsi_format_page) - 2, 163 /*tracks_per_zone*/ {0, 0}, 164 /*alt_sectors_per_zone*/ {0, 0}, 165 /*alt_tracks_per_zone*/ {0, 0}, 166 /*alt_tracks_per_lun*/ {0, 0}, 167 
/*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff, 168 CTL_DEFAULT_SECTORS_PER_TRACK & 0xff}, 169 /*bytes_per_sector*/ {0, 0}, 170 /*interleave*/ {0, 0}, 171 /*track_skew*/ {0, 0}, 172 /*cylinder_skew*/ {0, 0}, 173 /*flags*/ SFP_HSEC, 174 /*reserved*/ {0, 0, 0} 175}; 176 177const static struct scsi_format_page format_page_changeable = { 178 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 179 /*page_length*/sizeof(struct scsi_format_page) - 2, 180 /*tracks_per_zone*/ {0, 0}, 181 /*alt_sectors_per_zone*/ {0, 0}, 182 /*alt_tracks_per_zone*/ {0, 0}, 183 /*alt_tracks_per_lun*/ {0, 0}, 184 /*sectors_per_track*/ {0, 0}, 185 /*bytes_per_sector*/ {0, 0}, 186 /*interleave*/ {0, 0}, 187 /*track_skew*/ {0, 0}, 188 /*cylinder_skew*/ {0, 0}, 189 /*flags*/ 0, 190 /*reserved*/ {0, 0, 0} 191}; 192 193const static struct scsi_rigid_disk_page rigid_disk_page_default = { 194 /*page_code*/SMS_RIGID_DISK_PAGE, 195 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 196 /*cylinders*/ {0, 0, 0}, 197 /*heads*/ CTL_DEFAULT_HEADS, 198 /*start_write_precomp*/ {0, 0, 0}, 199 /*start_reduced_current*/ {0, 0, 0}, 200 /*step_rate*/ {0, 0}, 201 /*landing_zone_cylinder*/ {0, 0, 0}, 202 /*rpl*/ SRDP_RPL_DISABLED, 203 /*rotational_offset*/ 0, 204 /*reserved1*/ 0, 205 /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff, 206 CTL_DEFAULT_ROTATION_RATE & 0xff}, 207 /*reserved2*/ {0, 0} 208}; 209 210const static struct scsi_rigid_disk_page rigid_disk_page_changeable = { 211 /*page_code*/SMS_RIGID_DISK_PAGE, 212 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 213 /*cylinders*/ {0, 0, 0}, 214 /*heads*/ 0, 215 /*start_write_precomp*/ {0, 0, 0}, 216 /*start_reduced_current*/ {0, 0, 0}, 217 /*step_rate*/ {0, 0}, 218 /*landing_zone_cylinder*/ {0, 0, 0}, 219 /*rpl*/ 0, 220 /*rotational_offset*/ 0, 221 /*reserved1*/ 0, 222 /*rotation_rate*/ {0, 0}, 223 /*reserved2*/ {0, 0} 224}; 225 226const static struct scsi_caching_page caching_page_default = { 227 /*page_code*/SMS_CACHING_PAGE, 228 
/*page_length*/sizeof(struct scsi_caching_page) - 2, 229 /*flags1*/ SCP_DISC | SCP_WCE, 230 /*ret_priority*/ 0, 231 /*disable_pf_transfer_len*/ {0xff, 0xff}, 232 /*min_prefetch*/ {0, 0}, 233 /*max_prefetch*/ {0xff, 0xff}, 234 /*max_pf_ceiling*/ {0xff, 0xff}, 235 /*flags2*/ 0, 236 /*cache_segments*/ 0, 237 /*cache_seg_size*/ {0, 0}, 238 /*reserved*/ 0, 239 /*non_cache_seg_size*/ {0, 0, 0} 240}; 241 242const static struct scsi_caching_page caching_page_changeable = { 243 /*page_code*/SMS_CACHING_PAGE, 244 /*page_length*/sizeof(struct scsi_caching_page) - 2, 245 /*flags1*/ SCP_WCE | SCP_RCD, 246 /*ret_priority*/ 0, 247 /*disable_pf_transfer_len*/ {0, 0}, 248 /*min_prefetch*/ {0, 0}, 249 /*max_prefetch*/ {0, 0}, 250 /*max_pf_ceiling*/ {0, 0}, 251 /*flags2*/ 0, 252 /*cache_segments*/ 0, 253 /*cache_seg_size*/ {0, 0}, 254 /*reserved*/ 0, 255 /*non_cache_seg_size*/ {0, 0, 0} 256}; 257 258const static struct scsi_control_page control_page_default = { 259 /*page_code*/SMS_CONTROL_MODE_PAGE, 260 /*page_length*/sizeof(struct scsi_control_page) - 2, 261 /*rlec*/0, 262 /*queue_flags*/SCP_QUEUE_ALG_RESTRICTED, 263 /*eca_and_aen*/0, 264 /*flags4*/SCP_TAS, 265 /*aen_holdoff_period*/{0, 0}, 266 /*busy_timeout_period*/{0, 0}, 267 /*extended_selftest_completion_time*/{0, 0} 268}; 269 270const static struct scsi_control_page control_page_changeable = { 271 /*page_code*/SMS_CONTROL_MODE_PAGE, 272 /*page_length*/sizeof(struct scsi_control_page) - 2, 273 /*rlec*/SCP_DSENSE, 274 /*queue_flags*/SCP_QUEUE_ALG_MASK, 275 /*eca_and_aen*/SCP_SWP, 276 /*flags4*/0, 277 /*aen_holdoff_period*/{0, 0}, 278 /*busy_timeout_period*/{0, 0}, 279 /*extended_selftest_completion_time*/{0, 0} 280}; 281 282const static struct scsi_info_exceptions_page ie_page_default = { 283 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, 284 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, 285 /*info_flags*/SIEP_FLAGS_DEXCPT, 286 /*mrie*/0, 287 /*interval_timer*/{0, 0, 0, 0}, 288 /*report_count*/{0, 0, 0, 0} 289}; 290 
/* Changeable mask for the informational exceptions page: nothing settable. */
const static struct scsi_info_exceptions_page ie_page_changeable = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
	/*info_flags*/0,
	/*mrie*/0,
	/*interval_timer*/{0, 0, 0, 0},
	/*report_count*/{0, 0, 0, 0}
};

/* PAGE LENGTH field of the LBP subpage: total size minus the 4-byte header. */
#define CTL_LBPM_LEN	(sizeof(struct ctl_logical_block_provisioning_page) - 4)

/*
 * Logical Block Provisioning mode subpage (0x1C/0x02) default template.
 * The four descriptors carry threshold resources 0x01, 0x02, 0xf1 and 0xf2;
 * counts are zero here and presumably filled in per-LUN at mode sense time
 * (consistent with the "default values only" note above) — confirm against
 * the mode sense handler.
 */
const static struct ctl_logical_block_provisioning_page lbp_page_default = {{
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
	/*flags*/0,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}},
	{{/*flags*/0,
	  /*resource*/0x01,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0x02,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0xf1,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0xf2,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

/* Changeable mask for the LBP subpage: no field may be modified. */
const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
	/*flags*/0,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}},
	{{/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

/*
 * XXX KDM move these into the softc.
 */
static int rcv_sync_msg;
static uint8_t ctl_pause_rtr;

/* Tunables / sysctls under kern.cam.ctl. */
SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
static int worker_threads = -1;
TUNABLE_INT("kern.cam.ctl.worker_threads", &worker_threads);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
    &worker_threads, 1, "Number of worker threads");
static int ctl_debug = CTL_DEBUG_NONE;
TUNABLE_INT("kern.cam.ctl.debug", &ctl_debug);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN,
    &ctl_debug, 0, "Enabled debug flags");

/*
 * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87),
 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0),
 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2)
 */
#define SCSI_EVPD_NUM_SUPPORTED_PAGES	10

/*
 * Forward declarations for the local (static) functions below.
 */
#ifdef notyet
static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event,
				  int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
#endif
static int ctl_init(void);
void ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
			      struct ctl_ooa *ooa_hdr,
			      struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
		     struct thread *td);
static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
			 struct ctl_be_lun *be_lun);
static int ctl_free_lun(struct ctl_lun *lun);
static void ctl_create_lun(struct ctl_be_lun *be_lun);
static struct ctl_port * ctl_io_port(struct ctl_io_hdr *io_hdr);
/**
static void ctl_failover_change_pages(struct ctl_softc *softc,
				      struct ctl_scsiio *ctsio, int master);
**/

static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
			   uint64_t res_key, uint64_t sa_res_key,
			   uint8_t type, uint32_t residx,
			   struct ctl_scsiio *ctsio,
			   struct scsi_per_res_out *cdb,
			   struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
				  union ctl_ha_msg *msg);
static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg);
/* INQUIRY EVPD page handlers. */
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
				       int alloc_len);
static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
					 int alloc_len);
static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
/* I/O serialization / ordering checks. */
static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len);
static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2,
    bool seq);
static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(struct ctl_lun *lun,
    union ctl_io *pending_io, union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
    union ctl_io *starting_io);
static int ctl_check_blocked(struct ctl_lun *lun);
static int ctl_scsiio_lun_check(struct ctl_lun *lun,
    const struct ctl_cmd_entry *entry,
    struct ctl_scsiio *ctsio);
//static int ctl_check_rtr(union ctl_io *pending_io, struct ctl_softc *softc);
#ifdef notyet
static void ctl_failover(void);
#endif
static void ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx,
			       ctl_ua_type ua_type);
static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
			       struct ctl_scsiio *ctsio);
static int ctl_scsiio(struct ctl_scsiio *ctsio);

/* Task-management actions. */
static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
			    ctl_ua_type ua_type);
static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io,
			 ctl_ua_type ua_type);
static int ctl_abort_task(union ctl_io *io);
static int ctl_abort_task_set(union ctl_io *io);
static int ctl_i_t_nexus_reset(union ctl_io *io);
static void ctl_run_task(union ctl_io *io);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
static void ctl_done_timer_wakeup(void *arg);
#endif /* CTL_IO_DELAY */

/* HA remote-datamove plumbing. */
static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_dm_write_cb(union ctl_io *io);
static void ctl_datamove_remote_write(union ctl_io *io);
static int ctl_datamove_remote_dm_read_cb(union ctl_io *io);
static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
				    ctl_ha_dt_cb callback);
static void ctl_datamove_remote_read(union ctl_io *io);
static void ctl_datamove_remote(union ctl_io *io);
static int ctl_process_done(union ctl_io *io);
/* Kernel threads and their work queues. */
static void ctl_lun_thread(void *arg);
static void ctl_thresh_thread(void *arg);
static void ctl_work_thread(void *arg);
static void ctl_enqueue_incoming(union ctl_io
*io);
static void ctl_enqueue_rtr(union ctl_io *io);
static void ctl_enqueue_done(union ctl_io *io);
#ifdef notyet
static void ctl_enqueue_isc(union ctl_io *io);
#endif
static const struct ctl_cmd_entry *
    ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa);
static const struct ctl_cmd_entry *
    ctl_validate_command(struct ctl_scsiio *ctsio);
static int ctl_cmd_applicable(uint8_t lun_type,
    const struct ctl_cmd_entry *entry);

/*
 * Load the serialization table.  This isn't very pretty, but is probably
 * the easiest way to do it.
 */
#include "ctl_ser_table.c"

/*
 * We only need to define open, close and ioctl routines for this driver.
 */
static struct cdevsw ctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	ctl_open,
	.d_close =	ctl_close,
	.d_ioctl =	ctl_ioctl,
	.d_name =	"ctl",
};


MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");

static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t ctl_moduledata = {
	"ctl",
	ctl_module_event_handler,
	NULL
};

DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);

#ifdef notyet
/*
 * Complete an I/O on this (originating) side once the peer SC reports it
 * finished, XFER mode: copy status, SCSI status and sense carried in the
 * HA message back into the original ctl_io and queue it for ISC processing.
 */
static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
			    union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.original_sc == NULL) {
		printf("%s: original_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.original_sc->scsiio;
	ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctsio->io_hdr.status = msg_info->hdr.status;
	ctsio->scsi_status = msg_info->scsi.scsi_status;
	ctsio->sense_len = msg_info->scsi.sense_len;
	ctsio->sense_residual = msg_info->scsi.sense_residual;
	ctsio->residual = msg_info->scsi.residual;
	memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
	       sizeof(ctsio->sense_data));
	memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
	       &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen));
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

/*
 * Finish an I/O in SER_ONLY mode: only the serializing side's bookkeeping
 * needs to be unwound, so just mark the serializing ctl_io finished and
 * queue it for ISC processing (no status/sense to copy back).
 */
static void
ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc,
				union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.serializing_sc == NULL) {
		printf("%s: serializing_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.serializing_sc->scsiio;
#if 0
	/*
	 * Attempt to catch the situation where an I/O has
	 * been freed, and we're using it again.
	 */
	if (ctsio->io_hdr.io_type == 0xff) {
		union ctl_io *tmp_io;
		tmp_io = (union ctl_io *)ctsio;
		printf("%s: %p use after free!\n", __func__,
		       ctsio);
		printf("%s: type %d msg %d cdb %x iptl: "
		       "%d:%d:%d:%d tag 0x%04x "
		       "flag %#x status %x\n",
			__func__,
			tmp_io->io_hdr.io_type,
			tmp_io->io_hdr.msg_type,
			tmp_io->scsiio.cdb[0],
			tmp_io->io_hdr.nexus.initid.id,
			tmp_io->io_hdr.nexus.targ_port,
			tmp_io->io_hdr.nexus.targ_target.id,
			tmp_io->io_hdr.nexus.targ_lun,
			(tmp_io->io_hdr.io_type ==
			CTL_IO_TASK) ?
			tmp_io->taskio.tag_num :
			tmp_io->scsiio.tag_num,
			tmp_io->io_hdr.flags,
			tmp_io->io_hdr.status);
	}
#endif
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctl_enqueue_isc((union ctl_io *)ctsio);
}

/*
 * ISC (Inter Shelf Communication) event handler.  Events from the HA
 * subsystem come in here.
 */
static void
ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
{
	struct ctl_softc *softc;
	union ctl_io *io;
	struct ctl_prio *presio;
	ctl_ha_status isc_status;

	softc = control_softc;
	io = NULL;


#if 0
	printf("CTL: Isc Msg event %d\n", event);
#endif
	if (event == CTL_HA_EVT_MSG_RECV) {
		union ctl_ha_msg msg_info;

		isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
					     sizeof(msg_info), /*wait*/ 0);
#if 0
		printf("CTL: msg_type %d\n", msg_info.msg_type);
#endif
		if (isc_status != 0) {
			printf("Error receiving message, status = %d\n",
			       isc_status);
			return;
		}

		switch (msg_info.hdr.msg_type) {
		/* A command from the peer SC that we must shadow locally. */
		case CTL_MSG_SERIALIZE:
#if 0
			printf("Serialize\n");
#endif
			io = ctl_alloc_io_nowait(softc->othersc_pool);
			if (io == NULL) {
				printf("ctl_isc_event_handler: can't allocate "
				       "ctl_io!\n");
				/* Bad Juju */
				/* Need to set busy and send msg back */
				msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
				msg_info.hdr.status = CTL_SCSI_ERROR;
				msg_info.scsi.scsi_status = SCSI_STATUS_BUSY;
				msg_info.scsi.sense_len = 0;
				/* NOTE(review): send failure is ignored here. */
				if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
				    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS){
				}
				goto bailout;
			}
			ctl_zero_io(io);
			// populate ctsio from msg_info
			io->io_hdr.io_type = CTL_IO_SCSI;
			io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
			io->io_hdr.original_sc = msg_info.hdr.original_sc;
#if 0
			printf("pOrig %x\n", (int)msg_info.original_sc);
#endif
			io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
					    CTL_FLAG_IO_ACTIVE;
			/*
			 * If we're in serialization-only mode, we don't
			 * want to go through full done processing.  Thus
			 * the COPY flag.
			 *
			 * XXX KDM add another flag that is more specific.
			 */
			if (softc->ha_mode == CTL_HA_MODE_SER_ONLY)
				io->io_hdr.flags |= CTL_FLAG_INT_COPY;
			io->io_hdr.nexus = msg_info.hdr.nexus;
#if 0
			printf("targ %d, port %d, iid %d, lun %d\n",
			       io->io_hdr.nexus.targ_target.id,
			       io->io_hdr.nexus.targ_port,
			       io->io_hdr.nexus.initid.id,
			       io->io_hdr.nexus.targ_lun);
#endif
			io->scsiio.tag_num = msg_info.scsi.tag_num;
			io->scsiio.tag_type = msg_info.scsi.tag_type;
			memcpy(io->scsiio.cdb, msg_info.scsi.cdb,
			       CTL_MAX_CDBLEN);
			if (softc->ha_mode == CTL_HA_MODE_XFER) {
				const struct ctl_cmd_entry *entry;

				/* Derive data direction from the cmd table. */
				entry = ctl_get_cmd_entry(&io->scsiio, NULL);
				io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
				io->io_hdr.flags |=
					entry->flags & CTL_FLAG_DATA_MASK;
			}
			ctl_enqueue_isc(io);
			break;

		/* Performed on the Originating SC, XFER mode only */
		case CTL_MSG_DATAMOVE: {
			struct ctl_sg_entry *sgl;
			int i, j;

			io = msg_info.hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n", __func__);
				/* XXX KDM do something here */
				break;
			}
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			/*
			 * Keep track of this, we need to send it back over
			 * when the datamove is complete.
			 */
			io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;

			if (msg_info.dt.sg_sequence == 0) {
				/*
				 * XXX KDM we use the preallocated S/G list
				 * here, but we'll need to change this to
				 * dynamic allocation if we need larger S/G
				 * lists.
				 */
				if (msg_info.dt.kern_sg_entries >
				    sizeof(io->io_hdr.remote_sglist) /
				    sizeof(io->io_hdr.remote_sglist[0])) {
					printf("%s: number of S/G entries "
					    "needed %u > allocated num %zd\n",
					    __func__,
					    msg_info.dt.kern_sg_entries,
					    sizeof(io->io_hdr.remote_sglist)/
					    sizeof(io->io_hdr.remote_sglist[0]));

					/*
					 * XXX KDM send a message back to
					 * the other side to shut down the
					 * DMA.  The error will come back
					 * through via the normal channel.
					 */
					break;
				}
				sgl = io->io_hdr.remote_sglist;
				memset(sgl, 0,
				       sizeof(io->io_hdr.remote_sglist));

				io->scsiio.kern_data_ptr = (uint8_t *)sgl;

				io->scsiio.kern_sg_entries =
					msg_info.dt.kern_sg_entries;
				io->scsiio.rem_sg_entries =
					msg_info.dt.kern_sg_entries;
				io->scsiio.kern_data_len =
					msg_info.dt.kern_data_len;
				io->scsiio.kern_total_len =
					msg_info.dt.kern_total_len;
				io->scsiio.kern_data_resid =
					msg_info.dt.kern_data_resid;
				io->scsiio.kern_rel_offset =
					msg_info.dt.kern_rel_offset;
				/*
				 * Clear out per-DMA flags.
				 */
				io->io_hdr.flags &= ~CTL_FLAG_RDMA_MASK;
				/*
				 * Add per-DMA flags that are set for this
				 * particular DMA request.
				 */
				io->io_hdr.flags |= msg_info.dt.flags &
						    CTL_FLAG_RDMA_MASK;
			} else
				sgl = (struct ctl_sg_entry *)
					io->scsiio.kern_data_ptr;

			/* Append this batch of S/G entries to the list. */
			for (i = msg_info.dt.sent_sg_entries, j = 0;
			     i < (msg_info.dt.sent_sg_entries +
			     msg_info.dt.cur_sg_entries); i++, j++) {
				sgl[i].addr = msg_info.dt.sg_list[j].addr;
				sgl[i].len = msg_info.dt.sg_list[j].len;

#if 0
				printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n",
				       __func__,
				       msg_info.dt.sg_list[j].addr,
				       msg_info.dt.sg_list[j].len,
				       sgl[i].addr, sgl[i].len, j, i);
#endif
			}
#if 0
			memcpy(&sgl[msg_info.dt.sent_sg_entries],
			       msg_info.dt.sg_list,
			       sizeof(*sgl) * msg_info.dt.cur_sg_entries);
#endif

			/*
			 * If this is the last piece of the I/O, we've got
			 * the full S/G list.  Queue processing in the thread.
			 * Otherwise wait for the next piece.
			 */
			if (msg_info.dt.sg_last != 0)
				ctl_enqueue_isc(io);
			break;
		}
		/* Performed on the Serializing (primary) SC, XFER mode only */
		case CTL_MSG_DATAMOVE_DONE: {
			if (msg_info.hdr.serializing_sc == NULL) {
				printf("%s: serializing_sc == NULL!\n",
				       __func__);
				/* XXX KDM now what? */
				break;
			}
			/*
			 * We grab the sense information here in case
			 * there was a failure, so we can return status
			 * back to the initiator.
			 */
			io = msg_info.hdr.serializing_sc;
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
			io->io_hdr.status = msg_info.hdr.status;
			io->scsiio.scsi_status = msg_info.scsi.scsi_status;
			io->scsiio.sense_len = msg_info.scsi.sense_len;
			io->scsiio.sense_residual =msg_info.scsi.sense_residual;
			io->io_hdr.port_status = msg_info.scsi.fetd_status;
			io->scsiio.residual = msg_info.scsi.residual;
			memcpy(&io->scsiio.sense_data,&msg_info.scsi.sense_data,
			       sizeof(io->scsiio.sense_data));
			ctl_enqueue_isc(io);
			break;
		}

		/* Performed on Originating SC, SER_ONLY mode */
		case CTL_MSG_R2R:
			io = msg_info.hdr.original_sc;
			if (io == NULL) {
				printf("%s: Major Bummer\n", __func__);
				return;
			} else {
#if 0
				printf("pOrig %x\n",(int) ctsio);
#endif
			}
			io->io_hdr.msg_type = CTL_MSG_R2R;
			io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;
			ctl_enqueue_isc(io);
			break;

		/*
		 * Performed on Serializing(i.e. primary SC) SC in SER_ONLY
		 * mode.
		 * Performed on the Originating (i.e. secondary) SC in XFER
		 * mode
		 */
		case CTL_MSG_FINISH_IO:
			if (softc->ha_mode == CTL_HA_MODE_XFER)
				ctl_isc_handler_finish_xfer(softc,
							    &msg_info);
			else
				ctl_isc_handler_finish_ser_only(softc,
								&msg_info);
			break;

		/* Performed on Originating SC */
		case CTL_MSG_BAD_JUJU:
			io = msg_info.hdr.original_sc;
			if (io == NULL) {
				printf("%s: Bad JUJU!, original_sc is NULL!\n",
				       __func__);
				break;
			}
			ctl_copy_sense_data(&msg_info, io);
			/*
			 * IO should have already been cleaned up on other
			 * SC so clear this flag so we won't send a message
			 * back to finish the IO there.
			 */
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;

			/* io = msg_info.hdr.serializing_sc; */
			io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
			ctl_enqueue_isc(io);
			break;

		/* Handle resets sent from the other side */
		case CTL_MSG_MANAGE_TASKS: {
			struct ctl_taskio *taskio;
			taskio = (struct ctl_taskio *)ctl_alloc_io_nowait(
			    softc->othersc_pool);
			if (taskio == NULL) {
				printf("ctl_isc_event_handler: can't allocate "
				       "ctl_io!\n");
				/* Bad Juju */
				/* should I just call the proper reset func
				   here??? */
				goto bailout;
			}
			ctl_zero_io((union ctl_io *)taskio);
			taskio->io_hdr.io_type = CTL_IO_TASK;
			taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
			taskio->io_hdr.nexus = msg_info.hdr.nexus;
			taskio->task_action = msg_info.task.task_action;
			taskio->tag_num = msg_info.task.tag_num;
			taskio->tag_type = msg_info.task.tag_type;
#ifdef CTL_TIME_IO
			taskio->io_hdr.start_time = time_uptime;
			getbintime(&taskio->io_hdr.start_bt);
#if 0
			cs_prof_gettime(&taskio->io_hdr.start_ticks);
#endif
#endif /* CTL_TIME_IO */
			ctl_run_task((union ctl_io *)taskio);
			break;
		}
		/* Persistent Reserve action which needs attention */
		case CTL_MSG_PERS_ACTION:
			presio = (struct ctl_prio *)ctl_alloc_io_nowait(
			    softc->othersc_pool);
			if (presio == NULL) {
				printf("ctl_isc_event_handler: can't allocate "
				       "ctl_io!\n");
				/* Bad Juju */
				/* Need to set busy and send msg back */
				goto bailout;
			}
			ctl_zero_io((union ctl_io *)presio);
			presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
			presio->pr_msg = msg_info.pr;
			ctl_enqueue_isc((union ctl_io *)presio);
			break;
		case CTL_MSG_SYNC_FE:
			rcv_sync_msg = 1;
			break;
		default:
			printf("How did I get here?\n");
		}
	} else if (event == CTL_HA_EVT_MSG_SENT) {
		if (param != CTL_HA_STATUS_SUCCESS) {
			printf("Bad status from ctl_ha_msg_send status %d\n",
			       param);
		}
		return;
	} else if (event == CTL_HA_EVT_DISCONNECT) {
		printf("CTL: Got a disconnect from Isc\n");
		return;
	} else {
		printf("ctl_isc_event_handler: Unknown event %d\n", event);
		return;
	}

bailout:
	return;
}

/*
 * Copy SCSI status, sense data/length and overall status carried in an HA
 * message into the destination ctl_io.
 */
static void
ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
{
	struct scsi_sense_data *sense;

	sense = &dest->scsiio.sense_data;
	bcopy(&src->scsi.sense_data, sense, sizeof(*sense));
	dest->scsiio.scsi_status = src->scsi.scsi_status;
	dest->scsiio.sense_len = src->scsi.sense_len;
	dest->io_hdr.status = src->hdr.status;
}
#endif

/*
 * Establish (set) a unit attention condition for a single initiator.
 * The LUN lock must be held (asserted).  Ports with no allocated
 * pending_ua array are silently skipped.
 */
static void
ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
	ctl_ua_type *pu;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
	if (pu == NULL)
		return;
	pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua;
}

/*
 * Establish a unit attention condition for every initiator on every port,
 * except the initiator index 'except' (pass -1 to include all).
 * The LUN lock must be held (asserted).
 */
static void
ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
	int i, j;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	for (i = 0; i < CTL_MAX_PORTS; i++) {
		if (lun->pending_ua[i] == NULL)
			continue;
		for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
			if (i * CTL_MAX_INIT_PER_PORT + j == except)
				continue;
			lun->pending_ua[i][j] |= ua;
		}
	}
}

/*
 * Clear a unit attention condition for a single initiator.
 * The LUN lock must be held (asserted).
 */
static void
ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
	ctl_ua_type *pu;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
	if (pu == NULL)
		return;
	pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua;
}

/*
 * Clear a unit attention condition for all initiators except 'except'.
 * The LUN lock must be held (asserted).
 */
static void
ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
	int i, j;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	for (i = 0; i < CTL_MAX_PORTS; i++) {
		if (lun->pending_ua[i] == NULL)
			continue;
		for (j = 0; j <
CTL_MAX_INIT_PER_PORT; j++) { 1010 if (i * CTL_MAX_INIT_PER_PORT + j == except) 1011 continue; 1012 lun->pending_ua[i][j] &= ~ua; 1013 } 1014 } 1015} 1016 1017static void 1018ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx, 1019 ctl_ua_type ua_type) 1020{ 1021 struct ctl_lun *lun; 1022 1023 mtx_assert(&ctl_softc->ctl_lock, MA_OWNED); 1024 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) { 1025 mtx_lock(&lun->lun_lock); 1026 ctl_clr_ua(lun, initidx, ua_type); 1027 mtx_unlock(&lun->lun_lock); 1028 } 1029} 1030 1031static int 1032ctl_ha_state_sysctl(SYSCTL_HANDLER_ARGS) 1033{ 1034 struct ctl_softc *softc = (struct ctl_softc *)arg1; 1035 struct ctl_lun *lun; 1036 int error, value; 1037 1038 if (softc->flags & CTL_FLAG_ACTIVE_SHELF) 1039 value = 0; 1040 else 1041 value = 1; 1042 1043 error = sysctl_handle_int(oidp, &value, 0, req); 1044 if ((error != 0) || (req->newptr == NULL)) 1045 return (error); 1046 1047 mtx_lock(&softc->ctl_lock); 1048 if (value == 0) 1049 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1050 else 1051 softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; 1052 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1053 mtx_lock(&lun->lun_lock); 1054 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 1055 mtx_unlock(&lun->lun_lock); 1056 } 1057 mtx_unlock(&softc->ctl_lock); 1058 return (0); 1059} 1060 1061static int 1062ctl_init(void) 1063{ 1064 struct ctl_softc *softc; 1065 void *other_pool; 1066 int i, error, retval; 1067 //int isc_retval; 1068 1069 retval = 0; 1070 ctl_pause_rtr = 0; 1071 rcv_sync_msg = 0; 1072 1073 control_softc = malloc(sizeof(*control_softc), M_DEVBUF, 1074 M_WAITOK | M_ZERO); 1075 softc = control_softc; 1076 1077 softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, 1078 "cam/ctl"); 1079 1080 softc->dev->si_drv1 = softc; 1081 1082 /* 1083 * By default, return a "bad LUN" peripheral qualifier for unknown 1084 * LUNs. The user can override this default using the tunable or 1085 * sysctl. 
See the comment in ctl_inquiry_std() for more details.
	 */
	softc->inquiry_pq_no_lun = 1;
	TUNABLE_INT_FETCH("kern.cam.ctl.inquiry_pq_no_lun",
	    &softc->inquiry_pq_no_lun);
	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl",
	    CTLFLAG_RD, 0, "CAM Target Layer");

	if (softc->sysctl_tree == NULL) {
		/* Without the sysctl tree we cannot continue; undo what
		   was set up above and fail the load. */
		printf("%s: unable to allocate sysctl tree\n", __func__);
		destroy_dev(softc->dev);
		free(control_softc, M_DEVBUF);
		control_softc = NULL;
		return (ENOMEM);
	}

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
	    SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
	    "inquiry_pq_no_lun", CTLFLAG_RW,
	    &softc->inquiry_pq_no_lun, 0,
	    "Report no lun possible for invalid LUNs");

	mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
	/* One UMA zone supplies every union ctl_io the subsystem uses. */
	softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	softc->open_count = 0;

	/*
	 * Default to actually sending a SYNCHRONIZE CACHE command down to
	 * the drive.
	 */
	softc->flags = CTL_FLAG_REAL_SYNC;

	/*
	 * In Copan's HA scheme, the "master" and "slave" roles are
	 * figured out through the slot the controller is in.  Although it
	 * is an active/active system, someone has to be in charge.
	 */
	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0,
	    "HA head ID (0 - no HA)");
	if (softc->ha_id == 0) {
		/* ha_id 0 means no HA peer: this head is active and alone. */
		softc->flags |= CTL_FLAG_ACTIVE_SHELF;
		softc->is_single = 1;
		softc->port_offset = 0;
	} else
		softc->port_offset = (softc->ha_id - 1) * CTL_MAX_PORTS;
	softc->persis_offset = softc->port_offset * CTL_MAX_INIT_PER_PORT;

	STAILQ_INIT(&softc->lun_list);
	STAILQ_INIT(&softc->pending_lun_queue);
	STAILQ_INIT(&softc->fe_list);
	STAILQ_INIT(&softc->port_list);
	STAILQ_INIT(&softc->be_list);
	ctl_tpc_init(softc);

	if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC,
	    &other_pool) != 0)
	{
		printf("ctl: can't allocate %d entry other SC pool, "
		       "exiting\n", CTL_POOL_ENTRIES_OTHER_SC);
		return (ENOMEM);
	}
	softc->othersc_pool = other_pool;

	/* Default thread count scales with CPUs, clamped to the maximum. */
	if (worker_threads <= 0)
		worker_threads = max(1, mp_ncpus / 4);
	if (worker_threads > CTL_MAX_THREADS)
		worker_threads = CTL_MAX_THREADS;

	for (i = 0; i < worker_threads; i++) {
		struct ctl_thread *thr = &softc->threads[i];

		mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF);
		thr->ctl_softc = softc;
		STAILQ_INIT(&thr->incoming_queue);
		STAILQ_INIT(&thr->rtr_queue);
		STAILQ_INIT(&thr->done_queue);
		STAILQ_INIT(&thr->isc_queue);

		error = kproc_kthread_add(ctl_work_thread, thr,
		    &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i);
		if (error != 0) {
			printf("error creating CTL work thread!\n");
			ctl_pool_free(other_pool);
			return (error);
		}
	}
	error = kproc_kthread_add(ctl_lun_thread, softc,
	    &softc->ctl_proc, NULL, 0, 0, "ctl", "lun");
	if (error != 0) {
		printf("error creating CTL lun thread!\n");
		ctl_pool_free(other_pool);
		return (error);
	}
	error = kproc_kthread_add(ctl_thresh_thread, softc,
	    &softc->ctl_proc, NULL, 0, 0, "ctl", "thresh");
	if (error != 0) {
		printf("error creating CTL threshold thread!\n");
		ctl_pool_free(other_pool);
		return (error);
	}

	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "ha_state", CTLTYPE_INT | CTLFLAG_RWTUN,
	    softc, 0, ctl_ha_state_sysctl, "I", "HA state for this head");

#ifdef CTL_IO_DELAY
	if (sizeof(struct callout) > CTL_TIMER_BYTES) {
		printf("sizeof(struct callout) %zd > CTL_TIMER_BYTES %zd\n",
		       sizeof(struct callout), CTL_TIMER_BYTES);
		return (EINVAL);
	}
#endif /* CTL_IO_DELAY */

	return (0);
}

/*
 * Module unload-time teardown: free every LUN, destroy the I/O zone, the
 * global lock, the device node and the sysctl tree, then release the
 * softc itself.  Mirrors ctl_init() in reverse order.
 */
void
ctl_shutdown(void)
{
	struct ctl_softc *softc;
	struct ctl_lun *lun, *next_lun;

	softc = (struct ctl_softc *)control_softc;

	mtx_lock(&softc->ctl_lock);

	/*
	 * Free up each LUN.
	 */
	for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){
		/* Grab the next pointer first; ctl_free_lun() frees 'lun'. */
		next_lun = STAILQ_NEXT(lun, links);
		ctl_free_lun(lun);
	}

	mtx_unlock(&softc->ctl_lock);

#if 0
	ctl_shutdown_thread(softc->work_thread);
	mtx_destroy(&softc->queue_lock);
#endif

	ctl_tpc_shutdown(softc);
	uma_zdestroy(softc->io_zone);
	mtx_destroy(&softc->ctl_lock);

	destroy_dev(softc->dev);

	sysctl_ctx_free(&softc->sysctl_ctx);

	free(control_softc, M_DEVBUF);
	control_softc = NULL;
}

/*
 * Kernel module event handler: initialize on load, refuse unload (the
 * subsystem does not support being torn down via kldunload).
 */
static int
ctl_module_event_handler(module_t mod, int what, void *arg)
{

	switch (what) {
	case MOD_LOAD:
		return (ctl_init());
	case MOD_UNLOAD:
		return (EBUSY);
	default:
		return (EOPNOTSUPP);
	}
}

/*
 * XXX KDM should we do some access checks here?  Bump a reference count to
 * prevent a CTL module from being unloaded while someone has it open?
 */
static int
ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	/* No per-open state; always succeeds. */
	return (0);
}

static int
ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	/* Nothing to release; always succeeds. */
	return (0);
}

/*
 * Bring online every frontend port whose type matches 'port_type'.  In HA
 * mode, first exchange a CTL_MSG_SYNC_FE message with the peer controller
 * so both heads enable their frontends together.  Always returns 0.
 */
int
ctl_port_enable(ctl_port_type port_type)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_port *port;

	if (softc->is_single == 0) {
		union ctl_ha_msg msg_info;
		int isc_retval;

#if 0
		printf("%s: HA mode, synchronizing frontend enable\n",
		        __func__);
#endif
		msg_info.hdr.msg_type = CTL_MSG_SYNC_FE;
		if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		        sizeof(msg_info), 1 )) > CTL_HA_STATUS_SUCCESS) {
			printf("Sync msg send error retval %d\n", isc_retval);
		}
		/* Wait for the peer's sync message unless one already came. */
		if (!rcv_sync_msg) {
			isc_retval=ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
			        sizeof(msg_info), 1);
		}
#if 0
	        printf("CTL:Frontend Enable\n");
	} else {
		printf("%s: single mode, skipping frontend synchronization\n",
		        __func__);
#endif
	}

	STAILQ_FOREACH(port, &softc->port_list, links) {
		if (port_type & port->port_type)
		{
#if 0
			printf("port %d\n", port->targ_port);
#endif
			ctl_port_online(port);
		}
	}

	return (0);
}

/*
 * Take offline every frontend port whose type matches 'port_type'.
 * Always returns 0.
 */
int
ctl_port_disable(ctl_port_type port_type)
{
	struct ctl_softc *softc;
	struct ctl_port *port;

	softc = control_softc;

	STAILQ_FOREACH(port, &softc->port_list, links) {
		if (port_type & port->port_type)
			ctl_port_offline(port);
	}

	return (0);
}

/*
 * Returns 0 for success, 1 for failure.
 * Currently the only failure mode is if there aren't enough entries
 * allocated.  So, in case of a failure, look at num_entries_dropped,
 * reallocate and try again.
 */
int
ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced,
	      int *num_entries_filled, int *num_entries_dropped,
	      ctl_port_type port_type, int no_virtual)
{
	struct ctl_softc *softc;
	struct ctl_port *port;
	int entries_dropped, entries_filled;
	int retval;
	int i;

	softc = control_softc;

	retval = 0;
	entries_filled = 0;
	entries_dropped = 0;

	i = 0;
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(port, &softc->port_list, links) {
		struct ctl_port_entry *entry;

		/* Skip ports whose type doesn't match the requested mask. */
		if ((port->port_type & port_type) == 0)
			continue;

		/* Optionally skip virtual ports. */
		if ((no_virtual != 0)
		 && (port->virtual_port != 0))
			continue;

		/* Out of caller-provided slots: just count the overflow. */
		if (entries_filled >= num_entries_alloced) {
			entries_dropped++;
			continue;
		}
		entry = &entries[i];

		entry->port_type = port->port_type;
		strlcpy(entry->port_name, port->port_name,
		    sizeof(entry->port_name));
		entry->physical_port = port->physical_port;
		entry->virtual_port = port->virtual_port;
		entry->wwnn = port->wwnn;
		entry->wwpn = port->wwpn;

		i++;
		entries_filled++;
	}

	mtx_unlock(&softc->ctl_lock);

	if (entries_dropped > 0)
		retval = 1;

	*num_entries_dropped = entries_dropped;
	*num_entries_filled = entries_filled;

	return (retval);
}

/*
 * Remove an initiator by port number and initiator ID.
 * Returns 0 for success, -1 for failure.
1399 */ 1400int 1401ctl_remove_initiator(struct ctl_port *port, int iid) 1402{ 1403 struct ctl_softc *softc = control_softc; 1404 1405 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 1406 1407 if (iid > CTL_MAX_INIT_PER_PORT) { 1408 printf("%s: initiator ID %u > maximun %u!\n", 1409 __func__, iid, CTL_MAX_INIT_PER_PORT); 1410 return (-1); 1411 } 1412 1413 mtx_lock(&softc->ctl_lock); 1414 port->wwpn_iid[iid].in_use--; 1415 port->wwpn_iid[iid].last_use = time_uptime; 1416 mtx_unlock(&softc->ctl_lock); 1417 1418 return (0); 1419} 1420 1421/* 1422 * Add an initiator to the initiator map. 1423 * Returns iid for success, < 0 for failure. 1424 */ 1425int 1426ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name) 1427{ 1428 struct ctl_softc *softc = control_softc; 1429 time_t best_time; 1430 int i, best; 1431 1432 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 1433 1434 if (iid >= CTL_MAX_INIT_PER_PORT) { 1435 printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n", 1436 __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT); 1437 free(name, M_CTL); 1438 return (-1); 1439 } 1440 1441 mtx_lock(&softc->ctl_lock); 1442 1443 if (iid < 0 && (wwpn != 0 || name != NULL)) { 1444 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1445 if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) { 1446 iid = i; 1447 break; 1448 } 1449 if (name != NULL && port->wwpn_iid[i].name != NULL && 1450 strcmp(name, port->wwpn_iid[i].name) == 0) { 1451 iid = i; 1452 break; 1453 } 1454 } 1455 } 1456 1457 if (iid < 0) { 1458 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1459 if (port->wwpn_iid[i].in_use == 0 && 1460 port->wwpn_iid[i].wwpn == 0 && 1461 port->wwpn_iid[i].name == NULL) { 1462 iid = i; 1463 break; 1464 } 1465 } 1466 } 1467 1468 if (iid < 0) { 1469 best = -1; 1470 best_time = INT32_MAX; 1471 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1472 if (port->wwpn_iid[i].in_use == 0) { 1473 if (port->wwpn_iid[i].last_use < best_time) { 1474 best = i; 1475 best_time = port->wwpn_iid[i].last_use; 1476 } 1477 
} 1478 } 1479 iid = best; 1480 } 1481 1482 if (iid < 0) { 1483 mtx_unlock(&softc->ctl_lock); 1484 free(name, M_CTL); 1485 return (-2); 1486 } 1487 1488 if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) { 1489 /* 1490 * This is not an error yet. 1491 */ 1492 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { 1493#if 0 1494 printf("%s: port %d iid %u WWPN %#jx arrived" 1495 " again\n", __func__, port->targ_port, 1496 iid, (uintmax_t)wwpn); 1497#endif 1498 goto take; 1499 } 1500 if (name != NULL && port->wwpn_iid[iid].name != NULL && 1501 strcmp(name, port->wwpn_iid[iid].name) == 0) { 1502#if 0 1503 printf("%s: port %d iid %u name '%s' arrived" 1504 " again\n", __func__, port->targ_port, 1505 iid, name); 1506#endif 1507 goto take; 1508 } 1509 1510 /* 1511 * This is an error, but what do we do about it? The 1512 * driver is telling us we have a new WWPN for this 1513 * initiator ID, so we pretty much need to use it. 1514 */ 1515 printf("%s: port %d iid %u WWPN %#jx '%s' arrived," 1516 " but WWPN %#jx '%s' is still at that address\n", 1517 __func__, port->targ_port, iid, wwpn, name, 1518 (uintmax_t)port->wwpn_iid[iid].wwpn, 1519 port->wwpn_iid[iid].name); 1520 1521 /* 1522 * XXX KDM clear have_ca and ua_pending on each LUN for 1523 * this initiator. 
	 */
	}
take:
	/* Install (or refresh) the slot: old name is freed, counts bumped. */
	free(port->wwpn_iid[iid].name, M_CTL);
	port->wwpn_iid[iid].name = name;
	port->wwpn_iid[iid].wwpn = wwpn;
	port->wwpn_iid[iid].in_use++;
	mtx_unlock(&softc->ctl_lock);

	return (iid);
}

/*
 * Build a SCSI TransportID descriptor for initiator 'iid' of 'port' into
 * 'buf', choosing the format from the port's transport type (FC, iSCSI,
 * SAS, or SPI as the fallback).  Returns the number of bytes written, or
 * 0 if the initiator has no identifier of the needed kind.
 */
static int
ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf)
{
	int len;

	switch (port->port_type) {
	case CTL_PORT_FC:
	{
		struct scsi_transportid_fcp *id =
		    (struct scsi_transportid_fcp *)buf;
		if (port->wwpn_iid[iid].wwpn == 0)
			return (0);
		memset(id, 0, sizeof(*id));
		id->format_protocol = SCSI_PROTO_FC;
		scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name);
		return (sizeof(*id));
	}
	case CTL_PORT_ISCSI:
	{
		struct scsi_transportid_iscsi_port *id =
		    (struct scsi_transportid_iscsi_port *)buf;
		if (port->wwpn_iid[iid].name == NULL)
			return (0);
		memset(id, 0, 256);
		id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT |
		    SCSI_PROTO_ISCSI;
		/* Name is NUL-terminated, then padded to a 4-byte multiple. */
		len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1;
		len = roundup2(min(len, 252), 4);
		scsi_ulto2b(len, id->additional_length);
		return (sizeof(*id) + len);
	}
	case CTL_PORT_SAS:
	{
		struct scsi_transportid_sas *id =
		    (struct scsi_transportid_sas *)buf;
		if (port->wwpn_iid[iid].wwpn == 0)
			return (0);
		memset(id, 0, sizeof(*id));
		id->format_protocol = SCSI_PROTO_SAS;
		scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address);
		return (sizeof(*id));
	}
	default:
	{
		struct scsi_transportid_spi *id =
		    (struct scsi_transportid_spi *)buf;
		memset(id, 0, sizeof(*id));
		id->format_protocol = SCSI_PROTO_SPI;
		scsi_ulto2b(iid, id->scsi_addr);
		scsi_ulto2b(port->targ_port, id->rel_trgt_port_id);
		return (sizeof(*id));
	}
	}
}

/*
 * Serialize a command that went down the "wrong" side, and so was sent to
 * this controller for execution.
The logic is a little different than the
 * standard case in ctl_scsiio_precheck().  Errors in this case need to get
 * sent back to the other side, but in the success case, we execute the
 * command on this side (XFER mode) or tell the other side to execute it
 * (SER_ONLY mode).
 */
static int
ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc;
	union ctl_ha_msg msg_info;
	struct ctl_lun *lun;
	int retval = 0;
	uint32_t targ_lun;

	softc = control_softc;

	targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
	lun = softc->ctl_luns[targ_lun];
	if (lun==NULL)
	{
		/*
		 * Why isn't LUN defined? The other side wouldn't
		 * send a cmd if the LUN is undefined.
		 */
		printf("%s: Bad JUJU!, LUN is NULL!\n", __func__);

		/* "Logical unit not supported" */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
				   /*asc*/ 0x25,
				   /*ascq*/ 0x00,
				   SSD_ELEM_NONE);

		/* Report CHECK CONDITION with autosense back to the peer. */
		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		/* NOTE(review): send failures are silently ignored here. */
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
		}
		return(1);

	}

	mtx_lock(&lun->lun_lock);
	TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);

	/* Check the new command against everything already in flight. */
	switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
		(union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq,
		 ooa_links))) {
	case CTL_ACTION_BLOCK:
		/* Must wait for an earlier command; park on blocked_queue. */
		ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
		TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
				  blocked_links);
		break;
	case CTL_ACTION_PASS:
	case CTL_ACTION_SKIP:
		if (softc->ha_mode == CTL_HA_MODE_XFER) {
			/* XFER mode: run the command locally. */
			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
			ctl_enqueue_rtr((union ctl_io *)ctsio);
		} else {

			/* send msg back to other side */
			msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
			msg_info.hdr.serializing_sc = (union ctl_io *)ctsio;
			msg_info.hdr.msg_type = CTL_MSG_R2R;
#if 0
			printf("2. pOrig %x\n", (int)msg_info.hdr.original_sc);
#endif
			if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
			    sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
			}
		}
		break;
	case CTL_ACTION_OVERLAP:
		/* OVERLAPPED COMMANDS ATTEMPTED */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
				   /*asc*/ 0x4E,
				   /*ascq*/ 0x00,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
#if 0
		printf("BAD JUJU:Major Bummer Overlap\n");
#endif
		/* The command never runs; take it back off the OOA queue. */
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		retval = 1;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
		}
		break;
	case CTL_ACTION_OVERLAP_TAG:
		/* TAGGED OVERLAPPED COMMANDS (NN = QUEUE TAG) */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
				   /*asc*/ 0x4D,
				   /*ascq*/ ctsio->tag_num & 0xff,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
#if 0
		printf("BAD JUJU:Major Bummer Overlap Tag\n");
#endif
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		retval = 1;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
		}
		break;
	case CTL_ACTION_ERROR:
	default:
		/* "Internal target failure" */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
				   /*asc*/ 0x44,
				   /*ascq*/ 0x00,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
#if 0
		printf("BAD JUJU:Major Bummer HW Error\n");
#endif
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		retval = 1;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) {
		}
		break;
	}
	mtx_unlock(&lun->lun_lock);
	return (retval);
}

/*
 * Returns 0 for success, errno for failure.
1759 */ 1760static int 1761ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 1762 struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries) 1763{ 1764 union ctl_io *io; 1765 int retval; 1766 1767 retval = 0; 1768 1769 mtx_lock(&lun->lun_lock); 1770 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL); 1771 (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 1772 ooa_links)) { 1773 struct ctl_ooa_entry *entry; 1774 1775 /* 1776 * If we've got more than we can fit, just count the 1777 * remaining entries. 1778 */ 1779 if (*cur_fill_num >= ooa_hdr->alloc_num) 1780 continue; 1781 1782 entry = &kern_entries[*cur_fill_num]; 1783 1784 entry->tag_num = io->scsiio.tag_num; 1785 entry->lun_num = lun->lun; 1786#ifdef CTL_TIME_IO 1787 entry->start_bt = io->io_hdr.start_bt; 1788#endif 1789 bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); 1790 entry->cdb_len = io->scsiio.cdb_len; 1791 if (io->io_hdr.flags & CTL_FLAG_BLOCKED) 1792 entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; 1793 1794 if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) 1795 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; 1796 1797 if (io->io_hdr.flags & CTL_FLAG_ABORT) 1798 entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; 1799 1800 if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) 1801 entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; 1802 1803 if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) 1804 entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; 1805 } 1806 mtx_unlock(&lun->lun_lock); 1807 1808 return (retval); 1809} 1810 1811static void * 1812ctl_copyin_alloc(void *user_addr, int len, char *error_str, 1813 size_t error_str_len) 1814{ 1815 void *kptr; 1816 1817 kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO); 1818 1819 if (copyin(user_addr, kptr, len) != 0) { 1820 snprintf(error_str, error_str_len, "Error copying %d bytes " 1821 "from user address %p to kernel address %p", len, 1822 user_addr, kptr); 1823 free(kptr, M_CTL); 1824 return (NULL); 1825 } 1826 1827 return (kptr); 1828} 1829 1830static void 
1831ctl_free_args(int num_args, struct ctl_be_arg *args) 1832{ 1833 int i; 1834 1835 if (args == NULL) 1836 return; 1837 1838 for (i = 0; i < num_args; i++) { 1839 free(args[i].kname, M_CTL); 1840 free(args[i].kvalue, M_CTL); 1841 } 1842 1843 free(args, M_CTL); 1844} 1845 1846static struct ctl_be_arg * 1847ctl_copyin_args(int num_args, struct ctl_be_arg *uargs, 1848 char *error_str, size_t error_str_len) 1849{ 1850 struct ctl_be_arg *args; 1851 int i; 1852 1853 args = ctl_copyin_alloc(uargs, num_args * sizeof(*args), 1854 error_str, error_str_len); 1855 1856 if (args == NULL) 1857 goto bailout; 1858 1859 for (i = 0; i < num_args; i++) { 1860 args[i].kname = NULL; 1861 args[i].kvalue = NULL; 1862 } 1863 1864 for (i = 0; i < num_args; i++) { 1865 uint8_t *tmpptr; 1866 1867 args[i].kname = ctl_copyin_alloc(args[i].name, 1868 args[i].namelen, error_str, error_str_len); 1869 if (args[i].kname == NULL) 1870 goto bailout; 1871 1872 if (args[i].kname[args[i].namelen - 1] != '\0') { 1873 snprintf(error_str, error_str_len, "Argument %d " 1874 "name is not NUL-terminated", i); 1875 goto bailout; 1876 } 1877 1878 if (args[i].flags & CTL_BEARG_RD) { 1879 tmpptr = ctl_copyin_alloc(args[i].value, 1880 args[i].vallen, error_str, error_str_len); 1881 if (tmpptr == NULL) 1882 goto bailout; 1883 if ((args[i].flags & CTL_BEARG_ASCII) 1884 && (tmpptr[args[i].vallen - 1] != '\0')) { 1885 snprintf(error_str, error_str_len, "Argument " 1886 "%d value is not NUL-terminated", i); 1887 goto bailout; 1888 } 1889 args[i].kvalue = tmpptr; 1890 } else { 1891 args[i].kvalue = malloc(args[i].vallen, 1892 M_CTL, M_WAITOK | M_ZERO); 1893 } 1894 } 1895 1896 return (args); 1897bailout: 1898 1899 ctl_free_args(num_args, args); 1900 1901 return (NULL); 1902} 1903 1904static void 1905ctl_copyout_args(int num_args, struct ctl_be_arg *args) 1906{ 1907 int i; 1908 1909 for (i = 0; i < num_args; i++) { 1910 if (args[i].flags & CTL_BEARG_WR) 1911 copyout(args[i].kvalue, args[i].value, args[i].vallen); 1912 } 
1913} 1914 1915/* 1916 * Escape characters that are illegal or not recommended in XML. 1917 */ 1918int 1919ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size) 1920{ 1921 char *end = str + size; 1922 int retval; 1923 1924 retval = 0; 1925 1926 for (; *str && str < end; str++) { 1927 switch (*str) { 1928 case '&': 1929 retval = sbuf_printf(sb, "&"); 1930 break; 1931 case '>': 1932 retval = sbuf_printf(sb, ">"); 1933 break; 1934 case '<': 1935 retval = sbuf_printf(sb, "<"); 1936 break; 1937 default: 1938 retval = sbuf_putc(sb, *str); 1939 break; 1940 } 1941 1942 if (retval != 0) 1943 break; 1944 1945 } 1946 1947 return (retval); 1948} 1949 1950static void 1951ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb) 1952{ 1953 struct scsi_vpd_id_descriptor *desc; 1954 int i; 1955 1956 if (id == NULL || id->len < 4) 1957 return; 1958 desc = (struct scsi_vpd_id_descriptor *)id->data; 1959 switch (desc->id_type & SVPD_ID_TYPE_MASK) { 1960 case SVPD_ID_TYPE_T10: 1961 sbuf_printf(sb, "t10."); 1962 break; 1963 case SVPD_ID_TYPE_EUI64: 1964 sbuf_printf(sb, "eui."); 1965 break; 1966 case SVPD_ID_TYPE_NAA: 1967 sbuf_printf(sb, "naa."); 1968 break; 1969 case SVPD_ID_TYPE_SCSI_NAME: 1970 break; 1971 } 1972 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { 1973 case SVPD_ID_CODESET_BINARY: 1974 for (i = 0; i < desc->length; i++) 1975 sbuf_printf(sb, "%02x", desc->identifier[i]); 1976 break; 1977 case SVPD_ID_CODESET_ASCII: 1978 sbuf_printf(sb, "%.*s", (int)desc->length, 1979 (char *)desc->identifier); 1980 break; 1981 case SVPD_ID_CODESET_UTF8: 1982 sbuf_printf(sb, "%s", (char *)desc->identifier); 1983 break; 1984 } 1985} 1986 1987static int 1988ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 1989 struct thread *td) 1990{ 1991 struct ctl_softc *softc; 1992 int retval; 1993 1994 softc = control_softc; 1995 1996 retval = 0; 1997 1998 switch (cmd) { 1999 case CTL_IO: 2000 retval = ctl_ioctl_io(dev, cmd, addr, flag, td); 2001 break; 2002 case CTL_ENABLE_PORT: 2003 
	case CTL_DISABLE_PORT:
	case CTL_SET_PORT_WWNS: {
		struct ctl_port *port;
		struct ctl_port_entry *entry;

		entry = (struct ctl_port_entry *)addr;

		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(port, &softc->port_list, links) {
			int action, done;

			action = 0;
			done = 0;

			if ((entry->port_type == CTL_PORT_NONE)
			 && (entry->targ_port == port->targ_port)) {
				/*
				 * If the user only wants to enable or
				 * disable or set WWNs on a specific port,
				 * do the operation and we're done.
				 */
				action = 1;
				done = 1;
			} else if (entry->port_type & port->port_type) {
				/*
				 * Compare the user's type mask with the
				 * particular frontend type to see if we
				 * have a match.
				 */
				action = 1;
				done = 0;

				/*
				 * Make sure the user isn't trying to set
				 * WWNs on multiple ports at the same time.
				 */
				if (cmd == CTL_SET_PORT_WWNS) {
					printf("%s: Can't set WWNs on "
					       "multiple ports\n", __func__);
					retval = EINVAL;
					break;
				}
			}
			if (action != 0) {
				/*
				 * XXX KDM we have to drop the lock here,
				 * because the online/offline operations
				 * can potentially block.  We need to
				 * reference count the frontends so they
				 * can't go away,
				 */
				mtx_unlock(&softc->ctl_lock);

				if (cmd == CTL_ENABLE_PORT) {
					ctl_port_online(port);
				} else if (cmd == CTL_DISABLE_PORT) {
					ctl_port_offline(port);
				}

				mtx_lock(&softc->ctl_lock);

				if (cmd == CTL_SET_PORT_WWNS)
					ctl_port_set_wwns(port,
					    (entry->flags & CTL_PORT_WWNN_VALID) ?
					    1 : 0, entry->wwnn,
					    (entry->flags & CTL_PORT_WWPN_VALID) ?
					    1 : 0, entry->wwpn);
			}
			if (done != 0)
				break;
		}
		mtx_unlock(&softc->ctl_lock);
		break;
	}
	case CTL_GET_PORT_LIST: {
		struct ctl_port *port;
		struct ctl_port_list *list;
		int i;

		list = (struct ctl_port_list *)addr;

		/* The user buffer must be exactly alloc_num entries long. */
		if (list->alloc_len != (list->alloc_num *
		    sizeof(struct ctl_port_entry))) {
			printf("%s: CTL_GET_PORT_LIST: alloc_len %u != "
			       "alloc_num %u * sizeof(struct ctl_port_entry) "
			       "%zu\n", __func__, list->alloc_len,
			       list->alloc_num, sizeof(struct ctl_port_entry));
			retval = EINVAL;
			break;
		}
		list->fill_len = 0;
		list->fill_num = 0;
		list->dropped_num = 0;
		i = 0;
		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(port, &softc->port_list, links) {
			struct ctl_port_entry entry, *list_entry;

			/* Out of room: count the overflow, keep going. */
			if (list->fill_num >= list->alloc_num) {
				list->dropped_num++;
				continue;
			}

			entry.port_type = port->port_type;
			strlcpy(entry.port_name, port->port_name,
			    sizeof(entry.port_name));
			entry.targ_port = port->targ_port;
			entry.physical_port = port->physical_port;
			entry.virtual_port = port->virtual_port;
			entry.wwnn = port->wwnn;
			entry.wwpn = port->wwpn;
			if (port->status & CTL_PORT_STATUS_ONLINE)
				entry.online = 1;
			else
				entry.online = 0;

			list_entry = &list->entries[i];

			/*
			 * NOTE(review): copyout while holding ctl_lock can
			 * fault/sleep; existing behavior is preserved here.
			 */
			retval = copyout(&entry, list_entry, sizeof(entry));
			if (retval != 0) {
				printf("%s: CTL_GET_PORT_LIST: copyout "
				       "returned %d\n", __func__, retval);
				break;
			}
			i++;
			list->fill_num++;
			list->fill_len += sizeof(entry);
		}
		mtx_unlock(&softc->ctl_lock);

		/*
		 * If this is non-zero, we had a copyout fault, so there's
		 * probably no point in attempting to set the status inside
		 * the structure.
2137 */ 2138 if (retval != 0) 2139 break; 2140 2141 if (list->dropped_num > 0) 2142 list->status = CTL_PORT_LIST_NEED_MORE_SPACE; 2143 else 2144 list->status = CTL_PORT_LIST_OK; 2145 break; 2146 } 2147 case CTL_DUMP_OOA: { 2148 struct ctl_lun *lun; 2149 union ctl_io *io; 2150 char printbuf[128]; 2151 struct sbuf sb; 2152 2153 mtx_lock(&softc->ctl_lock); 2154 printf("Dumping OOA queues:\n"); 2155 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2156 mtx_lock(&lun->lun_lock); 2157 for (io = (union ctl_io *)TAILQ_FIRST( 2158 &lun->ooa_queue); io != NULL; 2159 io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2160 ooa_links)) { 2161 sbuf_new(&sb, printbuf, sizeof(printbuf), 2162 SBUF_FIXEDLEN); 2163 sbuf_printf(&sb, "LUN %jd tag 0x%04x%s%s%s%s: ", 2164 (intmax_t)lun->lun, 2165 io->scsiio.tag_num, 2166 (io->io_hdr.flags & 2167 CTL_FLAG_BLOCKED) ? "" : " BLOCKED", 2168 (io->io_hdr.flags & 2169 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 2170 (io->io_hdr.flags & 2171 CTL_FLAG_ABORT) ? " ABORT" : "", 2172 (io->io_hdr.flags & 2173 CTL_FLAG_IS_WAS_ON_RTR) ? 
" RTR" : ""); 2174 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 2175 sbuf_finish(&sb); 2176 printf("%s\n", sbuf_data(&sb)); 2177 } 2178 mtx_unlock(&lun->lun_lock); 2179 } 2180 printf("OOA queues dump done\n"); 2181 mtx_unlock(&softc->ctl_lock); 2182 break; 2183 } 2184 case CTL_GET_OOA: { 2185 struct ctl_lun *lun; 2186 struct ctl_ooa *ooa_hdr; 2187 struct ctl_ooa_entry *entries; 2188 uint32_t cur_fill_num; 2189 2190 ooa_hdr = (struct ctl_ooa *)addr; 2191 2192 if ((ooa_hdr->alloc_len == 0) 2193 || (ooa_hdr->alloc_num == 0)) { 2194 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2195 "must be non-zero\n", __func__, 2196 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2197 retval = EINVAL; 2198 break; 2199 } 2200 2201 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2202 sizeof(struct ctl_ooa_entry))) { 2203 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2204 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2205 __func__, ooa_hdr->alloc_len, 2206 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2207 retval = EINVAL; 2208 break; 2209 } 2210 2211 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2212 if (entries == NULL) { 2213 printf("%s: could not allocate %d bytes for OOA " 2214 "dump\n", __func__, ooa_hdr->alloc_len); 2215 retval = ENOMEM; 2216 break; 2217 } 2218 2219 mtx_lock(&softc->ctl_lock); 2220 if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0) 2221 && ((ooa_hdr->lun_num >= CTL_MAX_LUNS) 2222 || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) { 2223 mtx_unlock(&softc->ctl_lock); 2224 free(entries, M_CTL); 2225 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2226 __func__, (uintmax_t)ooa_hdr->lun_num); 2227 retval = EINVAL; 2228 break; 2229 } 2230 2231 cur_fill_num = 0; 2232 2233 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2234 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2235 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2236 ooa_hdr, entries); 2237 if (retval != 0) 2238 break; 2239 } 2240 if (retval != 0) { 2241 
mtx_unlock(&softc->ctl_lock); 2242 free(entries, M_CTL); 2243 break; 2244 } 2245 } else { 2246 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2247 2248 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,ooa_hdr, 2249 entries); 2250 } 2251 mtx_unlock(&softc->ctl_lock); 2252 2253 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2254 ooa_hdr->fill_len = ooa_hdr->fill_num * 2255 sizeof(struct ctl_ooa_entry); 2256 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2257 if (retval != 0) { 2258 printf("%s: error copying out %d bytes for OOA dump\n", 2259 __func__, ooa_hdr->fill_len); 2260 } 2261 2262 getbintime(&ooa_hdr->cur_bt); 2263 2264 if (cur_fill_num > ooa_hdr->alloc_num) { 2265 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2266 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2267 } else { 2268 ooa_hdr->dropped_num = 0; 2269 ooa_hdr->status = CTL_OOA_OK; 2270 } 2271 2272 free(entries, M_CTL); 2273 break; 2274 } 2275 case CTL_CHECK_OOA: { 2276 union ctl_io *io; 2277 struct ctl_lun *lun; 2278 struct ctl_ooa_info *ooa_info; 2279 2280 2281 ooa_info = (struct ctl_ooa_info *)addr; 2282 2283 if (ooa_info->lun_id >= CTL_MAX_LUNS) { 2284 ooa_info->status = CTL_OOA_INVALID_LUN; 2285 break; 2286 } 2287 mtx_lock(&softc->ctl_lock); 2288 lun = softc->ctl_luns[ooa_info->lun_id]; 2289 if (lun == NULL) { 2290 mtx_unlock(&softc->ctl_lock); 2291 ooa_info->status = CTL_OOA_INVALID_LUN; 2292 break; 2293 } 2294 mtx_lock(&lun->lun_lock); 2295 mtx_unlock(&softc->ctl_lock); 2296 ooa_info->num_entries = 0; 2297 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 2298 io != NULL; io = (union ctl_io *)TAILQ_NEXT( 2299 &io->io_hdr, ooa_links)) { 2300 ooa_info->num_entries++; 2301 } 2302 mtx_unlock(&lun->lun_lock); 2303 2304 ooa_info->status = CTL_OOA_SUCCESS; 2305 2306 break; 2307 } 2308 case CTL_DELAY_IO: { 2309 struct ctl_io_delay_info *delay_info; 2310#ifdef CTL_IO_DELAY 2311 struct ctl_lun *lun; 2312#endif /* CTL_IO_DELAY */ 2313 2314 delay_info = (struct 
ctl_io_delay_info *)addr; 2315 2316#ifdef CTL_IO_DELAY 2317 mtx_lock(&softc->ctl_lock); 2318 2319 if ((delay_info->lun_id >= CTL_MAX_LUNS) 2320 || (softc->ctl_luns[delay_info->lun_id] == NULL)) { 2321 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2322 } else { 2323 lun = softc->ctl_luns[delay_info->lun_id]; 2324 mtx_lock(&lun->lun_lock); 2325 2326 delay_info->status = CTL_DELAY_STATUS_OK; 2327 2328 switch (delay_info->delay_type) { 2329 case CTL_DELAY_TYPE_CONT: 2330 break; 2331 case CTL_DELAY_TYPE_ONESHOT: 2332 break; 2333 default: 2334 delay_info->status = 2335 CTL_DELAY_STATUS_INVALID_TYPE; 2336 break; 2337 } 2338 2339 switch (delay_info->delay_loc) { 2340 case CTL_DELAY_LOC_DATAMOVE: 2341 lun->delay_info.datamove_type = 2342 delay_info->delay_type; 2343 lun->delay_info.datamove_delay = 2344 delay_info->delay_secs; 2345 break; 2346 case CTL_DELAY_LOC_DONE: 2347 lun->delay_info.done_type = 2348 delay_info->delay_type; 2349 lun->delay_info.done_delay = 2350 delay_info->delay_secs; 2351 break; 2352 default: 2353 delay_info->status = 2354 CTL_DELAY_STATUS_INVALID_LOC; 2355 break; 2356 } 2357 mtx_unlock(&lun->lun_lock); 2358 } 2359 2360 mtx_unlock(&softc->ctl_lock); 2361#else 2362 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2363#endif /* CTL_IO_DELAY */ 2364 break; 2365 } 2366 case CTL_REALSYNC_SET: { 2367 int *syncstate; 2368 2369 syncstate = (int *)addr; 2370 2371 mtx_lock(&softc->ctl_lock); 2372 switch (*syncstate) { 2373 case 0: 2374 softc->flags &= ~CTL_FLAG_REAL_SYNC; 2375 break; 2376 case 1: 2377 softc->flags |= CTL_FLAG_REAL_SYNC; 2378 break; 2379 default: 2380 retval = EINVAL; 2381 break; 2382 } 2383 mtx_unlock(&softc->ctl_lock); 2384 break; 2385 } 2386 case CTL_REALSYNC_GET: { 2387 int *syncstate; 2388 2389 syncstate = (int*)addr; 2390 2391 mtx_lock(&softc->ctl_lock); 2392 if (softc->flags & CTL_FLAG_REAL_SYNC) 2393 *syncstate = 1; 2394 else 2395 *syncstate = 0; 2396 mtx_unlock(&softc->ctl_lock); 2397 2398 break; 2399 } 2400 case 
CTL_SETSYNC: 2401 case CTL_GETSYNC: { 2402 struct ctl_sync_info *sync_info; 2403 struct ctl_lun *lun; 2404 2405 sync_info = (struct ctl_sync_info *)addr; 2406 2407 mtx_lock(&softc->ctl_lock); 2408 lun = softc->ctl_luns[sync_info->lun_id]; 2409 if (lun == NULL) { 2410 mtx_unlock(&softc->ctl_lock); 2411 sync_info->status = CTL_GS_SYNC_NO_LUN; 2412 } 2413 /* 2414 * Get or set the sync interval. We're not bounds checking 2415 * in the set case, hopefully the user won't do something 2416 * silly. 2417 */ 2418 mtx_lock(&lun->lun_lock); 2419 mtx_unlock(&softc->ctl_lock); 2420 if (cmd == CTL_GETSYNC) 2421 sync_info->sync_interval = lun->sync_interval; 2422 else 2423 lun->sync_interval = sync_info->sync_interval; 2424 mtx_unlock(&lun->lun_lock); 2425 2426 sync_info->status = CTL_GS_SYNC_OK; 2427 2428 break; 2429 } 2430 case CTL_GETSTATS: { 2431 struct ctl_stats *stats; 2432 struct ctl_lun *lun; 2433 int i; 2434 2435 stats = (struct ctl_stats *)addr; 2436 2437 if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) > 2438 stats->alloc_len) { 2439 stats->status = CTL_SS_NEED_MORE_SPACE; 2440 stats->num_luns = softc->num_luns; 2441 break; 2442 } 2443 /* 2444 * XXX KDM no locking here. If the LUN list changes, 2445 * things can blow up. 
2446 */ 2447 for (i = 0, lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; 2448 i++, lun = STAILQ_NEXT(lun, links)) { 2449 retval = copyout(&lun->stats, &stats->lun_stats[i], 2450 sizeof(lun->stats)); 2451 if (retval != 0) 2452 break; 2453 } 2454 stats->num_luns = softc->num_luns; 2455 stats->fill_len = sizeof(struct ctl_lun_io_stats) * 2456 softc->num_luns; 2457 stats->status = CTL_SS_OK; 2458#ifdef CTL_TIME_IO 2459 stats->flags = CTL_STATS_FLAG_TIME_VALID; 2460#else 2461 stats->flags = CTL_STATS_FLAG_NONE; 2462#endif 2463 getnanouptime(&stats->timestamp); 2464 break; 2465 } 2466 case CTL_ERROR_INJECT: { 2467 struct ctl_error_desc *err_desc, *new_err_desc; 2468 struct ctl_lun *lun; 2469 2470 err_desc = (struct ctl_error_desc *)addr; 2471 2472 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2473 M_WAITOK | M_ZERO); 2474 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 2475 2476 mtx_lock(&softc->ctl_lock); 2477 lun = softc->ctl_luns[err_desc->lun_id]; 2478 if (lun == NULL) { 2479 mtx_unlock(&softc->ctl_lock); 2480 free(new_err_desc, M_CTL); 2481 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 2482 __func__, (uintmax_t)err_desc->lun_id); 2483 retval = EINVAL; 2484 break; 2485 } 2486 mtx_lock(&lun->lun_lock); 2487 mtx_unlock(&softc->ctl_lock); 2488 2489 /* 2490 * We could do some checking here to verify the validity 2491 * of the request, but given the complexity of error 2492 * injection requests, the checking logic would be fairly 2493 * complex. 2494 * 2495 * For now, if the request is invalid, it just won't get 2496 * executed and might get deleted. 2497 */ 2498 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 2499 2500 /* 2501 * XXX KDM check to make sure the serial number is unique, 2502 * in case we somehow manage to wrap. That shouldn't 2503 * happen for a very long time, but it's the right thing to 2504 * do. 
2505 */ 2506 new_err_desc->serial = lun->error_serial; 2507 err_desc->serial = lun->error_serial; 2508 lun->error_serial++; 2509 2510 mtx_unlock(&lun->lun_lock); 2511 break; 2512 } 2513 case CTL_ERROR_INJECT_DELETE: { 2514 struct ctl_error_desc *delete_desc, *desc, *desc2; 2515 struct ctl_lun *lun; 2516 int delete_done; 2517 2518 delete_desc = (struct ctl_error_desc *)addr; 2519 delete_done = 0; 2520 2521 mtx_lock(&softc->ctl_lock); 2522 lun = softc->ctl_luns[delete_desc->lun_id]; 2523 if (lun == NULL) { 2524 mtx_unlock(&softc->ctl_lock); 2525 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 2526 __func__, (uintmax_t)delete_desc->lun_id); 2527 retval = EINVAL; 2528 break; 2529 } 2530 mtx_lock(&lun->lun_lock); 2531 mtx_unlock(&softc->ctl_lock); 2532 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 2533 if (desc->serial != delete_desc->serial) 2534 continue; 2535 2536 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 2537 links); 2538 free(desc, M_CTL); 2539 delete_done = 1; 2540 } 2541 mtx_unlock(&lun->lun_lock); 2542 if (delete_done == 0) { 2543 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 2544 "error serial %ju on LUN %u\n", __func__, 2545 delete_desc->serial, delete_desc->lun_id); 2546 retval = EINVAL; 2547 break; 2548 } 2549 break; 2550 } 2551 case CTL_DUMP_STRUCTS: { 2552 int i, j, k; 2553 struct ctl_port *port; 2554 struct ctl_frontend *fe; 2555 2556 mtx_lock(&softc->ctl_lock); 2557 printf("CTL Persistent Reservation information start:\n"); 2558 for (i = 0; i < CTL_MAX_LUNS; i++) { 2559 struct ctl_lun *lun; 2560 2561 lun = softc->ctl_luns[i]; 2562 2563 if ((lun == NULL) 2564 || ((lun->flags & CTL_LUN_DISABLED) != 0)) 2565 continue; 2566 2567 for (j = 0; j < (CTL_MAX_PORTS * 2); j++) { 2568 if (lun->pr_keys[j] == NULL) 2569 continue; 2570 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2571 if (lun->pr_keys[j][k] == 0) 2572 continue; 2573 printf(" LUN %d port %d iid %d key " 2574 "%#jx\n", i, j, k, 2575 (uintmax_t)lun->pr_keys[j][k]); 
2576 } 2577 } 2578 } 2579 printf("CTL Persistent Reservation information end\n"); 2580 printf("CTL Ports:\n"); 2581 STAILQ_FOREACH(port, &softc->port_list, links) { 2582 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " 2583 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, 2584 port->frontend->name, port->port_type, 2585 port->physical_port, port->virtual_port, 2586 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); 2587 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 2588 if (port->wwpn_iid[j].in_use == 0 && 2589 port->wwpn_iid[j].wwpn == 0 && 2590 port->wwpn_iid[j].name == NULL) 2591 continue; 2592 2593 printf(" iid %u use %d WWPN %#jx '%s'\n", 2594 j, port->wwpn_iid[j].in_use, 2595 (uintmax_t)port->wwpn_iid[j].wwpn, 2596 port->wwpn_iid[j].name); 2597 } 2598 } 2599 printf("CTL Port information end\n"); 2600 mtx_unlock(&softc->ctl_lock); 2601 /* 2602 * XXX KDM calling this without a lock. We'd likely want 2603 * to drop the lock before calling the frontend's dump 2604 * routine anyway. 
2605 */ 2606 printf("CTL Frontends:\n"); 2607 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2608 printf(" Frontend '%s'\n", fe->name); 2609 if (fe->fe_dump != NULL) 2610 fe->fe_dump(); 2611 } 2612 printf("CTL Frontend information end\n"); 2613 break; 2614 } 2615 case CTL_LUN_REQ: { 2616 struct ctl_lun_req *lun_req; 2617 struct ctl_backend_driver *backend; 2618 2619 lun_req = (struct ctl_lun_req *)addr; 2620 2621 backend = ctl_backend_find(lun_req->backend); 2622 if (backend == NULL) { 2623 lun_req->status = CTL_LUN_ERROR; 2624 snprintf(lun_req->error_str, 2625 sizeof(lun_req->error_str), 2626 "Backend \"%s\" not found.", 2627 lun_req->backend); 2628 break; 2629 } 2630 if (lun_req->num_be_args > 0) { 2631 lun_req->kern_be_args = ctl_copyin_args( 2632 lun_req->num_be_args, 2633 lun_req->be_args, 2634 lun_req->error_str, 2635 sizeof(lun_req->error_str)); 2636 if (lun_req->kern_be_args == NULL) { 2637 lun_req->status = CTL_LUN_ERROR; 2638 break; 2639 } 2640 } 2641 2642 retval = backend->ioctl(dev, cmd, addr, flag, td); 2643 2644 if (lun_req->num_be_args > 0) { 2645 ctl_copyout_args(lun_req->num_be_args, 2646 lun_req->kern_be_args); 2647 ctl_free_args(lun_req->num_be_args, 2648 lun_req->kern_be_args); 2649 } 2650 break; 2651 } 2652 case CTL_LUN_LIST: { 2653 struct sbuf *sb; 2654 struct ctl_lun *lun; 2655 struct ctl_lun_list *list; 2656 struct ctl_option *opt; 2657 2658 list = (struct ctl_lun_list *)addr; 2659 2660 /* 2661 * Allocate a fixed length sbuf here, based on the length 2662 * of the user's buffer. We could allocate an auto-extending 2663 * buffer, and then tell the user how much larger our 2664 * amount of data is than his buffer, but that presents 2665 * some problems: 2666 * 2667 * 1. The sbuf(9) routines use a blocking malloc, and so 2668 * we can't hold a lock while calling them with an 2669 * auto-extending buffer. 2670 * 2671 * 2. 
There is not currently a LUN reference counting 2672 * mechanism, outside of outstanding transactions on 2673 * the LUN's OOA queue. So a LUN could go away on us 2674 * while we're getting the LUN number, backend-specific 2675 * information, etc. Thus, given the way things 2676 * currently work, we need to hold the CTL lock while 2677 * grabbing LUN information. 2678 * 2679 * So, from the user's standpoint, the best thing to do is 2680 * allocate what he thinks is a reasonable buffer length, 2681 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 2682 * double the buffer length and try again. (And repeat 2683 * that until he succeeds.) 2684 */ 2685 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 2686 if (sb == NULL) { 2687 list->status = CTL_LUN_LIST_ERROR; 2688 snprintf(list->error_str, sizeof(list->error_str), 2689 "Unable to allocate %d bytes for LUN list", 2690 list->alloc_len); 2691 break; 2692 } 2693 2694 sbuf_printf(sb, "<ctllunlist>\n"); 2695 2696 mtx_lock(&softc->ctl_lock); 2697 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2698 mtx_lock(&lun->lun_lock); 2699 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 2700 (uintmax_t)lun->lun); 2701 2702 /* 2703 * Bail out as soon as we see that we've overfilled 2704 * the buffer. 2705 */ 2706 if (retval != 0) 2707 break; 2708 2709 retval = sbuf_printf(sb, "\t<backend_type>%s" 2710 "</backend_type>\n", 2711 (lun->backend == NULL) ? "none" : 2712 lun->backend->name); 2713 2714 if (retval != 0) 2715 break; 2716 2717 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 2718 lun->be_lun->lun_type); 2719 2720 if (retval != 0) 2721 break; 2722 2723 if (lun->backend == NULL) { 2724 retval = sbuf_printf(sb, "</lun>\n"); 2725 if (retval != 0) 2726 break; 2727 continue; 2728 } 2729 2730 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 2731 (lun->be_lun->maxlba > 0) ? 
2732 lun->be_lun->maxlba + 1 : 0); 2733 2734 if (retval != 0) 2735 break; 2736 2737 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 2738 lun->be_lun->blocksize); 2739 2740 if (retval != 0) 2741 break; 2742 2743 retval = sbuf_printf(sb, "\t<serial_number>"); 2744 2745 if (retval != 0) 2746 break; 2747 2748 retval = ctl_sbuf_printf_esc(sb, 2749 lun->be_lun->serial_num, 2750 sizeof(lun->be_lun->serial_num)); 2751 2752 if (retval != 0) 2753 break; 2754 2755 retval = sbuf_printf(sb, "</serial_number>\n"); 2756 2757 if (retval != 0) 2758 break; 2759 2760 retval = sbuf_printf(sb, "\t<device_id>"); 2761 2762 if (retval != 0) 2763 break; 2764 2765 retval = ctl_sbuf_printf_esc(sb, 2766 lun->be_lun->device_id, 2767 sizeof(lun->be_lun->device_id)); 2768 2769 if (retval != 0) 2770 break; 2771 2772 retval = sbuf_printf(sb, "</device_id>\n"); 2773 2774 if (retval != 0) 2775 break; 2776 2777 if (lun->backend->lun_info != NULL) { 2778 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb); 2779 if (retval != 0) 2780 break; 2781 } 2782 STAILQ_FOREACH(opt, &lun->be_lun->options, links) { 2783 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 2784 opt->name, opt->value, opt->name); 2785 if (retval != 0) 2786 break; 2787 } 2788 2789 retval = sbuf_printf(sb, "</lun>\n"); 2790 2791 if (retval != 0) 2792 break; 2793 mtx_unlock(&lun->lun_lock); 2794 } 2795 if (lun != NULL) 2796 mtx_unlock(&lun->lun_lock); 2797 mtx_unlock(&softc->ctl_lock); 2798 2799 if ((retval != 0) 2800 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 2801 retval = 0; 2802 sbuf_delete(sb); 2803 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 2804 snprintf(list->error_str, sizeof(list->error_str), 2805 "Out of space, %d bytes is too small", 2806 list->alloc_len); 2807 break; 2808 } 2809 2810 sbuf_finish(sb); 2811 2812 retval = copyout(sbuf_data(sb), list->lun_xml, 2813 sbuf_len(sb) + 1); 2814 2815 list->fill_len = sbuf_len(sb) + 1; 2816 list->status = CTL_LUN_LIST_OK; 2817 sbuf_delete(sb); 2818 break; 2819 } 
2820 case CTL_ISCSI: { 2821 struct ctl_iscsi *ci; 2822 struct ctl_frontend *fe; 2823 2824 ci = (struct ctl_iscsi *)addr; 2825 2826 fe = ctl_frontend_find("iscsi"); 2827 if (fe == NULL) { 2828 ci->status = CTL_ISCSI_ERROR; 2829 snprintf(ci->error_str, sizeof(ci->error_str), 2830 "Frontend \"iscsi\" not found."); 2831 break; 2832 } 2833 2834 retval = fe->ioctl(dev, cmd, addr, flag, td); 2835 break; 2836 } 2837 case CTL_PORT_REQ: { 2838 struct ctl_req *req; 2839 struct ctl_frontend *fe; 2840 2841 req = (struct ctl_req *)addr; 2842 2843 fe = ctl_frontend_find(req->driver); 2844 if (fe == NULL) { 2845 req->status = CTL_LUN_ERROR; 2846 snprintf(req->error_str, sizeof(req->error_str), 2847 "Frontend \"%s\" not found.", req->driver); 2848 break; 2849 } 2850 if (req->num_args > 0) { 2851 req->kern_args = ctl_copyin_args(req->num_args, 2852 req->args, req->error_str, sizeof(req->error_str)); 2853 if (req->kern_args == NULL) { 2854 req->status = CTL_LUN_ERROR; 2855 break; 2856 } 2857 } 2858 2859 retval = fe->ioctl(dev, cmd, addr, flag, td); 2860 2861 if (req->num_args > 0) { 2862 ctl_copyout_args(req->num_args, req->kern_args); 2863 ctl_free_args(req->num_args, req->kern_args); 2864 } 2865 break; 2866 } 2867 case CTL_PORT_LIST: { 2868 struct sbuf *sb; 2869 struct ctl_port *port; 2870 struct ctl_lun_list *list; 2871 struct ctl_option *opt; 2872 int j; 2873 uint32_t plun; 2874 2875 list = (struct ctl_lun_list *)addr; 2876 2877 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 2878 if (sb == NULL) { 2879 list->status = CTL_LUN_LIST_ERROR; 2880 snprintf(list->error_str, sizeof(list->error_str), 2881 "Unable to allocate %d bytes for LUN list", 2882 list->alloc_len); 2883 break; 2884 } 2885 2886 sbuf_printf(sb, "<ctlportlist>\n"); 2887 2888 mtx_lock(&softc->ctl_lock); 2889 STAILQ_FOREACH(port, &softc->port_list, links) { 2890 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n", 2891 (uintmax_t)port->targ_port); 2892 2893 /* 2894 * Bail out as soon as we see that we've 
overfilled 2895 * the buffer. 2896 */ 2897 if (retval != 0) 2898 break; 2899 2900 retval = sbuf_printf(sb, "\t<frontend_type>%s" 2901 "</frontend_type>\n", port->frontend->name); 2902 if (retval != 0) 2903 break; 2904 2905 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n", 2906 port->port_type); 2907 if (retval != 0) 2908 break; 2909 2910 retval = sbuf_printf(sb, "\t<online>%s</online>\n", 2911 (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO"); 2912 if (retval != 0) 2913 break; 2914 2915 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n", 2916 port->port_name); 2917 if (retval != 0) 2918 break; 2919 2920 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n", 2921 port->physical_port); 2922 if (retval != 0) 2923 break; 2924 2925 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n", 2926 port->virtual_port); 2927 if (retval != 0) 2928 break; 2929 2930 if (port->target_devid != NULL) { 2931 sbuf_printf(sb, "\t<target>"); 2932 ctl_id_sbuf(port->target_devid, sb); 2933 sbuf_printf(sb, "</target>\n"); 2934 } 2935 2936 if (port->port_devid != NULL) { 2937 sbuf_printf(sb, "\t<port>"); 2938 ctl_id_sbuf(port->port_devid, sb); 2939 sbuf_printf(sb, "</port>\n"); 2940 } 2941 2942 if (port->port_info != NULL) { 2943 retval = port->port_info(port->onoff_arg, sb); 2944 if (retval != 0) 2945 break; 2946 } 2947 STAILQ_FOREACH(opt, &port->options, links) { 2948 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 2949 opt->name, opt->value, opt->name); 2950 if (retval != 0) 2951 break; 2952 } 2953 2954 if (port->lun_map != NULL) { 2955 sbuf_printf(sb, "\t<lun_map>on</lun_map>\n"); 2956 for (j = 0; j < CTL_MAX_LUNS; j++) { 2957 plun = ctl_lun_map_from_port(port, j); 2958 if (plun >= CTL_MAX_LUNS) 2959 continue; 2960 sbuf_printf(sb, 2961 "\t<lun id=\"%u\">%u</lun>\n", 2962 j, plun); 2963 } 2964 } 2965 2966 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 2967 if (port->wwpn_iid[j].in_use == 0 || 2968 (port->wwpn_iid[j].wwpn == 0 && 2969 
port->wwpn_iid[j].name == NULL)) 2970 continue; 2971 2972 if (port->wwpn_iid[j].name != NULL) 2973 retval = sbuf_printf(sb, 2974 "\t<initiator id=\"%u\">%s</initiator>\n", 2975 j, port->wwpn_iid[j].name); 2976 else 2977 retval = sbuf_printf(sb, 2978 "\t<initiator id=\"%u\">naa.%08jx</initiator>\n", 2979 j, port->wwpn_iid[j].wwpn); 2980 if (retval != 0) 2981 break; 2982 } 2983 if (retval != 0) 2984 break; 2985 2986 retval = sbuf_printf(sb, "</targ_port>\n"); 2987 if (retval != 0) 2988 break; 2989 } 2990 mtx_unlock(&softc->ctl_lock); 2991 2992 if ((retval != 0) 2993 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) { 2994 retval = 0; 2995 sbuf_delete(sb); 2996 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 2997 snprintf(list->error_str, sizeof(list->error_str), 2998 "Out of space, %d bytes is too small", 2999 list->alloc_len); 3000 break; 3001 } 3002 3003 sbuf_finish(sb); 3004 3005 retval = copyout(sbuf_data(sb), list->lun_xml, 3006 sbuf_len(sb) + 1); 3007 3008 list->fill_len = sbuf_len(sb) + 1; 3009 list->status = CTL_LUN_LIST_OK; 3010 sbuf_delete(sb); 3011 break; 3012 } 3013 case CTL_LUN_MAP: { 3014 struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; 3015 struct ctl_port *port; 3016 3017 mtx_lock(&softc->ctl_lock); 3018 if (lm->port >= CTL_MAX_PORTS || 3019 (port = softc->ctl_ports[lm->port]) == NULL) { 3020 mtx_unlock(&softc->ctl_lock); 3021 return (ENXIO); 3022 } 3023 mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps 3024 if (lm->plun < CTL_MAX_LUNS) { 3025 if (lm->lun == UINT32_MAX) 3026 retval = ctl_lun_map_unset(port, lm->plun); 3027 else if (lm->lun < CTL_MAX_LUNS && 3028 softc->ctl_luns[lm->lun] != NULL) 3029 retval = ctl_lun_map_set(port, lm->plun, lm->lun); 3030 else 3031 return (ENXIO); 3032 } else if (lm->plun == UINT32_MAX) { 3033 if (lm->lun == UINT32_MAX) 3034 retval = ctl_lun_map_deinit(port); 3035 else 3036 retval = ctl_lun_map_init(port); 3037 } else 3038 return (ENXIO); 3039 break; 3040 } 3041 default: { 3042 /* XXX KDM should we fix 
this? */
#if 0
		struct ctl_backend_driver *backend;
		unsigned int type;
		int found;

		found = 0;

		/*
		 * We encode the backend type as the ioctl type for backend
		 * ioctls.  So parse it out here, and then search for a
		 * backend of this type.
		 */
		type = _IOC_TYPE(cmd);

		STAILQ_FOREACH(backend, &softc->be_list, links) {
			if (backend->type == type) {
				found = 1;
				break;
			}
		}
		if (found == 0) {
			printf("ctl: unknown ioctl command %#lx or backend "
			       "%d\n", cmd, type);
			retval = EINVAL;
			break;
		}
		retval = backend->ioctl(dev, cmd, addr, flag, td);
#endif
		retval = ENOTTY;
		break;
	}
	}
	return (retval);
}

/*
 * Flatten a nexus (target port + initiator id) into a single global
 * initiator index.  Ports at or above CTL_MAX_PORTS are folded down by
 * CTL_MAX_PORTS first, so a "primary" port and its counterpart map to
 * the same index range.
 */
uint32_t
ctl_get_initindex(struct ctl_nexus *nexus)
{
	if (nexus->targ_port < CTL_MAX_PORTS)
		return (nexus->initid.id +
			(nexus->targ_port * CTL_MAX_INIT_PER_PORT));
	else
		return (nexus->initid.id +
		       ((nexus->targ_port - CTL_MAX_PORTS) *
			CTL_MAX_INIT_PER_PORT));
}

/*
 * Like ctl_get_initindex(), but without the CTL_MAX_PORTS folding:
 * every port gets its own CTL_MAX_INIT_PER_PORT-sized slot.  Used for
 * persistent-reservation bookkeeping.
 */
uint32_t
ctl_get_resindex(struct ctl_nexus *nexus)
{
	return (nexus->initid.id + (nexus->targ_port * CTL_MAX_INIT_PER_PORT));
}

/*
 * Fold a port number into the range [0, CTL_MAX_PORTS).
 */
uint32_t
ctl_port_idx(int port_num)
{
	if (port_num < CTL_MAX_PORTS)
		return(port_num);
	else
		return(port_num - CTL_MAX_PORTS);
}

/*
 * Allocate the per-port LUN map (if not already present) and mark every
 * entry unmapped (UINT32_MAX).  A freshly initialized map exposes no
 * LUNs, so if the port is online, disable all LUNs on it via the
 * frontend's lun_disable callback.
 *
 * Returns 0 on success, ENOMEM if the M_NOWAIT allocation fails.
 *
 * NOTE(review): lun_list is walked here without taking ctl_lock;
 * presumably callers hold it — confirm at the call sites.
 */
int
ctl_lun_map_init(struct ctl_port *port)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_lun *lun;
	uint32_t i;

	if (port->lun_map == NULL)
		port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS,
		    M_CTL, M_NOWAIT);
	if (port->lun_map == NULL)
		return (ENOMEM);
	for (i = 0; i < CTL_MAX_LUNS; i++)
		port->lun_map[i] = UINT32_MAX;
	if (port->status & CTL_PORT_STATUS_ONLINE &&
	    port->lun_disable != NULL) {
		STAILQ_FOREACH(lun, &softc->lun_list, links)
			port->lun_disable(port->targ_lun_arg, lun->lun);
	}
	return (0);
}

/*
 * Tear down the per-port LUN map.  With no map the port reverts to the
 * identity mapping (all LUNs visible), so if the port is online,
 * re-enable every LUN via the frontend's lun_enable callback.
 */
int
ctl_lun_map_deinit(struct ctl_port *port)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_lun *lun;

	if (port->lun_map == NULL)
		return (0);
	free(port->lun_map, M_CTL);
	port->lun_map = NULL;
	if (port->status & CTL_PORT_STATUS_ONLINE &&
	    port->lun_enable != NULL) {
		STAILQ_FOREACH(lun, &softc->lun_list, links)
			port->lun_enable(port->targ_lun_arg, lun->lun);
	}
	return (0);
}

/*
 * Map port-visible LUN 'plun' to global LUN 'glun', creating the map on
 * demand.  If the slot was previously unmapped (old >= CTL_MAX_LUNS) and
 * the port is online, tell the frontend the port LUN just appeared.
 */
int
ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun)
{
	int status;
	uint32_t old;

	if (port->lun_map == NULL) {
		status = ctl_lun_map_init(port);
		if (status != 0)
			return (status);
	}
	old = port->lun_map[plun];
	port->lun_map[plun] = glun;
	if ((port->status & CTL_PORT_STATUS_ONLINE) && old >= CTL_MAX_LUNS &&
	    port->lun_enable != NULL)
		port->lun_enable(port->targ_lun_arg, plun);
	return (0);
}

/*
 * Remove the mapping for port-visible LUN 'plun'.  If the slot was
 * previously mapped and the port is online, tell the frontend the port
 * LUN just disappeared.
 */
int
ctl_lun_map_unset(struct ctl_port *port, uint32_t plun)
{
	uint32_t old;

	if (port->lun_map == NULL)
		return (0);
	old = port->lun_map[plun];
	port->lun_map[plun] = UINT32_MAX;
	if ((port->status & CTL_PORT_STATUS_ONLINE) && old < CTL_MAX_LUNS &&
	    port->lun_disable != NULL)
		port->lun_disable(port->targ_lun_arg, plun);
	return (0);
}

/*
 * Translate a port-visible LUN id to the global LUN id.  Identity
 * mapping when the port has no map; UINT32_MAX for a NULL port or an
 * unmapped slot.
 */
uint32_t
ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id)
{

	if (port == NULL)
		return (UINT32_MAX);
	if (port->lun_map == NULL || lun_id >= CTL_MAX_LUNS)
		return (lun_id);
	return (port->lun_map[lun_id]);
}

/*
 * Reverse translation: find the port-visible LUN id for global LUN
 * 'lun_id' by linear search of the map.  Identity mapping when the port
 * has no map; UINT32_MAX for a NULL port or when the LUN is not mapped
 * on this port.
 */
uint32_t
ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id)
{
	uint32_t i;

	if (port == NULL)
		return (UINT32_MAX);
	if (port->lun_map == NULL)
		return (lun_id);
	for (i = 0; i < CTL_MAX_LUNS; i++) {
		if (port->lun_map[i] == lun_id)
			return (i);
	}
	return (UINT32_MAX);
}

3206static struct ctl_port * 3207ctl_io_port(struct ctl_io_hdr *io_hdr) 3208{ 3209 int port_num; 3210 3211 port_num = io_hdr->nexus.targ_port; 3212 return (control_softc->ctl_ports[ctl_port_idx(port_num)]); 3213} 3214 3215/* 3216 * Note: This only works for bitmask sizes that are at least 32 bits, and 3217 * that are a power of 2. 3218 */ 3219int 3220ctl_ffz(uint32_t *mask, uint32_t size) 3221{ 3222 uint32_t num_chunks, num_pieces; 3223 int i, j; 3224 3225 num_chunks = (size >> 5); 3226 if (num_chunks == 0) 3227 num_chunks++; 3228 num_pieces = MIN((sizeof(uint32_t) * 8), size); 3229 3230 for (i = 0; i < num_chunks; i++) { 3231 for (j = 0; j < num_pieces; j++) { 3232 if ((mask[i] & (1 << j)) == 0) 3233 return ((i << 5) + j); 3234 } 3235 } 3236 3237 return (-1); 3238} 3239 3240int 3241ctl_set_mask(uint32_t *mask, uint32_t bit) 3242{ 3243 uint32_t chunk, piece; 3244 3245 chunk = bit >> 5; 3246 piece = bit % (sizeof(uint32_t) * 8); 3247 3248 if ((mask[chunk] & (1 << piece)) != 0) 3249 return (-1); 3250 else 3251 mask[chunk] |= (1 << piece); 3252 3253 return (0); 3254} 3255 3256int 3257ctl_clear_mask(uint32_t *mask, uint32_t bit) 3258{ 3259 uint32_t chunk, piece; 3260 3261 chunk = bit >> 5; 3262 piece = bit % (sizeof(uint32_t) * 8); 3263 3264 if ((mask[chunk] & (1 << piece)) == 0) 3265 return (-1); 3266 else 3267 mask[chunk] &= ~(1 << piece); 3268 3269 return (0); 3270} 3271 3272int 3273ctl_is_set(uint32_t *mask, uint32_t bit) 3274{ 3275 uint32_t chunk, piece; 3276 3277 chunk = bit >> 5; 3278 piece = bit % (sizeof(uint32_t) * 8); 3279 3280 if ((mask[chunk] & (1 << piece)) == 0) 3281 return (0); 3282 else 3283 return (1); 3284} 3285 3286static uint64_t 3287ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) 3288{ 3289 uint64_t *t; 3290 3291 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3292 if (t == NULL) 3293 return (0); 3294 return (t[residx % CTL_MAX_INIT_PER_PORT]); 3295} 3296 3297static void 3298ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) 3299{ 3300 
	/* Clear the PR key at 'residx'; no-op if the table was never allocated. */
	uint64_t *t;

	t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT];
	if (t == NULL)
		return;
	t[residx % CTL_MAX_INIT_PER_PORT] = 0;
}

/*
 * Ensure the per-port PR key table covering 'residx' exists.
 *
 * Called with lun_lock held.  Since the allocation uses M_WAITOK (may
 * sleep), the lock is dropped around malloc() and reacquired; the slot
 * is then re-checked so that if another thread won the race, the
 * duplicate table is freed instead of installed.
 */
static void
ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx)
{
	uint64_t *p;
	u_int i;

	i = residx/CTL_MAX_INIT_PER_PORT;
	if (lun->pr_keys[i] != NULL)
		return;
	mtx_unlock(&lun->lun_lock);
	p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL,
	    M_WAITOK | M_ZERO);
	mtx_lock(&lun->lun_lock);
	if (lun->pr_keys[i] == NULL)
		lun->pr_keys[i] = p;
	else
		free(p, M_CTL);
}

/*
 * Store PR key 'key' at reservation index 'residx'.  The table must
 * already exist (see ctl_alloc_prkey()); asserts otherwise.
 */
static void
ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key)
{
	uint64_t *t;

	t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT];
	KASSERT(t != NULL, ("prkey %d is not allocated", residx));
	t[residx % CTL_MAX_INIT_PER_PORT] = key;
}

/*
 * ctl_softc, pool_name, total_ctl_io are passed in.
 * npool is passed out.
 *
 * With IO_POOLS, a named secondary UMA zone is created per pool; without
 * it, *npool is simply the softc's shared io_zone and total_ctl_io is
 * unused (preallocation is commented out even in the IO_POOLS case).
 */
int
ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name,
		uint32_t total_ctl_io, void **npool)
{
#ifdef IO_POOLS
	struct ctl_io_pool *pool;

	pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL,
					    M_NOWAIT | M_ZERO);
	if (pool == NULL)
		return (ENOMEM);

	snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name);
	pool->ctl_softc = ctl_softc;
	pool->zone = uma_zsecond_create(pool->name, NULL,
	    NULL, NULL, NULL, ctl_softc->io_zone);
	/* uma_prealloc(pool->zone, total_ctl_io); */

	*npool = pool;
#else
	*npool = ctl_softc->io_zone;
#endif
	return (0);
}

/*
 * Destroy a pool created by ctl_pool_create().  Without IO_POOLS the
 * pool is the shared io_zone, which is not ours to destroy, so this is
 * a no-op.
 */
void
ctl_pool_free(struct ctl_io_pool *pool)
{

	if (pool == NULL)
		return;

#ifdef IO_POOLS
	uma_zdestroy(pool->zone);
	free(pool, M_CTL);
#endif
}

/*
 * Allocate a ctl_io from the given pool, sleeping if necessary, and tag
 * it with its pool so ctl_free_io() can return it.
 */
union ctl_io *
ctl_alloc_io(void *pool_ref)
{
	union ctl_io *io;
#ifdef IO_POOLS
struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3385 3386 io = uma_zalloc(pool->zone, M_WAITOK); 3387#else 3388 io = uma_zalloc((uma_zone_t)pool_ref, M_WAITOK); 3389#endif 3390 if (io != NULL) 3391 io->io_hdr.pool = pool_ref; 3392 return (io); 3393} 3394 3395union ctl_io * 3396ctl_alloc_io_nowait(void *pool_ref) 3397{ 3398 union ctl_io *io; 3399#ifdef IO_POOLS 3400 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3401 3402 io = uma_zalloc(pool->zone, M_NOWAIT); 3403#else 3404 io = uma_zalloc((uma_zone_t)pool_ref, M_NOWAIT); 3405#endif 3406 if (io != NULL) 3407 io->io_hdr.pool = pool_ref; 3408 return (io); 3409} 3410 3411void 3412ctl_free_io(union ctl_io *io) 3413{ 3414#ifdef IO_POOLS 3415 struct ctl_io_pool *pool; 3416#endif 3417 3418 if (io == NULL) 3419 return; 3420 3421#ifdef IO_POOLS 3422 pool = (struct ctl_io_pool *)io->io_hdr.pool; 3423 uma_zfree(pool->zone, io); 3424#else 3425 uma_zfree((uma_zone_t)io->io_hdr.pool, io); 3426#endif 3427} 3428 3429void 3430ctl_zero_io(union ctl_io *io) 3431{ 3432 void *pool_ref; 3433 3434 if (io == NULL) 3435 return; 3436 3437 /* 3438 * May need to preserve linked list pointers at some point too. 3439 */ 3440 pool_ref = io->io_hdr.pool; 3441 memset(io, 0, sizeof(*io)); 3442 io->io_hdr.pool = pool_ref; 3443} 3444 3445/* 3446 * This routine is currently used for internal copies of ctl_ios that need 3447 * to persist for some reason after we've already returned status to the 3448 * FETD. (Thus the flag set.) 3449 * 3450 * XXX XXX 3451 * Note that this makes a blind copy of all fields in the ctl_io, except 3452 * for the pool reference. This includes any memory that has been 3453 * allocated! That memory will no longer be valid after done has been 3454 * called, so this would be VERY DANGEROUS for command that actually does 3455 * any reads or writes. Right now (11/7/2005), this is only used for immediate 3456 * start and stop commands, which don't transfer any data, so this is not a 3457 * problem. 
If it is used for anything else, the caller would also need to 3458 * allocate data buffer space and this routine would need to be modified to 3459 * copy the data buffer(s) as well. 3460 */ 3461void 3462ctl_copy_io(union ctl_io *src, union ctl_io *dest) 3463{ 3464 void *pool_ref; 3465 3466 if ((src == NULL) 3467 || (dest == NULL)) 3468 return; 3469 3470 /* 3471 * May need to preserve linked list pointers at some point too. 3472 */ 3473 pool_ref = dest->io_hdr.pool; 3474 3475 memcpy(dest, src, MIN(sizeof(*src), sizeof(*dest))); 3476 3477 dest->io_hdr.pool = pool_ref; 3478 /* 3479 * We need to know that this is an internal copy, and doesn't need 3480 * to get passed back to the FETD that allocated it. 3481 */ 3482 dest->io_hdr.flags |= CTL_FLAG_INT_COPY; 3483} 3484 3485int 3486ctl_expand_number(const char *buf, uint64_t *num) 3487{ 3488 char *endptr; 3489 uint64_t number; 3490 unsigned shift; 3491 3492 number = strtoq(buf, &endptr, 0); 3493 3494 switch (tolower((unsigned char)*endptr)) { 3495 case 'e': 3496 shift = 60; 3497 break; 3498 case 'p': 3499 shift = 50; 3500 break; 3501 case 't': 3502 shift = 40; 3503 break; 3504 case 'g': 3505 shift = 30; 3506 break; 3507 case 'm': 3508 shift = 20; 3509 break; 3510 case 'k': 3511 shift = 10; 3512 break; 3513 case 'b': 3514 case '\0': /* No unit. */ 3515 *num = number; 3516 return (0); 3517 default: 3518 /* Unrecognized unit. */ 3519 return (-1); 3520 } 3521 3522 if ((number << shift) >> shift != number) { 3523 /* Overflow */ 3524 return (-1); 3525 } 3526 *num = number << shift; 3527 return (0); 3528} 3529 3530 3531/* 3532 * This routine could be used in the future to load default and/or saved 3533 * mode page parameters for a particuar lun. 
 */
static int
ctl_init_page_index(struct ctl_lun *lun)
{
	int i;
	struct ctl_page_index *page_index;
	const char *value;
	uint64_t ival;

	/* Start from the template and fill in per-LUN data below. */
	memcpy(&lun->mode_pages.index, page_index_template,
	       sizeof(page_index_template));

	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {

		page_index = &lun->mode_pages.index[i];
		/*
		 * If this is a disk-only mode page, there's no point in
		 * setting it up.  For some pages, we have to have some
		 * basic information about the disk in order to calculate the
		 * mode page data.
		 */
		if ((lun->be_lun->lun_type != T_DIRECT)
		 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY))
			continue;

		switch (page_index->page_code & SMPH_PC_MASK) {
		case SMS_RW_ERROR_RECOVERY_PAGE: {
			if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
				panic("subpage is incorrect!");
			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT],
			       &rw_er_page_default,
			       sizeof(rw_er_page_default));
			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE],
			       &rw_er_page_changeable,
			       sizeof(rw_er_page_changeable));
			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT],
			       &rw_er_page_default,
			       sizeof(rw_er_page_default));
			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED],
			       &rw_er_page_default,
			       sizeof(rw_er_page_default));
			page_index->page_data =
				(uint8_t *)lun->mode_pages.rw_er_page;
			break;
		}
		case SMS_FORMAT_DEVICE_PAGE: {
			struct scsi_format_page *format_page;

			if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
				panic("subpage is incorrect!");

			/*
			 * Sectors per track are set above.  Bytes per
			 * sector need to be set here on a per-LUN basis.
			 */
			memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT],
			       &format_page_default,
			       sizeof(format_page_default));
			memcpy(&lun->mode_pages.format_page[
			       CTL_PAGE_CHANGEABLE], &format_page_changeable,
			       sizeof(format_page_changeable));
			memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT],
			       &format_page_default,
			       sizeof(format_page_default));
			memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED],
			       &format_page_default,
			       sizeof(format_page_default));

			format_page = &lun->mode_pages.format_page[
				CTL_PAGE_CURRENT];
			scsi_ulto2b(lun->be_lun->blocksize,
				    format_page->bytes_per_sector);

			format_page = &lun->mode_pages.format_page[
				CTL_PAGE_DEFAULT];
			scsi_ulto2b(lun->be_lun->blocksize,
				    format_page->bytes_per_sector);

			format_page = &lun->mode_pages.format_page[
				CTL_PAGE_SAVED];
			scsi_ulto2b(lun->be_lun->blocksize,
				    format_page->bytes_per_sector);

			page_index->page_data =
				(uint8_t *)lun->mode_pages.format_page;
			break;
		}
		case SMS_RIGID_DISK_PAGE: {
			struct scsi_rigid_disk_page *rigid_disk_page;
			uint32_t sectors_per_cylinder;
			uint64_t cylinders;
#ifndef	__XSCALE__
			int shift;
#endif /* !__XSCALE__ */

			if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
				panic("invalid subpage value %d",
				      page_index->subpage);

			/*
			 * Rotation rate and sectors per track are set
			 * above.  We calculate the cylinders here based on
			 * capacity.  Due to the number of heads and
			 * sectors per track we're using, smaller arrays
			 * may turn out to have 0 cylinders.  Linux and
			 * FreeBSD don't pay attention to these mode pages
			 * to figure out capacity, but Solaris does.  It
			 * seems to deal with 0 cylinders just fine, and
			 * works out a fake geometry based on the capacity.
			 */
			memcpy(&lun->mode_pages.rigid_disk_page[
			       CTL_PAGE_DEFAULT], &rigid_disk_page_default,
			       sizeof(rigid_disk_page_default));
			memcpy(&lun->mode_pages.rigid_disk_page[
			       CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable,
			       sizeof(rigid_disk_page_changeable));

			sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK *
				CTL_DEFAULT_HEADS;

			/*
			 * The divide method here will be more accurate,
			 * probably, but results in floating point being
			 * used in the kernel on i386 (__udivdi3()).  On the
			 * XScale, though, __udivdi3() is implemented in
			 * software.
			 *
			 * The shift method for cylinder calculation is
			 * accurate if sectors_per_cylinder is a power of
			 * 2.  Otherwise it might be slightly off -- you
			 * might have a bit of a truncation problem.
			 */
#ifdef	__XSCALE__
			cylinders = (lun->be_lun->maxlba + 1) /
				sectors_per_cylinder;
#else
			/* Find the highest set bit of sectors_per_cylinder. */
			for (shift = 31; shift > 0; shift--) {
				if (sectors_per_cylinder & (1 << shift))
					break;
			}
			cylinders = (lun->be_lun->maxlba + 1) >> shift;
#endif

			/*
			 * We've basically got 3 bytes, or 24 bits for the
			 * cylinder size in the mode page.  If we're over,
			 * just round down to 2^24.
			 */
			if (cylinders > 0xffffff)
				cylinders = 0xffffff;

			rigid_disk_page = &lun->mode_pages.rigid_disk_page[
				CTL_PAGE_DEFAULT];
			scsi_ulto3b(cylinders, rigid_disk_page->cylinders);

			if ((value = ctl_get_opt(&lun->be_lun->options,
			    "rpm")) != NULL) {
				scsi_ulto2b(strtol(value, NULL, 0),
				    rigid_disk_page->rotation_rate);
			}

			/* CURRENT and SAVED copies mirror DEFAULT. */
			memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT],
			       &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT],
			       sizeof(rigid_disk_page_default));
			memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED],
			       &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT],
			       sizeof(rigid_disk_page_default));

			page_index->page_data =
				(uint8_t *)lun->mode_pages.rigid_disk_page;
			break;
		}
		case SMS_CACHING_PAGE: {
			struct scsi_caching_page *caching_page;

			if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
				panic("invalid subpage value %d",
				      page_index->subpage);
			memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT],
			       &caching_page_default,
			       sizeof(caching_page_default));
			memcpy(&lun->mode_pages.caching_page[
			       CTL_PAGE_CHANGEABLE], &caching_page_changeable,
			       sizeof(caching_page_changeable));
			memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED],
			       &caching_page_default,
			       sizeof(caching_page_default));
			caching_page = &lun->mode_pages.caching_page[
			    CTL_PAGE_SAVED];
			/* Apply "writecache"/"readcache" LUN options. */
			value = ctl_get_opt(&lun->be_lun->options, "writecache");
			if (value != NULL && strcmp(value, "off") == 0)
				caching_page->flags1 &= ~SCP_WCE;
			value = ctl_get_opt(&lun->be_lun->options, "readcache");
			if (value != NULL && strcmp(value, "off") == 0)
				caching_page->flags1 |= SCP_RCD;
			memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT],
			    &lun->mode_pages.caching_page[CTL_PAGE_SAVED],
			    sizeof(caching_page_default));
			page_index->page_data =
				(uint8_t *)lun->mode_pages.caching_page;
			break;
		}
		case SMS_CONTROL_MODE_PAGE: {
			struct scsi_control_page *control_page;

			if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
				panic("invalid subpage value %d",
				      page_index->subpage);

			memcpy(&lun->mode_pages.control_page[CTL_PAGE_DEFAULT],
			       &control_page_default,
			       sizeof(control_page_default));
			memcpy(&lun->mode_pages.control_page[
			    CTL_PAGE_CHANGEABLE], &control_page_changeable,
			    sizeof(control_page_changeable));
			memcpy(&lun->mode_pages.control_page[CTL_PAGE_SAVED],
			       &control_page_default,
			       sizeof(control_page_default));
			control_page = &lun->mode_pages.control_page[
			    CTL_PAGE_SAVED];
			/* "reordering" option selects the queue algorithm. */
			value = ctl_get_opt(&lun->be_lun->options, "reordering");
			if (value != NULL && strcmp(value, "unrestricted") == 0) {
				control_page->queue_flags &= ~SCP_QUEUE_ALG_MASK;
				control_page->queue_flags |= SCP_QUEUE_ALG_UNRESTRICTED;
			}
			memcpy(&lun->mode_pages.control_page[CTL_PAGE_CURRENT],
			    &lun->mode_pages.control_page[CTL_PAGE_SAVED],
			    sizeof(control_page_default));
			page_index->page_data =
				(uint8_t *)lun->mode_pages.control_page;
			break;

		}
		case SMS_INFO_EXCEPTIONS_PAGE: {
			switch (page_index->subpage) {
			case SMS_SUBPAGE_PAGE_0:
				memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT],
				       &ie_page_default,
				       sizeof(ie_page_default));
				memcpy(&lun->mode_pages.ie_page[
				       CTL_PAGE_CHANGEABLE], &ie_page_changeable,
				       sizeof(ie_page_changeable));
				memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT],
				       &ie_page_default,
				       sizeof(ie_page_default));
				memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED],
				       &ie_page_default,
				       sizeof(ie_page_default));
				page_index->page_data =
					(uint8_t *)lun->mode_pages.ie_page;
				break;
			case 0x02: {
				/* Logical Block Provisioning subpage. */
				struct ctl_logical_block_provisioning_page *page;

				memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT],
				       &lbp_page_default,
				       sizeof(lbp_page_default));
				memcpy(&lun->mode_pages.lbp_page[
				       CTL_PAGE_CHANGEABLE], &lbp_page_changeable,
				       sizeof(lbp_page_changeable));
				memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED],
				       &lbp_page_default,
				       sizeof(lbp_page_default));
				page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED];
				/*
				 * Each "*-threshold" option, if parseable,
				 * arms one threshold descriptor.  Values are
				 * converted from bytes to blocks (512 if the
				 * backend reports no blocksize) and scaled by
				 * 2^CTL_LBP_EXPONENT.
				 */
				value = ctl_get_opt(&lun->be_lun->options,
				    "avail-threshold");
				if (value != NULL &&
				    ctl_expand_number(value, &ival) == 0) {
					page->descr[0].flags |= SLBPPD_ENABLED |
					    SLBPPD_ARMING_DEC;
					if (lun->be_lun->blocksize)
						ival /= lun->be_lun->blocksize;
					else
						ival /= 512;
					scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
					    page->descr[0].count);
				}
				value = ctl_get_opt(&lun->be_lun->options,
				    "used-threshold");
				if (value != NULL &&
				    ctl_expand_number(value, &ival) == 0) {
					page->descr[1].flags |= SLBPPD_ENABLED |
					    SLBPPD_ARMING_INC;
					if (lun->be_lun->blocksize)
						ival /= lun->be_lun->blocksize;
					else
						ival /= 512;
					scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
					    page->descr[1].count);
				}
				value = ctl_get_opt(&lun->be_lun->options,
				    "pool-avail-threshold");
				if (value != NULL &&
				    ctl_expand_number(value, &ival) == 0) {
					page->descr[2].flags |= SLBPPD_ENABLED |
					    SLBPPD_ARMING_DEC;
					if (lun->be_lun->blocksize)
						ival /= lun->be_lun->blocksize;
					else
						ival /= 512;
					scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
					    page->descr[2].count);
				}
				value = ctl_get_opt(&lun->be_lun->options,
				    "pool-used-threshold");
				if (value != NULL &&
				    ctl_expand_number(value, &ival) == 0) {
					page->descr[3].flags |= SLBPPD_ENABLED |
					    SLBPPD_ARMING_INC;
					if (lun->be_lun->blocksize)
						ival /= lun->be_lun->blocksize;
					else
						ival /= 512;
					scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
					    page->descr[3].count);
				}
				memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT],
				    &lun->mode_pages.lbp_page[CTL_PAGE_SAVED],
				    sizeof(lbp_page_default));
				page_index->page_data =
				    (uint8_t *)lun->mode_pages.lbp_page;
			}}
			break;
		}
		case SMS_VENDOR_SPECIFIC_PAGE:{
			switch (page_index->subpage) {
			case DBGCNF_SUBPAGE_CODE: {
				/*
				 * NOTE(review): current_page/saved_page are
				 * computed but never used below -- presumably
				 * leftovers from older code; confirm before
				 * removing.
				 */
				struct copan_debugconf_subpage *current_page,
							       *saved_page;

				memcpy(&lun->mode_pages.debugconf_subpage[
				       CTL_PAGE_CURRENT],
				       &debugconf_page_default,
				       sizeof(debugconf_page_default));
				memcpy(&lun->mode_pages.debugconf_subpage[
				       CTL_PAGE_CHANGEABLE],
				       &debugconf_page_changeable,
				       sizeof(debugconf_page_changeable));
				memcpy(&lun->mode_pages.debugconf_subpage[
				       CTL_PAGE_DEFAULT],
				       &debugconf_page_default,
				       sizeof(debugconf_page_default));
				memcpy(&lun->mode_pages.debugconf_subpage[
				       CTL_PAGE_SAVED],
				       &debugconf_page_default,
				       sizeof(debugconf_page_default));
				page_index->page_data =
					(uint8_t *)lun->mode_pages.debugconf_subpage;

				current_page = (struct copan_debugconf_subpage *)
					(page_index->page_data +
					 (page_index->page_len *
					  CTL_PAGE_CURRENT));
				saved_page = (struct copan_debugconf_subpage *)
					(page_index->page_data +
					 (page_index->page_len *
					  CTL_PAGE_SAVED));
				break;
			}
			default:
				panic("invalid subpage value %d",
				      page_index->subpage);
				break;
			}
			break;
		}
		default:
			panic("invalid page value %d",
			      page_index->page_code & SMPH_PC_MASK);
			break;
		}
	}

	return (CTL_RETVAL_COMPLETE);
}

/*
 * Build the log page index for a LUN from the template: collect the set
 * of supported pages into pages_page[] (one entry per distinct page
 * code) and page/subpage pairs into subpages_page[], then wire up the
 * data pointers and lengths for the first four index entries.
 */
static int
ctl_init_log_page_index(struct ctl_lun *lun)
{
	struct ctl_page_index *page_index;
	int i, j, k, prev;

	memcpy(&lun->log_pages.index, log_page_index_template,
	       sizeof(log_page_index_template));

	prev = -1;
	for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) {

		page_index = &lun->log_pages.index[i];
		/*
		 * If this is a disk-only mode page, there's no point in
		 * setting it up.  For some pages, we have to have some
		 * basic information about the disk in order to calculate the
		 * mode page data.
		 */
		if ((lun->be_lun->lun_type != T_DIRECT)
		 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY))
			continue;

		/* LBP log page requires backend attribute support. */
		if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING &&
		    lun->backend->lun_attr == NULL)
			continue;

		if (page_index->page_code != prev) {
			lun->log_pages.pages_page[j] = page_index->page_code;
			prev = page_index->page_code;
			j++;
		}
		lun->log_pages.subpages_page[k*2] = page_index->page_code;
		lun->log_pages.subpages_page[k*2+1] = page_index->subpage;
		k++;
	}
	lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0];
	lun->log_pages.index[0].page_len = j;
	lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0];
	lun->log_pages.index[1].page_len = k * 2;
	lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0];
	lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS;
	lun->log_pages.index[3].page_data = (uint8_t *)&lun->log_pages.stat_page;
	lun->log_pages.index[3].page_len = sizeof(lun->log_pages.stat_page);

	return (CTL_RETVAL_COMPLETE);
}

/*
 * Convert an ASCII hex string (leading whitespace and an optional
 * "0x"/"0X" prefix are skipped) into binary in 'buf', stopping at the
 * first non-hex character or when the buffer is full.  Returns the
 * number of bytes produced.
 *
 * NOTE(review): str[i] is passed to isdigit()/isalpha()/isupper() as a
 * plain char; ctype expects an unsigned char value -- harmless for the
 * hex input expected here, but worth confirming.
 */
static int
hex2bin(const char *str, uint8_t *buf, int buf_size)
{
	int i;
	u_char c;

	memset(buf, 0, buf_size);
	while (isspace(str[0]))
		str++;
	if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X'))
		str += 2;
	buf_size *= 2;	/* now counts nibbles, not bytes */
	for (i = 0; str[i] != 0 && i < buf_size; i++) {
		c = str[i];
		if (isdigit(c))
			c -= '0';
		else if (isalpha(c))
			c -= isupper(c) ? 'A' - 10 : 'a' - 10;
		else
			break;
		if (c >= 16)
			break;
		if ((i & 1) == 0)
			buf[i / 2] |= (c << 4);	/* high nibble */
		else
			buf[i / 2] |= c;	/* low nibble */
	}
	return ((i + 1) / 2);
}

/*
 * LUN allocation.
 *
 * Requirements:
 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he
 *   wants us to allocate the LUN and he can block.
 * - ctl_softc is always set
 * - be_lun is set if the LUN has a backend (needed for disk LUNs)
 *
 * Returns 0 for success, non-zero (errno) for failure.
 */
static int
ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
	      struct ctl_be_lun *const be_lun)
{
	struct ctl_lun *nlun, *lun;
	struct scsi_vpd_id_descriptor *desc;
	struct scsi_vpd_id_t10 *t10id;
	const char *eui, *naa, *scsiname, *vendor, *value;
	int lun_number, i, lun_malloced;
	int devidlen, idlen1, idlen2 = 0, len;

	if (be_lun == NULL)
		return (EINVAL);

	/*
	 * We currently only support Direct Access or Processor LUN types.
	 * Note that for unsupported types only the config-failure callback
	 * is made; execution still falls through below.
	 */
	switch (be_lun->lun_type) {
	case T_DIRECT:
		break;
	case T_PROCESSOR:
		break;
	case T_SEQUENTIAL:
	case T_CHANGER:
	default:
		be_lun->lun_config_status(be_lun->be_lun,
					  CTL_LUN_CONFIG_FAILURE);
		break;
	}
	if (ctl_lun == NULL) {
		lun = malloc(sizeof(*lun), M_CTL, M_WAITOK);
		lun_malloced = 1;
	} else {
		lun_malloced = 0;
		lun = ctl_lun;
	}

	memset(lun, 0, sizeof(*lun));
	if (lun_malloced)
		lun->flags = CTL_LUN_MALLOCED;

	/*
	 * Generate LUN ID: build the VPD page 0x83 device ID descriptor
	 * list -- a mandatory T10 vendor ID descriptor, plus optional
	 * SCSI name, EUI-64 and NAA descriptors from LUN options.
	 */
	devidlen = max(CTL_DEVID_MIN_LEN,
	    strnlen(be_lun->device_id, CTL_DEVID_LEN));
	idlen1 = sizeof(*t10id) + devidlen;
	len = sizeof(struct scsi_vpd_id_descriptor) + idlen1;
	scsiname = ctl_get_opt(&be_lun->options, "scsiname");
	if (scsiname != NULL) {
		idlen2 = roundup2(strlen(scsiname) + 1, 4);
		len += sizeof(struct scsi_vpd_id_descriptor) + idlen2;
	}
	eui = ctl_get_opt(&be_lun->options, "eui");
	if (eui != NULL) {
		len += sizeof(struct scsi_vpd_id_descriptor) + 16;
	}
	naa = ctl_get_opt(&be_lun->options, "naa");
	if (naa != NULL) {
		len += sizeof(struct scsi_vpd_id_descriptor) + 16;
	}
	lun->lun_devid = malloc(sizeof(struct ctl_devid) + len,
	    M_CTL, M_WAITOK | M_ZERO);
	desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data;
	desc->proto_codeset = SVPD_ID_CODESET_ASCII;
	desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10;
	desc->length = idlen1;
	t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0];
	memset(t10id->vendor, ' ', sizeof(t10id->vendor));
	if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) {
		strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor));
	} else {
		strncpy(t10id->vendor, vendor,
		    min(sizeof(t10id->vendor), strlen(vendor)));
	}
	strncpy((char *)t10id->vendor_spec_id,
	    (char *)be_lun->device_id, devidlen);
	if (scsiname != NULL) {
		desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
		    desc->length);
		desc->proto_codeset = SVPD_ID_CODESET_UTF8;
		desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
		    SVPD_ID_TYPE_SCSI_NAME;
		desc->length = idlen2;
		strlcpy(desc->identifier, scsiname, idlen2);
	}
	if (eui != NULL) {
		desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
		    desc->length);
		desc->proto_codeset = SVPD_ID_CODESET_BINARY;
		desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
		    SVPD_ID_TYPE_EUI64;
		desc->length = hex2bin(eui, desc->identifier, 16);
		/* Round the EUI length up to the valid sizes 8/12/16. */
		desc->length = desc->length > 12 ? 16 :
		    (desc->length > 8 ? 12 : 8);
		len -= 16 - desc->length;
	}
	if (naa != NULL) {
		desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
		    desc->length);
		desc->proto_codeset = SVPD_ID_CODESET_BINARY;
		desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN |
		    SVPD_ID_TYPE_NAA;
		desc->length = hex2bin(naa, desc->identifier, 16);
		/* NAA identifiers are either 8 or 16 bytes. */
		desc->length = desc->length > 8 ? 16 : 8;
		len -= 16 - desc->length;
	}
	lun->lun_devid->len = len;

	mtx_lock(&ctl_softc->ctl_lock);
	/*
	 * See if the caller requested a particular LUN number.  If so, see
	 * if it is available.  Otherwise, allocate the first available LUN.
	 */
	if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) {
		if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1))
		 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) {
			mtx_unlock(&ctl_softc->ctl_lock);
			if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) {
				printf("ctl: requested LUN ID %d is higher "
				       "than CTL_MAX_LUNS - 1 (%d)\n",
				       be_lun->req_lun_id, CTL_MAX_LUNS - 1);
			} else {
				/*
				 * XXX KDM return an error, or just assign
				 * another LUN ID in this case??
				 */
				printf("ctl: requested LUN ID %d is already "
				       "in use\n", be_lun->req_lun_id);
			}
			if (lun->flags & CTL_LUN_MALLOCED)
				free(lun, M_CTL);
			be_lun->lun_config_status(be_lun->be_lun,
						  CTL_LUN_CONFIG_FAILURE);
			return (ENOSPC);
		}
		lun_number = be_lun->req_lun_id;
	} else {
		lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, CTL_MAX_LUNS);
		if (lun_number == -1) {
			mtx_unlock(&ctl_softc->ctl_lock);
			printf("ctl: can't allocate LUN, out of LUNs\n");
			if (lun->flags & CTL_LUN_MALLOCED)
				free(lun, M_CTL);
			be_lun->lun_config_status(be_lun->be_lun,
						  CTL_LUN_CONFIG_FAILURE);
			return (ENOSPC);
		}
	}
	ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number);

	mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF);
	lun->lun = lun_number;
	lun->be_lun = be_lun;
	/*
	 * The processor LUN is always enabled.  Disk LUNs come on line
	 * disabled, and must be enabled by the backend.
	 */
	lun->flags |= CTL_LUN_DISABLED;
	lun->backend = be_lun->be;
	be_lun->ctl_lun = lun;
	be_lun->lun_id = lun_number;
	atomic_add_int(&be_lun->be->num_luns, 1);
	if (be_lun->flags & CTL_LUN_FLAG_OFFLINE)
		lun->flags |= CTL_LUN_OFFLINE;

	if (be_lun->flags & CTL_LUN_FLAG_POWERED_OFF)
		lun->flags |= CTL_LUN_STOPPED;

	if (be_lun->flags & CTL_LUN_FLAG_INOPERABLE)
		lun->flags |= CTL_LUN_INOPERABLE;

	if (be_lun->flags & CTL_LUN_FLAG_PRIMARY)
		lun->flags |= CTL_LUN_PRIMARY_SC;

	value = ctl_get_opt(&be_lun->options, "readonly");
	if (value != NULL && strcmp(value, "on") == 0)
		lun->flags |= CTL_LUN_READONLY;

	/* The "serseq" option overrides the backend's serialization hint. */
	lun->serseq = CTL_LUN_SERSEQ_OFF;
	if (be_lun->flags & CTL_LUN_FLAG_SERSEQ_READ)
		lun->serseq = CTL_LUN_SERSEQ_READ;
	value = ctl_get_opt(&be_lun->options, "serseq");
	if (value != NULL && strcmp(value, "on") == 0)
		lun->serseq = CTL_LUN_SERSEQ_ON;
	else if (value != NULL && strcmp(value, "read") == 0)
		lun->serseq = CTL_LUN_SERSEQ_READ;
	else if (value != NULL && strcmp(value, "off") == 0)
		lun->serseq = CTL_LUN_SERSEQ_OFF;

	lun->ctl_softc = ctl_softc;
#ifdef CTL_TIME_IO
	lun->last_busy = getsbinuptime();
#endif
	TAILQ_INIT(&lun->ooa_queue);
	TAILQ_INIT(&lun->blocked_queue);
	STAILQ_INIT(&lun->error_list);
	ctl_tpc_lun_init(lun);

	/*
	 * Initialize the mode and log page index.
	 */
	ctl_init_page_index(lun);
	ctl_init_log_page_index(lun);

	/*
	 * Now, before we insert this lun on the lun list, set the lun
	 * inventory changed UA for all other luns.
	 */
	STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) {
		mtx_lock(&nlun->lun_lock);
		ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE);
		mtx_unlock(&nlun->lun_lock);
	}

	STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links);

	ctl_softc->ctl_luns[lun_number] = lun;

	ctl_softc->num_luns++;

	/* Setup statistics gathering */
	lun->stats.device_type = be_lun->lun_type;
	lun->stats.lun_number = lun_number;
	if (lun->stats.device_type == T_DIRECT)
		lun->stats.blocksize = be_lun->blocksize;
	else
		lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE;
	for (i = 0;i < CTL_MAX_PORTS;i++)
		lun->stats.ports[i].targ_port = i;

	mtx_unlock(&ctl_softc->ctl_lock);

	lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK);
	return (0);
}

/*
 * Delete a LUN.
 * Assumptions:
 * - LUN has already been marked invalid and any pending I/O has been taken
 *   care of.
 */
static int
ctl_free_lun(struct ctl_lun *lun)
{
	struct ctl_softc *softc;
	struct ctl_lun *nlun;
	int i;

	softc = lun->ctl_softc;

	mtx_assert(&softc->ctl_lock, MA_OWNED);

	STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links);

	ctl_clear_mask(softc->ctl_lun_mask, lun->lun);

	softc->ctl_luns[lun->lun] = NULL;

	if (!TAILQ_EMPTY(&lun->ooa_queue))
		panic("Freeing a LUN %p with outstanding I/O!!\n", lun);

	softc->num_luns--;

	/*
	 * Tell the backend to free resources, if this LUN has a backend.
	 */
	atomic_subtract_int(&lun->be_lun->be->num_luns, 1);
	lun->be_lun->lun_shutdown(lun->be_lun->be_lun);

	ctl_tpc_lun_shutdown(lun);
	mtx_destroy(&lun->lun_lock);
	free(lun->lun_devid, M_CTL);
	for (i = 0; i < CTL_MAX_PORTS; i++)
		free(lun->pending_ua[i], M_CTL);
	for (i = 0; i < 2 * CTL_MAX_PORTS; i++)
		free(lun->pr_keys[i], M_CTL);
	free(lun->write_buffer, M_CTL);
	if (lun->flags & CTL_LUN_MALLOCED)
		free(lun, M_CTL);

	/* Tell everybody else the LUN inventory has changed. */
	STAILQ_FOREACH(nlun, &softc->lun_list, links) {
		mtx_lock(&nlun->lun_lock);
		ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE);
		mtx_unlock(&nlun->lun_lock);
	}

	return (0);
}

/*
 * Allocate and configure a new LUN for the given backend LUN.
 */
static void
ctl_create_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_softc *softc;

	softc = control_softc;

	/*
	 * ctl_alloc_lun() should handle all potential failure cases.
	 */
	ctl_alloc_lun(softc, NULL, be_lun);
}

/*
 * Queue a backend LUN for creation and wake the thread that services
 * the pending LUN queue.  Always returns 0.
 */
int
ctl_add_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_softc *softc = control_softc;

	mtx_lock(&softc->ctl_lock);
	STAILQ_INSERT_TAIL(&softc->pending_lun_queue, be_lun, links);
	mtx_unlock(&softc->ctl_lock);
	wakeup(&softc->pending_lun_queue);

	return (0);
}

/*
 * Clear the DISABLED flag on a LUN and notify every online frontend
 * port (that has a lun_enable method and no LUN map) about it.
 * Always returns 0, even if a frontend reports an error.
 */
int
ctl_enable_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_softc *softc;
	struct ctl_port *port, *nport;
	struct ctl_lun *lun;
	int retval;

	lun = (struct ctl_lun *)be_lun->ctl_lun;
	softc = lun->ctl_softc;

	mtx_lock(&softc->ctl_lock);
	mtx_lock(&lun->lun_lock);
	if ((lun->flags & CTL_LUN_DISABLED) == 0) {
		/*
		 * eh?  Why did we get called if the LUN is already
		 * enabled?
		 */
		mtx_unlock(&lun->lun_lock);
		mtx_unlock(&softc->ctl_lock);
		return (0);
	}
	lun->flags &= ~CTL_LUN_DISABLED;
	mtx_unlock(&lun->lun_lock);

	for (port = STAILQ_FIRST(&softc->port_list); port != NULL; port = nport) {
		/* Grab the next pointer first; the lock is dropped below. */
		nport = STAILQ_NEXT(port, links);
		if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 ||
		    port->lun_map != NULL || port->lun_enable == NULL)
			continue;

		/*
		 * Drop the lock while we call the FETD's enable routine.
		 * This can lead to a callback into CTL (at least in the
		 * case of the internal initiator frontend.
		 */
		mtx_unlock(&softc->ctl_lock);
		retval = port->lun_enable(port->targ_lun_arg, lun->lun);
		mtx_lock(&softc->ctl_lock);
		if (retval != 0) {
			printf("%s: FETD %s port %d returned error "
			       "%d for lun_enable on lun %jd\n",
			       __func__, port->port_name, port->targ_port,
			       retval, (intmax_t)lun->lun);
		}
	}

	mtx_unlock(&softc->ctl_lock);

	return (0);
}

/*
 * Set the DISABLED flag on a LUN and notify the frontend ports, the
 * inverse of ctl_enable_lun().  Always returns 0.
 */
int
ctl_disable_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_softc *softc;
	struct ctl_port *port;
	struct ctl_lun *lun;
	int retval;

	lun = (struct ctl_lun *)be_lun->ctl_lun;
	softc = lun->ctl_softc;

	mtx_lock(&softc->ctl_lock);
	mtx_lock(&lun->lun_lock);
	if (lun->flags & CTL_LUN_DISABLED) {
		mtx_unlock(&lun->lun_lock);
		mtx_unlock(&softc->ctl_lock);
		return (0);
	}
	lun->flags |= CTL_LUN_DISABLED;
	mtx_unlock(&lun->lun_lock);

	STAILQ_FOREACH(port, &softc->port_list, links) {
		if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 ||
		    port->lun_map != NULL || port->lun_disable == NULL)
			continue;

		/*
		 * Drop the lock before we call the frontend's disable
		 * routine, to avoid lock order reversals.
		 *
		 * XXX KDM what happens if the frontend list changes while
		 * we're traversing it?  It's unlikely, but should be handled.
		 */
		mtx_unlock(&softc->ctl_lock);
		retval = port->lun_disable(port->targ_lun_arg, lun->lun);
		mtx_lock(&softc->ctl_lock);
		if (retval != 0) {
			printf("%s: FETD %s port %d returned error "
			       "%d for lun_disable on lun %jd\n",
			       __func__, port->port_name, port->targ_port,
			       retval, (intmax_t)lun->lun);
		}
	}

	mtx_unlock(&softc->ctl_lock);

	return (0);
}

/* Clear the STOPPED flag (backend says the LUN is started). */
int
ctl_start_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags &= ~CTL_LUN_STOPPED;
	mtx_unlock(&lun->lun_lock);
	return (0);
}

/* Set the STOPPED flag (backend says the LUN is stopped). */
int
ctl_stop_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags |= CTL_LUN_STOPPED;
	mtx_unlock(&lun->lun_lock);
	return (0);
}

/* Mark the LUN offline on behalf of the backend. */
int
ctl_lun_offline(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags |= CTL_LUN_OFFLINE;
	mtx_unlock(&lun->lun_lock);
	return (0);
}

/* Mark the LUN online on behalf of the backend. */
int
ctl_lun_online(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags &= ~CTL_LUN_OFFLINE;
	mtx_unlock(&lun->lun_lock);
	return (0);
}

/*
 * Mark a disabled LUN invalid and free it immediately if no I/O is
 * outstanding; otherwise the LUN is freed when the last I/O completes.
 * Returns -1 if the LUN has not been disabled first, 0 otherwise.
 */
int
ctl_invalidate_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_softc *softc;
	struct ctl_lun *lun;

	lun = (struct ctl_lun *)be_lun->ctl_lun;
	softc = lun->ctl_softc;

	mtx_lock(&lun->lun_lock);

	/*
	 * The LUN needs to be disabled before it can be marked invalid.
	 */
	if ((lun->flags & CTL_LUN_DISABLED) == 0) {
		mtx_unlock(&lun->lun_lock);
		return (-1);
	}
	/*
	 * Mark the LUN invalid.
	 */
	lun->flags |= CTL_LUN_INVALID;

	/*
	 * If there is nothing in the OOA queue, go ahead and free the LUN.
	 * If we have something in the OOA queue, we'll free it when the
	 * last I/O completes.
	 */
	if (TAILQ_EMPTY(&lun->ooa_queue)) {
		mtx_unlock(&lun->lun_lock);
		mtx_lock(&softc->ctl_lock);
		ctl_free_lun(lun);
		mtx_unlock(&softc->ctl_lock);
	} else
		mtx_unlock(&lun->lun_lock);

	return (0);
}

/* Set the INOPERABLE flag on behalf of the backend. */
int
ctl_lun_inoperable(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags |= CTL_LUN_INOPERABLE;
	mtx_unlock(&lun->lun_lock);
	return (0);
}

/* Clear the INOPERABLE flag on behalf of the backend. */
int
ctl_lun_operable(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags &= ~CTL_LUN_INOPERABLE;
	mtx_unlock(&lun->lun_lock);
	return (0);
}

/*
 * Establish a capacity-changed unit attention for all initiators on
 * this LUN, so they learn the size changed.
 */
void
ctl_lun_capacity_changed(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGED);
	mtx_unlock(&lun->lun_lock);
}

/*
 * Backend "memory move is complete" callback for requests that never
 * make it down to say RAIDCore's configuration code.
4533 */ 4534int 4535ctl_config_move_done(union ctl_io *io) 4536{ 4537 int retval; 4538 4539 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 4540 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 4541 ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type)); 4542 4543 if ((io->io_hdr.port_status != 0) && 4544 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 4545 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 4546 /* 4547 * For hardware error sense keys, the sense key 4548 * specific value is defined to be a retry count, 4549 * but we use it to pass back an internal FETD 4550 * error code. XXX KDM Hopefully the FETD is only 4551 * using 16 bits for an error code, since that's 4552 * all the space we have in the sks field. 4553 */ 4554 ctl_set_internal_failure(&io->scsiio, 4555 /*sks_valid*/ 1, 4556 /*retry_count*/ 4557 io->io_hdr.port_status); 4558 } 4559 4560 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || 4561 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 4562 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || 4563 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 4564 /* 4565 * XXX KDM just assuming a single pointer here, and not a 4566 * S/G list. If we start using S/G lists for config data, 4567 * we'll need to know how to clean them up here as well. 4568 */ 4569 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 4570 free(io->scsiio.kern_data_ptr, M_CTL); 4571 ctl_done(io); 4572 retval = CTL_RETVAL_COMPLETE; 4573 } else { 4574 /* 4575 * XXX KDM now we need to continue data movement. Some 4576 * options: 4577 * - call ctl_scsiio() again? We don't do this for data 4578 * writes, because for those at least we know ahead of 4579 * time where the write will go and how long it is. For 4580 * config writes, though, that information is largely 4581 * contained within the write itself, thus we need to 4582 * parse out the data again. 4583 * 4584 * - Call some other function once the data is in? 
4585 */ 4586 if (ctl_debug & CTL_DEBUG_CDB_DATA) 4587 ctl_data_print(io); 4588 4589 /* 4590 * XXX KDM call ctl_scsiio() again for now, and check flag 4591 * bits to see whether we're allocated or not. 4592 */ 4593 retval = ctl_scsiio(&io->scsiio); 4594 } 4595 return (retval); 4596} 4597 4598/* 4599 * This gets called by a backend driver when it is done with a 4600 * data_submit method. 4601 */ 4602void 4603ctl_data_submit_done(union ctl_io *io) 4604{ 4605 /* 4606 * If the IO_CONT flag is set, we need to call the supplied 4607 * function to continue processing the I/O, instead of completing 4608 * the I/O just yet. 4609 * 4610 * If there is an error, though, we don't want to keep processing. 4611 * Instead, just send status back to the initiator. 4612 */ 4613 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 4614 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 4615 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 4616 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 4617 io->scsiio.io_cont(io); 4618 return; 4619 } 4620 ctl_done(io); 4621} 4622 4623/* 4624 * This gets called by a backend driver when it is done with a 4625 * configuration write. 4626 */ 4627void 4628ctl_config_write_done(union ctl_io *io) 4629{ 4630 uint8_t *buf; 4631 4632 /* 4633 * If the IO_CONT flag is set, we need to call the supplied 4634 * function to continue processing the I/O, instead of completing 4635 * the I/O just yet. 4636 * 4637 * If there is an error, though, we don't want to keep processing. 4638 * Instead, just send status back to the initiator. 
4639 */ 4640 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 4641 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 4642 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 4643 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 4644 io->scsiio.io_cont(io); 4645 return; 4646 } 4647 /* 4648 * Since a configuration write can be done for commands that actually 4649 * have data allocated, like write buffer, and commands that have 4650 * no data, like start/stop unit, we need to check here. 4651 */ 4652 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 4653 buf = io->scsiio.kern_data_ptr; 4654 else 4655 buf = NULL; 4656 ctl_done(io); 4657 if (buf) 4658 free(buf, M_CTL); 4659} 4660 4661void 4662ctl_config_read_done(union ctl_io *io) 4663{ 4664 uint8_t *buf; 4665 4666 /* 4667 * If there is some error -- we are done, skip data transfer. 4668 */ 4669 if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || 4670 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 4671 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 4672 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 4673 buf = io->scsiio.kern_data_ptr; 4674 else 4675 buf = NULL; 4676 ctl_done(io); 4677 if (buf) 4678 free(buf, M_CTL); 4679 return; 4680 } 4681 4682 /* 4683 * If the IO_CONT flag is set, we need to call the supplied 4684 * function to continue processing the I/O, instead of completing 4685 * the I/O just yet. 4686 */ 4687 if (io->io_hdr.flags & CTL_FLAG_IO_CONT) { 4688 io->scsiio.io_cont(io); 4689 return; 4690 } 4691 4692 ctl_datamove(io); 4693} 4694 4695/* 4696 * SCSI release command. 
4697 */ 4698int 4699ctl_scsi_release(struct ctl_scsiio *ctsio) 4700{ 4701 int length, longid, thirdparty_id, resv_id; 4702 struct ctl_lun *lun; 4703 uint32_t residx; 4704 4705 length = 0; 4706 resv_id = 0; 4707 4708 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 4709 4710 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 4711 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 4712 4713 switch (ctsio->cdb[0]) { 4714 case RELEASE_10: { 4715 struct scsi_release_10 *cdb; 4716 4717 cdb = (struct scsi_release_10 *)ctsio->cdb; 4718 4719 if (cdb->byte2 & SR10_LONGID) 4720 longid = 1; 4721 else 4722 thirdparty_id = cdb->thirdparty_id; 4723 4724 resv_id = cdb->resv_id; 4725 length = scsi_2btoul(cdb->length); 4726 break; 4727 } 4728 } 4729 4730 4731 /* 4732 * XXX KDM right now, we only support LUN reservation. We don't 4733 * support 3rd party reservations, or extent reservations, which 4734 * might actually need the parameter list. If we've gotten this 4735 * far, we've got a LUN reservation. Anything else got kicked out 4736 * above. So, according to SPC, ignore the length. 4737 */ 4738 length = 0; 4739 4740 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 4741 && (length > 0)) { 4742 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 4743 ctsio->kern_data_len = length; 4744 ctsio->kern_total_len = length; 4745 ctsio->kern_data_resid = 0; 4746 ctsio->kern_rel_offset = 0; 4747 ctsio->kern_sg_entries = 0; 4748 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 4749 ctsio->be_move_done = ctl_config_move_done; 4750 ctl_datamove((union ctl_io *)ctsio); 4751 4752 return (CTL_RETVAL_COMPLETE); 4753 } 4754 4755 if (length > 0) 4756 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 4757 4758 mtx_lock(&lun->lun_lock); 4759 4760 /* 4761 * According to SPC, it is not an error for an intiator to attempt 4762 * to release a reservation on a LUN that isn't reserved, or that 4763 * is reserved by another initiator. 
The reservation can only be 4764 * released, though, by the initiator who made it or by one of 4765 * several reset type events. 4766 */ 4767 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) 4768 lun->flags &= ~CTL_LUN_RESERVED; 4769 4770 mtx_unlock(&lun->lun_lock); 4771 4772 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 4773 free(ctsio->kern_data_ptr, M_CTL); 4774 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 4775 } 4776 4777 ctl_set_success(ctsio); 4778 ctl_done((union ctl_io *)ctsio); 4779 return (CTL_RETVAL_COMPLETE); 4780} 4781 4782int 4783ctl_scsi_reserve(struct ctl_scsiio *ctsio) 4784{ 4785 int extent, thirdparty, longid; 4786 int resv_id, length; 4787 uint64_t thirdparty_id; 4788 struct ctl_lun *lun; 4789 uint32_t residx; 4790 4791 extent = 0; 4792 thirdparty = 0; 4793 longid = 0; 4794 resv_id = 0; 4795 length = 0; 4796 thirdparty_id = 0; 4797 4798 CTL_DEBUG_PRINT(("ctl_reserve\n")); 4799 4800 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 4801 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 4802 4803 switch (ctsio->cdb[0]) { 4804 case RESERVE_10: { 4805 struct scsi_reserve_10 *cdb; 4806 4807 cdb = (struct scsi_reserve_10 *)ctsio->cdb; 4808 4809 if (cdb->byte2 & SR10_LONGID) 4810 longid = 1; 4811 else 4812 thirdparty_id = cdb->thirdparty_id; 4813 4814 resv_id = cdb->resv_id; 4815 length = scsi_2btoul(cdb->length); 4816 break; 4817 } 4818 } 4819 4820 /* 4821 * XXX KDM right now, we only support LUN reservation. We don't 4822 * support 3rd party reservations, or extent reservations, which 4823 * might actually need the parameter list. If we've gotten this 4824 * far, we've got a LUN reservation. Anything else got kicked out 4825 * above. So, according to SPC, ignore the length. 
4826 */ 4827 length = 0; 4828 4829 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 4830 && (length > 0)) { 4831 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 4832 ctsio->kern_data_len = length; 4833 ctsio->kern_total_len = length; 4834 ctsio->kern_data_resid = 0; 4835 ctsio->kern_rel_offset = 0; 4836 ctsio->kern_sg_entries = 0; 4837 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 4838 ctsio->be_move_done = ctl_config_move_done; 4839 ctl_datamove((union ctl_io *)ctsio); 4840 4841 return (CTL_RETVAL_COMPLETE); 4842 } 4843 4844 if (length > 0) 4845 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 4846 4847 mtx_lock(&lun->lun_lock); 4848 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) { 4849 ctl_set_reservation_conflict(ctsio); 4850 goto bailout; 4851 } 4852 4853 lun->flags |= CTL_LUN_RESERVED; 4854 lun->res_idx = residx; 4855 4856 ctl_set_success(ctsio); 4857 4858bailout: 4859 mtx_unlock(&lun->lun_lock); 4860 4861 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 4862 free(ctsio->kern_data_ptr, M_CTL); 4863 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 4864 } 4865 4866 ctl_done((union ctl_io *)ctsio); 4867 return (CTL_RETVAL_COMPLETE); 4868} 4869 4870int 4871ctl_start_stop(struct ctl_scsiio *ctsio) 4872{ 4873 struct scsi_start_stop_unit *cdb; 4874 struct ctl_lun *lun; 4875 int retval; 4876 4877 CTL_DEBUG_PRINT(("ctl_start_stop\n")); 4878 4879 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 4880 retval = 0; 4881 4882 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; 4883 4884 /* 4885 * XXX KDM 4886 * We don't support the immediate bit on a stop unit. In order to 4887 * do that, we would need to code up a way to know that a stop is 4888 * pending, and hold off any new commands until it completes, one 4889 * way or another. Then we could accept or reject those commands 4890 * depending on its status. 
We would almost need to do the reverse 4891 * of what we do below for an immediate start -- return the copy of 4892 * the ctl_io to the FETD with status to send to the host (and to 4893 * free the copy!) and then free the original I/O once the stop 4894 * actually completes. That way, the OOA queue mechanism can work 4895 * to block commands that shouldn't proceed. Another alternative 4896 * would be to put the copy in the queue in place of the original, 4897 * and return the original back to the caller. That could be 4898 * slightly safer.. 4899 */ 4900 if ((cdb->byte2 & SSS_IMMED) 4901 && ((cdb->how & SSS_START) == 0)) { 4902 ctl_set_invalid_field(ctsio, 4903 /*sks_valid*/ 1, 4904 /*command*/ 1, 4905 /*field*/ 1, 4906 /*bit_valid*/ 1, 4907 /*bit*/ 0); 4908 ctl_done((union ctl_io *)ctsio); 4909 return (CTL_RETVAL_COMPLETE); 4910 } 4911 4912 if ((lun->flags & CTL_LUN_PR_RESERVED) 4913 && ((cdb->how & SSS_START)==0)) { 4914 uint32_t residx; 4915 4916 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 4917 if (ctl_get_prkey(lun, residx) == 0 4918 || (lun->pr_res_idx!=residx && lun->res_type < 4)) { 4919 4920 ctl_set_reservation_conflict(ctsio); 4921 ctl_done((union ctl_io *)ctsio); 4922 return (CTL_RETVAL_COMPLETE); 4923 } 4924 } 4925 4926 /* 4927 * If there is no backend on this device, we can't start or stop 4928 * it. In theory we shouldn't get any start/stop commands in the 4929 * first place at this level if the LUN doesn't have a backend. 4930 * That should get stopped by the command decode code. 4931 */ 4932 if (lun->backend == NULL) { 4933 ctl_set_invalid_opcode(ctsio); 4934 ctl_done((union ctl_io *)ctsio); 4935 return (CTL_RETVAL_COMPLETE); 4936 } 4937 4938 /* 4939 * XXX KDM Copan-specific offline behavior. 4940 * Figure out a reasonable way to port this? 
4941 */ 4942#ifdef NEEDTOPORT 4943 mtx_lock(&lun->lun_lock); 4944 4945 if (((cdb->byte2 & SSS_ONOFFLINE) == 0) 4946 && (lun->flags & CTL_LUN_OFFLINE)) { 4947 /* 4948 * If the LUN is offline, and the on/offline bit isn't set, 4949 * reject the start or stop. Otherwise, let it through. 4950 */ 4951 mtx_unlock(&lun->lun_lock); 4952 ctl_set_lun_not_ready(ctsio); 4953 ctl_done((union ctl_io *)ctsio); 4954 } else { 4955 mtx_unlock(&lun->lun_lock); 4956#endif /* NEEDTOPORT */ 4957 /* 4958 * This could be a start or a stop when we're online, 4959 * or a stop/offline or start/online. A start or stop when 4960 * we're offline is covered in the case above. 4961 */ 4962 /* 4963 * In the non-immediate case, we send the request to 4964 * the backend and return status to the user when 4965 * it is done. 4966 * 4967 * In the immediate case, we allocate a new ctl_io 4968 * to hold a copy of the request, and send that to 4969 * the backend. We then set good status on the 4970 * user's request and return it immediately. 4971 */ 4972 if (cdb->byte2 & SSS_IMMED) { 4973 union ctl_io *new_io; 4974 4975 new_io = ctl_alloc_io(ctsio->io_hdr.pool); 4976 ctl_copy_io((union ctl_io *)ctsio, new_io); 4977 retval = lun->backend->config_write(new_io); 4978 ctl_set_success(ctsio); 4979 ctl_done((union ctl_io *)ctsio); 4980 } else { 4981 retval = lun->backend->config_write( 4982 (union ctl_io *)ctsio); 4983 } 4984#ifdef NEEDTOPORT 4985 } 4986#endif 4987 return (retval); 4988} 4989 4990/* 4991 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 4992 * we don't really do anything with the LBA and length fields if the user 4993 * passes them in. Instead we'll just flush out the cache for the entire 4994 * LUN. 
4995 */ 4996int 4997ctl_sync_cache(struct ctl_scsiio *ctsio) 4998{ 4999 struct ctl_lun *lun; 5000 struct ctl_softc *softc; 5001 struct ctl_lba_len_flags *lbalen; 5002 uint64_t starting_lba; 5003 uint32_t block_count; 5004 int retval; 5005 uint8_t byte2; 5006 5007 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 5008 5009 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5010 softc = lun->ctl_softc; 5011 retval = 0; 5012 5013 switch (ctsio->cdb[0]) { 5014 case SYNCHRONIZE_CACHE: { 5015 struct scsi_sync_cache *cdb; 5016 cdb = (struct scsi_sync_cache *)ctsio->cdb; 5017 5018 starting_lba = scsi_4btoul(cdb->begin_lba); 5019 block_count = scsi_2btoul(cdb->lb_count); 5020 byte2 = cdb->byte2; 5021 break; 5022 } 5023 case SYNCHRONIZE_CACHE_16: { 5024 struct scsi_sync_cache_16 *cdb; 5025 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5026 5027 starting_lba = scsi_8btou64(cdb->begin_lba); 5028 block_count = scsi_4btoul(cdb->lb_count); 5029 byte2 = cdb->byte2; 5030 break; 5031 } 5032 default: 5033 ctl_set_invalid_opcode(ctsio); 5034 ctl_done((union ctl_io *)ctsio); 5035 goto bailout; 5036 break; /* NOTREACHED */ 5037 } 5038 5039 /* 5040 * We check the LBA and length, but don't do anything with them. 5041 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5042 * get flushed. This check will just help satisfy anyone who wants 5043 * to see an error for an out of range LBA. 5044 */ 5045 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5046 ctl_set_lba_out_of_range(ctsio); 5047 ctl_done((union ctl_io *)ctsio); 5048 goto bailout; 5049 } 5050 5051 /* 5052 * If this LUN has no backend, we can't flush the cache anyway. 
5053 */ 5054 if (lun->backend == NULL) { 5055 ctl_set_invalid_opcode(ctsio); 5056 ctl_done((union ctl_io *)ctsio); 5057 goto bailout; 5058 } 5059 5060 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5061 lbalen->lba = starting_lba; 5062 lbalen->len = block_count; 5063 lbalen->flags = byte2; 5064 5065 /* 5066 * Check to see whether we're configured to send the SYNCHRONIZE 5067 * CACHE command directly to the back end. 5068 */ 5069 mtx_lock(&lun->lun_lock); 5070 if ((softc->flags & CTL_FLAG_REAL_SYNC) 5071 && (++(lun->sync_count) >= lun->sync_interval)) { 5072 lun->sync_count = 0; 5073 mtx_unlock(&lun->lun_lock); 5074 retval = lun->backend->config_write((union ctl_io *)ctsio); 5075 } else { 5076 mtx_unlock(&lun->lun_lock); 5077 ctl_set_success(ctsio); 5078 ctl_done((union ctl_io *)ctsio); 5079 } 5080 5081bailout: 5082 5083 return (retval); 5084} 5085 5086int 5087ctl_format(struct ctl_scsiio *ctsio) 5088{ 5089 struct scsi_format *cdb; 5090 struct ctl_lun *lun; 5091 int length, defect_list_len; 5092 5093 CTL_DEBUG_PRINT(("ctl_format\n")); 5094 5095 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5096 5097 cdb = (struct scsi_format *)ctsio->cdb; 5098 5099 length = 0; 5100 if (cdb->byte2 & SF_FMTDATA) { 5101 if (cdb->byte2 & SF_LONGLIST) 5102 length = sizeof(struct scsi_format_header_long); 5103 else 5104 length = sizeof(struct scsi_format_header_short); 5105 } 5106 5107 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5108 && (length > 0)) { 5109 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5110 ctsio->kern_data_len = length; 5111 ctsio->kern_total_len = length; 5112 ctsio->kern_data_resid = 0; 5113 ctsio->kern_rel_offset = 0; 5114 ctsio->kern_sg_entries = 0; 5115 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5116 ctsio->be_move_done = ctl_config_move_done; 5117 ctl_datamove((union ctl_io *)ctsio); 5118 5119 return (CTL_RETVAL_COMPLETE); 5120 } 5121 5122 defect_list_len = 0; 5123 5124 if (cdb->byte2 & 
SF_FMTDATA) { 5125 if (cdb->byte2 & SF_LONGLIST) { 5126 struct scsi_format_header_long *header; 5127 5128 header = (struct scsi_format_header_long *) 5129 ctsio->kern_data_ptr; 5130 5131 defect_list_len = scsi_4btoul(header->defect_list_len); 5132 if (defect_list_len != 0) { 5133 ctl_set_invalid_field(ctsio, 5134 /*sks_valid*/ 1, 5135 /*command*/ 0, 5136 /*field*/ 2, 5137 /*bit_valid*/ 0, 5138 /*bit*/ 0); 5139 goto bailout; 5140 } 5141 } else { 5142 struct scsi_format_header_short *header; 5143 5144 header = (struct scsi_format_header_short *) 5145 ctsio->kern_data_ptr; 5146 5147 defect_list_len = scsi_2btoul(header->defect_list_len); 5148 if (defect_list_len != 0) { 5149 ctl_set_invalid_field(ctsio, 5150 /*sks_valid*/ 1, 5151 /*command*/ 0, 5152 /*field*/ 2, 5153 /*bit_valid*/ 0, 5154 /*bit*/ 0); 5155 goto bailout; 5156 } 5157 } 5158 } 5159 5160 /* 5161 * The format command will clear out the "Medium format corrupted" 5162 * status if set by the configuration code. That status is really 5163 * just a way to notify the host that we have lost the media, and 5164 * get them to issue a command that will basically make them think 5165 * they're blowing away the media. 
5166 */ 5167 mtx_lock(&lun->lun_lock); 5168 lun->flags &= ~CTL_LUN_INOPERABLE; 5169 mtx_unlock(&lun->lun_lock); 5170 5171 ctl_set_success(ctsio); 5172bailout: 5173 5174 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5175 free(ctsio->kern_data_ptr, M_CTL); 5176 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5177 } 5178 5179 ctl_done((union ctl_io *)ctsio); 5180 return (CTL_RETVAL_COMPLETE); 5181} 5182 5183int 5184ctl_read_buffer(struct ctl_scsiio *ctsio) 5185{ 5186 struct scsi_read_buffer *cdb; 5187 struct ctl_lun *lun; 5188 int buffer_offset, len; 5189 static uint8_t descr[4]; 5190 static uint8_t echo_descr[4] = { 0 }; 5191 5192 CTL_DEBUG_PRINT(("ctl_read_buffer\n")); 5193 5194 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5195 cdb = (struct scsi_read_buffer *)ctsio->cdb; 5196 5197 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA && 5198 (cdb->byte2 & RWB_MODE) != RWB_MODE_ECHO_DESCR && 5199 (cdb->byte2 & RWB_MODE) != RWB_MODE_DESCR) { 5200 ctl_set_invalid_field(ctsio, 5201 /*sks_valid*/ 1, 5202 /*command*/ 1, 5203 /*field*/ 1, 5204 /*bit_valid*/ 1, 5205 /*bit*/ 4); 5206 ctl_done((union ctl_io *)ctsio); 5207 return (CTL_RETVAL_COMPLETE); 5208 } 5209 5210 len = scsi_3btoul(cdb->length); 5211 buffer_offset = scsi_3btoul(cdb->offset); 5212 5213 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5214 ctl_set_invalid_field(ctsio, 5215 /*sks_valid*/ 1, 5216 /*command*/ 1, 5217 /*field*/ 6, 5218 /*bit_valid*/ 0, 5219 /*bit*/ 0); 5220 ctl_done((union ctl_io *)ctsio); 5221 return (CTL_RETVAL_COMPLETE); 5222 } 5223 5224 if ((cdb->byte2 & RWB_MODE) == RWB_MODE_DESCR) { 5225 descr[0] = 0; 5226 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); 5227 ctsio->kern_data_ptr = descr; 5228 len = min(len, sizeof(descr)); 5229 } else if ((cdb->byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { 5230 ctsio->kern_data_ptr = echo_descr; 5231 len = min(len, sizeof(echo_descr)); 5232 } else { 5233 if (lun->write_buffer == NULL) { 5234 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 
5235 M_CTL, M_WAITOK); 5236 } 5237 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5238 } 5239 ctsio->kern_data_len = len; 5240 ctsio->kern_total_len = len; 5241 ctsio->kern_data_resid = 0; 5242 ctsio->kern_rel_offset = 0; 5243 ctsio->kern_sg_entries = 0; 5244 ctl_set_success(ctsio); 5245 ctsio->be_move_done = ctl_config_move_done; 5246 ctl_datamove((union ctl_io *)ctsio); 5247 return (CTL_RETVAL_COMPLETE); 5248} 5249 5250int 5251ctl_write_buffer(struct ctl_scsiio *ctsio) 5252{ 5253 struct scsi_write_buffer *cdb; 5254 struct ctl_lun *lun; 5255 int buffer_offset, len; 5256 5257 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5258 5259 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5260 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5261 5262 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) { 5263 ctl_set_invalid_field(ctsio, 5264 /*sks_valid*/ 1, 5265 /*command*/ 1, 5266 /*field*/ 1, 5267 /*bit_valid*/ 1, 5268 /*bit*/ 4); 5269 ctl_done((union ctl_io *)ctsio); 5270 return (CTL_RETVAL_COMPLETE); 5271 } 5272 5273 len = scsi_3btoul(cdb->length); 5274 buffer_offset = scsi_3btoul(cdb->offset); 5275 5276 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5277 ctl_set_invalid_field(ctsio, 5278 /*sks_valid*/ 1, 5279 /*command*/ 1, 5280 /*field*/ 6, 5281 /*bit_valid*/ 0, 5282 /*bit*/ 0); 5283 ctl_done((union ctl_io *)ctsio); 5284 return (CTL_RETVAL_COMPLETE); 5285 } 5286 5287 /* 5288 * If we've got a kernel request that hasn't been malloced yet, 5289 * malloc it and tell the caller the data buffer is here. 
5290 */ 5291 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5292 if (lun->write_buffer == NULL) { 5293 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5294 M_CTL, M_WAITOK); 5295 } 5296 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5297 ctsio->kern_data_len = len; 5298 ctsio->kern_total_len = len; 5299 ctsio->kern_data_resid = 0; 5300 ctsio->kern_rel_offset = 0; 5301 ctsio->kern_sg_entries = 0; 5302 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5303 ctsio->be_move_done = ctl_config_move_done; 5304 ctl_datamove((union ctl_io *)ctsio); 5305 5306 return (CTL_RETVAL_COMPLETE); 5307 } 5308 5309 ctl_set_success(ctsio); 5310 ctl_done((union ctl_io *)ctsio); 5311 return (CTL_RETVAL_COMPLETE); 5312} 5313 5314int 5315ctl_write_same(struct ctl_scsiio *ctsio) 5316{ 5317 struct ctl_lun *lun; 5318 struct ctl_lba_len_flags *lbalen; 5319 uint64_t lba; 5320 uint32_t num_blocks; 5321 int len, retval; 5322 uint8_t byte2; 5323 5324 retval = CTL_RETVAL_COMPLETE; 5325 5326 CTL_DEBUG_PRINT(("ctl_write_same\n")); 5327 5328 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5329 5330 switch (ctsio->cdb[0]) { 5331 case WRITE_SAME_10: { 5332 struct scsi_write_same_10 *cdb; 5333 5334 cdb = (struct scsi_write_same_10 *)ctsio->cdb; 5335 5336 lba = scsi_4btoul(cdb->addr); 5337 num_blocks = scsi_2btoul(cdb->length); 5338 byte2 = cdb->byte2; 5339 break; 5340 } 5341 case WRITE_SAME_16: { 5342 struct scsi_write_same_16 *cdb; 5343 5344 cdb = (struct scsi_write_same_16 *)ctsio->cdb; 5345 5346 lba = scsi_8btou64(cdb->addr); 5347 num_blocks = scsi_4btoul(cdb->length); 5348 byte2 = cdb->byte2; 5349 break; 5350 } 5351 default: 5352 /* 5353 * We got a command we don't support. This shouldn't 5354 * happen, commands should be filtered out above us. 
5355 */ 5356 ctl_set_invalid_opcode(ctsio); 5357 ctl_done((union ctl_io *)ctsio); 5358 5359 return (CTL_RETVAL_COMPLETE); 5360 break; /* NOTREACHED */ 5361 } 5362 5363 /* NDOB and ANCHOR flags can be used only together with UNMAP */ 5364 if ((byte2 & SWS_UNMAP) == 0 && 5365 (byte2 & (SWS_NDOB | SWS_ANCHOR)) != 0) { 5366 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 5367 /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 5368 ctl_done((union ctl_io *)ctsio); 5369 return (CTL_RETVAL_COMPLETE); 5370 } 5371 5372 /* 5373 * The first check is to make sure we're in bounds, the second 5374 * check is to catch wrap-around problems. If the lba + num blocks 5375 * is less than the lba, then we've wrapped around and the block 5376 * range is invalid anyway. 5377 */ 5378 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5379 || ((lba + num_blocks) < lba)) { 5380 ctl_set_lba_out_of_range(ctsio); 5381 ctl_done((union ctl_io *)ctsio); 5382 return (CTL_RETVAL_COMPLETE); 5383 } 5384 5385 /* Zero number of blocks means "to the last logical block" */ 5386 if (num_blocks == 0) { 5387 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { 5388 ctl_set_invalid_field(ctsio, 5389 /*sks_valid*/ 0, 5390 /*command*/ 1, 5391 /*field*/ 0, 5392 /*bit_valid*/ 0, 5393 /*bit*/ 0); 5394 ctl_done((union ctl_io *)ctsio); 5395 return (CTL_RETVAL_COMPLETE); 5396 } 5397 num_blocks = (lun->be_lun->maxlba + 1) - lba; 5398 } 5399 5400 len = lun->be_lun->blocksize; 5401 5402 /* 5403 * If we've got a kernel request that hasn't been malloced yet, 5404 * malloc it and tell the caller the data buffer is here. 
5405 */ 5406 if ((byte2 & SWS_NDOB) == 0 && 5407 (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5408 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);; 5409 ctsio->kern_data_len = len; 5410 ctsio->kern_total_len = len; 5411 ctsio->kern_data_resid = 0; 5412 ctsio->kern_rel_offset = 0; 5413 ctsio->kern_sg_entries = 0; 5414 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5415 ctsio->be_move_done = ctl_config_move_done; 5416 ctl_datamove((union ctl_io *)ctsio); 5417 5418 return (CTL_RETVAL_COMPLETE); 5419 } 5420 5421 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5422 lbalen->lba = lba; 5423 lbalen->len = num_blocks; 5424 lbalen->flags = byte2; 5425 retval = lun->backend->config_write((union ctl_io *)ctsio); 5426 5427 return (retval); 5428} 5429 5430int 5431ctl_unmap(struct ctl_scsiio *ctsio) 5432{ 5433 struct ctl_lun *lun; 5434 struct scsi_unmap *cdb; 5435 struct ctl_ptr_len_flags *ptrlen; 5436 struct scsi_unmap_header *hdr; 5437 struct scsi_unmap_desc *buf, *end, *endnz, *range; 5438 uint64_t lba; 5439 uint32_t num_blocks; 5440 int len, retval; 5441 uint8_t byte2; 5442 5443 retval = CTL_RETVAL_COMPLETE; 5444 5445 CTL_DEBUG_PRINT(("ctl_unmap\n")); 5446 5447 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5448 cdb = (struct scsi_unmap *)ctsio->cdb; 5449 5450 len = scsi_2btoul(cdb->length); 5451 byte2 = cdb->byte2; 5452 5453 /* 5454 * If we've got a kernel request that hasn't been malloced yet, 5455 * malloc it and tell the caller the data buffer is here. 
5456 */ 5457 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5458 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);; 5459 ctsio->kern_data_len = len; 5460 ctsio->kern_total_len = len; 5461 ctsio->kern_data_resid = 0; 5462 ctsio->kern_rel_offset = 0; 5463 ctsio->kern_sg_entries = 0; 5464 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5465 ctsio->be_move_done = ctl_config_move_done; 5466 ctl_datamove((union ctl_io *)ctsio); 5467 5468 return (CTL_RETVAL_COMPLETE); 5469 } 5470 5471 len = ctsio->kern_total_len - ctsio->kern_data_resid; 5472 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; 5473 if (len < sizeof (*hdr) || 5474 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || 5475 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || 5476 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { 5477 ctl_set_invalid_field(ctsio, 5478 /*sks_valid*/ 0, 5479 /*command*/ 0, 5480 /*field*/ 0, 5481 /*bit_valid*/ 0, 5482 /*bit*/ 0); 5483 goto done; 5484 } 5485 len = scsi_2btoul(hdr->desc_length); 5486 buf = (struct scsi_unmap_desc *)(hdr + 1); 5487 end = buf + len / sizeof(*buf); 5488 5489 endnz = buf; 5490 for (range = buf; range < end; range++) { 5491 lba = scsi_8btou64(range->lba); 5492 num_blocks = scsi_4btoul(range->length); 5493 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5494 || ((lba + num_blocks) < lba)) { 5495 ctl_set_lba_out_of_range(ctsio); 5496 ctl_done((union ctl_io *)ctsio); 5497 return (CTL_RETVAL_COMPLETE); 5498 } 5499 if (num_blocks != 0) 5500 endnz = range + 1; 5501 } 5502 5503 /* 5504 * Block backend can not handle zero last range. 5505 * Filter it out and return if there is nothing left. 
5506 */ 5507 len = (uint8_t *)endnz - (uint8_t *)buf; 5508 if (len == 0) { 5509 ctl_set_success(ctsio); 5510 goto done; 5511 } 5512 5513 mtx_lock(&lun->lun_lock); 5514 ptrlen = (struct ctl_ptr_len_flags *) 5515 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5516 ptrlen->ptr = (void *)buf; 5517 ptrlen->len = len; 5518 ptrlen->flags = byte2; 5519 ctl_check_blocked(lun); 5520 mtx_unlock(&lun->lun_lock); 5521 5522 retval = lun->backend->config_write((union ctl_io *)ctsio); 5523 return (retval); 5524 5525done: 5526 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5527 free(ctsio->kern_data_ptr, M_CTL); 5528 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5529 } 5530 ctl_done((union ctl_io *)ctsio); 5531 return (CTL_RETVAL_COMPLETE); 5532} 5533 5534/* 5535 * Note that this function currently doesn't actually do anything inside 5536 * CTL to enforce things if the DQue bit is turned on. 5537 * 5538 * Also note that this function can't be used in the default case, because 5539 * the DQue bit isn't set in the changeable mask for the control mode page 5540 * anyway. This is just here as an example for how to implement a page 5541 * handler, and a placeholder in case we want to allow the user to turn 5542 * tagged queueing on and off. 5543 * 5544 * The D_SENSE bit handling is functional, however, and will turn 5545 * descriptor sense on and off for a given LUN. 
5546 */ 5547int 5548ctl_control_page_handler(struct ctl_scsiio *ctsio, 5549 struct ctl_page_index *page_index, uint8_t *page_ptr) 5550{ 5551 struct scsi_control_page *current_cp, *saved_cp, *user_cp; 5552 struct ctl_lun *lun; 5553 int set_ua; 5554 uint32_t initidx; 5555 5556 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5557 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5558 set_ua = 0; 5559 5560 user_cp = (struct scsi_control_page *)page_ptr; 5561 current_cp = (struct scsi_control_page *) 5562 (page_index->page_data + (page_index->page_len * 5563 CTL_PAGE_CURRENT)); 5564 saved_cp = (struct scsi_control_page *) 5565 (page_index->page_data + (page_index->page_len * 5566 CTL_PAGE_SAVED)); 5567 5568 mtx_lock(&lun->lun_lock); 5569 if (((current_cp->rlec & SCP_DSENSE) == 0) 5570 && ((user_cp->rlec & SCP_DSENSE) != 0)) { 5571 /* 5572 * Descriptor sense is currently turned off and the user 5573 * wants to turn it on. 5574 */ 5575 current_cp->rlec |= SCP_DSENSE; 5576 saved_cp->rlec |= SCP_DSENSE; 5577 lun->flags |= CTL_LUN_SENSE_DESC; 5578 set_ua = 1; 5579 } else if (((current_cp->rlec & SCP_DSENSE) != 0) 5580 && ((user_cp->rlec & SCP_DSENSE) == 0)) { 5581 /* 5582 * Descriptor sense is currently turned on, and the user 5583 * wants to turn it off. 
5584 */ 5585 current_cp->rlec &= ~SCP_DSENSE; 5586 saved_cp->rlec &= ~SCP_DSENSE; 5587 lun->flags &= ~CTL_LUN_SENSE_DESC; 5588 set_ua = 1; 5589 } 5590 if ((current_cp->queue_flags & SCP_QUEUE_ALG_MASK) != 5591 (user_cp->queue_flags & SCP_QUEUE_ALG_MASK)) { 5592 current_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK; 5593 current_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK; 5594 saved_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK; 5595 saved_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK; 5596 set_ua = 1; 5597 } 5598 if ((current_cp->eca_and_aen & SCP_SWP) != 5599 (user_cp->eca_and_aen & SCP_SWP)) { 5600 current_cp->eca_and_aen &= ~SCP_SWP; 5601 current_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP; 5602 saved_cp->eca_and_aen &= ~SCP_SWP; 5603 saved_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP; 5604 set_ua = 1; 5605 } 5606 if (set_ua != 0) 5607 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 5608 mtx_unlock(&lun->lun_lock); 5609 5610 return (0); 5611} 5612 5613int 5614ctl_caching_sp_handler(struct ctl_scsiio *ctsio, 5615 struct ctl_page_index *page_index, uint8_t *page_ptr) 5616{ 5617 struct scsi_caching_page *current_cp, *saved_cp, *user_cp; 5618 struct ctl_lun *lun; 5619 int set_ua; 5620 uint32_t initidx; 5621 5622 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5623 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5624 set_ua = 0; 5625 5626 user_cp = (struct scsi_caching_page *)page_ptr; 5627 current_cp = (struct scsi_caching_page *) 5628 (page_index->page_data + (page_index->page_len * 5629 CTL_PAGE_CURRENT)); 5630 saved_cp = (struct scsi_caching_page *) 5631 (page_index->page_data + (page_index->page_len * 5632 CTL_PAGE_SAVED)); 5633 5634 mtx_lock(&lun->lun_lock); 5635 if ((current_cp->flags1 & (SCP_WCE | SCP_RCD)) != 5636 (user_cp->flags1 & (SCP_WCE | SCP_RCD))) { 5637 current_cp->flags1 &= ~(SCP_WCE | SCP_RCD); 5638 current_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD); 5639 saved_cp->flags1 &= ~(SCP_WCE 
| SCP_RCD); 5640 saved_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD); 5641 set_ua = 1; 5642 } 5643 if (set_ua != 0) 5644 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 5645 mtx_unlock(&lun->lun_lock); 5646 5647 return (0); 5648} 5649 5650int 5651ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio, 5652 struct ctl_page_index *page_index, 5653 uint8_t *page_ptr) 5654{ 5655 uint8_t *c; 5656 int i; 5657 5658 c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs; 5659 ctl_time_io_secs = 5660 (c[0] << 8) | 5661 (c[1] << 0) | 5662 0; 5663 CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs)); 5664 printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs); 5665 printf("page data:"); 5666 for (i=0; i<8; i++) 5667 printf(" %.2x",page_ptr[i]); 5668 printf("\n"); 5669 return (0); 5670} 5671 5672int 5673ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio, 5674 struct ctl_page_index *page_index, 5675 int pc) 5676{ 5677 struct copan_debugconf_subpage *page; 5678 5679 page = (struct copan_debugconf_subpage *)page_index->page_data + 5680 (page_index->page_len * pc); 5681 5682 switch (pc) { 5683 case SMS_PAGE_CTRL_CHANGEABLE >> 6: 5684 case SMS_PAGE_CTRL_DEFAULT >> 6: 5685 case SMS_PAGE_CTRL_SAVED >> 6: 5686 /* 5687 * We don't update the changable or default bits for this page. 
5688 */ 5689 break; 5690 case SMS_PAGE_CTRL_CURRENT >> 6: 5691 page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8; 5692 page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0; 5693 break; 5694 default: 5695#ifdef NEEDTOPORT 5696 EPRINT(0, "Invalid PC %d!!", pc); 5697#endif /* NEEDTOPORT */ 5698 break; 5699 } 5700 return (0); 5701} 5702 5703 5704static int 5705ctl_do_mode_select(union ctl_io *io) 5706{ 5707 struct scsi_mode_page_header *page_header; 5708 struct ctl_page_index *page_index; 5709 struct ctl_scsiio *ctsio; 5710 int control_dev, page_len; 5711 int page_len_offset, page_len_size; 5712 union ctl_modepage_info *modepage_info; 5713 struct ctl_lun *lun; 5714 int *len_left, *len_used; 5715 int retval, i; 5716 5717 ctsio = &io->scsiio; 5718 page_index = NULL; 5719 page_len = 0; 5720 retval = CTL_RETVAL_COMPLETE; 5721 5722 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5723 5724 if (lun->be_lun->lun_type != T_DIRECT) 5725 control_dev = 1; 5726 else 5727 control_dev = 0; 5728 5729 modepage_info = (union ctl_modepage_info *) 5730 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 5731 len_left = &modepage_info->header.len_left; 5732 len_used = &modepage_info->header.len_used; 5733 5734do_next_page: 5735 5736 page_header = (struct scsi_mode_page_header *) 5737 (ctsio->kern_data_ptr + *len_used); 5738 5739 if (*len_left == 0) { 5740 free(ctsio->kern_data_ptr, M_CTL); 5741 ctl_set_success(ctsio); 5742 ctl_done((union ctl_io *)ctsio); 5743 return (CTL_RETVAL_COMPLETE); 5744 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 5745 5746 free(ctsio->kern_data_ptr, M_CTL); 5747 ctl_set_param_len_error(ctsio); 5748 ctl_done((union ctl_io *)ctsio); 5749 return (CTL_RETVAL_COMPLETE); 5750 5751 } else if ((page_header->page_code & SMPH_SPF) 5752 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 5753 5754 free(ctsio->kern_data_ptr, M_CTL); 5755 ctl_set_param_len_error(ctsio); 5756 ctl_done((union ctl_io *)ctsio); 5757 return 
(CTL_RETVAL_COMPLETE); 5758 } 5759 5760 5761 /* 5762 * XXX KDM should we do something with the block descriptor? 5763 */ 5764 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 5765 5766 if ((control_dev != 0) 5767 && (lun->mode_pages.index[i].page_flags & 5768 CTL_PAGE_FLAG_DISK_ONLY)) 5769 continue; 5770 5771 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) != 5772 (page_header->page_code & SMPH_PC_MASK)) 5773 continue; 5774 5775 /* 5776 * If neither page has a subpage code, then we've got a 5777 * match. 5778 */ 5779 if (((lun->mode_pages.index[i].page_code & SMPH_SPF) == 0) 5780 && ((page_header->page_code & SMPH_SPF) == 0)) { 5781 page_index = &lun->mode_pages.index[i]; 5782 page_len = page_header->page_length; 5783 break; 5784 } 5785 5786 /* 5787 * If both pages have subpages, then the subpage numbers 5788 * have to match. 5789 */ 5790 if ((lun->mode_pages.index[i].page_code & SMPH_SPF) 5791 && (page_header->page_code & SMPH_SPF)) { 5792 struct scsi_mode_page_header_sp *sph; 5793 5794 sph = (struct scsi_mode_page_header_sp *)page_header; 5795 5796 if (lun->mode_pages.index[i].subpage == 5797 sph->subpage) { 5798 page_index = &lun->mode_pages.index[i]; 5799 page_len = scsi_2btoul(sph->page_length); 5800 break; 5801 } 5802 } 5803 } 5804 5805 /* 5806 * If we couldn't find the page, or if we don't have a mode select 5807 * handler for it, send back an error to the user. 
5808 */ 5809 if ((page_index == NULL) 5810 || (page_index->select_handler == NULL)) { 5811 ctl_set_invalid_field(ctsio, 5812 /*sks_valid*/ 1, 5813 /*command*/ 0, 5814 /*field*/ *len_used, 5815 /*bit_valid*/ 0, 5816 /*bit*/ 0); 5817 free(ctsio->kern_data_ptr, M_CTL); 5818 ctl_done((union ctl_io *)ctsio); 5819 return (CTL_RETVAL_COMPLETE); 5820 } 5821 5822 if (page_index->page_code & SMPH_SPF) { 5823 page_len_offset = 2; 5824 page_len_size = 2; 5825 } else { 5826 page_len_size = 1; 5827 page_len_offset = 1; 5828 } 5829 5830 /* 5831 * If the length the initiator gives us isn't the one we specify in 5832 * the mode page header, or if they didn't specify enough data in 5833 * the CDB to avoid truncating this page, kick out the request. 5834 */ 5835 if ((page_len != (page_index->page_len - page_len_offset - 5836 page_len_size)) 5837 || (*len_left < page_index->page_len)) { 5838 5839 5840 ctl_set_invalid_field(ctsio, 5841 /*sks_valid*/ 1, 5842 /*command*/ 0, 5843 /*field*/ *len_used + page_len_offset, 5844 /*bit_valid*/ 0, 5845 /*bit*/ 0); 5846 free(ctsio->kern_data_ptr, M_CTL); 5847 ctl_done((union ctl_io *)ctsio); 5848 return (CTL_RETVAL_COMPLETE); 5849 } 5850 5851 /* 5852 * Run through the mode page, checking to make sure that the bits 5853 * the user changed are actually legal for him to change. 5854 */ 5855 for (i = 0; i < page_index->page_len; i++) { 5856 uint8_t *user_byte, *change_mask, *current_byte; 5857 int bad_bit; 5858 int j; 5859 5860 user_byte = (uint8_t *)page_header + i; 5861 change_mask = page_index->page_data + 5862 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; 5863 current_byte = page_index->page_data + 5864 (page_index->page_len * CTL_PAGE_CURRENT) + i; 5865 5866 /* 5867 * Check to see whether the user set any bits in this byte 5868 * that he is not allowed to set. 5869 */ 5870 if ((*user_byte & ~(*change_mask)) == 5871 (*current_byte & ~(*change_mask))) 5872 continue; 5873 5874 /* 5875 * Go through bit by bit to determine which one is illegal. 
5876 */ 5877 bad_bit = 0; 5878 for (j = 7; j >= 0; j--) { 5879 if ((((1 << i) & ~(*change_mask)) & *user_byte) != 5880 (((1 << i) & ~(*change_mask)) & *current_byte)) { 5881 bad_bit = i; 5882 break; 5883 } 5884 } 5885 ctl_set_invalid_field(ctsio, 5886 /*sks_valid*/ 1, 5887 /*command*/ 0, 5888 /*field*/ *len_used + i, 5889 /*bit_valid*/ 1, 5890 /*bit*/ bad_bit); 5891 free(ctsio->kern_data_ptr, M_CTL); 5892 ctl_done((union ctl_io *)ctsio); 5893 return (CTL_RETVAL_COMPLETE); 5894 } 5895 5896 /* 5897 * Decrement these before we call the page handler, since we may 5898 * end up getting called back one way or another before the handler 5899 * returns to this context. 5900 */ 5901 *len_left -= page_index->page_len; 5902 *len_used += page_index->page_len; 5903 5904 retval = page_index->select_handler(ctsio, page_index, 5905 (uint8_t *)page_header); 5906 5907 /* 5908 * If the page handler returns CTL_RETVAL_QUEUED, then we need to 5909 * wait until this queued command completes to finish processing 5910 * the mode page. If it returns anything other than 5911 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have 5912 * already set the sense information, freed the data pointer, and 5913 * completed the io for us. 5914 */ 5915 if (retval != CTL_RETVAL_COMPLETE) 5916 goto bailout_no_done; 5917 5918 /* 5919 * If the initiator sent us more than one page, parse the next one. 
5920 */ 5921 if (*len_left > 0) 5922 goto do_next_page; 5923 5924 ctl_set_success(ctsio); 5925 free(ctsio->kern_data_ptr, M_CTL); 5926 ctl_done((union ctl_io *)ctsio); 5927 5928bailout_no_done: 5929 5930 return (CTL_RETVAL_COMPLETE); 5931 5932} 5933 5934int 5935ctl_mode_select(struct ctl_scsiio *ctsio) 5936{ 5937 int param_len, pf, sp; 5938 int header_size, bd_len; 5939 int len_left, len_used; 5940 struct ctl_page_index *page_index; 5941 struct ctl_lun *lun; 5942 int control_dev, page_len; 5943 union ctl_modepage_info *modepage_info; 5944 int retval; 5945 5946 pf = 0; 5947 sp = 0; 5948 page_len = 0; 5949 len_used = 0; 5950 len_left = 0; 5951 retval = 0; 5952 bd_len = 0; 5953 page_index = NULL; 5954 5955 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5956 5957 if (lun->be_lun->lun_type != T_DIRECT) 5958 control_dev = 1; 5959 else 5960 control_dev = 0; 5961 5962 switch (ctsio->cdb[0]) { 5963 case MODE_SELECT_6: { 5964 struct scsi_mode_select_6 *cdb; 5965 5966 cdb = (struct scsi_mode_select_6 *)ctsio->cdb; 5967 5968 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 5969 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 5970 5971 param_len = cdb->length; 5972 header_size = sizeof(struct scsi_mode_header_6); 5973 break; 5974 } 5975 case MODE_SELECT_10: { 5976 struct scsi_mode_select_10 *cdb; 5977 5978 cdb = (struct scsi_mode_select_10 *)ctsio->cdb; 5979 5980 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 5981 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 5982 5983 param_len = scsi_2btoul(cdb->length); 5984 header_size = sizeof(struct scsi_mode_header_10); 5985 break; 5986 } 5987 default: 5988 ctl_set_invalid_opcode(ctsio); 5989 ctl_done((union ctl_io *)ctsio); 5990 return (CTL_RETVAL_COMPLETE); 5991 break; /* NOTREACHED */ 5992 } 5993 5994 /* 5995 * From SPC-3: 5996 * "A parameter list length of zero indicates that the Data-Out Buffer 5997 * shall be empty. This condition shall not be considered as an error." 
5998 */ 5999 if (param_len == 0) { 6000 ctl_set_success(ctsio); 6001 ctl_done((union ctl_io *)ctsio); 6002 return (CTL_RETVAL_COMPLETE); 6003 } 6004 6005 /* 6006 * Since we'll hit this the first time through, prior to 6007 * allocation, we don't need to free a data buffer here. 6008 */ 6009 if (param_len < header_size) { 6010 ctl_set_param_len_error(ctsio); 6011 ctl_done((union ctl_io *)ctsio); 6012 return (CTL_RETVAL_COMPLETE); 6013 } 6014 6015 /* 6016 * Allocate the data buffer and grab the user's data. In theory, 6017 * we shouldn't have to sanity check the parameter list length here 6018 * because the maximum size is 64K. We should be able to malloc 6019 * that much without too many problems. 6020 */ 6021 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6022 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 6023 ctsio->kern_data_len = param_len; 6024 ctsio->kern_total_len = param_len; 6025 ctsio->kern_data_resid = 0; 6026 ctsio->kern_rel_offset = 0; 6027 ctsio->kern_sg_entries = 0; 6028 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6029 ctsio->be_move_done = ctl_config_move_done; 6030 ctl_datamove((union ctl_io *)ctsio); 6031 6032 return (CTL_RETVAL_COMPLETE); 6033 } 6034 6035 switch (ctsio->cdb[0]) { 6036 case MODE_SELECT_6: { 6037 struct scsi_mode_header_6 *mh6; 6038 6039 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; 6040 bd_len = mh6->blk_desc_len; 6041 break; 6042 } 6043 case MODE_SELECT_10: { 6044 struct scsi_mode_header_10 *mh10; 6045 6046 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; 6047 bd_len = scsi_2btoul(mh10->blk_desc_len); 6048 break; 6049 } 6050 default: 6051 panic("Invalid CDB type %#x", ctsio->cdb[0]); 6052 break; 6053 } 6054 6055 if (param_len < (header_size + bd_len)) { 6056 free(ctsio->kern_data_ptr, M_CTL); 6057 ctl_set_param_len_error(ctsio); 6058 ctl_done((union ctl_io *)ctsio); 6059 return (CTL_RETVAL_COMPLETE); 6060 } 6061 6062 /* 6063 * Set the IO_CONT flag, so that if this I/O gets passed to 6064 * 
ctl_config_write_done(), it'll get passed back to 6065 * ctl_do_mode_select() for further processing, or completion if 6066 * we're all done. 6067 */ 6068 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6069 ctsio->io_cont = ctl_do_mode_select; 6070 6071 modepage_info = (union ctl_modepage_info *) 6072 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6073 6074 memset(modepage_info, 0, sizeof(*modepage_info)); 6075 6076 len_left = param_len - header_size - bd_len; 6077 len_used = header_size + bd_len; 6078 6079 modepage_info->header.len_left = len_left; 6080 modepage_info->header.len_used = len_used; 6081 6082 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6083} 6084 6085int 6086ctl_mode_sense(struct ctl_scsiio *ctsio) 6087{ 6088 struct ctl_lun *lun; 6089 int pc, page_code, dbd, llba, subpage; 6090 int alloc_len, page_len, header_len, total_len; 6091 struct scsi_mode_block_descr *block_desc; 6092 struct ctl_page_index *page_index; 6093 int control_dev; 6094 6095 dbd = 0; 6096 llba = 0; 6097 block_desc = NULL; 6098 page_index = NULL; 6099 6100 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6101 6102 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6103 6104 if (lun->be_lun->lun_type != T_DIRECT) 6105 control_dev = 1; 6106 else 6107 control_dev = 0; 6108 6109 switch (ctsio->cdb[0]) { 6110 case MODE_SENSE_6: { 6111 struct scsi_mode_sense_6 *cdb; 6112 6113 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6114 6115 header_len = sizeof(struct scsi_mode_hdr_6); 6116 if (cdb->byte2 & SMS_DBD) 6117 dbd = 1; 6118 else 6119 header_len += sizeof(struct scsi_mode_block_descr); 6120 6121 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6122 page_code = cdb->page & SMS_PAGE_CODE; 6123 subpage = cdb->subpage; 6124 alloc_len = cdb->length; 6125 break; 6126 } 6127 case MODE_SENSE_10: { 6128 struct scsi_mode_sense_10 *cdb; 6129 6130 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6131 6132 header_len = sizeof(struct scsi_mode_hdr_10); 6133 6134 if (cdb->byte2 & SMS_DBD) 6135 dbd = 
1; 6136 else 6137 header_len += sizeof(struct scsi_mode_block_descr); 6138 if (cdb->byte2 & SMS10_LLBAA) 6139 llba = 1; 6140 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6141 page_code = cdb->page & SMS_PAGE_CODE; 6142 subpage = cdb->subpage; 6143 alloc_len = scsi_2btoul(cdb->length); 6144 break; 6145 } 6146 default: 6147 ctl_set_invalid_opcode(ctsio); 6148 ctl_done((union ctl_io *)ctsio); 6149 return (CTL_RETVAL_COMPLETE); 6150 break; /* NOTREACHED */ 6151 } 6152 6153 /* 6154 * We have to make a first pass through to calculate the size of 6155 * the pages that match the user's query. Then we allocate enough 6156 * memory to hold it, and actually copy the data into the buffer. 6157 */ 6158 switch (page_code) { 6159 case SMS_ALL_PAGES_PAGE: { 6160 int i; 6161 6162 page_len = 0; 6163 6164 /* 6165 * At the moment, values other than 0 and 0xff here are 6166 * reserved according to SPC-3. 6167 */ 6168 if ((subpage != SMS_SUBPAGE_PAGE_0) 6169 && (subpage != SMS_SUBPAGE_ALL)) { 6170 ctl_set_invalid_field(ctsio, 6171 /*sks_valid*/ 1, 6172 /*command*/ 1, 6173 /*field*/ 3, 6174 /*bit_valid*/ 0, 6175 /*bit*/ 0); 6176 ctl_done((union ctl_io *)ctsio); 6177 return (CTL_RETVAL_COMPLETE); 6178 } 6179 6180 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6181 if ((control_dev != 0) 6182 && (lun->mode_pages.index[i].page_flags & 6183 CTL_PAGE_FLAG_DISK_ONLY)) 6184 continue; 6185 6186 /* 6187 * We don't use this subpage if the user didn't 6188 * request all subpages. 
6189 */ 6190 if ((lun->mode_pages.index[i].subpage != 0) 6191 && (subpage == SMS_SUBPAGE_PAGE_0)) 6192 continue; 6193 6194#if 0 6195 printf("found page %#x len %d\n", 6196 lun->mode_pages.index[i].page_code & 6197 SMPH_PC_MASK, 6198 lun->mode_pages.index[i].page_len); 6199#endif 6200 page_len += lun->mode_pages.index[i].page_len; 6201 } 6202 break; 6203 } 6204 default: { 6205 int i; 6206 6207 page_len = 0; 6208 6209 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6210 /* Look for the right page code */ 6211 if ((lun->mode_pages.index[i].page_code & 6212 SMPH_PC_MASK) != page_code) 6213 continue; 6214 6215 /* Look for the right subpage or the subpage wildcard*/ 6216 if ((lun->mode_pages.index[i].subpage != subpage) 6217 && (subpage != SMS_SUBPAGE_ALL)) 6218 continue; 6219 6220 /* Make sure the page is supported for this dev type */ 6221 if ((control_dev != 0) 6222 && (lun->mode_pages.index[i].page_flags & 6223 CTL_PAGE_FLAG_DISK_ONLY)) 6224 continue; 6225 6226#if 0 6227 printf("found page %#x len %d\n", 6228 lun->mode_pages.index[i].page_code & 6229 SMPH_PC_MASK, 6230 lun->mode_pages.index[i].page_len); 6231#endif 6232 6233 page_len += lun->mode_pages.index[i].page_len; 6234 } 6235 6236 if (page_len == 0) { 6237 ctl_set_invalid_field(ctsio, 6238 /*sks_valid*/ 1, 6239 /*command*/ 1, 6240 /*field*/ 2, 6241 /*bit_valid*/ 1, 6242 /*bit*/ 5); 6243 ctl_done((union ctl_io *)ctsio); 6244 return (CTL_RETVAL_COMPLETE); 6245 } 6246 break; 6247 } 6248 } 6249 6250 total_len = header_len + page_len; 6251#if 0 6252 printf("header_len = %d, page_len = %d, total_len = %d\n", 6253 header_len, page_len, total_len); 6254#endif 6255 6256 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6257 ctsio->kern_sg_entries = 0; 6258 ctsio->kern_data_resid = 0; 6259 ctsio->kern_rel_offset = 0; 6260 if (total_len < alloc_len) { 6261 ctsio->residual = alloc_len - total_len; 6262 ctsio->kern_data_len = total_len; 6263 ctsio->kern_total_len = total_len; 6264 } else { 6265 ctsio->residual 
= 0; 6266 ctsio->kern_data_len = alloc_len; 6267 ctsio->kern_total_len = alloc_len; 6268 } 6269 6270 switch (ctsio->cdb[0]) { 6271 case MODE_SENSE_6: { 6272 struct scsi_mode_hdr_6 *header; 6273 6274 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6275 6276 header->datalen = MIN(total_len - 1, 254); 6277 if (control_dev == 0) { 6278 header->dev_specific = 0x10; /* DPOFUA */ 6279 if ((lun->flags & CTL_LUN_READONLY) || 6280 (lun->mode_pages.control_page[CTL_PAGE_CURRENT] 6281 .eca_and_aen & SCP_SWP) != 0) 6282 header->dev_specific |= 0x80; /* WP */ 6283 } 6284 if (dbd) 6285 header->block_descr_len = 0; 6286 else 6287 header->block_descr_len = 6288 sizeof(struct scsi_mode_block_descr); 6289 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6290 break; 6291 } 6292 case MODE_SENSE_10: { 6293 struct scsi_mode_hdr_10 *header; 6294 int datalen; 6295 6296 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6297 6298 datalen = MIN(total_len - 2, 65533); 6299 scsi_ulto2b(datalen, header->datalen); 6300 if (control_dev == 0) { 6301 header->dev_specific = 0x10; /* DPOFUA */ 6302 if ((lun->flags & CTL_LUN_READONLY) || 6303 (lun->mode_pages.control_page[CTL_PAGE_CURRENT] 6304 .eca_and_aen & SCP_SWP) != 0) 6305 header->dev_specific |= 0x80; /* WP */ 6306 } 6307 if (dbd) 6308 scsi_ulto2b(0, header->block_descr_len); 6309 else 6310 scsi_ulto2b(sizeof(struct scsi_mode_block_descr), 6311 header->block_descr_len); 6312 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6313 break; 6314 } 6315 default: 6316 panic("invalid CDB type %#x", ctsio->cdb[0]); 6317 break; /* NOTREACHED */ 6318 } 6319 6320 /* 6321 * If we've got a disk, use its blocksize in the block 6322 * descriptor. Otherwise, just set it to 0. 
6323 */ 6324 if (dbd == 0) { 6325 if (control_dev == 0) 6326 scsi_ulto3b(lun->be_lun->blocksize, 6327 block_desc->block_len); 6328 else 6329 scsi_ulto3b(0, block_desc->block_len); 6330 } 6331 6332 switch (page_code) { 6333 case SMS_ALL_PAGES_PAGE: { 6334 int i, data_used; 6335 6336 data_used = header_len; 6337 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6338 struct ctl_page_index *page_index; 6339 6340 page_index = &lun->mode_pages.index[i]; 6341 6342 if ((control_dev != 0) 6343 && (page_index->page_flags & 6344 CTL_PAGE_FLAG_DISK_ONLY)) 6345 continue; 6346 6347 /* 6348 * We don't use this subpage if the user didn't 6349 * request all subpages. We already checked (above) 6350 * to make sure the user only specified a subpage 6351 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6352 */ 6353 if ((page_index->subpage != 0) 6354 && (subpage == SMS_SUBPAGE_PAGE_0)) 6355 continue; 6356 6357 /* 6358 * Call the handler, if it exists, to update the 6359 * page to the latest values. 6360 */ 6361 if (page_index->sense_handler != NULL) 6362 page_index->sense_handler(ctsio, page_index,pc); 6363 6364 memcpy(ctsio->kern_data_ptr + data_used, 6365 page_index->page_data + 6366 (page_index->page_len * pc), 6367 page_index->page_len); 6368 data_used += page_index->page_len; 6369 } 6370 break; 6371 } 6372 default: { 6373 int i, data_used; 6374 6375 data_used = header_len; 6376 6377 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6378 struct ctl_page_index *page_index; 6379 6380 page_index = &lun->mode_pages.index[i]; 6381 6382 /* Look for the right page code */ 6383 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6384 continue; 6385 6386 /* Look for the right subpage or the subpage wildcard*/ 6387 if ((page_index->subpage != subpage) 6388 && (subpage != SMS_SUBPAGE_ALL)) 6389 continue; 6390 6391 /* Make sure the page is supported for this dev type */ 6392 if ((control_dev != 0) 6393 && (page_index->page_flags & 6394 CTL_PAGE_FLAG_DISK_ONLY)) 6395 continue; 6396 6397 /* 6398 * Call 
the handler, if it exists, to update the 6399 * page to the latest values. 6400 */ 6401 if (page_index->sense_handler != NULL) 6402 page_index->sense_handler(ctsio, page_index,pc); 6403 6404 memcpy(ctsio->kern_data_ptr + data_used, 6405 page_index->page_data + 6406 (page_index->page_len * pc), 6407 page_index->page_len); 6408 data_used += page_index->page_len; 6409 } 6410 break; 6411 } 6412 } 6413 6414 ctl_set_success(ctsio); 6415 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6416 ctsio->be_move_done = ctl_config_move_done; 6417 ctl_datamove((union ctl_io *)ctsio); 6418 return (CTL_RETVAL_COMPLETE); 6419} 6420 6421int 6422ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, 6423 struct ctl_page_index *page_index, 6424 int pc) 6425{ 6426 struct ctl_lun *lun; 6427 struct scsi_log_param_header *phdr; 6428 uint8_t *data; 6429 uint64_t val; 6430 6431 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6432 data = page_index->page_data; 6433 6434 if (lun->backend->lun_attr != NULL && 6435 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail")) 6436 != UINT64_MAX) { 6437 phdr = (struct scsi_log_param_header *)data; 6438 scsi_ulto2b(0x0001, phdr->param_code); 6439 phdr->param_control = SLP_LBIN | SLP_LP; 6440 phdr->param_len = 8; 6441 data = (uint8_t *)(phdr + 1); 6442 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6443 data[4] = 0x02; /* per-pool */ 6444 data += phdr->param_len; 6445 } 6446 6447 if (lun->backend->lun_attr != NULL && 6448 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused")) 6449 != UINT64_MAX) { 6450 phdr = (struct scsi_log_param_header *)data; 6451 scsi_ulto2b(0x0002, phdr->param_code); 6452 phdr->param_control = SLP_LBIN | SLP_LP; 6453 phdr->param_len = 8; 6454 data = (uint8_t *)(phdr + 1); 6455 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6456 data[4] = 0x01; /* per-LUN */ 6457 data += phdr->param_len; 6458 } 6459 6460 if (lun->backend->lun_attr != NULL && 6461 (val = lun->backend->lun_attr(lun->be_lun->be_lun, 
"poolblocksavail")) 6462 != UINT64_MAX) { 6463 phdr = (struct scsi_log_param_header *)data; 6464 scsi_ulto2b(0x00f1, phdr->param_code); 6465 phdr->param_control = SLP_LBIN | SLP_LP; 6466 phdr->param_len = 8; 6467 data = (uint8_t *)(phdr + 1); 6468 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6469 data[4] = 0x02; /* per-pool */ 6470 data += phdr->param_len; 6471 } 6472 6473 if (lun->backend->lun_attr != NULL && 6474 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused")) 6475 != UINT64_MAX) { 6476 phdr = (struct scsi_log_param_header *)data; 6477 scsi_ulto2b(0x00f2, phdr->param_code); 6478 phdr->param_control = SLP_LBIN | SLP_LP; 6479 phdr->param_len = 8; 6480 data = (uint8_t *)(phdr + 1); 6481 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6482 data[4] = 0x02; /* per-pool */ 6483 data += phdr->param_len; 6484 } 6485 6486 page_index->page_len = data - page_index->page_data; 6487 return (0); 6488} 6489 6490int 6491ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio, 6492 struct ctl_page_index *page_index, 6493 int pc) 6494{ 6495 struct ctl_lun *lun; 6496 struct stat_page *data; 6497 uint64_t rn, wn, rb, wb; 6498 struct bintime rt, wt; 6499 int i; 6500 6501 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6502 data = (struct stat_page *)page_index->page_data; 6503 6504 scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code); 6505 data->sap.hdr.param_control = SLP_LBIN; 6506 data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) - 6507 sizeof(struct scsi_log_param_header); 6508 rn = wn = rb = wb = 0; 6509 bintime_clear(&rt); 6510 bintime_clear(&wt); 6511 for (i = 0; i < CTL_MAX_PORTS; i++) { 6512 rn += lun->stats.ports[i].operations[CTL_STATS_READ]; 6513 wn += lun->stats.ports[i].operations[CTL_STATS_WRITE]; 6514 rb += lun->stats.ports[i].bytes[CTL_STATS_READ]; 6515 wb += lun->stats.ports[i].bytes[CTL_STATS_WRITE]; 6516 bintime_add(&rt, &lun->stats.ports[i].time[CTL_STATS_READ]); 6517 bintime_add(&wt, 
&lun->stats.ports[i].time[CTL_STATS_WRITE]); 6518 } 6519 scsi_u64to8b(rn, data->sap.read_num); 6520 scsi_u64to8b(wn, data->sap.write_num); 6521 if (lun->stats.blocksize > 0) { 6522 scsi_u64to8b(wb / lun->stats.blocksize, 6523 data->sap.recvieved_lba); 6524 scsi_u64to8b(rb / lun->stats.blocksize, 6525 data->sap.transmitted_lba); 6526 } 6527 scsi_u64to8b((uint64_t)rt.sec * 1000 + rt.frac / (UINT64_MAX / 1000), 6528 data->sap.read_int); 6529 scsi_u64to8b((uint64_t)wt.sec * 1000 + wt.frac / (UINT64_MAX / 1000), 6530 data->sap.write_int); 6531 scsi_u64to8b(0, data->sap.weighted_num); 6532 scsi_u64to8b(0, data->sap.weighted_int); 6533 scsi_ulto2b(SLP_IT, data->it.hdr.param_code); 6534 data->it.hdr.param_control = SLP_LBIN; 6535 data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) - 6536 sizeof(struct scsi_log_param_header); 6537#ifdef CTL_TIME_IO 6538 scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int); 6539#endif 6540 scsi_ulto2b(SLP_TI, data->ti.hdr.param_code); 6541 data->it.hdr.param_control = SLP_LBIN; 6542 data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) - 6543 sizeof(struct scsi_log_param_header); 6544 scsi_ulto4b(3, data->ti.exponent); 6545 scsi_ulto4b(1, data->ti.integer); 6546 6547 page_index->page_len = sizeof(*data); 6548 return (0); 6549} 6550 6551int 6552ctl_log_sense(struct ctl_scsiio *ctsio) 6553{ 6554 struct ctl_lun *lun; 6555 int i, pc, page_code, subpage; 6556 int alloc_len, total_len; 6557 struct ctl_page_index *page_index; 6558 struct scsi_log_sense *cdb; 6559 struct scsi_log_header *header; 6560 6561 CTL_DEBUG_PRINT(("ctl_log_sense\n")); 6562 6563 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6564 cdb = (struct scsi_log_sense *)ctsio->cdb; 6565 pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6; 6566 page_code = cdb->page & SLS_PAGE_CODE; 6567 subpage = cdb->subpage; 6568 alloc_len = scsi_2btoul(cdb->length); 6569 6570 page_index = NULL; 6571 for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { 6572 page_index = 
&lun->log_pages.index[i]; 6573 6574 /* Look for the right page code */ 6575 if ((page_index->page_code & SL_PAGE_CODE) != page_code) 6576 continue; 6577 6578 /* Look for the right subpage or the subpage wildcard*/ 6579 if (page_index->subpage != subpage) 6580 continue; 6581 6582 break; 6583 } 6584 if (i >= CTL_NUM_LOG_PAGES) { 6585 ctl_set_invalid_field(ctsio, 6586 /*sks_valid*/ 1, 6587 /*command*/ 1, 6588 /*field*/ 2, 6589 /*bit_valid*/ 0, 6590 /*bit*/ 0); 6591 ctl_done((union ctl_io *)ctsio); 6592 return (CTL_RETVAL_COMPLETE); 6593 } 6594 6595 total_len = sizeof(struct scsi_log_header) + page_index->page_len; 6596 6597 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6598 ctsio->kern_sg_entries = 0; 6599 ctsio->kern_data_resid = 0; 6600 ctsio->kern_rel_offset = 0; 6601 if (total_len < alloc_len) { 6602 ctsio->residual = alloc_len - total_len; 6603 ctsio->kern_data_len = total_len; 6604 ctsio->kern_total_len = total_len; 6605 } else { 6606 ctsio->residual = 0; 6607 ctsio->kern_data_len = alloc_len; 6608 ctsio->kern_total_len = alloc_len; 6609 } 6610 6611 header = (struct scsi_log_header *)ctsio->kern_data_ptr; 6612 header->page = page_index->page_code; 6613 if (page_index->subpage) { 6614 header->page |= SL_SPF; 6615 header->subpage = page_index->subpage; 6616 } 6617 scsi_ulto2b(page_index->page_len, header->datalen); 6618 6619 /* 6620 * Call the handler, if it exists, to update the 6621 * page to the latest values. 
 */
	if (page_index->sense_handler != NULL)
		page_index->sense_handler(ctsio, page_index, pc);

	/* Copy the (possibly refreshed) page data after the log header. */
	memcpy(header + 1, page_index->page_data, page_index->page_len);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * READ CAPACITY(10): return the LUN's last LBA and block size.  If the
 * last LBA does not fit in 32 bits, 0xffffffff is reported so the
 * initiator falls back to READ CAPACITY(16).
 */
int
ctl_read_capacity(struct ctl_scsiio *ctsio)
{
	struct scsi_read_capacity *cdb;
	struct scsi_read_capacity_data *data;
	struct ctl_lun *lun;
	uint32_t lba;

	CTL_DEBUG_PRINT(("ctl_read_capacity\n"));

	cdb = (struct scsi_read_capacity *)ctsio->cdb;

	/* A non-zero LBA is only legal together with the PMI bit. */
	lba = scsi_4btoul(cdb->addr);
	if (((cdb->pmi & SRC_PMI) == 0)
	 && (lba != 0)) {
		ctl_set_invalid_field(/*ctsio*/ ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
	data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr;
	ctsio->residual = 0;
	ctsio->kern_data_len = sizeof(*data);
	ctsio->kern_total_len = sizeof(*data);
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * If the maximum LBA is greater than 0xfffffffe, the user must
	 * issue a SERVICE ACTION IN (16) command, with the read capacity
	 * service action set.
	 */
	if (lun->be_lun->maxlba > 0xfffffffe)
		scsi_ulto4b(0xffffffff, data->addr);
	else
		scsi_ulto4b(lun->be_lun->maxlba, data->addr);

	/*
	 * XXX KDM this may not be 512 bytes...
6683 */ 6684 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6685 6686 ctl_set_success(ctsio); 6687 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6688 ctsio->be_move_done = ctl_config_move_done; 6689 ctl_datamove((union ctl_io *)ctsio); 6690 return (CTL_RETVAL_COMPLETE); 6691} 6692 6693int 6694ctl_read_capacity_16(struct ctl_scsiio *ctsio) 6695{ 6696 struct scsi_read_capacity_16 *cdb; 6697 struct scsi_read_capacity_data_long *data; 6698 struct ctl_lun *lun; 6699 uint64_t lba; 6700 uint32_t alloc_len; 6701 6702 CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); 6703 6704 cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; 6705 6706 alloc_len = scsi_4btoul(cdb->alloc_len); 6707 lba = scsi_8btou64(cdb->addr); 6708 6709 if ((cdb->reladr & SRC16_PMI) 6710 && (lba != 0)) { 6711 ctl_set_invalid_field(/*ctsio*/ ctsio, 6712 /*sks_valid*/ 1, 6713 /*command*/ 1, 6714 /*field*/ 2, 6715 /*bit_valid*/ 0, 6716 /*bit*/ 0); 6717 ctl_done((union ctl_io *)ctsio); 6718 return (CTL_RETVAL_COMPLETE); 6719 } 6720 6721 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6722 6723 ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); 6724 data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; 6725 6726 if (sizeof(*data) < alloc_len) { 6727 ctsio->residual = alloc_len - sizeof(*data); 6728 ctsio->kern_data_len = sizeof(*data); 6729 ctsio->kern_total_len = sizeof(*data); 6730 } else { 6731 ctsio->residual = 0; 6732 ctsio->kern_data_len = alloc_len; 6733 ctsio->kern_total_len = alloc_len; 6734 } 6735 ctsio->kern_data_resid = 0; 6736 ctsio->kern_rel_offset = 0; 6737 ctsio->kern_sg_entries = 0; 6738 6739 scsi_u64to8b(lun->be_lun->maxlba, data->addr); 6740 /* XXX KDM this may not be 512 bytes... 
*/ 6741 scsi_ulto4b(lun->be_lun->blocksize, data->length); 6742 data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; 6743 scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); 6744 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) 6745 data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; 6746 6747 ctl_set_success(ctsio); 6748 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6749 ctsio->be_move_done = ctl_config_move_done; 6750 ctl_datamove((union ctl_io *)ctsio); 6751 return (CTL_RETVAL_COMPLETE); 6752} 6753 6754int 6755ctl_get_lba_status(struct ctl_scsiio *ctsio) 6756{ 6757 struct scsi_get_lba_status *cdb; 6758 struct scsi_get_lba_status_data *data; 6759 struct ctl_lun *lun; 6760 struct ctl_lba_len_flags *lbalen; 6761 uint64_t lba; 6762 uint32_t alloc_len, total_len; 6763 int retval; 6764 6765 CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); 6766 6767 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6768 cdb = (struct scsi_get_lba_status *)ctsio->cdb; 6769 lba = scsi_8btou64(cdb->addr); 6770 alloc_len = scsi_4btoul(cdb->alloc_len); 6771 6772 if (lba > lun->be_lun->maxlba) { 6773 ctl_set_lba_out_of_range(ctsio); 6774 ctl_done((union ctl_io *)ctsio); 6775 return (CTL_RETVAL_COMPLETE); 6776 } 6777 6778 total_len = sizeof(*data) + sizeof(data->descr[0]); 6779 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6780 data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; 6781 6782 if (total_len < alloc_len) { 6783 ctsio->residual = alloc_len - total_len; 6784 ctsio->kern_data_len = total_len; 6785 ctsio->kern_total_len = total_len; 6786 } else { 6787 ctsio->residual = 0; 6788 ctsio->kern_data_len = alloc_len; 6789 ctsio->kern_total_len = alloc_len; 6790 } 6791 ctsio->kern_data_resid = 0; 6792 ctsio->kern_rel_offset = 0; 6793 ctsio->kern_sg_entries = 0; 6794 6795 /* Fill dummy data in case backend can't tell anything. 
*/ 6796 scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); 6797 scsi_u64to8b(lba, data->descr[0].addr); 6798 scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), 6799 data->descr[0].length); 6800 data->descr[0].status = 0; /* Mapped or unknown. */ 6801 6802 ctl_set_success(ctsio); 6803 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6804 ctsio->be_move_done = ctl_config_move_done; 6805 6806 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 6807 lbalen->lba = lba; 6808 lbalen->len = total_len; 6809 lbalen->flags = 0; 6810 retval = lun->backend->config_read((union ctl_io *)ctsio); 6811 return (CTL_RETVAL_COMPLETE); 6812} 6813 6814int 6815ctl_read_defect(struct ctl_scsiio *ctsio) 6816{ 6817 struct scsi_read_defect_data_10 *ccb10; 6818 struct scsi_read_defect_data_12 *ccb12; 6819 struct scsi_read_defect_data_hdr_10 *data10; 6820 struct scsi_read_defect_data_hdr_12 *data12; 6821 uint32_t alloc_len, data_len; 6822 uint8_t format; 6823 6824 CTL_DEBUG_PRINT(("ctl_read_defect\n")); 6825 6826 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 6827 ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; 6828 format = ccb10->format; 6829 alloc_len = scsi_2btoul(ccb10->alloc_length); 6830 data_len = sizeof(*data10); 6831 } else { 6832 ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; 6833 format = ccb12->format; 6834 alloc_len = scsi_4btoul(ccb12->alloc_length); 6835 data_len = sizeof(*data12); 6836 } 6837 if (alloc_len == 0) { 6838 ctl_set_success(ctsio); 6839 ctl_done((union ctl_io *)ctsio); 6840 return (CTL_RETVAL_COMPLETE); 6841 } 6842 6843 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 6844 if (data_len < alloc_len) { 6845 ctsio->residual = alloc_len - data_len; 6846 ctsio->kern_data_len = data_len; 6847 ctsio->kern_total_len = data_len; 6848 } else { 6849 ctsio->residual = 0; 6850 ctsio->kern_data_len = alloc_len; 6851 ctsio->kern_total_len = alloc_len; 6852 } 6853 ctsio->kern_data_resid = 0; 6854 
ctsio->kern_rel_offset = 0; 6855 ctsio->kern_sg_entries = 0; 6856 6857 if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { 6858 data10 = (struct scsi_read_defect_data_hdr_10 *) 6859 ctsio->kern_data_ptr; 6860 data10->format = format; 6861 scsi_ulto2b(0, data10->length); 6862 } else { 6863 data12 = (struct scsi_read_defect_data_hdr_12 *) 6864 ctsio->kern_data_ptr; 6865 data12->format = format; 6866 scsi_ulto2b(0, data12->generation); 6867 scsi_ulto4b(0, data12->length); 6868 } 6869 6870 ctl_set_success(ctsio); 6871 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6872 ctsio->be_move_done = ctl_config_move_done; 6873 ctl_datamove((union ctl_io *)ctsio); 6874 return (CTL_RETVAL_COMPLETE); 6875} 6876 6877int 6878ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) 6879{ 6880 struct scsi_maintenance_in *cdb; 6881 int retval; 6882 int alloc_len, ext, total_len = 0, g, p, pc, pg, gs, os; 6883 int num_target_port_groups, num_target_ports; 6884 struct ctl_lun *lun; 6885 struct ctl_softc *softc; 6886 struct ctl_port *port; 6887 struct scsi_target_group_data *rtg_ptr; 6888 struct scsi_target_group_data_extended *rtg_ext_ptr; 6889 struct scsi_target_port_group_descriptor *tpg_desc; 6890 6891 CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); 6892 6893 cdb = (struct scsi_maintenance_in *)ctsio->cdb; 6894 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6895 softc = lun->ctl_softc; 6896 6897 retval = CTL_RETVAL_COMPLETE; 6898 6899 switch (cdb->byte2 & STG_PDF_MASK) { 6900 case STG_PDF_LENGTH: 6901 ext = 0; 6902 break; 6903 case STG_PDF_EXTENDED: 6904 ext = 1; 6905 break; 6906 default: 6907 ctl_set_invalid_field(/*ctsio*/ ctsio, 6908 /*sks_valid*/ 1, 6909 /*command*/ 1, 6910 /*field*/ 2, 6911 /*bit_valid*/ 1, 6912 /*bit*/ 5); 6913 ctl_done((union ctl_io *)ctsio); 6914 return(retval); 6915 } 6916 6917 if (softc->is_single) 6918 num_target_port_groups = 1; 6919 else 6920 num_target_port_groups = NUM_TARGET_PORT_GROUPS; 6921 num_target_ports = 0; 6922 
mtx_lock(&softc->ctl_lock); 6923 STAILQ_FOREACH(port, &softc->port_list, links) { 6924 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 6925 continue; 6926 if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 6927 continue; 6928 num_target_ports++; 6929 } 6930 mtx_unlock(&softc->ctl_lock); 6931 6932 if (ext) 6933 total_len = sizeof(struct scsi_target_group_data_extended); 6934 else 6935 total_len = sizeof(struct scsi_target_group_data); 6936 total_len += sizeof(struct scsi_target_port_group_descriptor) * 6937 num_target_port_groups + 6938 sizeof(struct scsi_target_port_descriptor) * 6939 num_target_ports * num_target_port_groups; 6940 6941 alloc_len = scsi_4btoul(cdb->length); 6942 6943 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6944 6945 ctsio->kern_sg_entries = 0; 6946 6947 if (total_len < alloc_len) { 6948 ctsio->residual = alloc_len - total_len; 6949 ctsio->kern_data_len = total_len; 6950 ctsio->kern_total_len = total_len; 6951 } else { 6952 ctsio->residual = 0; 6953 ctsio->kern_data_len = alloc_len; 6954 ctsio->kern_total_len = alloc_len; 6955 } 6956 ctsio->kern_data_resid = 0; 6957 ctsio->kern_rel_offset = 0; 6958 6959 if (ext) { 6960 rtg_ext_ptr = (struct scsi_target_group_data_extended *) 6961 ctsio->kern_data_ptr; 6962 scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); 6963 rtg_ext_ptr->format_type = 0x10; 6964 rtg_ext_ptr->implicit_transition_time = 0; 6965 tpg_desc = &rtg_ext_ptr->groups[0]; 6966 } else { 6967 rtg_ptr = (struct scsi_target_group_data *) 6968 ctsio->kern_data_ptr; 6969 scsi_ulto4b(total_len - 4, rtg_ptr->length); 6970 tpg_desc = &rtg_ptr->groups[0]; 6971 } 6972 6973 mtx_lock(&softc->ctl_lock); 6974 pg = softc->port_offset / CTL_MAX_PORTS; 6975 if (softc->flags & CTL_FLAG_ACTIVE_SHELF) { 6976 if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) { 6977 gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 6978 os = TPG_ASYMMETRIC_ACCESS_STANDBY; 6979 } else if (lun->flags & CTL_LUN_PRIMARY_SC) { 6980 gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 
6981 os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 6982 } else { 6983 gs = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; 6984 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 6985 } 6986 } else { 6987 gs = TPG_ASYMMETRIC_ACCESS_STANDBY; 6988 os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; 6989 } 6990 for (g = 0; g < num_target_port_groups; g++) { 6991 tpg_desc->pref_state = (g == pg) ? gs : os; 6992 tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP; 6993 scsi_ulto2b(g + 1, tpg_desc->target_port_group); 6994 tpg_desc->status = TPG_IMPLICIT; 6995 pc = 0; 6996 STAILQ_FOREACH(port, &softc->port_list, links) { 6997 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 6998 continue; 6999 if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 7000 continue; 7001 p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS; 7002 scsi_ulto2b(p, tpg_desc->descriptors[pc]. 7003 relative_target_port_identifier); 7004 pc++; 7005 } 7006 tpg_desc->target_port_count = pc; 7007 tpg_desc = (struct scsi_target_port_group_descriptor *) 7008 &tpg_desc->descriptors[pc]; 7009 } 7010 mtx_unlock(&softc->ctl_lock); 7011 7012 ctl_set_success(ctsio); 7013 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7014 ctsio->be_move_done = ctl_config_move_done; 7015 ctl_datamove((union ctl_io *)ctsio); 7016 return(retval); 7017} 7018 7019int 7020ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) 7021{ 7022 struct ctl_lun *lun; 7023 struct scsi_report_supported_opcodes *cdb; 7024 const struct ctl_cmd_entry *entry, *sentry; 7025 struct scsi_report_supported_opcodes_all *all; 7026 struct scsi_report_supported_opcodes_descr *descr; 7027 struct scsi_report_supported_opcodes_one *one; 7028 int retval; 7029 int alloc_len, total_len; 7030 int opcode, service_action, i, j, num; 7031 7032 CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); 7033 7034 cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; 7035 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7036 7037 retval = CTL_RETVAL_COMPLETE; 7038 7039 opcode = 
cdb->requested_opcode; 7040 service_action = scsi_2btoul(cdb->requested_service_action); 7041 switch (cdb->options & RSO_OPTIONS_MASK) { 7042 case RSO_OPTIONS_ALL: 7043 num = 0; 7044 for (i = 0; i < 256; i++) { 7045 entry = &ctl_cmd_table[i]; 7046 if (entry->flags & CTL_CMD_FLAG_SA5) { 7047 for (j = 0; j < 32; j++) { 7048 sentry = &((const struct ctl_cmd_entry *) 7049 entry->execute)[j]; 7050 if (ctl_cmd_applicable( 7051 lun->be_lun->lun_type, sentry)) 7052 num++; 7053 } 7054 } else { 7055 if (ctl_cmd_applicable(lun->be_lun->lun_type, 7056 entry)) 7057 num++; 7058 } 7059 } 7060 total_len = sizeof(struct scsi_report_supported_opcodes_all) + 7061 num * sizeof(struct scsi_report_supported_opcodes_descr); 7062 break; 7063 case RSO_OPTIONS_OC: 7064 if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { 7065 ctl_set_invalid_field(/*ctsio*/ ctsio, 7066 /*sks_valid*/ 1, 7067 /*command*/ 1, 7068 /*field*/ 2, 7069 /*bit_valid*/ 1, 7070 /*bit*/ 2); 7071 ctl_done((union ctl_io *)ctsio); 7072 return (CTL_RETVAL_COMPLETE); 7073 } 7074 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7075 break; 7076 case RSO_OPTIONS_OC_SA: 7077 if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || 7078 service_action >= 32) { 7079 ctl_set_invalid_field(/*ctsio*/ ctsio, 7080 /*sks_valid*/ 1, 7081 /*command*/ 1, 7082 /*field*/ 2, 7083 /*bit_valid*/ 1, 7084 /*bit*/ 2); 7085 ctl_done((union ctl_io *)ctsio); 7086 return (CTL_RETVAL_COMPLETE); 7087 } 7088 total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; 7089 break; 7090 default: 7091 ctl_set_invalid_field(/*ctsio*/ ctsio, 7092 /*sks_valid*/ 1, 7093 /*command*/ 1, 7094 /*field*/ 2, 7095 /*bit_valid*/ 1, 7096 /*bit*/ 2); 7097 ctl_done((union ctl_io *)ctsio); 7098 return (CTL_RETVAL_COMPLETE); 7099 } 7100 7101 alloc_len = scsi_4btoul(cdb->length); 7102 7103 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7104 7105 ctsio->kern_sg_entries = 0; 7106 7107 if (total_len < alloc_len) { 7108 
ctsio->residual = alloc_len - total_len; 7109 ctsio->kern_data_len = total_len; 7110 ctsio->kern_total_len = total_len; 7111 } else { 7112 ctsio->residual = 0; 7113 ctsio->kern_data_len = alloc_len; 7114 ctsio->kern_total_len = alloc_len; 7115 } 7116 ctsio->kern_data_resid = 0; 7117 ctsio->kern_rel_offset = 0; 7118 7119 switch (cdb->options & RSO_OPTIONS_MASK) { 7120 case RSO_OPTIONS_ALL: 7121 all = (struct scsi_report_supported_opcodes_all *) 7122 ctsio->kern_data_ptr; 7123 num = 0; 7124 for (i = 0; i < 256; i++) { 7125 entry = &ctl_cmd_table[i]; 7126 if (entry->flags & CTL_CMD_FLAG_SA5) { 7127 for (j = 0; j < 32; j++) { 7128 sentry = &((const struct ctl_cmd_entry *) 7129 entry->execute)[j]; 7130 if (!ctl_cmd_applicable( 7131 lun->be_lun->lun_type, sentry)) 7132 continue; 7133 descr = &all->descr[num++]; 7134 descr->opcode = i; 7135 scsi_ulto2b(j, descr->service_action); 7136 descr->flags = RSO_SERVACTV; 7137 scsi_ulto2b(sentry->length, 7138 descr->cdb_length); 7139 } 7140 } else { 7141 if (!ctl_cmd_applicable(lun->be_lun->lun_type, 7142 entry)) 7143 continue; 7144 descr = &all->descr[num++]; 7145 descr->opcode = i; 7146 scsi_ulto2b(0, descr->service_action); 7147 descr->flags = 0; 7148 scsi_ulto2b(entry->length, descr->cdb_length); 7149 } 7150 } 7151 scsi_ulto4b( 7152 num * sizeof(struct scsi_report_supported_opcodes_descr), 7153 all->length); 7154 break; 7155 case RSO_OPTIONS_OC: 7156 one = (struct scsi_report_supported_opcodes_one *) 7157 ctsio->kern_data_ptr; 7158 entry = &ctl_cmd_table[opcode]; 7159 goto fill_one; 7160 case RSO_OPTIONS_OC_SA: 7161 one = (struct scsi_report_supported_opcodes_one *) 7162 ctsio->kern_data_ptr; 7163 entry = &ctl_cmd_table[opcode]; 7164 entry = &((const struct ctl_cmd_entry *) 7165 entry->execute)[service_action]; 7166fill_one: 7167 if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 7168 one->support = 3; 7169 scsi_ulto2b(entry->length, one->cdb_length); 7170 one->cdb_usage[0] = opcode; 7171 memcpy(&one->cdb_usage[1], 
entry->usage, 7172 entry->length - 1); 7173 } else 7174 one->support = 1; 7175 break; 7176 } 7177 7178 ctl_set_success(ctsio); 7179 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7180 ctsio->be_move_done = ctl_config_move_done; 7181 ctl_datamove((union ctl_io *)ctsio); 7182 return(retval); 7183} 7184 7185int 7186ctl_report_supported_tmf(struct ctl_scsiio *ctsio) 7187{ 7188 struct scsi_report_supported_tmf *cdb; 7189 struct scsi_report_supported_tmf_data *data; 7190 int retval; 7191 int alloc_len, total_len; 7192 7193 CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); 7194 7195 cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; 7196 7197 retval = CTL_RETVAL_COMPLETE; 7198 7199 total_len = sizeof(struct scsi_report_supported_tmf_data); 7200 alloc_len = scsi_4btoul(cdb->length); 7201 7202 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7203 7204 ctsio->kern_sg_entries = 0; 7205 7206 if (total_len < alloc_len) { 7207 ctsio->residual = alloc_len - total_len; 7208 ctsio->kern_data_len = total_len; 7209 ctsio->kern_total_len = total_len; 7210 } else { 7211 ctsio->residual = 0; 7212 ctsio->kern_data_len = alloc_len; 7213 ctsio->kern_total_len = alloc_len; 7214 } 7215 ctsio->kern_data_resid = 0; 7216 ctsio->kern_rel_offset = 0; 7217 7218 data = (struct scsi_report_supported_tmf_data *)ctsio->kern_data_ptr; 7219 data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_TRS; 7220 data->byte2 |= RST_ITNRS; 7221 7222 ctl_set_success(ctsio); 7223 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7224 ctsio->be_move_done = ctl_config_move_done; 7225 ctl_datamove((union ctl_io *)ctsio); 7226 return (retval); 7227} 7228 7229int 7230ctl_report_timestamp(struct ctl_scsiio *ctsio) 7231{ 7232 struct scsi_report_timestamp *cdb; 7233 struct scsi_report_timestamp_data *data; 7234 struct timeval tv; 7235 int64_t timestamp; 7236 int retval; 7237 int alloc_len, total_len; 7238 7239 CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); 7240 7241 cdb = (struct scsi_report_timestamp 
*)ctsio->cdb; 7242 7243 retval = CTL_RETVAL_COMPLETE; 7244 7245 total_len = sizeof(struct scsi_report_timestamp_data); 7246 alloc_len = scsi_4btoul(cdb->length); 7247 7248 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7249 7250 ctsio->kern_sg_entries = 0; 7251 7252 if (total_len < alloc_len) { 7253 ctsio->residual = alloc_len - total_len; 7254 ctsio->kern_data_len = total_len; 7255 ctsio->kern_total_len = total_len; 7256 } else { 7257 ctsio->residual = 0; 7258 ctsio->kern_data_len = alloc_len; 7259 ctsio->kern_total_len = alloc_len; 7260 } 7261 ctsio->kern_data_resid = 0; 7262 ctsio->kern_rel_offset = 0; 7263 7264 data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; 7265 scsi_ulto2b(sizeof(*data) - 2, data->length); 7266 data->origin = RTS_ORIG_OUTSIDE; 7267 getmicrotime(&tv); 7268 timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; 7269 scsi_ulto4b(timestamp >> 16, data->timestamp); 7270 scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); 7271 7272 ctl_set_success(ctsio); 7273 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7274 ctsio->be_move_done = ctl_config_move_done; 7275 ctl_datamove((union ctl_io *)ctsio); 7276 return (retval); 7277} 7278 7279int 7280ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) 7281{ 7282 struct scsi_per_res_in *cdb; 7283 int alloc_len, total_len = 0; 7284 /* struct scsi_per_res_in_rsrv in_data; */ 7285 struct ctl_lun *lun; 7286 struct ctl_softc *softc; 7287 uint64_t key; 7288 7289 CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); 7290 7291 cdb = (struct scsi_per_res_in *)ctsio->cdb; 7292 7293 alloc_len = scsi_2btoul(cdb->length); 7294 7295 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7296 softc = lun->ctl_softc; 7297 7298retry: 7299 mtx_lock(&lun->lun_lock); 7300 switch (cdb->action) { 7301 case SPRI_RK: /* read keys */ 7302 total_len = sizeof(struct scsi_per_res_in_keys) + 7303 lun->pr_key_count * 7304 sizeof(struct scsi_per_res_key); 7305 break; 7306 case SPRI_RR: /* 
read reservation */ 7307 if (lun->flags & CTL_LUN_PR_RESERVED) 7308 total_len = sizeof(struct scsi_per_res_in_rsrv); 7309 else 7310 total_len = sizeof(struct scsi_per_res_in_header); 7311 break; 7312 case SPRI_RC: /* report capabilities */ 7313 total_len = sizeof(struct scsi_per_res_cap); 7314 break; 7315 case SPRI_RS: /* read full status */ 7316 total_len = sizeof(struct scsi_per_res_in_header) + 7317 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7318 lun->pr_key_count; 7319 break; 7320 default: 7321 panic("Invalid PR type %x", cdb->action); 7322 } 7323 mtx_unlock(&lun->lun_lock); 7324 7325 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7326 7327 if (total_len < alloc_len) { 7328 ctsio->residual = alloc_len - total_len; 7329 ctsio->kern_data_len = total_len; 7330 ctsio->kern_total_len = total_len; 7331 } else { 7332 ctsio->residual = 0; 7333 ctsio->kern_data_len = alloc_len; 7334 ctsio->kern_total_len = alloc_len; 7335 } 7336 7337 ctsio->kern_data_resid = 0; 7338 ctsio->kern_rel_offset = 0; 7339 ctsio->kern_sg_entries = 0; 7340 7341 mtx_lock(&lun->lun_lock); 7342 switch (cdb->action) { 7343 case SPRI_RK: { // read keys 7344 struct scsi_per_res_in_keys *res_keys; 7345 int i, key_count; 7346 7347 res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7348 7349 /* 7350 * We had to drop the lock to allocate our buffer, which 7351 * leaves time for someone to come in with another 7352 * persistent reservation. (That is unlikely, though, 7353 * since this should be the only persistent reservation 7354 * command active right now.) 
7355 */ 7356 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7357 (lun->pr_key_count * 7358 sizeof(struct scsi_per_res_key)))){ 7359 mtx_unlock(&lun->lun_lock); 7360 free(ctsio->kern_data_ptr, M_CTL); 7361 printf("%s: reservation length changed, retrying\n", 7362 __func__); 7363 goto retry; 7364 } 7365 7366 scsi_ulto4b(lun->PRGeneration, res_keys->header.generation); 7367 7368 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7369 lun->pr_key_count, res_keys->header.length); 7370 7371 for (i = 0, key_count = 0; i < 2*CTL_MAX_INITIATORS; i++) { 7372 if ((key = ctl_get_prkey(lun, i)) == 0) 7373 continue; 7374 7375 /* 7376 * We used lun->pr_key_count to calculate the 7377 * size to allocate. If it turns out the number of 7378 * initiators with the registered flag set is 7379 * larger than that (i.e. they haven't been kept in 7380 * sync), we've got a problem. 7381 */ 7382 if (key_count >= lun->pr_key_count) { 7383#ifdef NEEDTOPORT 7384 csevent_log(CSC_CTL | CSC_SHELF_SW | 7385 CTL_PR_ERROR, 7386 csevent_LogType_Fault, 7387 csevent_AlertLevel_Yellow, 7388 csevent_FRU_ShelfController, 7389 csevent_FRU_Firmware, 7390 csevent_FRU_Unknown, 7391 "registered keys %d >= key " 7392 "count %d", key_count, 7393 lun->pr_key_count); 7394#endif 7395 key_count++; 7396 continue; 7397 } 7398 scsi_u64to8b(key, res_keys->keys[key_count].key); 7399 key_count++; 7400 } 7401 break; 7402 } 7403 case SPRI_RR: { // read reservation 7404 struct scsi_per_res_in_rsrv *res; 7405 int tmp_len, header_only; 7406 7407 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7408 7409 scsi_ulto4b(lun->PRGeneration, res->header.generation); 7410 7411 if (lun->flags & CTL_LUN_PR_RESERVED) 7412 { 7413 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7414 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7415 res->header.length); 7416 header_only = 0; 7417 } else { 7418 tmp_len = sizeof(struct scsi_per_res_in_header); 7419 scsi_ulto4b(0, res->header.length); 7420 header_only = 1; 7421 } 7422 7423 
/* 7424 * We had to drop the lock to allocate our buffer, which 7425 * leaves time for someone to come in with another 7426 * persistent reservation. (That is unlikely, though, 7427 * since this should be the only persistent reservation 7428 * command active right now.) 7429 */ 7430 if (tmp_len != total_len) { 7431 mtx_unlock(&lun->lun_lock); 7432 free(ctsio->kern_data_ptr, M_CTL); 7433 printf("%s: reservation status changed, retrying\n", 7434 __func__); 7435 goto retry; 7436 } 7437 7438 /* 7439 * No reservation held, so we're done. 7440 */ 7441 if (header_only != 0) 7442 break; 7443 7444 /* 7445 * If the registration is an All Registrants type, the key 7446 * is 0, since it doesn't really matter. 7447 */ 7448 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7449 scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), 7450 res->data.reservation); 7451 } 7452 res->data.scopetype = lun->res_type; 7453 break; 7454 } 7455 case SPRI_RC: //report capabilities 7456 { 7457 struct scsi_per_res_cap *res_cap; 7458 uint16_t type_mask; 7459 7460 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7461 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7462 res_cap->flags2 |= SPRI_TMV | SPRI_ALLOW_5; 7463 type_mask = SPRI_TM_WR_EX_AR | 7464 SPRI_TM_EX_AC_RO | 7465 SPRI_TM_WR_EX_RO | 7466 SPRI_TM_EX_AC | 7467 SPRI_TM_WR_EX | 7468 SPRI_TM_EX_AC_AR; 7469 scsi_ulto2b(type_mask, res_cap->type_mask); 7470 break; 7471 } 7472 case SPRI_RS: { // read full status 7473 struct scsi_per_res_in_full *res_status; 7474 struct scsi_per_res_in_full_desc *res_desc; 7475 struct ctl_port *port; 7476 int i, len; 7477 7478 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; 7479 7480 /* 7481 * We had to drop the lock to allocate our buffer, which 7482 * leaves time for someone to come in with another 7483 * persistent reservation. (That is unlikely, though, 7484 * since this should be the only persistent reservation 7485 * command active right now.) 
7486 */ 7487 if (total_len < (sizeof(struct scsi_per_res_in_header) + 7488 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7489 lun->pr_key_count)){ 7490 mtx_unlock(&lun->lun_lock); 7491 free(ctsio->kern_data_ptr, M_CTL); 7492 printf("%s: reservation length changed, retrying\n", 7493 __func__); 7494 goto retry; 7495 } 7496 7497 scsi_ulto4b(lun->PRGeneration, res_status->header.generation); 7498 7499 res_desc = &res_status->desc[0]; 7500 for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) { 7501 if ((key = ctl_get_prkey(lun, i)) == 0) 7502 continue; 7503 7504 scsi_u64to8b(key, res_desc->res_key.key); 7505 if ((lun->flags & CTL_LUN_PR_RESERVED) && 7506 (lun->pr_res_idx == i || 7507 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { 7508 res_desc->flags = SPRI_FULL_R_HOLDER; 7509 res_desc->scopetype = lun->res_type; 7510 } 7511 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, 7512 res_desc->rel_trgt_port_id); 7513 len = 0; 7514 port = softc->ctl_ports[ 7515 ctl_port_idx(i / CTL_MAX_INIT_PER_PORT)]; 7516 if (port != NULL) 7517 len = ctl_create_iid(port, 7518 i % CTL_MAX_INIT_PER_PORT, 7519 res_desc->transport_id); 7520 scsi_ulto4b(len, res_desc->additional_length); 7521 res_desc = (struct scsi_per_res_in_full_desc *) 7522 &res_desc->transport_id[len]; 7523 } 7524 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], 7525 res_status->header.length); 7526 break; 7527 } 7528 default: 7529 /* 7530 * This is a bug, because we just checked for this above, 7531 * and should have returned an error. 
7532 */ 7533 panic("Invalid PR type %x", cdb->action); 7534 break; /* NOTREACHED */ 7535 } 7536 mtx_unlock(&lun->lun_lock); 7537 7538 ctl_set_success(ctsio); 7539 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7540 ctsio->be_move_done = ctl_config_move_done; 7541 ctl_datamove((union ctl_io *)ctsio); 7542 return (CTL_RETVAL_COMPLETE); 7543} 7544 7545static void 7546ctl_est_res_ua(struct ctl_lun *lun, uint32_t residx, ctl_ua_type ua) 7547{ 7548 int off = lun->ctl_softc->persis_offset; 7549 7550 if (residx >= off && residx < off + CTL_MAX_INITIATORS) 7551 ctl_est_ua(lun, residx - off, ua); 7552} 7553 7554/* 7555 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7556 * it should return. 7557 */ 7558static int 7559ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7560 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7561 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7562 struct scsi_per_res_out_parms* param) 7563{ 7564 union ctl_ha_msg persis_io; 7565 int retval, i; 7566 int isc_retval; 7567 7568 retval = 0; 7569 7570 mtx_lock(&lun->lun_lock); 7571 if (sa_res_key == 0) { 7572 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7573 /* validate scope and type */ 7574 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7575 SPR_LU_SCOPE) { 7576 mtx_unlock(&lun->lun_lock); 7577 ctl_set_invalid_field(/*ctsio*/ ctsio, 7578 /*sks_valid*/ 1, 7579 /*command*/ 1, 7580 /*field*/ 2, 7581 /*bit_valid*/ 1, 7582 /*bit*/ 4); 7583 ctl_done((union ctl_io *)ctsio); 7584 return (1); 7585 } 7586 7587 if (type>8 || type==2 || type==4 || type==0) { 7588 mtx_unlock(&lun->lun_lock); 7589 ctl_set_invalid_field(/*ctsio*/ ctsio, 7590 /*sks_valid*/ 1, 7591 /*command*/ 1, 7592 /*field*/ 2, 7593 /*bit_valid*/ 1, 7594 /*bit*/ 0); 7595 ctl_done((union ctl_io *)ctsio); 7596 return (1); 7597 } 7598 7599 /* 7600 * Unregister everybody else and build UA for 7601 * them 7602 */ 7603 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7604 if (i == residx || 
ctl_get_prkey(lun, i) == 0) 7605 continue; 7606 7607 ctl_clr_prkey(lun, i); 7608 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 7609 } 7610 lun->pr_key_count = 1; 7611 lun->res_type = type; 7612 if (lun->res_type != SPR_TYPE_WR_EX_AR 7613 && lun->res_type != SPR_TYPE_EX_AC_AR) 7614 lun->pr_res_idx = residx; 7615 7616 /* send msg to other side */ 7617 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7618 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7619 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7620 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7621 persis_io.pr.pr_info.res_type = type; 7622 memcpy(persis_io.pr.pr_info.sa_res_key, 7623 param->serv_act_res_key, 7624 sizeof(param->serv_act_res_key)); 7625 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7626 &persis_io, sizeof(persis_io), 0)) > 7627 CTL_HA_STATUS_SUCCESS) { 7628 printf("CTL:Persis Out error returned " 7629 "from ctl_ha_msg_send %d\n", 7630 isc_retval); 7631 } 7632 } else { 7633 /* not all registrants */ 7634 mtx_unlock(&lun->lun_lock); 7635 free(ctsio->kern_data_ptr, M_CTL); 7636 ctl_set_invalid_field(ctsio, 7637 /*sks_valid*/ 1, 7638 /*command*/ 0, 7639 /*field*/ 8, 7640 /*bit_valid*/ 0, 7641 /*bit*/ 0); 7642 ctl_done((union ctl_io *)ctsio); 7643 return (1); 7644 } 7645 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7646 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 7647 int found = 0; 7648 7649 if (res_key == sa_res_key) { 7650 /* special case */ 7651 /* 7652 * The spec implies this is not good but doesn't 7653 * say what to do. There are two choices either 7654 * generate a res conflict or check condition 7655 * with illegal field in parameter data. Since 7656 * that is what is done when the sa_res_key is 7657 * zero I'll take that approach since this has 7658 * to do with the sa_res_key. 
7659 */ 7660 mtx_unlock(&lun->lun_lock); 7661 free(ctsio->kern_data_ptr, M_CTL); 7662 ctl_set_invalid_field(ctsio, 7663 /*sks_valid*/ 1, 7664 /*command*/ 0, 7665 /*field*/ 8, 7666 /*bit_valid*/ 0, 7667 /*bit*/ 0); 7668 ctl_done((union ctl_io *)ctsio); 7669 return (1); 7670 } 7671 7672 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7673 if (ctl_get_prkey(lun, i) != sa_res_key) 7674 continue; 7675 7676 found = 1; 7677 ctl_clr_prkey(lun, i); 7678 lun->pr_key_count--; 7679 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 7680 } 7681 if (!found) { 7682 mtx_unlock(&lun->lun_lock); 7683 free(ctsio->kern_data_ptr, M_CTL); 7684 ctl_set_reservation_conflict(ctsio); 7685 ctl_done((union ctl_io *)ctsio); 7686 return (CTL_RETVAL_COMPLETE); 7687 } 7688 /* send msg to other side */ 7689 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7690 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7691 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7692 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7693 persis_io.pr.pr_info.res_type = type; 7694 memcpy(persis_io.pr.pr_info.sa_res_key, 7695 param->serv_act_res_key, 7696 sizeof(param->serv_act_res_key)); 7697 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7698 &persis_io, sizeof(persis_io), 0)) > 7699 CTL_HA_STATUS_SUCCESS) { 7700 printf("CTL:Persis Out error returned from " 7701 "ctl_ha_msg_send %d\n", isc_retval); 7702 } 7703 } else { 7704 /* Reserved but not all registrants */ 7705 /* sa_res_key is res holder */ 7706 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 7707 /* validate scope and type */ 7708 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7709 SPR_LU_SCOPE) { 7710 mtx_unlock(&lun->lun_lock); 7711 ctl_set_invalid_field(/*ctsio*/ ctsio, 7712 /*sks_valid*/ 1, 7713 /*command*/ 1, 7714 /*field*/ 2, 7715 /*bit_valid*/ 1, 7716 /*bit*/ 4); 7717 ctl_done((union ctl_io *)ctsio); 7718 return (1); 7719 } 7720 7721 if (type>8 || type==2 || type==4 || type==0) { 7722 mtx_unlock(&lun->lun_lock); 7723 ctl_set_invalid_field(/*ctsio*/ ctsio, 7724 
/*sks_valid*/ 1, 7725 /*command*/ 1, 7726 /*field*/ 2, 7727 /*bit_valid*/ 1, 7728 /*bit*/ 0); 7729 ctl_done((union ctl_io *)ctsio); 7730 return (1); 7731 } 7732 7733 /* 7734 * Do the following: 7735 * if sa_res_key != res_key remove all 7736 * registrants w/sa_res_key and generate UA 7737 * for these registrants(Registrations 7738 * Preempted) if it wasn't an exclusive 7739 * reservation generate UA(Reservations 7740 * Preempted) for all other registered nexuses 7741 * if the type has changed. Establish the new 7742 * reservation and holder. If res_key and 7743 * sa_res_key are the same do the above 7744 * except don't unregister the res holder. 7745 */ 7746 7747 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7748 if (i == residx || ctl_get_prkey(lun, i) == 0) 7749 continue; 7750 7751 if (sa_res_key == ctl_get_prkey(lun, i)) { 7752 ctl_clr_prkey(lun, i); 7753 lun->pr_key_count--; 7754 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 7755 } else if (type != lun->res_type 7756 && (lun->res_type == SPR_TYPE_WR_EX_RO 7757 || lun->res_type ==SPR_TYPE_EX_AC_RO)){ 7758 ctl_est_res_ua(lun, i, CTL_UA_RES_RELEASE); 7759 } 7760 } 7761 lun->res_type = type; 7762 if (lun->res_type != SPR_TYPE_WR_EX_AR 7763 && lun->res_type != SPR_TYPE_EX_AC_AR) 7764 lun->pr_res_idx = residx; 7765 else 7766 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 7767 7768 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7769 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7770 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7771 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7772 persis_io.pr.pr_info.res_type = type; 7773 memcpy(persis_io.pr.pr_info.sa_res_key, 7774 param->serv_act_res_key, 7775 sizeof(param->serv_act_res_key)); 7776 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7777 &persis_io, sizeof(persis_io), 0)) > 7778 CTL_HA_STATUS_SUCCESS) { 7779 printf("CTL:Persis Out error returned " 7780 "from ctl_ha_msg_send %d\n", 7781 isc_retval); 7782 } 7783 } else { 7784 /* 7785 * sa_res_key is not the res holder just 7786 * 
remove registrants 7787 */ 7788 int found=0; 7789 7790 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7791 if (sa_res_key != ctl_get_prkey(lun, i)) 7792 continue; 7793 7794 found = 1; 7795 ctl_clr_prkey(lun, i); 7796 lun->pr_key_count--; 7797 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 7798 } 7799 7800 if (!found) { 7801 mtx_unlock(&lun->lun_lock); 7802 free(ctsio->kern_data_ptr, M_CTL); 7803 ctl_set_reservation_conflict(ctsio); 7804 ctl_done((union ctl_io *)ctsio); 7805 return (1); 7806 } 7807 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7808 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7809 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7810 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7811 persis_io.pr.pr_info.res_type = type; 7812 memcpy(persis_io.pr.pr_info.sa_res_key, 7813 param->serv_act_res_key, 7814 sizeof(param->serv_act_res_key)); 7815 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 7816 &persis_io, sizeof(persis_io), 0)) > 7817 CTL_HA_STATUS_SUCCESS) { 7818 printf("CTL:Persis Out error returned " 7819 "from ctl_ha_msg_send %d\n", 7820 isc_retval); 7821 } 7822 } 7823 } 7824 7825 lun->PRGeneration++; 7826 mtx_unlock(&lun->lun_lock); 7827 7828 return (retval); 7829} 7830 7831static void 7832ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 7833{ 7834 uint64_t sa_res_key; 7835 int i; 7836 7837 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 7838 7839 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7840 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 7841 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 7842 if (sa_res_key == 0) { 7843 /* 7844 * Unregister everybody else and build UA for 7845 * them 7846 */ 7847 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7848 if (i == msg->pr.pr_info.residx || 7849 ctl_get_prkey(lun, i) == 0) 7850 continue; 7851 7852 ctl_clr_prkey(lun, i); 7853 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 7854 } 7855 7856 lun->pr_key_count = 1; 7857 lun->res_type = msg->pr.pr_info.res_type; 7858 if (lun->res_type != 
SPR_TYPE_WR_EX_AR 7859 && lun->res_type != SPR_TYPE_EX_AC_AR) 7860 lun->pr_res_idx = msg->pr.pr_info.residx; 7861 } else { 7862 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7863 if (sa_res_key == ctl_get_prkey(lun, i)) 7864 continue; 7865 7866 ctl_clr_prkey(lun, i); 7867 lun->pr_key_count--; 7868 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 7869 } 7870 } 7871 } else { 7872 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 7873 if (i == msg->pr.pr_info.residx || 7874 ctl_get_prkey(lun, i) == 0) 7875 continue; 7876 7877 if (sa_res_key == ctl_get_prkey(lun, i)) { 7878 ctl_clr_prkey(lun, i); 7879 lun->pr_key_count--; 7880 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 7881 } else if (msg->pr.pr_info.res_type != lun->res_type 7882 && (lun->res_type == SPR_TYPE_WR_EX_RO 7883 || lun->res_type == SPR_TYPE_EX_AC_RO)) { 7884 ctl_est_res_ua(lun, i, CTL_UA_RES_RELEASE); 7885 } 7886 } 7887 lun->res_type = msg->pr.pr_info.res_type; 7888 if (lun->res_type != SPR_TYPE_WR_EX_AR 7889 && lun->res_type != SPR_TYPE_EX_AC_AR) 7890 lun->pr_res_idx = msg->pr.pr_info.residx; 7891 else 7892 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 7893 } 7894 lun->PRGeneration++; 7895 7896} 7897 7898 7899int 7900ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 7901{ 7902 int retval; 7903 int isc_retval; 7904 u_int32_t param_len; 7905 struct scsi_per_res_out *cdb; 7906 struct ctl_lun *lun; 7907 struct scsi_per_res_out_parms* param; 7908 struct ctl_softc *softc; 7909 uint32_t residx; 7910 uint64_t res_key, sa_res_key, key; 7911 uint8_t type; 7912 union ctl_ha_msg persis_io; 7913 int i; 7914 7915 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 7916 7917 retval = CTL_RETVAL_COMPLETE; 7918 7919 cdb = (struct scsi_per_res_out *)ctsio->cdb; 7920 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 7921 softc = lun->ctl_softc; 7922 7923 /* 7924 * We only support whole-LUN scope. The scope & type are ignored for 7925 * register, register and ignore existing key and clear. 
7926 * We sometimes ignore scope and type on preempts too!! 7927 * Verify reservation type here as well. 7928 */ 7929 type = cdb->scope_type & SPR_TYPE_MASK; 7930 if ((cdb->action == SPRO_RESERVE) 7931 || (cdb->action == SPRO_RELEASE)) { 7932 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 7933 ctl_set_invalid_field(/*ctsio*/ ctsio, 7934 /*sks_valid*/ 1, 7935 /*command*/ 1, 7936 /*field*/ 2, 7937 /*bit_valid*/ 1, 7938 /*bit*/ 4); 7939 ctl_done((union ctl_io *)ctsio); 7940 return (CTL_RETVAL_COMPLETE); 7941 } 7942 7943 if (type>8 || type==2 || type==4 || type==0) { 7944 ctl_set_invalid_field(/*ctsio*/ ctsio, 7945 /*sks_valid*/ 1, 7946 /*command*/ 1, 7947 /*field*/ 2, 7948 /*bit_valid*/ 1, 7949 /*bit*/ 0); 7950 ctl_done((union ctl_io *)ctsio); 7951 return (CTL_RETVAL_COMPLETE); 7952 } 7953 } 7954 7955 param_len = scsi_4btoul(cdb->length); 7956 7957 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 7958 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 7959 ctsio->kern_data_len = param_len; 7960 ctsio->kern_total_len = param_len; 7961 ctsio->kern_data_resid = 0; 7962 ctsio->kern_rel_offset = 0; 7963 ctsio->kern_sg_entries = 0; 7964 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7965 ctsio->be_move_done = ctl_config_move_done; 7966 ctl_datamove((union ctl_io *)ctsio); 7967 7968 return (CTL_RETVAL_COMPLETE); 7969 } 7970 7971 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 7972 7973 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 7974 res_key = scsi_8btou64(param->res_key.key); 7975 sa_res_key = scsi_8btou64(param->serv_act_res_key); 7976 7977 /* 7978 * Validate the reservation key here except for SPRO_REG_IGNO 7979 * This must be done for all other service actions 7980 */ 7981 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 7982 mtx_lock(&lun->lun_lock); 7983 if ((key = ctl_get_prkey(lun, residx)) != 0) { 7984 if (res_key != key) { 7985 /* 7986 * The current key passed in doesn't match 7987 * the one the initiator previously 
7988 * registered. 7989 */ 7990 mtx_unlock(&lun->lun_lock); 7991 free(ctsio->kern_data_ptr, M_CTL); 7992 ctl_set_reservation_conflict(ctsio); 7993 ctl_done((union ctl_io *)ctsio); 7994 return (CTL_RETVAL_COMPLETE); 7995 } 7996 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 7997 /* 7998 * We are not registered 7999 */ 8000 mtx_unlock(&lun->lun_lock); 8001 free(ctsio->kern_data_ptr, M_CTL); 8002 ctl_set_reservation_conflict(ctsio); 8003 ctl_done((union ctl_io *)ctsio); 8004 return (CTL_RETVAL_COMPLETE); 8005 } else if (res_key != 0) { 8006 /* 8007 * We are not registered and trying to register but 8008 * the register key isn't zero. 8009 */ 8010 mtx_unlock(&lun->lun_lock); 8011 free(ctsio->kern_data_ptr, M_CTL); 8012 ctl_set_reservation_conflict(ctsio); 8013 ctl_done((union ctl_io *)ctsio); 8014 return (CTL_RETVAL_COMPLETE); 8015 } 8016 mtx_unlock(&lun->lun_lock); 8017 } 8018 8019 switch (cdb->action & SPRO_ACTION_MASK) { 8020 case SPRO_REGISTER: 8021 case SPRO_REG_IGNO: { 8022 8023#if 0 8024 printf("Registration received\n"); 8025#endif 8026 8027 /* 8028 * We don't support any of these options, as we report in 8029 * the read capabilities request (see 8030 * ctl_persistent_reserve_in(), above). 8031 */ 8032 if ((param->flags & SPR_SPEC_I_PT) 8033 || (param->flags & SPR_ALL_TG_PT) 8034 || (param->flags & SPR_APTPL)) { 8035 int bit_ptr; 8036 8037 if (param->flags & SPR_APTPL) 8038 bit_ptr = 0; 8039 else if (param->flags & SPR_ALL_TG_PT) 8040 bit_ptr = 2; 8041 else /* SPR_SPEC_I_PT */ 8042 bit_ptr = 3; 8043 8044 free(ctsio->kern_data_ptr, M_CTL); 8045 ctl_set_invalid_field(ctsio, 8046 /*sks_valid*/ 1, 8047 /*command*/ 0, 8048 /*field*/ 20, 8049 /*bit_valid*/ 1, 8050 /*bit*/ bit_ptr); 8051 ctl_done((union ctl_io *)ctsio); 8052 return (CTL_RETVAL_COMPLETE); 8053 } 8054 8055 mtx_lock(&lun->lun_lock); 8056 8057 /* 8058 * The initiator wants to clear the 8059 * key/unregister. 
8060 */ 8061 if (sa_res_key == 0) { 8062 if ((res_key == 0 8063 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 8064 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 8065 && ctl_get_prkey(lun, residx) == 0)) { 8066 mtx_unlock(&lun->lun_lock); 8067 goto done; 8068 } 8069 8070 ctl_clr_prkey(lun, residx); 8071 lun->pr_key_count--; 8072 8073 if (residx == lun->pr_res_idx) { 8074 lun->flags &= ~CTL_LUN_PR_RESERVED; 8075 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8076 8077 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8078 || lun->res_type == SPR_TYPE_EX_AC_RO) 8079 && lun->pr_key_count) { 8080 /* 8081 * If the reservation is a registrants 8082 * only type we need to generate a UA 8083 * for other registered inits. The 8084 * sense code should be RESERVATIONS 8085 * RELEASED 8086 */ 8087 8088 for (i = 0; i < CTL_MAX_INITIATORS;i++){ 8089 if (ctl_get_prkey(lun, i + 8090 softc->persis_offset) == 0) 8091 continue; 8092 ctl_est_ua(lun, i, 8093 CTL_UA_RES_RELEASE); 8094 } 8095 } 8096 lun->res_type = 0; 8097 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8098 if (lun->pr_key_count==0) { 8099 lun->flags &= ~CTL_LUN_PR_RESERVED; 8100 lun->res_type = 0; 8101 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8102 } 8103 } 8104 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8105 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8106 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8107 persis_io.pr.pr_info.residx = residx; 8108 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8109 &persis_io, sizeof(persis_io), 0 )) > 8110 CTL_HA_STATUS_SUCCESS) { 8111 printf("CTL:Persis Out error returned from " 8112 "ctl_ha_msg_send %d\n", isc_retval); 8113 } 8114 } else /* sa_res_key != 0 */ { 8115 8116 /* 8117 * If we aren't registered currently then increment 8118 * the key count and set the registered flag. 
8119 */ 8120 ctl_alloc_prkey(lun, residx); 8121 if (ctl_get_prkey(lun, residx) == 0) 8122 lun->pr_key_count++; 8123 ctl_set_prkey(lun, residx, sa_res_key); 8124 8125 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8126 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8127 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8128 persis_io.pr.pr_info.residx = residx; 8129 memcpy(persis_io.pr.pr_info.sa_res_key, 8130 param->serv_act_res_key, 8131 sizeof(param->serv_act_res_key)); 8132 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8133 &persis_io, sizeof(persis_io), 0)) > 8134 CTL_HA_STATUS_SUCCESS) { 8135 printf("CTL:Persis Out error returned from " 8136 "ctl_ha_msg_send %d\n", isc_retval); 8137 } 8138 } 8139 lun->PRGeneration++; 8140 mtx_unlock(&lun->lun_lock); 8141 8142 break; 8143 } 8144 case SPRO_RESERVE: 8145#if 0 8146 printf("Reserve executed type %d\n", type); 8147#endif 8148 mtx_lock(&lun->lun_lock); 8149 if (lun->flags & CTL_LUN_PR_RESERVED) { 8150 /* 8151 * if this isn't the reservation holder and it's 8152 * not a "all registrants" type or if the type is 8153 * different then we have a conflict 8154 */ 8155 if ((lun->pr_res_idx != residx 8156 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) 8157 || lun->res_type != type) { 8158 mtx_unlock(&lun->lun_lock); 8159 free(ctsio->kern_data_ptr, M_CTL); 8160 ctl_set_reservation_conflict(ctsio); 8161 ctl_done((union ctl_io *)ctsio); 8162 return (CTL_RETVAL_COMPLETE); 8163 } 8164 mtx_unlock(&lun->lun_lock); 8165 } else /* create a reservation */ { 8166 /* 8167 * If it's not an "all registrants" type record 8168 * reservation holder 8169 */ 8170 if (type != SPR_TYPE_WR_EX_AR 8171 && type != SPR_TYPE_EX_AC_AR) 8172 lun->pr_res_idx = residx; /* Res holder */ 8173 else 8174 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8175 8176 lun->flags |= CTL_LUN_PR_RESERVED; 8177 lun->res_type = type; 8178 8179 mtx_unlock(&lun->lun_lock); 8180 8181 /* send msg to other side */ 8182 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8183 persis_io.hdr.msg_type 
= CTL_MSG_PERS_ACTION; 8184 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8185 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8186 persis_io.pr.pr_info.res_type = type; 8187 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8188 &persis_io, sizeof(persis_io), 0)) > 8189 CTL_HA_STATUS_SUCCESS) { 8190 printf("CTL:Persis Out error returned from " 8191 "ctl_ha_msg_send %d\n", isc_retval); 8192 } 8193 } 8194 break; 8195 8196 case SPRO_RELEASE: 8197 mtx_lock(&lun->lun_lock); 8198 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8199 /* No reservation exists return good status */ 8200 mtx_unlock(&lun->lun_lock); 8201 goto done; 8202 } 8203 /* 8204 * Is this nexus a reservation holder? 8205 */ 8206 if (lun->pr_res_idx != residx 8207 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 8208 /* 8209 * not a res holder return good status but 8210 * do nothing 8211 */ 8212 mtx_unlock(&lun->lun_lock); 8213 goto done; 8214 } 8215 8216 if (lun->res_type != type) { 8217 mtx_unlock(&lun->lun_lock); 8218 free(ctsio->kern_data_ptr, M_CTL); 8219 ctl_set_illegal_pr_release(ctsio); 8220 ctl_done((union ctl_io *)ctsio); 8221 return (CTL_RETVAL_COMPLETE); 8222 } 8223 8224 /* okay to release */ 8225 lun->flags &= ~CTL_LUN_PR_RESERVED; 8226 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8227 lun->res_type = 0; 8228 8229 /* 8230 * if this isn't an exclusive access 8231 * res generate UA for all other 8232 * registrants. 
8233 */ 8234 if (type != SPR_TYPE_EX_AC 8235 && type != SPR_TYPE_WR_EX) { 8236 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8237 if (i == residx || 8238 ctl_get_prkey(lun, 8239 i + softc->persis_offset) == 0) 8240 continue; 8241 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8242 } 8243 } 8244 mtx_unlock(&lun->lun_lock); 8245 /* Send msg to other side */ 8246 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8247 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8248 persis_io.pr.pr_info.action = CTL_PR_RELEASE; 8249 if ((isc_retval=ctl_ha_msg_send( CTL_HA_CHAN_CTL, &persis_io, 8250 sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { 8251 printf("CTL:Persis Out error returned from " 8252 "ctl_ha_msg_send %d\n", isc_retval); 8253 } 8254 break; 8255 8256 case SPRO_CLEAR: 8257 /* send msg to other side */ 8258 8259 mtx_lock(&lun->lun_lock); 8260 lun->flags &= ~CTL_LUN_PR_RESERVED; 8261 lun->res_type = 0; 8262 lun->pr_key_count = 0; 8263 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8264 8265 ctl_clr_prkey(lun, residx); 8266 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) 8267 if (ctl_get_prkey(lun, i) != 0) { 8268 ctl_clr_prkey(lun, i); 8269 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8270 } 8271 lun->PRGeneration++; 8272 mtx_unlock(&lun->lun_lock); 8273 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8274 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8275 persis_io.pr.pr_info.action = CTL_PR_CLEAR; 8276 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8277 sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { 8278 printf("CTL:Persis Out error returned from " 8279 "ctl_ha_msg_send %d\n", isc_retval); 8280 } 8281 break; 8282 8283 case SPRO_PREEMPT: 8284 case SPRO_PRE_ABO: { 8285 int nretval; 8286 8287 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 8288 residx, ctsio, cdb, param); 8289 if (nretval != 0) 8290 return (CTL_RETVAL_COMPLETE); 8291 break; 8292 } 8293 default: 8294 panic("Invalid PR type %x", cdb->action); 8295 } 8296 8297done: 8298 free(ctsio->kern_data_ptr, M_CTL); 8299 
ctl_set_success(ctsio); 8300 ctl_done((union ctl_io *)ctsio); 8301 8302 return (retval); 8303} 8304 8305/* 8306 * This routine is for handling a message from the other SC pertaining to 8307 * persistent reserve out. All the error checking will have been done 8308 * so only perorming the action need be done here to keep the two 8309 * in sync. 8310 */ 8311static void 8312ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg) 8313{ 8314 struct ctl_lun *lun; 8315 struct ctl_softc *softc; 8316 int i; 8317 uint32_t targ_lun; 8318 8319 softc = control_softc; 8320 8321 targ_lun = msg->hdr.nexus.targ_mapped_lun; 8322 lun = softc->ctl_luns[targ_lun]; 8323 mtx_lock(&lun->lun_lock); 8324 switch(msg->pr.pr_info.action) { 8325 case CTL_PR_REG_KEY: 8326 ctl_alloc_prkey(lun, msg->pr.pr_info.residx); 8327 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) 8328 lun->pr_key_count++; 8329 ctl_set_prkey(lun, msg->pr.pr_info.residx, 8330 scsi_8btou64(msg->pr.pr_info.sa_res_key)); 8331 lun->PRGeneration++; 8332 break; 8333 8334 case CTL_PR_UNREG_KEY: 8335 ctl_clr_prkey(lun, msg->pr.pr_info.residx); 8336 lun->pr_key_count--; 8337 8338 /* XXX Need to see if the reservation has been released */ 8339 /* if so do we need to generate UA? */ 8340 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8341 lun->flags &= ~CTL_LUN_PR_RESERVED; 8342 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8343 8344 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8345 || lun->res_type == SPR_TYPE_EX_AC_RO) 8346 && lun->pr_key_count) { 8347 /* 8348 * If the reservation is a registrants 8349 * only type we need to generate a UA 8350 * for other registered inits. 
The 8351 * sense code should be RESERVATIONS 8352 * RELEASED 8353 */ 8354 8355 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 8356 if (ctl_get_prkey(lun, i + 8357 softc->persis_offset) == 0) 8358 continue; 8359 8360 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8361 } 8362 } 8363 lun->res_type = 0; 8364 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8365 if (lun->pr_key_count==0) { 8366 lun->flags &= ~CTL_LUN_PR_RESERVED; 8367 lun->res_type = 0; 8368 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8369 } 8370 } 8371 lun->PRGeneration++; 8372 break; 8373 8374 case CTL_PR_RESERVE: 8375 lun->flags |= CTL_LUN_PR_RESERVED; 8376 lun->res_type = msg->pr.pr_info.res_type; 8377 lun->pr_res_idx = msg->pr.pr_info.residx; 8378 8379 break; 8380 8381 case CTL_PR_RELEASE: 8382 /* 8383 * if this isn't an exclusive access res generate UA for all 8384 * other registrants. 8385 */ 8386 if (lun->res_type != SPR_TYPE_EX_AC 8387 && lun->res_type != SPR_TYPE_WR_EX) { 8388 for (i = 0; i < CTL_MAX_INITIATORS; i++) 8389 if (ctl_get_prkey(lun, i + softc->persis_offset) != 0) 8390 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8391 } 8392 8393 lun->flags &= ~CTL_LUN_PR_RESERVED; 8394 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8395 lun->res_type = 0; 8396 break; 8397 8398 case CTL_PR_PREEMPT: 8399 ctl_pro_preempt_other(lun, msg); 8400 break; 8401 case CTL_PR_CLEAR: 8402 lun->flags &= ~CTL_LUN_PR_RESERVED; 8403 lun->res_type = 0; 8404 lun->pr_key_count = 0; 8405 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8406 8407 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8408 if (ctl_get_prkey(lun, i) == 0) 8409 continue; 8410 ctl_clr_prkey(lun, i); 8411 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8412 } 8413 lun->PRGeneration++; 8414 break; 8415 } 8416 8417 mtx_unlock(&lun->lun_lock); 8418} 8419 8420int 8421ctl_read_write(struct ctl_scsiio *ctsio) 8422{ 8423 struct ctl_lun *lun; 8424 struct ctl_lba_len_flags *lbalen; 8425 uint64_t lba; 8426 uint32_t num_blocks; 8427 int flags, retval; 8428 int isread; 8429 8430 lun = (struct 
ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8431 8432 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); 8433 8434 flags = 0; 8435 retval = CTL_RETVAL_COMPLETE; 8436 8437 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 8438 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; 8439 switch (ctsio->cdb[0]) { 8440 case READ_6: 8441 case WRITE_6: { 8442 struct scsi_rw_6 *cdb; 8443 8444 cdb = (struct scsi_rw_6 *)ctsio->cdb; 8445 8446 lba = scsi_3btoul(cdb->addr); 8447 /* only 5 bits are valid in the most significant address byte */ 8448 lba &= 0x1fffff; 8449 num_blocks = cdb->length; 8450 /* 8451 * This is correct according to SBC-2. 8452 */ 8453 if (num_blocks == 0) 8454 num_blocks = 256; 8455 break; 8456 } 8457 case READ_10: 8458 case WRITE_10: { 8459 struct scsi_rw_10 *cdb; 8460 8461 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8462 if (cdb->byte2 & SRW10_FUA) 8463 flags |= CTL_LLF_FUA; 8464 if (cdb->byte2 & SRW10_DPO) 8465 flags |= CTL_LLF_DPO; 8466 lba = scsi_4btoul(cdb->addr); 8467 num_blocks = scsi_2btoul(cdb->length); 8468 break; 8469 } 8470 case WRITE_VERIFY_10: { 8471 struct scsi_write_verify_10 *cdb; 8472 8473 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 8474 flags |= CTL_LLF_FUA; 8475 if (cdb->byte2 & SWV_DPO) 8476 flags |= CTL_LLF_DPO; 8477 lba = scsi_4btoul(cdb->addr); 8478 num_blocks = scsi_2btoul(cdb->length); 8479 break; 8480 } 8481 case READ_12: 8482 case WRITE_12: { 8483 struct scsi_rw_12 *cdb; 8484 8485 cdb = (struct scsi_rw_12 *)ctsio->cdb; 8486 if (cdb->byte2 & SRW12_FUA) 8487 flags |= CTL_LLF_FUA; 8488 if (cdb->byte2 & SRW12_DPO) 8489 flags |= CTL_LLF_DPO; 8490 lba = scsi_4btoul(cdb->addr); 8491 num_blocks = scsi_4btoul(cdb->length); 8492 break; 8493 } 8494 case WRITE_VERIFY_12: { 8495 struct scsi_write_verify_12 *cdb; 8496 8497 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 8498 flags |= CTL_LLF_FUA; 8499 if (cdb->byte2 & SWV_DPO) 8500 flags |= CTL_LLF_DPO; 8501 lba = scsi_4btoul(cdb->addr); 8502 
num_blocks = scsi_4btoul(cdb->length); 8503 break; 8504 } 8505 case READ_16: 8506 case WRITE_16: { 8507 struct scsi_rw_16 *cdb; 8508 8509 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8510 if (cdb->byte2 & SRW12_FUA) 8511 flags |= CTL_LLF_FUA; 8512 if (cdb->byte2 & SRW12_DPO) 8513 flags |= CTL_LLF_DPO; 8514 lba = scsi_8btou64(cdb->addr); 8515 num_blocks = scsi_4btoul(cdb->length); 8516 break; 8517 } 8518 case WRITE_ATOMIC_16: { 8519 struct scsi_rw_16 *cdb; 8520 8521 if (lun->be_lun->atomicblock == 0) { 8522 ctl_set_invalid_opcode(ctsio); 8523 ctl_done((union ctl_io *)ctsio); 8524 return (CTL_RETVAL_COMPLETE); 8525 } 8526 8527 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8528 if (cdb->byte2 & SRW12_FUA) 8529 flags |= CTL_LLF_FUA; 8530 if (cdb->byte2 & SRW12_DPO) 8531 flags |= CTL_LLF_DPO; 8532 lba = scsi_8btou64(cdb->addr); 8533 num_blocks = scsi_4btoul(cdb->length); 8534 if (num_blocks > lun->be_lun->atomicblock) { 8535 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 8536 /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, 8537 /*bit*/ 0); 8538 ctl_done((union ctl_io *)ctsio); 8539 return (CTL_RETVAL_COMPLETE); 8540 } 8541 break; 8542 } 8543 case WRITE_VERIFY_16: { 8544 struct scsi_write_verify_16 *cdb; 8545 8546 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 8547 flags |= CTL_LLF_FUA; 8548 if (cdb->byte2 & SWV_DPO) 8549 flags |= CTL_LLF_DPO; 8550 lba = scsi_8btou64(cdb->addr); 8551 num_blocks = scsi_4btoul(cdb->length); 8552 break; 8553 } 8554 default: 8555 /* 8556 * We got a command we don't support. This shouldn't 8557 * happen, commands should be filtered out above us. 8558 */ 8559 ctl_set_invalid_opcode(ctsio); 8560 ctl_done((union ctl_io *)ctsio); 8561 8562 return (CTL_RETVAL_COMPLETE); 8563 break; /* NOTREACHED */ 8564 } 8565 8566 /* 8567 * The first check is to make sure we're in bounds, the second 8568 * check is to catch wrap-around problems. If the lba + num blocks 8569 * is less than the lba, then we've wrapped around and the block 8570 * range is invalid anyway. 
8571 */ 8572 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8573 || ((lba + num_blocks) < lba)) { 8574 ctl_set_lba_out_of_range(ctsio); 8575 ctl_done((union ctl_io *)ctsio); 8576 return (CTL_RETVAL_COMPLETE); 8577 } 8578 8579 /* 8580 * According to SBC-3, a transfer length of 0 is not an error. 8581 * Note that this cannot happen with WRITE(6) or READ(6), since 0 8582 * translates to 256 blocks for those commands. 8583 */ 8584 if (num_blocks == 0) { 8585 ctl_set_success(ctsio); 8586 ctl_done((union ctl_io *)ctsio); 8587 return (CTL_RETVAL_COMPLETE); 8588 } 8589 8590 /* Set FUA and/or DPO if caches are disabled. */ 8591 if (isread) { 8592 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 8593 SCP_RCD) != 0) 8594 flags |= CTL_LLF_FUA | CTL_LLF_DPO; 8595 } else { 8596 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 8597 SCP_WCE) == 0) 8598 flags |= CTL_LLF_FUA; 8599 } 8600 8601 lbalen = (struct ctl_lba_len_flags *) 8602 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8603 lbalen->lba = lba; 8604 lbalen->len = num_blocks; 8605 lbalen->flags = (isread ? 
CTL_LLF_READ : CTL_LLF_WRITE) | flags; 8606 8607 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8608 ctsio->kern_rel_offset = 0; 8609 8610 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 8611 8612 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8613 8614 return (retval); 8615} 8616 8617static int 8618ctl_cnw_cont(union ctl_io *io) 8619{ 8620 struct ctl_scsiio *ctsio; 8621 struct ctl_lun *lun; 8622 struct ctl_lba_len_flags *lbalen; 8623 int retval; 8624 8625 ctsio = &io->scsiio; 8626 ctsio->io_hdr.status = CTL_STATUS_NONE; 8627 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 8628 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8629 lbalen = (struct ctl_lba_len_flags *) 8630 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8631 lbalen->flags &= ~CTL_LLF_COMPARE; 8632 lbalen->flags |= CTL_LLF_WRITE; 8633 8634 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); 8635 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8636 return (retval); 8637} 8638 8639int 8640ctl_cnw(struct ctl_scsiio *ctsio) 8641{ 8642 struct ctl_lun *lun; 8643 struct ctl_lba_len_flags *lbalen; 8644 uint64_t lba; 8645 uint32_t num_blocks; 8646 int flags, retval; 8647 8648 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8649 8650 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); 8651 8652 flags = 0; 8653 retval = CTL_RETVAL_COMPLETE; 8654 8655 switch (ctsio->cdb[0]) { 8656 case COMPARE_AND_WRITE: { 8657 struct scsi_compare_and_write *cdb; 8658 8659 cdb = (struct scsi_compare_and_write *)ctsio->cdb; 8660 if (cdb->byte2 & SRW10_FUA) 8661 flags |= CTL_LLF_FUA; 8662 if (cdb->byte2 & SRW10_DPO) 8663 flags |= CTL_LLF_DPO; 8664 lba = scsi_8btou64(cdb->addr); 8665 num_blocks = cdb->length; 8666 break; 8667 } 8668 default: 8669 /* 8670 * We got a command we don't support. This shouldn't 8671 * happen, commands should be filtered out above us. 
8672 */ 8673 ctl_set_invalid_opcode(ctsio); 8674 ctl_done((union ctl_io *)ctsio); 8675 8676 return (CTL_RETVAL_COMPLETE); 8677 break; /* NOTREACHED */ 8678 } 8679 8680 /* 8681 * The first check is to make sure we're in bounds, the second 8682 * check is to catch wrap-around problems. If the lba + num blocks 8683 * is less than the lba, then we've wrapped around and the block 8684 * range is invalid anyway. 8685 */ 8686 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8687 || ((lba + num_blocks) < lba)) { 8688 ctl_set_lba_out_of_range(ctsio); 8689 ctl_done((union ctl_io *)ctsio); 8690 return (CTL_RETVAL_COMPLETE); 8691 } 8692 8693 /* 8694 * According to SBC-3, a transfer length of 0 is not an error. 8695 */ 8696 if (num_blocks == 0) { 8697 ctl_set_success(ctsio); 8698 ctl_done((union ctl_io *)ctsio); 8699 return (CTL_RETVAL_COMPLETE); 8700 } 8701 8702 /* Set FUA if write cache is disabled. */ 8703 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 8704 SCP_WCE) == 0) 8705 flags |= CTL_LLF_FUA; 8706 8707 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; 8708 ctsio->kern_rel_offset = 0; 8709 8710 /* 8711 * Set the IO_CONT flag, so that if this I/O gets passed to 8712 * ctl_data_submit_done(), it'll get passed back to 8713 * ctl_ctl_cnw_cont() for further processing. 
8714 */ 8715 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 8716 ctsio->io_cont = ctl_cnw_cont; 8717 8718 lbalen = (struct ctl_lba_len_flags *) 8719 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8720 lbalen->lba = lba; 8721 lbalen->len = num_blocks; 8722 lbalen->flags = CTL_LLF_COMPARE | flags; 8723 8724 CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); 8725 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8726 return (retval); 8727} 8728 8729int 8730ctl_verify(struct ctl_scsiio *ctsio) 8731{ 8732 struct ctl_lun *lun; 8733 struct ctl_lba_len_flags *lbalen; 8734 uint64_t lba; 8735 uint32_t num_blocks; 8736 int bytchk, flags; 8737 int retval; 8738 8739 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8740 8741 CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); 8742 8743 bytchk = 0; 8744 flags = CTL_LLF_FUA; 8745 retval = CTL_RETVAL_COMPLETE; 8746 8747 switch (ctsio->cdb[0]) { 8748 case VERIFY_10: { 8749 struct scsi_verify_10 *cdb; 8750 8751 cdb = (struct scsi_verify_10 *)ctsio->cdb; 8752 if (cdb->byte2 & SVFY_BYTCHK) 8753 bytchk = 1; 8754 if (cdb->byte2 & SVFY_DPO) 8755 flags |= CTL_LLF_DPO; 8756 lba = scsi_4btoul(cdb->addr); 8757 num_blocks = scsi_2btoul(cdb->length); 8758 break; 8759 } 8760 case VERIFY_12: { 8761 struct scsi_verify_12 *cdb; 8762 8763 cdb = (struct scsi_verify_12 *)ctsio->cdb; 8764 if (cdb->byte2 & SVFY_BYTCHK) 8765 bytchk = 1; 8766 if (cdb->byte2 & SVFY_DPO) 8767 flags |= CTL_LLF_DPO; 8768 lba = scsi_4btoul(cdb->addr); 8769 num_blocks = scsi_4btoul(cdb->length); 8770 break; 8771 } 8772 case VERIFY_16: { 8773 struct scsi_rw_16 *cdb; 8774 8775 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8776 if (cdb->byte2 & SVFY_BYTCHK) 8777 bytchk = 1; 8778 if (cdb->byte2 & SVFY_DPO) 8779 flags |= CTL_LLF_DPO; 8780 lba = scsi_8btou64(cdb->addr); 8781 num_blocks = scsi_4btoul(cdb->length); 8782 break; 8783 } 8784 default: 8785 /* 8786 * We got a command we don't support. 
This shouldn't 8787 * happen, commands should be filtered out above us. 8788 */ 8789 ctl_set_invalid_opcode(ctsio); 8790 ctl_done((union ctl_io *)ctsio); 8791 return (CTL_RETVAL_COMPLETE); 8792 } 8793 8794 /* 8795 * The first check is to make sure we're in bounds, the second 8796 * check is to catch wrap-around problems. If the lba + num blocks 8797 * is less than the lba, then we've wrapped around and the block 8798 * range is invalid anyway. 8799 */ 8800 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8801 || ((lba + num_blocks) < lba)) { 8802 ctl_set_lba_out_of_range(ctsio); 8803 ctl_done((union ctl_io *)ctsio); 8804 return (CTL_RETVAL_COMPLETE); 8805 } 8806 8807 /* 8808 * According to SBC-3, a transfer length of 0 is not an error. 8809 */ 8810 if (num_blocks == 0) { 8811 ctl_set_success(ctsio); 8812 ctl_done((union ctl_io *)ctsio); 8813 return (CTL_RETVAL_COMPLETE); 8814 } 8815 8816 lbalen = (struct ctl_lba_len_flags *) 8817 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8818 lbalen->lba = lba; 8819 lbalen->len = num_blocks; 8820 if (bytchk) { 8821 lbalen->flags = CTL_LLF_COMPARE | flags; 8822 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8823 } else { 8824 lbalen->flags = CTL_LLF_VERIFY | flags; 8825 ctsio->kern_total_len = 0; 8826 } 8827 ctsio->kern_rel_offset = 0; 8828 8829 CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); 8830 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8831 return (retval); 8832} 8833 8834int 8835ctl_report_luns(struct ctl_scsiio *ctsio) 8836{ 8837 struct ctl_softc *softc = control_softc; 8838 struct scsi_report_luns *cdb; 8839 struct scsi_report_luns_data *lun_data; 8840 struct ctl_lun *lun, *request_lun; 8841 struct ctl_port *port; 8842 int num_luns, retval; 8843 uint32_t alloc_len, lun_datalen; 8844 int num_filled, well_known; 8845 uint32_t initidx, targ_lun_id, lun_id; 8846 8847 retval = CTL_RETVAL_COMPLETE; 8848 well_known = 0; 8849 8850 cdb = (struct scsi_report_luns *)ctsio->cdb; 8851 port = 
ctl_io_port(&ctsio->io_hdr); 8852 8853 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 8854 8855 mtx_lock(&softc->ctl_lock); 8856 num_luns = 0; 8857 for (targ_lun_id = 0; targ_lun_id < CTL_MAX_LUNS; targ_lun_id++) { 8858 if (ctl_lun_map_from_port(port, targ_lun_id) < CTL_MAX_LUNS) 8859 num_luns++; 8860 } 8861 mtx_unlock(&softc->ctl_lock); 8862 8863 switch (cdb->select_report) { 8864 case RPL_REPORT_DEFAULT: 8865 case RPL_REPORT_ALL: 8866 break; 8867 case RPL_REPORT_WELLKNOWN: 8868 well_known = 1; 8869 num_luns = 0; 8870 break; 8871 default: 8872 ctl_set_invalid_field(ctsio, 8873 /*sks_valid*/ 1, 8874 /*command*/ 1, 8875 /*field*/ 2, 8876 /*bit_valid*/ 0, 8877 /*bit*/ 0); 8878 ctl_done((union ctl_io *)ctsio); 8879 return (retval); 8880 break; /* NOTREACHED */ 8881 } 8882 8883 alloc_len = scsi_4btoul(cdb->length); 8884 /* 8885 * The initiator has to allocate at least 16 bytes for this request, 8886 * so he can at least get the header and the first LUN. Otherwise 8887 * we reject the request (per SPC-3 rev 14, section 6.21). 
8888 */ 8889 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 8890 sizeof(struct scsi_report_luns_lundata))) { 8891 ctl_set_invalid_field(ctsio, 8892 /*sks_valid*/ 1, 8893 /*command*/ 1, 8894 /*field*/ 6, 8895 /*bit_valid*/ 0, 8896 /*bit*/ 0); 8897 ctl_done((union ctl_io *)ctsio); 8898 return (retval); 8899 } 8900 8901 request_lun = (struct ctl_lun *) 8902 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8903 8904 lun_datalen = sizeof(*lun_data) + 8905 (num_luns * sizeof(struct scsi_report_luns_lundata)); 8906 8907 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 8908 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 8909 ctsio->kern_sg_entries = 0; 8910 8911 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 8912 8913 mtx_lock(&softc->ctl_lock); 8914 for (targ_lun_id = 0, num_filled = 0; targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns; targ_lun_id++) { 8915 lun_id = ctl_lun_map_from_port(port, targ_lun_id); 8916 if (lun_id >= CTL_MAX_LUNS) 8917 continue; 8918 lun = softc->ctl_luns[lun_id]; 8919 if (lun == NULL) 8920 continue; 8921 8922 if (targ_lun_id <= 0xff) { 8923 /* 8924 * Peripheral addressing method, bus number 0. 8925 */ 8926 lun_data->luns[num_filled].lundata[0] = 8927 RPL_LUNDATA_ATYP_PERIPH; 8928 lun_data->luns[num_filled].lundata[1] = targ_lun_id; 8929 num_filled++; 8930 } else if (targ_lun_id <= 0x3fff) { 8931 /* 8932 * Flat addressing method. 8933 */ 8934 lun_data->luns[num_filled].lundata[0] = 8935 RPL_LUNDATA_ATYP_FLAT | (targ_lun_id >> 8); 8936 lun_data->luns[num_filled].lundata[1] = 8937 (targ_lun_id & 0xff); 8938 num_filled++; 8939 } else if (targ_lun_id <= 0xffffff) { 8940 /* 8941 * Extended flat addressing method. 
8942 */ 8943 lun_data->luns[num_filled].lundata[0] = 8944 RPL_LUNDATA_ATYP_EXTLUN | 0x12; 8945 scsi_ulto3b(targ_lun_id, 8946 &lun_data->luns[num_filled].lundata[1]); 8947 num_filled++; 8948 } else { 8949 printf("ctl_report_luns: bogus LUN number %jd, " 8950 "skipping\n", (intmax_t)targ_lun_id); 8951 } 8952 /* 8953 * According to SPC-3, rev 14 section 6.21: 8954 * 8955 * "The execution of a REPORT LUNS command to any valid and 8956 * installed logical unit shall clear the REPORTED LUNS DATA 8957 * HAS CHANGED unit attention condition for all logical 8958 * units of that target with respect to the requesting 8959 * initiator. A valid and installed logical unit is one 8960 * having a PERIPHERAL QUALIFIER of 000b in the standard 8961 * INQUIRY data (see 6.4.2)." 8962 * 8963 * If request_lun is NULL, the LUN this report luns command 8964 * was issued to is either disabled or doesn't exist. In that 8965 * case, we shouldn't clear any pending lun change unit 8966 * attention. 8967 */ 8968 if (request_lun != NULL) { 8969 mtx_lock(&lun->lun_lock); 8970 ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE); 8971 mtx_unlock(&lun->lun_lock); 8972 } 8973 } 8974 mtx_unlock(&softc->ctl_lock); 8975 8976 /* 8977 * It's quite possible that we've returned fewer LUNs than we allocated 8978 * space for. Trim it. 8979 */ 8980 lun_datalen = sizeof(*lun_data) + 8981 (num_filled * sizeof(struct scsi_report_luns_lundata)); 8982 8983 if (lun_datalen < alloc_len) { 8984 ctsio->residual = alloc_len - lun_datalen; 8985 ctsio->kern_data_len = lun_datalen; 8986 ctsio->kern_total_len = lun_datalen; 8987 } else { 8988 ctsio->residual = 0; 8989 ctsio->kern_data_len = alloc_len; 8990 ctsio->kern_total_len = alloc_len; 8991 } 8992 ctsio->kern_data_resid = 0; 8993 ctsio->kern_rel_offset = 0; 8994 ctsio->kern_sg_entries = 0; 8995 8996 /* 8997 * We set this to the actual data length, regardless of how much 8998 * space we actually have to return results. 
If the user looks at 8999 * this value, he'll know whether or not he allocated enough space 9000 * and reissue the command if necessary. We don't support well 9001 * known logical units, so if the user asks for that, return none. 9002 */ 9003 scsi_ulto4b(lun_datalen - 8, lun_data->length); 9004 9005 /* 9006 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 9007 * this request. 9008 */ 9009 ctl_set_success(ctsio); 9010 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9011 ctsio->be_move_done = ctl_config_move_done; 9012 ctl_datamove((union ctl_io *)ctsio); 9013 return (retval); 9014} 9015 9016int 9017ctl_request_sense(struct ctl_scsiio *ctsio) 9018{ 9019 struct scsi_request_sense *cdb; 9020 struct scsi_sense_data *sense_ptr; 9021 struct ctl_softc *ctl_softc; 9022 struct ctl_lun *lun; 9023 uint32_t initidx; 9024 int have_error; 9025 scsi_sense_data_type sense_format; 9026 ctl_ua_type ua_type; 9027 9028 cdb = (struct scsi_request_sense *)ctsio->cdb; 9029 9030 ctl_softc = control_softc; 9031 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9032 9033 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 9034 9035 /* 9036 * Determine which sense format the user wants. 9037 */ 9038 if (cdb->byte2 & SRS_DESC) 9039 sense_format = SSD_TYPE_DESC; 9040 else 9041 sense_format = SSD_TYPE_FIXED; 9042 9043 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 9044 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 9045 ctsio->kern_sg_entries = 0; 9046 9047 /* 9048 * struct scsi_sense_data, which is currently set to 256 bytes, is 9049 * larger than the largest allowed value for the length field in the 9050 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 
9051 */ 9052 ctsio->residual = 0; 9053 ctsio->kern_data_len = cdb->length; 9054 ctsio->kern_total_len = cdb->length; 9055 9056 ctsio->kern_data_resid = 0; 9057 ctsio->kern_rel_offset = 0; 9058 ctsio->kern_sg_entries = 0; 9059 9060 /* 9061 * If we don't have a LUN, we don't have any pending sense. 9062 */ 9063 if (lun == NULL) 9064 goto no_sense; 9065 9066 have_error = 0; 9067 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9068 /* 9069 * Check for pending sense, and then for pending unit attentions. 9070 * Pending sense gets returned first, then pending unit attentions. 9071 */ 9072 mtx_lock(&lun->lun_lock); 9073#ifdef CTL_WITH_CA 9074 if (ctl_is_set(lun->have_ca, initidx)) { 9075 scsi_sense_data_type stored_format; 9076 9077 /* 9078 * Check to see which sense format was used for the stored 9079 * sense data. 9080 */ 9081 stored_format = scsi_sense_type(&lun->pending_sense[initidx]); 9082 9083 /* 9084 * If the user requested a different sense format than the 9085 * one we stored, then we need to convert it to the other 9086 * format. If we're going from descriptor to fixed format 9087 * sense data, we may lose things in translation, depending 9088 * on what options were used. 9089 * 9090 * If the stored format is SSD_TYPE_NONE (i.e. invalid), 9091 * for some reason we'll just copy it out as-is. 
9092 */ 9093 if ((stored_format == SSD_TYPE_FIXED) 9094 && (sense_format == SSD_TYPE_DESC)) 9095 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 9096 &lun->pending_sense[initidx], 9097 (struct scsi_sense_data_desc *)sense_ptr); 9098 else if ((stored_format == SSD_TYPE_DESC) 9099 && (sense_format == SSD_TYPE_FIXED)) 9100 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 9101 &lun->pending_sense[initidx], 9102 (struct scsi_sense_data_fixed *)sense_ptr); 9103 else 9104 memcpy(sense_ptr, &lun->pending_sense[initidx], 9105 MIN(sizeof(*sense_ptr), 9106 sizeof(lun->pending_sense[initidx]))); 9107 9108 ctl_clear_mask(lun->have_ca, initidx); 9109 have_error = 1; 9110 } else 9111#endif 9112 { 9113 ua_type = ctl_build_ua(lun, initidx, sense_ptr, sense_format); 9114 if (ua_type != CTL_UA_NONE) 9115 have_error = 1; 9116 if (ua_type == CTL_UA_LUN_CHANGE) { 9117 mtx_unlock(&lun->lun_lock); 9118 mtx_lock(&ctl_softc->ctl_lock); 9119 ctl_clr_ua_allluns(ctl_softc, initidx, ua_type); 9120 mtx_unlock(&ctl_softc->ctl_lock); 9121 mtx_lock(&lun->lun_lock); 9122 } 9123 9124 } 9125 mtx_unlock(&lun->lun_lock); 9126 9127 /* 9128 * We already have a pending error, return it. 9129 */ 9130 if (have_error != 0) { 9131 /* 9132 * We report the SCSI status as OK, since the status of the 9133 * request sense command itself is OK. 9134 * We report 0 for the sense length, because we aren't doing 9135 * autosense in this case. We're reporting sense as 9136 * parameter data. 9137 */ 9138 ctl_set_success(ctsio); 9139 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9140 ctsio->be_move_done = ctl_config_move_done; 9141 ctl_datamove((union ctl_io *)ctsio); 9142 return (CTL_RETVAL_COMPLETE); 9143 } 9144 9145no_sense: 9146 9147 /* 9148 * No sense information to report, so we report that everything is 9149 * okay. 
9150 */ 9151 ctl_set_sense_data(sense_ptr, 9152 lun, 9153 sense_format, 9154 /*current_error*/ 1, 9155 /*sense_key*/ SSD_KEY_NO_SENSE, 9156 /*asc*/ 0x00, 9157 /*ascq*/ 0x00, 9158 SSD_ELEM_NONE); 9159 9160 /* 9161 * We report 0 for the sense length, because we aren't doing 9162 * autosense in this case. We're reporting sense as parameter data. 9163 */ 9164 ctl_set_success(ctsio); 9165 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9166 ctsio->be_move_done = ctl_config_move_done; 9167 ctl_datamove((union ctl_io *)ctsio); 9168 return (CTL_RETVAL_COMPLETE); 9169} 9170 9171int 9172ctl_tur(struct ctl_scsiio *ctsio) 9173{ 9174 9175 CTL_DEBUG_PRINT(("ctl_tur\n")); 9176 9177 ctl_set_success(ctsio); 9178 ctl_done((union ctl_io *)ctsio); 9179 9180 return (CTL_RETVAL_COMPLETE); 9181} 9182 9183#ifdef notyet 9184static int 9185ctl_cmddt_inquiry(struct ctl_scsiio *ctsio) 9186{ 9187 9188} 9189#endif 9190 9191/* 9192 * SCSI VPD page 0x00, the Supported VPD Pages page. 9193 */ 9194static int 9195ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9196{ 9197 struct scsi_vpd_supported_pages *pages; 9198 int sup_page_size; 9199 struct ctl_lun *lun; 9200 int p; 9201 9202 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9203 9204 sup_page_size = sizeof(struct scsi_vpd_supported_pages) * 9205 SCSI_EVPD_NUM_SUPPORTED_PAGES; 9206 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 9207 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 9208 ctsio->kern_sg_entries = 0; 9209 9210 if (sup_page_size < alloc_len) { 9211 ctsio->residual = alloc_len - sup_page_size; 9212 ctsio->kern_data_len = sup_page_size; 9213 ctsio->kern_total_len = sup_page_size; 9214 } else { 9215 ctsio->residual = 0; 9216 ctsio->kern_data_len = alloc_len; 9217 ctsio->kern_total_len = alloc_len; 9218 } 9219 ctsio->kern_data_resid = 0; 9220 ctsio->kern_rel_offset = 0; 9221 ctsio->kern_sg_entries = 0; 9222 9223 /* 9224 * The control device is always connected. 
The disk device, on the 9225 * other hand, may not be online all the time. Need to change this 9226 * to figure out whether the disk device is actually online or not. 9227 */ 9228 if (lun != NULL) 9229 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 9230 lun->be_lun->lun_type; 9231 else 9232 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9233 9234 p = 0; 9235 /* Supported VPD pages */ 9236 pages->page_list[p++] = SVPD_SUPPORTED_PAGES; 9237 /* Serial Number */ 9238 pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; 9239 /* Device Identification */ 9240 pages->page_list[p++] = SVPD_DEVICE_ID; 9241 /* Extended INQUIRY Data */ 9242 pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; 9243 /* Mode Page Policy */ 9244 pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; 9245 /* SCSI Ports */ 9246 pages->page_list[p++] = SVPD_SCSI_PORTS; 9247 /* Third-party Copy */ 9248 pages->page_list[p++] = SVPD_SCSI_TPC; 9249 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { 9250 /* Block limits */ 9251 pages->page_list[p++] = SVPD_BLOCK_LIMITS; 9252 /* Block Device Characteristics */ 9253 pages->page_list[p++] = SVPD_BDC; 9254 /* Logical Block Provisioning */ 9255 pages->page_list[p++] = SVPD_LBP; 9256 } 9257 pages->length = p; 9258 9259 ctl_set_success(ctsio); 9260 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9261 ctsio->be_move_done = ctl_config_move_done; 9262 ctl_datamove((union ctl_io *)ctsio); 9263 return (CTL_RETVAL_COMPLETE); 9264} 9265 9266/* 9267 * SCSI VPD page 0x80, the Unit Serial Number page. 
9268 */ 9269static int 9270ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 9271{ 9272 struct scsi_vpd_unit_serial_number *sn_ptr; 9273 struct ctl_lun *lun; 9274 int data_len; 9275 9276 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9277 9278 data_len = 4 + CTL_SN_LEN; 9279 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9280 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9281 if (data_len < alloc_len) { 9282 ctsio->residual = alloc_len - data_len; 9283 ctsio->kern_data_len = data_len; 9284 ctsio->kern_total_len = data_len; 9285 } else { 9286 ctsio->residual = 0; 9287 ctsio->kern_data_len = alloc_len; 9288 ctsio->kern_total_len = alloc_len; 9289 } 9290 ctsio->kern_data_resid = 0; 9291 ctsio->kern_rel_offset = 0; 9292 ctsio->kern_sg_entries = 0; 9293 9294 /* 9295 * The control device is always connected. The disk device, on the 9296 * other hand, may not be online all the time. Need to change this 9297 * to figure out whether the disk device is actually online or not. 9298 */ 9299 if (lun != NULL) 9300 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9301 lun->be_lun->lun_type; 9302 else 9303 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9304 9305 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9306 sn_ptr->length = CTL_SN_LEN; 9307 /* 9308 * If we don't have a LUN, we just leave the serial number as 9309 * all spaces. 9310 */ 9311 if (lun != NULL) { 9312 strncpy((char *)sn_ptr->serial_num, 9313 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9314 } else 9315 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); 9316 9317 ctl_set_success(ctsio); 9318 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9319 ctsio->be_move_done = ctl_config_move_done; 9320 ctl_datamove((union ctl_io *)ctsio); 9321 return (CTL_RETVAL_COMPLETE); 9322} 9323 9324 9325/* 9326 * SCSI VPD page 0x86, the Extended INQUIRY Data page. 
9327 */ 9328static int 9329ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) 9330{ 9331 struct scsi_vpd_extended_inquiry_data *eid_ptr; 9332 struct ctl_lun *lun; 9333 int data_len; 9334 9335 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9336 9337 data_len = sizeof(struct scsi_vpd_extended_inquiry_data); 9338 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9339 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; 9340 ctsio->kern_sg_entries = 0; 9341 9342 if (data_len < alloc_len) { 9343 ctsio->residual = alloc_len - data_len; 9344 ctsio->kern_data_len = data_len; 9345 ctsio->kern_total_len = data_len; 9346 } else { 9347 ctsio->residual = 0; 9348 ctsio->kern_data_len = alloc_len; 9349 ctsio->kern_total_len = alloc_len; 9350 } 9351 ctsio->kern_data_resid = 0; 9352 ctsio->kern_rel_offset = 0; 9353 ctsio->kern_sg_entries = 0; 9354 9355 /* 9356 * The control device is always connected. The disk device, on the 9357 * other hand, may not be online all the time. 9358 */ 9359 if (lun != NULL) 9360 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9361 lun->be_lun->lun_type; 9362 else 9363 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9364 eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; 9365 scsi_ulto2b(data_len - 4, eid_ptr->page_length); 9366 /* 9367 * We support head of queue, ordered and simple tags. 9368 */ 9369 eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; 9370 /* 9371 * Volatile cache supported. 9372 */ 9373 eid_ptr->flags3 = SVPD_EID_V_SUP; 9374 9375 /* 9376 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit 9377 * attention for a particular IT nexus on all LUNs once we report 9378 * it to that nexus once. This bit is required as of SPC-4. 
9379 */ 9380 eid_ptr->flags4 = SVPD_EID_LUICLT; 9381 9382 /* 9383 * XXX KDM in order to correctly answer this, we would need 9384 * information from the SIM to determine how much sense data it 9385 * can send. So this would really be a path inquiry field, most 9386 * likely. This can be set to a maximum of 252 according to SPC-4, 9387 * but the hardware may or may not be able to support that much. 9388 * 0 just means that the maximum sense data length is not reported. 9389 */ 9390 eid_ptr->max_sense_length = 0; 9391 9392 ctl_set_success(ctsio); 9393 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9394 ctsio->be_move_done = ctl_config_move_done; 9395 ctl_datamove((union ctl_io *)ctsio); 9396 return (CTL_RETVAL_COMPLETE); 9397} 9398 9399static int 9400ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) 9401{ 9402 struct scsi_vpd_mode_page_policy *mpp_ptr; 9403 struct ctl_lun *lun; 9404 int data_len; 9405 9406 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9407 9408 data_len = sizeof(struct scsi_vpd_mode_page_policy) + 9409 sizeof(struct scsi_vpd_mode_page_policy_descr); 9410 9411 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9412 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; 9413 ctsio->kern_sg_entries = 0; 9414 9415 if (data_len < alloc_len) { 9416 ctsio->residual = alloc_len - data_len; 9417 ctsio->kern_data_len = data_len; 9418 ctsio->kern_total_len = data_len; 9419 } else { 9420 ctsio->residual = 0; 9421 ctsio->kern_data_len = alloc_len; 9422 ctsio->kern_total_len = alloc_len; 9423 } 9424 ctsio->kern_data_resid = 0; 9425 ctsio->kern_rel_offset = 0; 9426 ctsio->kern_sg_entries = 0; 9427 9428 /* 9429 * The control device is always connected. The disk device, on the 9430 * other hand, may not be online all the time. 
9431 */ 9432 if (lun != NULL) 9433 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9434 lun->be_lun->lun_type; 9435 else 9436 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9437 mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; 9438 scsi_ulto2b(data_len - 4, mpp_ptr->page_length); 9439 mpp_ptr->descr[0].page_code = 0x3f; 9440 mpp_ptr->descr[0].subpage_code = 0xff; 9441 mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; 9442 9443 ctl_set_success(ctsio); 9444 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9445 ctsio->be_move_done = ctl_config_move_done; 9446 ctl_datamove((union ctl_io *)ctsio); 9447 return (CTL_RETVAL_COMPLETE); 9448} 9449 9450/* 9451 * SCSI VPD page 0x83, the Device Identification page. 9452 */ 9453static int 9454ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9455{ 9456 struct scsi_vpd_device_id *devid_ptr; 9457 struct scsi_vpd_id_descriptor *desc; 9458 struct ctl_softc *softc; 9459 struct ctl_lun *lun; 9460 struct ctl_port *port; 9461 int data_len; 9462 uint8_t proto; 9463 9464 softc = control_softc; 9465 9466 port = softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]; 9467 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9468 9469 data_len = sizeof(struct scsi_vpd_device_id) + 9470 sizeof(struct scsi_vpd_id_descriptor) + 9471 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9472 sizeof(struct scsi_vpd_id_descriptor) + 9473 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9474 if (lun && lun->lun_devid) 9475 data_len += lun->lun_devid->len; 9476 if (port->port_devid) 9477 data_len += port->port_devid->len; 9478 if (port->target_devid) 9479 data_len += port->target_devid->len; 9480 9481 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9482 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9483 ctsio->kern_sg_entries = 0; 9484 9485 if (data_len < alloc_len) { 9486 ctsio->residual = alloc_len - data_len; 9487 ctsio->kern_data_len = data_len; 9488 ctsio->kern_total_len = data_len; 9489 } 
else { 9490 ctsio->residual = 0; 9491 ctsio->kern_data_len = alloc_len; 9492 ctsio->kern_total_len = alloc_len; 9493 } 9494 ctsio->kern_data_resid = 0; 9495 ctsio->kern_rel_offset = 0; 9496 ctsio->kern_sg_entries = 0; 9497 9498 /* 9499 * The control device is always connected. The disk device, on the 9500 * other hand, may not be online all the time. 9501 */ 9502 if (lun != NULL) 9503 devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9504 lun->be_lun->lun_type; 9505 else 9506 devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9507 devid_ptr->page_code = SVPD_DEVICE_ID; 9508 scsi_ulto2b(data_len - 4, devid_ptr->length); 9509 9510 if (port->port_type == CTL_PORT_FC) 9511 proto = SCSI_PROTO_FC << 4; 9512 else if (port->port_type == CTL_PORT_ISCSI) 9513 proto = SCSI_PROTO_ISCSI << 4; 9514 else 9515 proto = SCSI_PROTO_SPI << 4; 9516 desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; 9517 9518 /* 9519 * We're using a LUN association here. i.e., this device ID is a 9520 * per-LUN identifier. 9521 */ 9522 if (lun && lun->lun_devid) { 9523 memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); 9524 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9525 lun->lun_devid->len); 9526 } 9527 9528 /* 9529 * This is for the WWPN which is a port association. 
9530 */ 9531 if (port->port_devid) { 9532 memcpy(desc, port->port_devid->data, port->port_devid->len); 9533 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9534 port->port_devid->len); 9535 } 9536 9537 /* 9538 * This is for the Relative Target Port(type 4h) identifier 9539 */ 9540 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9541 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9542 SVPD_ID_TYPE_RELTARG; 9543 desc->length = 4; 9544 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); 9545 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9546 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 9547 9548 /* 9549 * This is for the Target Port Group(type 5h) identifier 9550 */ 9551 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9552 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9553 SVPD_ID_TYPE_TPORTGRP; 9554 desc->length = 4; 9555 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port / CTL_MAX_PORTS + 1, 9556 &desc->identifier[2]); 9557 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9558 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 9559 9560 /* 9561 * This is for the Target identifier 9562 */ 9563 if (port->target_devid) { 9564 memcpy(desc, port->target_devid->data, port->target_devid->len); 9565 } 9566 9567 ctl_set_success(ctsio); 9568 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9569 ctsio->be_move_done = ctl_config_move_done; 9570 ctl_datamove((union ctl_io *)ctsio); 9571 return (CTL_RETVAL_COMPLETE); 9572} 9573 9574static int 9575ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 9576{ 9577 struct ctl_softc *softc = control_softc; 9578 struct scsi_vpd_scsi_ports *sp; 9579 struct scsi_vpd_port_designation *pd; 9580 struct scsi_vpd_port_designation_cont *pdc; 9581 struct ctl_lun *lun; 9582 struct ctl_port *port; 9583 int data_len, num_target_ports, iid_len, id_len, g, pg, p; 9584 int num_target_port_groups; 9585 9586 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 
9587 9588 if (softc->is_single) 9589 num_target_port_groups = 1; 9590 else 9591 num_target_port_groups = NUM_TARGET_PORT_GROUPS; 9592 num_target_ports = 0; 9593 iid_len = 0; 9594 id_len = 0; 9595 mtx_lock(&softc->ctl_lock); 9596 STAILQ_FOREACH(port, &softc->port_list, links) { 9597 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9598 continue; 9599 if (lun != NULL && 9600 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 9601 continue; 9602 num_target_ports++; 9603 if (port->init_devid) 9604 iid_len += port->init_devid->len; 9605 if (port->port_devid) 9606 id_len += port->port_devid->len; 9607 } 9608 mtx_unlock(&softc->ctl_lock); 9609 9610 data_len = sizeof(struct scsi_vpd_scsi_ports) + num_target_port_groups * 9611 num_target_ports * (sizeof(struct scsi_vpd_port_designation) + 9612 sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; 9613 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9614 sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; 9615 ctsio->kern_sg_entries = 0; 9616 9617 if (data_len < alloc_len) { 9618 ctsio->residual = alloc_len - data_len; 9619 ctsio->kern_data_len = data_len; 9620 ctsio->kern_total_len = data_len; 9621 } else { 9622 ctsio->residual = 0; 9623 ctsio->kern_data_len = alloc_len; 9624 ctsio->kern_total_len = alloc_len; 9625 } 9626 ctsio->kern_data_resid = 0; 9627 ctsio->kern_rel_offset = 0; 9628 ctsio->kern_sg_entries = 0; 9629 9630 /* 9631 * The control device is always connected. The disk device, on the 9632 * other hand, may not be online all the time. Need to change this 9633 * to figure out whether the disk device is actually online or not. 
9634 */ 9635 if (lun != NULL) 9636 sp->device = (SID_QUAL_LU_CONNECTED << 5) | 9637 lun->be_lun->lun_type; 9638 else 9639 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9640 9641 sp->page_code = SVPD_SCSI_PORTS; 9642 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 9643 sp->page_length); 9644 pd = &sp->design[0]; 9645 9646 mtx_lock(&softc->ctl_lock); 9647 pg = softc->port_offset / CTL_MAX_PORTS; 9648 for (g = 0; g < num_target_port_groups; g++) { 9649 STAILQ_FOREACH(port, &softc->port_list, links) { 9650 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9651 continue; 9652 if (lun != NULL && 9653 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 9654 continue; 9655 p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS; 9656 scsi_ulto2b(p, pd->relative_port_id); 9657 if (port->init_devid && g == pg) { 9658 iid_len = port->init_devid->len; 9659 memcpy(pd->initiator_transportid, 9660 port->init_devid->data, port->init_devid->len); 9661 } else 9662 iid_len = 0; 9663 scsi_ulto2b(iid_len, pd->initiator_transportid_length); 9664 pdc = (struct scsi_vpd_port_designation_cont *) 9665 (&pd->initiator_transportid[iid_len]); 9666 if (port->port_devid && g == pg) { 9667 id_len = port->port_devid->len; 9668 memcpy(pdc->target_port_descriptors, 9669 port->port_devid->data, port->port_devid->len); 9670 } else 9671 id_len = 0; 9672 scsi_ulto2b(id_len, pdc->target_port_descriptors_length); 9673 pd = (struct scsi_vpd_port_designation *) 9674 ((uint8_t *)pdc->target_port_descriptors + id_len); 9675 } 9676 } 9677 mtx_unlock(&softc->ctl_lock); 9678 9679 ctl_set_success(ctsio); 9680 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9681 ctsio->be_move_done = ctl_config_move_done; 9682 ctl_datamove((union ctl_io *)ctsio); 9683 return (CTL_RETVAL_COMPLETE); 9684} 9685 9686static int 9687ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) 9688{ 9689 struct scsi_vpd_block_limits *bl_ptr; 9690 struct ctl_lun *lun; 9691 int bs; 9692 9693 lun = (struct ctl_lun 
*)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9694 9695 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); 9696 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; 9697 ctsio->kern_sg_entries = 0; 9698 9699 if (sizeof(*bl_ptr) < alloc_len) { 9700 ctsio->residual = alloc_len - sizeof(*bl_ptr); 9701 ctsio->kern_data_len = sizeof(*bl_ptr); 9702 ctsio->kern_total_len = sizeof(*bl_ptr); 9703 } else { 9704 ctsio->residual = 0; 9705 ctsio->kern_data_len = alloc_len; 9706 ctsio->kern_total_len = alloc_len; 9707 } 9708 ctsio->kern_data_resid = 0; 9709 ctsio->kern_rel_offset = 0; 9710 ctsio->kern_sg_entries = 0; 9711 9712 /* 9713 * The control device is always connected. The disk device, on the 9714 * other hand, may not be online all the time. Need to change this 9715 * to figure out whether the disk device is actually online or not. 9716 */ 9717 if (lun != NULL) 9718 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9719 lun->be_lun->lun_type; 9720 else 9721 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9722 9723 bl_ptr->page_code = SVPD_BLOCK_LIMITS; 9724 scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); 9725 bl_ptr->max_cmp_write_len = 0xff; 9726 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); 9727 if (lun != NULL) { 9728 bs = lun->be_lun->blocksize; 9729 scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); 9730 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 9731 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_lba_cnt); 9732 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_blk_cnt); 9733 if (lun->be_lun->ublockexp != 0) { 9734 scsi_ulto4b((1 << lun->be_lun->ublockexp), 9735 bl_ptr->opt_unmap_grain); 9736 scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, 9737 bl_ptr->unmap_grain_align); 9738 } 9739 } 9740 scsi_ulto4b(lun->be_lun->atomicblock, 9741 bl_ptr->max_atomic_transfer_length); 9742 scsi_ulto4b(0, bl_ptr->atomic_alignment); 9743 scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); 9744 } 9745 scsi_u64to8b(UINT64_MAX, 
bl_ptr->max_write_same_length); 9746 9747 ctl_set_success(ctsio); 9748 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9749 ctsio->be_move_done = ctl_config_move_done; 9750 ctl_datamove((union ctl_io *)ctsio); 9751 return (CTL_RETVAL_COMPLETE); 9752} 9753 9754static int 9755ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) 9756{ 9757 struct scsi_vpd_block_device_characteristics *bdc_ptr; 9758 struct ctl_lun *lun; 9759 const char *value; 9760 u_int i; 9761 9762 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9763 9764 ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); 9765 bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; 9766 ctsio->kern_sg_entries = 0; 9767 9768 if (sizeof(*bdc_ptr) < alloc_len) { 9769 ctsio->residual = alloc_len - sizeof(*bdc_ptr); 9770 ctsio->kern_data_len = sizeof(*bdc_ptr); 9771 ctsio->kern_total_len = sizeof(*bdc_ptr); 9772 } else { 9773 ctsio->residual = 0; 9774 ctsio->kern_data_len = alloc_len; 9775 ctsio->kern_total_len = alloc_len; 9776 } 9777 ctsio->kern_data_resid = 0; 9778 ctsio->kern_rel_offset = 0; 9779 ctsio->kern_sg_entries = 0; 9780 9781 /* 9782 * The control device is always connected. The disk device, on the 9783 * other hand, may not be online all the time. Need to change this 9784 * to figure out whether the disk device is actually online or not. 
9785 */ 9786 if (lun != NULL) 9787 bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9788 lun->be_lun->lun_type; 9789 else 9790 bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9791 bdc_ptr->page_code = SVPD_BDC; 9792 scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); 9793 if (lun != NULL && 9794 (value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL) 9795 i = strtol(value, NULL, 0); 9796 else 9797 i = CTL_DEFAULT_ROTATION_RATE; 9798 scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); 9799 if (lun != NULL && 9800 (value = ctl_get_opt(&lun->be_lun->options, "formfactor")) != NULL) 9801 i = strtol(value, NULL, 0); 9802 else 9803 i = 0; 9804 bdc_ptr->wab_wac_ff = (i & 0x0f); 9805 bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS; 9806 9807 ctl_set_success(ctsio); 9808 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9809 ctsio->be_move_done = ctl_config_move_done; 9810 ctl_datamove((union ctl_io *)ctsio); 9811 return (CTL_RETVAL_COMPLETE); 9812} 9813 9814static int 9815ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) 9816{ 9817 struct scsi_vpd_logical_block_prov *lbp_ptr; 9818 struct ctl_lun *lun; 9819 9820 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9821 9822 ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); 9823 lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; 9824 ctsio->kern_sg_entries = 0; 9825 9826 if (sizeof(*lbp_ptr) < alloc_len) { 9827 ctsio->residual = alloc_len - sizeof(*lbp_ptr); 9828 ctsio->kern_data_len = sizeof(*lbp_ptr); 9829 ctsio->kern_total_len = sizeof(*lbp_ptr); 9830 } else { 9831 ctsio->residual = 0; 9832 ctsio->kern_data_len = alloc_len; 9833 ctsio->kern_total_len = alloc_len; 9834 } 9835 ctsio->kern_data_resid = 0; 9836 ctsio->kern_rel_offset = 0; 9837 ctsio->kern_sg_entries = 0; 9838 9839 /* 9840 * The control device is always connected. The disk device, on the 9841 * other hand, may not be online all the time. 
	 * Need to change this
	 * to figure out whether the disk device is actually online or not.
	 */
	if (lun != NULL)
		lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
				  lun->be_lun->lun_type;
	else
		lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;

	lbp_ptr->page_code = SVPD_LBP;
	scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length);
	lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT;
	/*
	 * Advertise thin provisioning (UNMAP, WRITE SAME w/UNMAP, anchored
	 * LBAs, read-zeroes) only when the backend supports unmapping.
	 */
	if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
		lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 |
		    SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP;
		lbp_ptr->prov_type = SVPD_LBP_THIN;
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * INQUIRY with the EVPD bit set.  Dispatch to the handler for the
 * requested VPD page, or report an invalid CDB field for unsupported
 * pages.
 */
static int
ctl_inquiry_evpd(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_inquiry *cdb;
	int alloc_len, retval;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_inquiry *)ctsio->cdb;
	alloc_len = scsi_2btoul(cdb->length);

	switch (cdb->page_code) {
	case SVPD_SUPPORTED_PAGES:
		retval = ctl_inquiry_evpd_supported(ctsio, alloc_len);
		break;
	case SVPD_UNIT_SERIAL_NUMBER:
		retval = ctl_inquiry_evpd_serial(ctsio, alloc_len);
		break;
	case SVPD_DEVICE_ID:
		retval = ctl_inquiry_evpd_devid(ctsio, alloc_len);
		break;
	case SVPD_EXTENDED_INQUIRY_DATA:
		retval = ctl_inquiry_evpd_eid(ctsio, alloc_len);
		break;
	case SVPD_MODE_PAGE_POLICY:
		retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len);
		break;
	case SVPD_SCSI_PORTS:
		retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len);
		break;
	case SVPD_SCSI_TPC:
		retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len);
		break;
	case
SVPD_BLOCK_LIMITS: 9903 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 9904 goto err; 9905 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); 9906 break; 9907 case SVPD_BDC: 9908 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 9909 goto err; 9910 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); 9911 break; 9912 case SVPD_LBP: 9913 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 9914 goto err; 9915 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); 9916 break; 9917 default: 9918err: 9919 ctl_set_invalid_field(ctsio, 9920 /*sks_valid*/ 1, 9921 /*command*/ 1, 9922 /*field*/ 2, 9923 /*bit_valid*/ 0, 9924 /*bit*/ 0); 9925 ctl_done((union ctl_io *)ctsio); 9926 retval = CTL_RETVAL_COMPLETE; 9927 break; 9928 } 9929 9930 return (retval); 9931} 9932 9933/* 9934 * Standard INQUIRY data. 9935 */ 9936static int 9937ctl_inquiry_std(struct ctl_scsiio *ctsio) 9938{ 9939 struct scsi_inquiry_data *inq_ptr; 9940 struct scsi_inquiry *cdb; 9941 struct ctl_softc *softc; 9942 struct ctl_lun *lun; 9943 char *val; 9944 uint32_t alloc_len, data_len; 9945 ctl_port_type port_type; 9946 9947 softc = control_softc; 9948 9949 /* 9950 * Figure out whether we're talking to a Fibre Channel port or not. 9951 * We treat the ioctl front end, and any SCSI adapters, as packetized 9952 * SCSI front ends. 9953 */ 9954 port_type = softc->ctl_ports[ 9955 ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type; 9956 if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) 9957 port_type = CTL_PORT_SCSI; 9958 9959 lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9960 cdb = (struct scsi_inquiry *)ctsio->cdb; 9961 alloc_len = scsi_2btoul(cdb->length); 9962 9963 /* 9964 * We malloc the full inquiry data size here and fill it 9965 * in. If the user only asks for less, we'll give him 9966 * that much. 
	 */
	data_len = offsetof(struct scsi_inquiry_data, vendor_specific1);
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}

	/*
	 * If we have a LUN configured, report it as connected.  Otherwise,
	 * report that it is offline or no device is supported, depending
	 * on the value of inquiry_pq_no_lun.
	 *
	 * According to the spec (SPC-4 r34), the peripheral qualifier
	 * SID_QUAL_LU_OFFLINE (001b) is used in the following scenario:
	 *
	 * "A peripheral device having the specified peripheral device type
	 * is not connected to this logical unit. However, the device
	 * server is capable of supporting the specified peripheral device
	 * type on this logical unit."
	 *
	 * According to the same spec, the peripheral qualifier
	 * SID_QUAL_BAD_LU (011b) is used in this scenario:
	 *
	 * "The device server is not capable of supporting a peripheral
	 * device on this logical unit. For this peripheral qualifier the
	 * peripheral device type shall be set to 1Fh. All other peripheral
	 * device type values are reserved for this peripheral qualifier."
	 *
	 * Given the text, it would seem that we probably want to report that
	 * the LUN is offline here.  There is no LUN connected, but we can
	 * support a LUN at the given LUN number.
	 *
	 * In the real world, though, it sounds like things are a little
	 * different:
	 *
	 * - Linux, when presented with a LUN with the offline peripheral
	 *   qualifier, will create an sg driver instance for it.  So when
	 *   you attach it to CTL, you wind up with a ton of sg driver
	 *   instances.  (One for every LUN that Linux bothered to probe.)
	 *   Linux does this despite the fact that it issues a REPORT LUNs
	 *   to LUN 0 to get the inventory of supported LUNs.
	 *
	 * - There is other anecdotal evidence (from Emulex folks) about
	 *   arrays that use the offline peripheral qualifier for LUNs that
	 *   are on the "passive" path in an active/passive array.
	 *
	 * So the solution is provide a hopefully reasonable default
	 * (return bad/no LUN) and allow the user to change the behavior
	 * with a tunable/sysctl variable.
	 */
	if (lun != NULL)
		inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
				  lun->be_lun->lun_type;
	else if (softc->inquiry_pq_no_lun == 0)
		inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	else
		inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE;

	/* RMB in byte 2 is 0 */
	inq_ptr->version = SCSI_REV_SPC4;

	/*
	 * According to SAM-3, even if a device only supports a single
	 * level of LUN addressing, it should still set the HISUP bit:
	 *
	 * 4.9.1 Logical unit numbers overview
	 *
	 * All logical unit number formats described in this standard are
	 * hierarchical in structure even when only a single level in that
	 * hierarchy is used.  The HISUP bit shall be set to one in the
	 * standard INQUIRY data (see SPC-2) when any logical unit number
	 * format described in this standard is used.  Non-hierarchical
	 * formats are outside the scope of this standard.
	 *
	 * Therefore we set the HiSup bit here.
	 *
	 * The response format is 2, per SPC-3.
	 */
	inq_ptr->response_format = SID_HiSup | 2;

	inq_ptr->additional_length = data_len -
	    (offsetof(struct scsi_inquiry_data, additional_length) + 1);
	CTL_DEBUG_PRINT(("additional_length = %d\n",
			 inq_ptr->additional_length));

	inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT;
	/* 16 bit addressing */
	if (port_type == CTL_PORT_SCSI)
		inq_ptr->spc2_flags = SPC2_SID_ADDR16;
	/* XXX set the SID_MultiP bit here if we're actually going to
	   respond on multiple ports */
	inq_ptr->spc2_flags |= SPC2_SID_MultiP;

	/* 16 bit data bus, synchronous transfers */
	if (port_type == CTL_PORT_SCSI)
		inq_ptr->flags = SID_WBus16 | SID_Sync;
	/*
	 * XXX KDM do we want to support tagged queueing on the control
	 * device at all?
	 */
	if ((lun == NULL)
	 || (lun->be_lun->lun_type != T_PROCESSOR))
		inq_ptr->flags |= SID_CmdQue;
	/*
	 * Per SPC-3, unused bytes in ASCII strings are filled with spaces.
	 * We have 8 bytes for the vendor name, and 16 bytes for the device
	 * name and 4 bytes for the revision.
	 */
	if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
	    "vendor")) == NULL) {
		strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor));
	} else {
		memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor));
		strncpy(inq_ptr->vendor, val,
		    min(sizeof(inq_ptr->vendor), strlen(val)));
	}
	if (lun == NULL) {
		strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
		    sizeof(inq_ptr->product));
	} else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) {
		/* No override: pick the product string by LUN type. */
		switch (lun->be_lun->lun_type) {
		case T_DIRECT:
			strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
			    sizeof(inq_ptr->product));
			break;
		case T_PROCESSOR:
			strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT,
			    sizeof(inq_ptr->product));
			break;
		default:
			strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT,
			    sizeof(inq_ptr->product));
			break;
		}
	} else {
		memset(inq_ptr->product, ' ', sizeof(inq_ptr->product));
		strncpy(inq_ptr->product, val,
		    min(sizeof(inq_ptr->product), strlen(val)));
	}

	/*
	 * XXX make this a macro somewhere so it automatically gets
	 * incremented when we make changes.
	 */
	if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
	    "revision")) == NULL) {
		strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision));
	} else {
		memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision));
		strncpy(inq_ptr->revision, val,
		    min(sizeof(inq_ptr->revision), strlen(val)));
	}

	/*
	 * For parallel SCSI, we support double transition and single
	 * transition clocking.  We also support QAS (Quick Arbitration
	 * and Selection) and Information Unit transfers on both the
	 * control and array devices.
	 */
	if (port_type == CTL_PORT_SCSI)
		inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS |
				    SID_SPI_IUS;

	/* Version descriptors: which standards this target claims. */
	/* SAM-5 (no version claimed) */
	scsi_ulto2b(0x00A0, inq_ptr->version1);
	/* SPC-4 (no version claimed) */
	scsi_ulto2b(0x0460, inq_ptr->version2);
	if (port_type == CTL_PORT_FC) {
		/* FCP-2 ANSI INCITS.350:2003 */
		scsi_ulto2b(0x0917, inq_ptr->version3);
	} else if (port_type == CTL_PORT_SCSI) {
		/* SPI-4 ANSI INCITS.362:200x */
		scsi_ulto2b(0x0B56, inq_ptr->version3);
	} else if (port_type == CTL_PORT_ISCSI) {
		/* iSCSI (no version claimed) */
		scsi_ulto2b(0x0960, inq_ptr->version3);
	} else if (port_type == CTL_PORT_SAS) {
		/* SAS (no version claimed) */
		scsi_ulto2b(0x0BE0, inq_ptr->version3);
	}

	if (lun == NULL) {
		/* SBC-4 (no version claimed) */
		scsi_ulto2b(0x0600, inq_ptr->version4);
	} else {
		switch (lun->be_lun->lun_type) {
		case T_DIRECT:
			/* SBC-4 (no version claimed) */
			scsi_ulto2b(0x0600, inq_ptr->version4);
			break;
		case T_PROCESSOR:
		default:
			break;
		}
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Top-level INQUIRY entry point: dispatch to the VPD handler when EVPD
 * is set, to the standard-data handler when page code is zero, and fail
 * any other combination (page code without EVPD is invalid per SPC).
 */
int
ctl_inquiry(struct ctl_scsiio *ctsio)
{
	struct scsi_inquiry *cdb;
	int retval;

	CTL_DEBUG_PRINT(("ctl_inquiry\n"));

	cdb = (struct scsi_inquiry *)ctsio->cdb;
	if (cdb->byte2 & SI_EVPD)
		retval = ctl_inquiry_evpd(ctsio);
	else if (cdb->page_code == 0)
		retval = ctl_inquiry_std(ctsio);
	else {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	return (retval);
}

/*
 * For known CDB types, parse the LBA and length.
 * Returns 0 on success with *lba/*len filled in, 1 for unrecognized
 * CDBs (or non-SCSI I/O), so callers can fall back to other handling.
 */
static int
ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len)
{
	if (io->io_hdr.io_type != CTL_IO_SCSI)
		return (1);

	switch (io->scsiio.cdb[0]) {
	case COMPARE_AND_WRITE: {
		struct scsi_compare_and_write *cdb;

		cdb = (struct scsi_compare_and_write *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		/* COMPARE AND WRITE carries a single-byte block count. */
		*len = cdb->length;
		break;
	}
	case READ_6:
	case WRITE_6: {
		struct scsi_rw_6 *cdb;

		cdb = (struct scsi_rw_6 *)io->scsiio.cdb;

		*lba = scsi_3btoul(cdb->addr);
		/* only 5 bits are valid in the most significant address byte */
		*lba &= 0x1fffff;
		*len = cdb->length;
		break;
	}
	case READ_10:
	case WRITE_10: {
		struct scsi_rw_10 *cdb;

		cdb = (struct scsi_rw_10 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case WRITE_VERIFY_10: {
		struct scsi_write_verify_10 *cdb;

		cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case READ_12:
	case WRITE_12: {
		struct scsi_rw_12 *cdb;

		cdb = (struct scsi_rw_12 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case WRITE_VERIFY_12: {
		struct scsi_write_verify_12 *cdb;

		cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case READ_16:
	case WRITE_16:
	case WRITE_ATOMIC_16: {
		struct scsi_rw_16 *cdb;

		cdb = (struct scsi_rw_16 *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case WRITE_VERIFY_16: {
		struct scsi_write_verify_16 *cdb;

		cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case WRITE_SAME_10: {
		struct scsi_write_same_10 *cdb;

		cdb = (struct scsi_write_same_10 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case WRITE_SAME_16: {
		struct scsi_write_same_16 *cdb;

		cdb = (struct scsi_write_same_16 *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case VERIFY_10: {
		struct scsi_verify_10 *cdb;

		cdb = (struct scsi_verify_10 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case VERIFY_12: {
		struct scsi_verify_12 *cdb;

		cdb = (struct scsi_verify_12 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case VERIFY_16: {
		struct scsi_verify_16 *cdb;

		cdb = (struct scsi_verify_16 *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case UNMAP: {
		/*
		 * The affected range is in the parameter data, not the CDB;
		 * treat it as covering everything for serialization purposes.
		 */
		*lba = 0;
		*len = UINT64_MAX;
		break;
	}
	case SERVICE_ACTION_IN: {	/* GET LBA STATUS */
		struct scsi_get_lba_status *cdb;

		cdb = (struct scsi_get_lba_status *)io->scsiio.cdb;
		*lba = scsi_8btou64(cdb->addr);
		*len = UINT32_MAX;
		break;
	}
	default:
		return (1);
		break; /* NOTREACHED */
	}

	return (0);
}

static ctl_action
ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2,
    bool seq)
{
uint64_t endlba1, endlba2; 10368 10369 endlba1 = lba1 + len1 - (seq ? 0 : 1); 10370 endlba2 = lba2 + len2 - 1; 10371 10372 if ((endlba1 < lba2) || (endlba2 < lba1)) 10373 return (CTL_ACTION_PASS); 10374 else 10375 return (CTL_ACTION_BLOCK); 10376} 10377 10378static int 10379ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2) 10380{ 10381 struct ctl_ptr_len_flags *ptrlen; 10382 struct scsi_unmap_desc *buf, *end, *range; 10383 uint64_t lba; 10384 uint32_t len; 10385 10386 /* If not UNMAP -- go other way. */ 10387 if (io->io_hdr.io_type != CTL_IO_SCSI || 10388 io->scsiio.cdb[0] != UNMAP) 10389 return (CTL_ACTION_ERROR); 10390 10391 /* If UNMAP without data -- block and wait for data. */ 10392 ptrlen = (struct ctl_ptr_len_flags *) 10393 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 10394 if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || 10395 ptrlen->ptr == NULL) 10396 return (CTL_ACTION_BLOCK); 10397 10398 /* UNMAP with data -- check for collision. */ 10399 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 10400 end = buf + ptrlen->len / sizeof(*buf); 10401 for (range = buf; range < end; range++) { 10402 lba = scsi_8btou64(range->lba); 10403 len = scsi_4btoul(range->length); 10404 if ((lba < lba2 + len2) && (lba + len > lba2)) 10405 return (CTL_ACTION_BLOCK); 10406 } 10407 return (CTL_ACTION_PASS); 10408} 10409 10410static ctl_action 10411ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq) 10412{ 10413 uint64_t lba1, lba2; 10414 uint64_t len1, len2; 10415 int retval; 10416 10417 if (ctl_get_lba_len(io2, &lba2, &len2) != 0) 10418 return (CTL_ACTION_ERROR); 10419 10420 retval = ctl_extent_check_unmap(io1, lba2, len2); 10421 if (retval != CTL_ACTION_ERROR) 10422 return (retval); 10423 10424 if (ctl_get_lba_len(io1, &lba1, &len1) != 0) 10425 return (CTL_ACTION_ERROR); 10426 10427 return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq)); 10428} 10429 10430static ctl_action 10431ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2) 10432{ 
	uint64_t lba1, lba2;
	uint64_t len1, len2;

	if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
		return (CTL_ACTION_ERROR);
	if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
		return (CTL_ACTION_ERROR);

	/* Block only when io2 starts exactly where io1 ends. */
	if (lba1 + len1 == lba2)
		return (CTL_ACTION_BLOCK);
	return (CTL_ACTION_PASS);
}

/*
 * Decide what to do with pending_io given that ooa_io is ahead of it in
 * the OOA queue: pass, block, skip, or flag an overlapped command/tag.
 */
static ctl_action
ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io,
    union ctl_io *ooa_io)
{
	const struct ctl_cmd_entry *pending_entry, *ooa_entry;
	ctl_serialize_action *serialize_row;

	/*
	 * The initiator attempted multiple untagged commands at the same
	 * time.  Can't do that.
	 */
	if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
	 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
	 && ((pending_io->io_hdr.nexus.targ_port ==
	      ooa_io->io_hdr.nexus.targ_port)
	  && (pending_io->io_hdr.nexus.initid.id ==
	      ooa_io->io_hdr.nexus.initid.id))
	 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT |
	      CTL_FLAG_STATUS_SENT)) == 0))
		return (CTL_ACTION_OVERLAP);

	/*
	 * The initiator attempted to send multiple tagged commands with
	 * the same ID.  (It's fine if different initiators have the same
	 * tag ID.)
	 *
	 * Even if all of those conditions are true, we don't kill the I/O
	 * if the command ahead of us has been aborted.  We won't end up
	 * sending it to the FETD, and it's perfectly legal to resend a
	 * command with the same tag number as long as the previous
	 * instance of this tag number has been aborted somehow.
	 */
	if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
	 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
	 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num)
	 && ((pending_io->io_hdr.nexus.targ_port ==
	      ooa_io->io_hdr.nexus.targ_port)
	  && (pending_io->io_hdr.nexus.initid.id ==
	      ooa_io->io_hdr.nexus.initid.id))
	 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT |
	      CTL_FLAG_STATUS_SENT)) == 0))
		return (CTL_ACTION_OVERLAP_TAG);

	/*
	 * If we get a head of queue tag, SAM-3 says that we should
	 * immediately execute it.
	 *
	 * What happens if this command would normally block for some other
	 * reason?  e.g. a request sense with a head of queue tag
	 * immediately after a write.  Normally that would block, but this
	 * will result in its getting executed immediately...
	 *
	 * We currently return "pass" instead of "skip", so we'll end up
	 * going through the rest of the queue to check for overlapped tags.
	 *
	 * XXX KDM check for other types of blockage first??
	 */
	if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
		return (CTL_ACTION_PASS);

	/*
	 * Ordered tags have to block until all items ahead of them
	 * have completed.  If we get called with an ordered tag, we always
	 * block, if something else is ahead of us in the queue.
	 */
	if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED)
		return (CTL_ACTION_BLOCK);

	/*
	 * Simple tags get blocked until all head of queue and ordered tags
	 * ahead of them have completed.  I'm lumping untagged commands in
	 * with simple tags here.  XXX KDM is that the right thing to do?
	 */
	if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
	  || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE))
	 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
	  || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED)))
		return (CTL_ACTION_BLOCK);

	pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL);
	ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL);

	/* Consult the command-pair serialization table. */
	serialize_row = ctl_serialize_table[ooa_entry->seridx];

	switch (serialize_row[pending_entry->seridx]) {
	case CTL_SER_BLOCK:
		return (CTL_ACTION_BLOCK);
	case CTL_SER_EXTENT:
		return (ctl_extent_check(ooa_io, pending_io,
		    (lun->serseq == CTL_LUN_SERSEQ_ON)));
	case CTL_SER_EXTENTOPT:
		/* Extent check only when unrestricted reordering is off. */
		if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags
		    & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED)
			return (ctl_extent_check(ooa_io, pending_io,
			    (lun->serseq == CTL_LUN_SERSEQ_ON)));
		return (CTL_ACTION_PASS);
	case CTL_SER_EXTENTSEQ:
		if (lun->serseq != CTL_LUN_SERSEQ_OFF)
			return (ctl_extent_check_seq(ooa_io, pending_io));
		return (CTL_ACTION_PASS);
	case CTL_SER_PASS:
		return (CTL_ACTION_PASS);
	case CTL_SER_BLOCKOPT:
		if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags
		    & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED)
			return (CTL_ACTION_BLOCK);
		return (CTL_ACTION_PASS);
	case CTL_SER_SKIP:
		return (CTL_ACTION_SKIP);
	default:
		panic("invalid serialization value %d",
		      serialize_row[pending_entry->seridx]);
	}

	return (CTL_ACTION_ERROR);
}

/*
 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue.
 * Assumptions:
 * - pending_io is generally either incoming, or on the blocked queue
 * - starting I/O is the I/O we want to start the check with.
 */
static ctl_action
ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
	      union ctl_io *starting_io)
{
	union ctl_io *ooa_io;
	ctl_action action;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	/*
	 * Run back along the OOA queue, starting with the current
	 * blocked I/O and going through every I/O before it on the
	 * queue.  If starting_io is NULL, we'll just end up returning
	 * CTL_ACTION_PASS.
	 */
	for (ooa_io = starting_io; ooa_io != NULL;
	     ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq,
	     ooa_links)){

		/*
		 * This routine just checks to see whether
		 * cur_blocked is blocked by ooa_io, which is ahead
		 * of it in the queue.  It doesn't queue/dequeue
		 * cur_blocked.
		 */
		action = ctl_check_for_blockage(lun, pending_io, ooa_io);
		switch (action) {
		case CTL_ACTION_BLOCK:
		case CTL_ACTION_OVERLAP:
		case CTL_ACTION_OVERLAP_TAG:
		case CTL_ACTION_SKIP:
		case CTL_ACTION_ERROR:
			/* First non-pass verdict wins. */
			return (action);
			break; /* NOTREACHED */
		case CTL_ACTION_PASS:
			break;
		default:
			panic("invalid action %d", action);
			break; /* NOTREACHED */
		}
	}

	return (CTL_ACTION_PASS);
}

/*
 * Assumptions:
 * - An I/O has just completed, and has been removed from the per-LUN OOA
 *   queue, so some items on the blocked queue may now be unblocked.
 */
static int
ctl_check_blocked(struct ctl_lun *lun)
{
	union ctl_io *cur_blocked, *next_blocked;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	/*
	 * Run forward from the head of the blocked queue, checking each
	 * entry against the I/Os prior to it on the OOA queue to see if
	 * there is still any blockage.
	 *
	 * We cannot use the TAILQ_FOREACH() macro, because it can't deal
	 * with our removing a variable on it while it is traversing the
	 * list.
	 */
	for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue);
	     cur_blocked != NULL; cur_blocked = next_blocked) {
		union ctl_io *prev_ooa;
		ctl_action action;

		/* Grab the successor now, before cur_blocked may be freed. */
		next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr,
							  blocked_links);

		prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr,
						      ctl_ooaq, ooa_links);

		/*
		 * If cur_blocked happens to be the first item in the OOA
		 * queue now, prev_ooa will be NULL, and the action
		 * returned will just be CTL_ACTION_PASS.
		 */
		action = ctl_check_ooa(lun, cur_blocked, prev_ooa);

		switch (action) {
		case CTL_ACTION_BLOCK:
			/* Nothing to do here, still blocked */
			break;
		case CTL_ACTION_OVERLAP:
		case CTL_ACTION_OVERLAP_TAG:
			/*
			 * This shouldn't happen!  In theory we've already
			 * checked this command for overlap...
			 */
			break;
		case CTL_ACTION_PASS:
		case CTL_ACTION_SKIP: {
			const struct ctl_cmd_entry *entry;
			int isc_retval;

			/*
			 * The skip case shouldn't happen, this transaction
			 * should have never made it onto the blocked queue.
			 */
			/*
			 * This I/O is no longer blocked, we can remove it
			 * from the blocked queue.  Since this is a TAILQ
			 * (doubly linked list), we can do O(1) removals
			 * from any place on the list.
			 */
			TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr,
				     blocked_links);
			cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED;

			if (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC){
				/*
				 * Need to send IO back to original side to
				 * run
				 */
				union ctl_ha_msg msg_info;

				msg_info.hdr.original_sc =
					cur_blocked->io_hdr.original_sc;
				msg_info.hdr.serializing_sc = cur_blocked;
				msg_info.hdr.msg_type = CTL_MSG_R2R;
				if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
				     &msg_info, sizeof(msg_info), 0)) >
				     CTL_HA_STATUS_SUCCESS) {
					printf("CTL:Check Blocked error from "
					       "ctl_ha_msg_send %d\n",
					       isc_retval);
				}
				break;
			}
			entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL);

			/*
			 * Check this I/O for LUN state changes that may
			 * have happened while this command was blocked.
			 * The LUN state may have been changed by a command
			 * ahead of us in the queue, so we need to re-check
			 * for any states that can be caused by SCSI
			 * commands.
			 */
			if (ctl_scsiio_lun_check(lun, entry,
						 &cur_blocked->scsiio) == 0) {
				cur_blocked->io_hdr.flags |=
						      CTL_FLAG_IS_WAS_ON_RTR;
				ctl_enqueue_rtr(cur_blocked);
			} else
				ctl_done(cur_blocked);
			break;
		}
		default:
			/*
			 * This probably shouldn't happen -- we shouldn't
			 * get CTL_ACTION_ERROR, or anything else.
			 */
			break;
		}
	}

	return (CTL_RETVAL_COMPLETE);
}

/*
 * This routine (with one exception) checks LUN flags that can be set by
 * commands ahead of us in the OOA queue.  These flags have to be checked
 * when a command initially comes in, and when we pull a command off the
 * blocked queue and are preparing to execute it.
 * The reason we have to
 * check these flags for commands on the blocked queue is that the LUN
 * state may have been changed by a command ahead of us while we're on the
 * blocked queue.
 *
 * Ordering is somewhat important with these checks, so please pay
 * careful attention to the placement of any new checks.
 *
 * Returns 0 when the command may proceed; returns 1 (with sense data
 * already set in ctsio) when it must be failed.
 */
static int
ctl_scsiio_lun_check(struct ctl_lun *lun,
    const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = lun->ctl_softc;
	int retval;
	uint32_t residx;

	retval = 0;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	/*
	 * If this shelf is a secondary shelf controller, we have to reject
	 * any media access commands.
	 */
	if ((softc->flags & CTL_FLAG_ACTIVE_SHELF) == 0 &&
	    (entry->flags & CTL_CMD_FLAG_OK_ON_SECONDARY) == 0) {
		ctl_set_lun_standby(ctsio);
		retval = 1;
		goto bailout;
	}

	/* Writes are refused on read-only or software-write-protected LUNs. */
	if (entry->pattern & CTL_LUN_PAT_WRITE) {
		if (lun->flags & CTL_LUN_READONLY) {
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_DATA_PROTECT,
			    /*asc*/ 0x27, /*ascq*/ 0x01, SSD_ELEM_NONE);
			retval = 1;
			goto bailout;
		}
		if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT]
		    .eca_and_aen & SCP_SWP) != 0) {
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_DATA_PROTECT,
			    /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE);
			retval = 1;
			goto bailout;
		}
	}

	/*
	 * Check for a reservation conflict.  If this command isn't allowed
	 * even on reserved LUNs, and if this initiator isn't the one who
	 * reserved us, reject the command with a reservation conflict.
	 */
	residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
	if ((lun->flags & CTL_LUN_RESERVED)
	 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) {
		if (lun->res_idx != residx) {
			ctl_set_reservation_conflict(ctsio);
			retval = 1;
			goto bailout;
		}
	}

	if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 ||
	    (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) {
		/* No reservation or command is allowed. */;
	} else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) &&
	    (lun->res_type == SPR_TYPE_WR_EX ||
	     lun->res_type == SPR_TYPE_WR_EX_RO ||
	     lun->res_type == SPR_TYPE_WR_EX_AR)) {
		/* The command is allowed for Write Exclusive resv. */;
	} else {
		/*
		 * if we aren't registered or it's a res holder type
		 * reservation and this isn't the res holder then set a
		 * conflict.
		 */
		if (ctl_get_prkey(lun, residx) == 0
		 || (residx != lun->pr_res_idx && lun->res_type < 4)) {
			ctl_set_reservation_conflict(ctsio);
			retval = 1;
			goto bailout;
		}

	}

	if ((lun->flags & CTL_LUN_OFFLINE)
	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_OFFLINE) == 0)) {
		ctl_set_lun_not_ready(ctsio);
		retval = 1;
		goto bailout;
	}

	/*
	 * If the LUN is stopped, see if this particular command is allowed
	 * for a stopped lun.  Otherwise, reject it with 0x04,0x02.
	 */
	if ((lun->flags & CTL_LUN_STOPPED)
	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) {
		/* "Logical unit not ready, initializing cmd.
required" */ 10839 ctl_set_lun_stopped(ctsio); 10840 retval = 1; 10841 goto bailout; 10842 } 10843 10844 if ((lun->flags & CTL_LUN_INOPERABLE) 10845 && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) { 10846 /* "Medium format corrupted" */ 10847 ctl_set_medium_format_corrupted(ctsio); 10848 retval = 1; 10849 goto bailout; 10850 } 10851 10852bailout: 10853 return (retval); 10854 10855} 10856 10857static void 10858ctl_failover_io(union ctl_io *io, int have_lock) 10859{ 10860 ctl_set_busy(&io->scsiio); 10861 ctl_done(io); 10862} 10863 10864#ifdef notyet 10865static void 10866ctl_failover(void) 10867{ 10868 struct ctl_lun *lun; 10869 struct ctl_softc *softc; 10870 union ctl_io *next_io, *pending_io; 10871 union ctl_io *io; 10872 int lun_idx; 10873 10874 softc = control_softc; 10875 10876 mtx_lock(&softc->ctl_lock); 10877 /* 10878 * Remove any cmds from the other SC from the rtr queue. These 10879 * will obviously only be for LUNs for which we're the primary. 10880 * We can't send status or get/send data for these commands. 10881 * Since they haven't been executed yet, we can just remove them. 10882 * We'll either abort them or delete them below, depending on 10883 * which HA mode we're in. 10884 */ 10885#ifdef notyet 10886 mtx_lock(&softc->queue_lock); 10887 for (io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue); 10888 io != NULL; io = next_io) { 10889 next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links); 10890 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 10891 STAILQ_REMOVE(&softc->rtr_queue, &io->io_hdr, 10892 ctl_io_hdr, links); 10893 } 10894 mtx_unlock(&softc->queue_lock); 10895#endif 10896 10897 for (lun_idx=0; lun_idx < softc->num_luns; lun_idx++) { 10898 lun = softc->ctl_luns[lun_idx]; 10899 if (lun==NULL) 10900 continue; 10901 10902 /* 10903 * Processor LUNs are primary on both sides. 10904 * XXX will this always be true? 
10905 */ 10906 if (lun->be_lun->lun_type == T_PROCESSOR) 10907 continue; 10908 10909 if ((lun->flags & CTL_LUN_PRIMARY_SC) 10910 && (softc->ha_mode == CTL_HA_MODE_SER_ONLY)) { 10911 printf("FAILOVER: primary lun %d\n", lun_idx); 10912 /* 10913 * Remove all commands from the other SC. First from the 10914 * blocked queue then from the ooa queue. Once we have 10915 * removed them. Call ctl_check_blocked to see if there 10916 * is anything that can run. 10917 */ 10918 for (io = (union ctl_io *)TAILQ_FIRST( 10919 &lun->blocked_queue); io != NULL; io = next_io) { 10920 10921 next_io = (union ctl_io *)TAILQ_NEXT( 10922 &io->io_hdr, blocked_links); 10923 10924 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) { 10925 TAILQ_REMOVE(&lun->blocked_queue, 10926 &io->io_hdr,blocked_links); 10927 io->io_hdr.flags &= ~CTL_FLAG_BLOCKED; 10928 TAILQ_REMOVE(&lun->ooa_queue, 10929 &io->io_hdr, ooa_links); 10930 10931 ctl_free_io(io); 10932 } 10933 } 10934 10935 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 10936 io != NULL; io = next_io) { 10937 10938 next_io = (union ctl_io *)TAILQ_NEXT( 10939 &io->io_hdr, ooa_links); 10940 10941 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) { 10942 10943 TAILQ_REMOVE(&lun->ooa_queue, 10944 &io->io_hdr, 10945 ooa_links); 10946 10947 ctl_free_io(io); 10948 } 10949 } 10950 ctl_check_blocked(lun); 10951 } else if ((lun->flags & CTL_LUN_PRIMARY_SC) 10952 && (softc->ha_mode == CTL_HA_MODE_XFER)) { 10953 10954 printf("FAILOVER: primary lun %d\n", lun_idx); 10955 /* 10956 * Abort all commands from the other SC. We can't 10957 * send status back for them now. These should get 10958 * cleaned up when they are completed or come out 10959 * for a datamove operation. 
10960 */ 10961 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 10962 io != NULL; io = next_io) { 10963 next_io = (union ctl_io *)TAILQ_NEXT( 10964 &io->io_hdr, ooa_links); 10965 10966 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 10967 io->io_hdr.flags |= CTL_FLAG_ABORT; 10968 } 10969 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0) 10970 && (softc->ha_mode == CTL_HA_MODE_XFER)) { 10971 10972 printf("FAILOVER: secondary lun %d\n", lun_idx); 10973 10974 lun->flags |= CTL_LUN_PRIMARY_SC; 10975 10976 /* 10977 * We send all I/O that was sent to this controller 10978 * and redirected to the other side back with 10979 * busy status, and have the initiator retry it. 10980 * Figuring out how much data has been transferred, 10981 * etc. and picking up where we left off would be 10982 * very tricky. 10983 * 10984 * XXX KDM need to remove I/O from the blocked 10985 * queue as well! 10986 */ 10987 for (pending_io = (union ctl_io *)TAILQ_FIRST( 10988 &lun->ooa_queue); pending_io != NULL; 10989 pending_io = next_io) { 10990 10991 next_io = (union ctl_io *)TAILQ_NEXT( 10992 &pending_io->io_hdr, ooa_links); 10993 10994 pending_io->io_hdr.flags &= 10995 ~CTL_FLAG_SENT_2OTHER_SC; 10996 10997 if (pending_io->io_hdr.flags & 10998 CTL_FLAG_IO_ACTIVE) { 10999 pending_io->io_hdr.flags |= 11000 CTL_FLAG_FAILOVER; 11001 } else { 11002 ctl_set_busy(&pending_io->scsiio); 11003 ctl_done(pending_io); 11004 } 11005 } 11006 11007 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 11008 } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0) 11009 && (softc->ha_mode == CTL_HA_MODE_SER_ONLY)) { 11010 printf("FAILOVER: secondary lun %d\n", lun_idx); 11011 /* 11012 * if the first io on the OOA is not on the RtR queue 11013 * add it. 
11014 */ 11015 lun->flags |= CTL_LUN_PRIMARY_SC; 11016 11017 pending_io = (union ctl_io *)TAILQ_FIRST( 11018 &lun->ooa_queue); 11019 if (pending_io==NULL) { 11020 printf("Nothing on OOA queue\n"); 11021 continue; 11022 } 11023 11024 pending_io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 11025 if ((pending_io->io_hdr.flags & 11026 CTL_FLAG_IS_WAS_ON_RTR) == 0) { 11027 pending_io->io_hdr.flags |= 11028 CTL_FLAG_IS_WAS_ON_RTR; 11029 ctl_enqueue_rtr(pending_io); 11030 } 11031#if 0 11032 else 11033 { 11034 printf("Tag 0x%04x is running\n", 11035 pending_io->scsiio.tag_num); 11036 } 11037#endif 11038 11039 next_io = (union ctl_io *)TAILQ_NEXT( 11040 &pending_io->io_hdr, ooa_links); 11041 for (pending_io=next_io; pending_io != NULL; 11042 pending_io = next_io) { 11043 pending_io->io_hdr.flags &= 11044 ~CTL_FLAG_SENT_2OTHER_SC; 11045 next_io = (union ctl_io *)TAILQ_NEXT( 11046 &pending_io->io_hdr, ooa_links); 11047 if (pending_io->io_hdr.flags & 11048 CTL_FLAG_IS_WAS_ON_RTR) { 11049#if 0 11050 printf("Tag 0x%04x is running\n", 11051 pending_io->scsiio.tag_num); 11052#endif 11053 continue; 11054 } 11055 11056 switch (ctl_check_ooa(lun, pending_io, 11057 (union ctl_io *)TAILQ_PREV( 11058 &pending_io->io_hdr, ctl_ooaq, 11059 ooa_links))) { 11060 11061 case CTL_ACTION_BLOCK: 11062 TAILQ_INSERT_TAIL(&lun->blocked_queue, 11063 &pending_io->io_hdr, 11064 blocked_links); 11065 pending_io->io_hdr.flags |= 11066 CTL_FLAG_BLOCKED; 11067 break; 11068 case CTL_ACTION_PASS: 11069 case CTL_ACTION_SKIP: 11070 pending_io->io_hdr.flags |= 11071 CTL_FLAG_IS_WAS_ON_RTR; 11072 ctl_enqueue_rtr(pending_io); 11073 break; 11074 case CTL_ACTION_OVERLAP: 11075 ctl_set_overlapped_cmd( 11076 (struct ctl_scsiio *)pending_io); 11077 ctl_done(pending_io); 11078 break; 11079 case CTL_ACTION_OVERLAP_TAG: 11080 ctl_set_overlapped_tag( 11081 (struct ctl_scsiio *)pending_io, 11082 pending_io->scsiio.tag_num & 0xff); 11083 ctl_done(pending_io); 11084 break; 11085 case CTL_ACTION_ERROR: 11086 default: 11087 
ctl_set_internal_failure( 11088 (struct ctl_scsiio *)pending_io, 11089 0, // sks_valid 11090 0); //retry count 11091 ctl_done(pending_io); 11092 break; 11093 } 11094 } 11095 11096 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 11097 } else { 11098 panic("Unhandled HA mode failover, LUN flags = %#x, " 11099 "ha_mode = #%x", lun->flags, softc->ha_mode); 11100 } 11101 } 11102 ctl_pause_rtr = 0; 11103 mtx_unlock(&softc->ctl_lock); 11104} 11105#endif 11106 11107static int 11108ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio) 11109{ 11110 struct ctl_lun *lun; 11111 const struct ctl_cmd_entry *entry; 11112 uint32_t initidx, targ_lun; 11113 int retval; 11114 11115 retval = 0; 11116 11117 lun = NULL; 11118 11119 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 11120 if ((targ_lun < CTL_MAX_LUNS) 11121 && ((lun = softc->ctl_luns[targ_lun]) != NULL)) { 11122 /* 11123 * If the LUN is invalid, pretend that it doesn't exist. 11124 * It will go away as soon as all pending I/O has been 11125 * completed. 11126 */ 11127 mtx_lock(&lun->lun_lock); 11128 if (lun->flags & CTL_LUN_DISABLED) { 11129 mtx_unlock(&lun->lun_lock); 11130 lun = NULL; 11131 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL; 11132 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL; 11133 } else { 11134 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun; 11135 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = 11136 lun->be_lun; 11137 if (lun->be_lun->lun_type == T_PROCESSOR) { 11138 ctsio->io_hdr.flags |= CTL_FLAG_CONTROL_DEV; 11139 } 11140 11141 /* 11142 * Every I/O goes into the OOA queue for a 11143 * particular LUN, and stays there until completion. 
11144 */ 11145#ifdef CTL_TIME_IO 11146 if (TAILQ_EMPTY(&lun->ooa_queue)) { 11147 lun->idle_time += getsbinuptime() - 11148 lun->last_busy; 11149 } 11150#endif 11151 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, 11152 ooa_links); 11153 } 11154 } else { 11155 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL; 11156 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL; 11157 } 11158 11159 /* Get command entry and return error if it is unsuppotyed. */ 11160 entry = ctl_validate_command(ctsio); 11161 if (entry == NULL) { 11162 if (lun) 11163 mtx_unlock(&lun->lun_lock); 11164 return (retval); 11165 } 11166 11167 ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 11168 ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; 11169 11170 /* 11171 * Check to see whether we can send this command to LUNs that don't 11172 * exist. This should pretty much only be the case for inquiry 11173 * and request sense. Further checks, below, really require having 11174 * a LUN, so we can't really check the command anymore. Just put 11175 * it on the rtr queue. 11176 */ 11177 if (lun == NULL) { 11178 if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) { 11179 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11180 ctl_enqueue_rtr((union ctl_io *)ctsio); 11181 return (retval); 11182 } 11183 11184 ctl_set_unsupported_lun(ctsio); 11185 ctl_done((union ctl_io *)ctsio); 11186 CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n")); 11187 return (retval); 11188 } else { 11189 /* 11190 * Make sure we support this particular command on this LUN. 11191 * e.g., we don't support writes to the control LUN. 
11192 */ 11193 if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 11194 mtx_unlock(&lun->lun_lock); 11195 ctl_set_invalid_opcode(ctsio); 11196 ctl_done((union ctl_io *)ctsio); 11197 return (retval); 11198 } 11199 } 11200 11201 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11202 11203#ifdef CTL_WITH_CA 11204 /* 11205 * If we've got a request sense, it'll clear the contingent 11206 * allegiance condition. Otherwise, if we have a CA condition for 11207 * this initiator, clear it, because it sent down a command other 11208 * than request sense. 11209 */ 11210 if ((ctsio->cdb[0] != REQUEST_SENSE) 11211 && (ctl_is_set(lun->have_ca, initidx))) 11212 ctl_clear_mask(lun->have_ca, initidx); 11213#endif 11214 11215 /* 11216 * If the command has this flag set, it handles its own unit 11217 * attention reporting, we shouldn't do anything. Otherwise we 11218 * check for any pending unit attentions, and send them back to the 11219 * initiator. We only do this when a command initially comes in, 11220 * not when we pull it off the blocked queue. 11221 * 11222 * According to SAM-3, section 5.3.2, the order that things get 11223 * presented back to the host is basically unit attentions caused 11224 * by some sort of reset event, busy status, reservation conflicts 11225 * or task set full, and finally any other status. 11226 * 11227 * One issue here is that some of the unit attentions we report 11228 * don't fall into the "reset" category (e.g. "reported luns data 11229 * has changed"). So reporting it here, before the reservation 11230 * check, may be technically wrong. I guess the only thing to do 11231 * would be to check for and report the reset events here, and then 11232 * check for the other unit attention types after we check for a 11233 * reservation conflict. 
11234 * 11235 * XXX KDM need to fix this 11236 */ 11237 if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { 11238 ctl_ua_type ua_type; 11239 scsi_sense_data_type sense_format; 11240 11241 if (lun->flags & CTL_LUN_SENSE_DESC) 11242 sense_format = SSD_TYPE_DESC; 11243 else 11244 sense_format = SSD_TYPE_FIXED; 11245 11246 ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data, 11247 sense_format); 11248 if (ua_type != CTL_UA_NONE) { 11249 mtx_unlock(&lun->lun_lock); 11250 ctsio->scsi_status = SCSI_STATUS_CHECK_COND; 11251 ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 11252 ctsio->sense_len = SSD_FULL_SIZE; 11253 ctl_done((union ctl_io *)ctsio); 11254 return (retval); 11255 } 11256 } 11257 11258 11259 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 11260 mtx_unlock(&lun->lun_lock); 11261 ctl_done((union ctl_io *)ctsio); 11262 return (retval); 11263 } 11264 11265 /* 11266 * XXX CHD this is where we want to send IO to other side if 11267 * this LUN is secondary on this SC. We will need to make a copy 11268 * of the IO and flag the IO on this side as SENT_2OTHER and the flag 11269 * the copy we send as FROM_OTHER. 11270 * We also need to stuff the address of the original IO so we can 11271 * find it easily. Something similar will need be done on the other 11272 * side so when we are done we can find the copy. 11273 */ 11274 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { 11275 union ctl_ha_msg msg_info; 11276 int isc_retval; 11277 11278 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11279 11280 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 11281 msg_info.hdr.original_sc = (union ctl_io *)ctsio; 11282#if 0 11283 printf("1. 
ctsio %p\n", ctsio); 11284#endif 11285 msg_info.hdr.serializing_sc = NULL; 11286 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 11287 msg_info.scsi.tag_num = ctsio->tag_num; 11288 msg_info.scsi.tag_type = ctsio->tag_type; 11289 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 11290 11291 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11292 11293 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11294 (void *)&msg_info, sizeof(msg_info), 0)) > 11295 CTL_HA_STATUS_SUCCESS) { 11296 printf("CTL:precheck, ctl_ha_msg_send returned %d\n", 11297 isc_retval); 11298 printf("CTL:opcode is %x\n", ctsio->cdb[0]); 11299 } else { 11300#if 0 11301 printf("CTL:Precheck sent msg, opcode is %x\n",opcode); 11302#endif 11303 } 11304 11305 /* 11306 * XXX KDM this I/O is off the incoming queue, but hasn't 11307 * been inserted on any other queue. We may need to come 11308 * up with a holding queue while we wait for serialization 11309 * so that we have an idea of what we're waiting for from 11310 * the other side. 
11311 */ 11312 mtx_unlock(&lun->lun_lock); 11313 return (retval); 11314 } 11315 11316 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 11317 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, 11318 ctl_ooaq, ooa_links))) { 11319 case CTL_ACTION_BLOCK: 11320 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 11321 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 11322 blocked_links); 11323 mtx_unlock(&lun->lun_lock); 11324 return (retval); 11325 case CTL_ACTION_PASS: 11326 case CTL_ACTION_SKIP: 11327 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11328 mtx_unlock(&lun->lun_lock); 11329 ctl_enqueue_rtr((union ctl_io *)ctsio); 11330 break; 11331 case CTL_ACTION_OVERLAP: 11332 mtx_unlock(&lun->lun_lock); 11333 ctl_set_overlapped_cmd(ctsio); 11334 ctl_done((union ctl_io *)ctsio); 11335 break; 11336 case CTL_ACTION_OVERLAP_TAG: 11337 mtx_unlock(&lun->lun_lock); 11338 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 11339 ctl_done((union ctl_io *)ctsio); 11340 break; 11341 case CTL_ACTION_ERROR: 11342 default: 11343 mtx_unlock(&lun->lun_lock); 11344 ctl_set_internal_failure(ctsio, 11345 /*sks_valid*/ 0, 11346 /*retry_count*/ 0); 11347 ctl_done((union ctl_io *)ctsio); 11348 break; 11349 } 11350 return (retval); 11351} 11352 11353const struct ctl_cmd_entry * 11354ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa) 11355{ 11356 const struct ctl_cmd_entry *entry; 11357 int service_action; 11358 11359 entry = &ctl_cmd_table[ctsio->cdb[0]]; 11360 if (sa) 11361 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); 11362 if (entry->flags & CTL_CMD_FLAG_SA5) { 11363 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; 11364 entry = &((const struct ctl_cmd_entry *) 11365 entry->execute)[service_action]; 11366 } 11367 return (entry); 11368} 11369 11370const struct ctl_cmd_entry * 11371ctl_validate_command(struct ctl_scsiio *ctsio) 11372{ 11373 const struct ctl_cmd_entry *entry; 11374 int i, sa; 11375 uint8_t diff; 11376 11377 entry = ctl_get_cmd_entry(ctsio, &sa); 11378 if (entry->execute == 
NULL) { 11379 if (sa) 11380 ctl_set_invalid_field(ctsio, 11381 /*sks_valid*/ 1, 11382 /*command*/ 1, 11383 /*field*/ 1, 11384 /*bit_valid*/ 1, 11385 /*bit*/ 4); 11386 else 11387 ctl_set_invalid_opcode(ctsio); 11388 ctl_done((union ctl_io *)ctsio); 11389 return (NULL); 11390 } 11391 KASSERT(entry->length > 0, 11392 ("Not defined length for command 0x%02x/0x%02x", 11393 ctsio->cdb[0], ctsio->cdb[1])); 11394 for (i = 1; i < entry->length; i++) { 11395 diff = ctsio->cdb[i] & ~entry->usage[i - 1]; 11396 if (diff == 0) 11397 continue; 11398 ctl_set_invalid_field(ctsio, 11399 /*sks_valid*/ 1, 11400 /*command*/ 1, 11401 /*field*/ i, 11402 /*bit_valid*/ 1, 11403 /*bit*/ fls(diff) - 1); 11404 ctl_done((union ctl_io *)ctsio); 11405 return (NULL); 11406 } 11407 return (entry); 11408} 11409 11410static int 11411ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) 11412{ 11413 11414 switch (lun_type) { 11415 case T_PROCESSOR: 11416 if (((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) && 11417 ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0)) 11418 return (0); 11419 break; 11420 case T_DIRECT: 11421 if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0) && 11422 ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0)) 11423 return (0); 11424 break; 11425 default: 11426 return (0); 11427 } 11428 return (1); 11429} 11430 11431static int 11432ctl_scsiio(struct ctl_scsiio *ctsio) 11433{ 11434 int retval; 11435 const struct ctl_cmd_entry *entry; 11436 11437 retval = CTL_RETVAL_COMPLETE; 11438 11439 CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); 11440 11441 entry = ctl_get_cmd_entry(ctsio, NULL); 11442 11443 /* 11444 * If this I/O has been aborted, just send it straight to 11445 * ctl_done() without executing it. 11446 */ 11447 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { 11448 ctl_done((union ctl_io *)ctsio); 11449 goto bailout; 11450 } 11451 11452 /* 11453 * All the checks should have been handled by ctl_scsiio_precheck(). 
11454 * We should be clear now to just execute the I/O. 11455 */ 11456 retval = entry->execute(ctsio); 11457 11458bailout: 11459 return (retval); 11460} 11461 11462/* 11463 * Since we only implement one target right now, a bus reset simply resets 11464 * our single target. 11465 */ 11466static int 11467ctl_bus_reset(struct ctl_softc *softc, union ctl_io *io) 11468{ 11469 return(ctl_target_reset(softc, io, CTL_UA_BUS_RESET)); 11470} 11471 11472static int 11473ctl_target_reset(struct ctl_softc *softc, union ctl_io *io, 11474 ctl_ua_type ua_type) 11475{ 11476 struct ctl_lun *lun; 11477 int retval; 11478 11479 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11480 union ctl_ha_msg msg_info; 11481 11482 io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11483 msg_info.hdr.nexus = io->io_hdr.nexus; 11484 if (ua_type==CTL_UA_TARG_RESET) 11485 msg_info.task.task_action = CTL_TASK_TARGET_RESET; 11486 else 11487 msg_info.task.task_action = CTL_TASK_BUS_RESET; 11488 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11489 msg_info.hdr.original_sc = NULL; 11490 msg_info.hdr.serializing_sc = NULL; 11491 if (CTL_HA_STATUS_SUCCESS != ctl_ha_msg_send(CTL_HA_CHAN_CTL, 11492 (void *)&msg_info, sizeof(msg_info), 0)) { 11493 } 11494 } 11495 retval = 0; 11496 11497 mtx_lock(&softc->ctl_lock); 11498 STAILQ_FOREACH(lun, &softc->lun_list, links) 11499 retval += ctl_lun_reset(lun, io, ua_type); 11500 mtx_unlock(&softc->ctl_lock); 11501 11502 return (retval); 11503} 11504 11505/* 11506 * The LUN should always be set. The I/O is optional, and is used to 11507 * distinguish between I/Os sent by this initiator, and by other 11508 * initiators. We set unit attention for initiators other than this one. 11509 * SAM-3 is vague on this point. 
It does say that a unit attention should
 * be established for other initiators when a LUN is reset (see section
 * 5.7.3), but it doesn't specifically say that the unit attention should
 * be established for this particular initiator when a LUN is reset.  Here
 * is the relevant text, from SAM-3 rev 8:
 *
 * 5.7.2 When a SCSI initiator port aborts its own tasks
 *
 * When a SCSI initiator port causes its own task(s) to be aborted, no
 * notification that the task(s) have been aborted shall be returned to
 * the SCSI initiator port other than the completion response for the
 * command or task management function action that caused the task(s) to
 * be aborted and notification(s) associated with related effects of the
 * action (e.g., a reset unit attention condition).
 *
 * XXX KDM for now, we're setting unit attention for all initiators.
 */
static int
ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
{
    union ctl_io *xio;
#if 0
    uint32_t initidx;
#endif
#ifdef CTL_WITH_CA
    int i;
#endif

    mtx_lock(&lun->lun_lock);
    /*
     * Run through the OOA queue and abort each I/O.
     */
    for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
         xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
        xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS;
    }

    /*
     * This version sets unit attention for every
     */
#if 0
    initidx = ctl_get_initindex(&io->io_hdr.nexus);
    ctl_est_ua_all(lun, initidx, ua_type);
#else
    ctl_est_ua_all(lun, -1, ua_type);
#endif

    /*
     * A reset (any kind, really) clears reservations established with
     * RESERVE/RELEASE.  It does not clear reservations established
     * with PERSISTENT RESERVE OUT, but we don't support that at the
     * moment anyway.  See SPC-2, section 5.6.  SPC-3 doesn't address
     * reservations made with the RESERVE/RELEASE commands, because
     * those commands are obsolete in SPC-3.
     */
    lun->flags &= ~CTL_LUN_RESERVED;

#ifdef CTL_WITH_CA
    for (i = 0; i < CTL_MAX_INITIATORS; i++)
        ctl_clear_mask(lun->have_ca, i);
#endif
    mtx_unlock(&lun->lun_lock);

    return (0);
}

/*
 * Abort I/O on one LUN's OOA queue that matches the given target port
 * and initiator ID (UINT32_MAX wildcards either).  I/O aborted by a
 * nexus other than its own also gets CTL_FLAG_ABORT_STATUS so status
 * is returned.  If we are secondary for the LUN and other_sc is not
 * set, forward an abort for each matching I/O to the other controller.
 */
static void
ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id,
    int other_sc)
{
    union ctl_io *xio;

    mtx_assert(&lun->lun_lock, MA_OWNED);

    /*
     * Run through the OOA queue and attempt to find the given I/O.
     * The target port, initiator ID, tag type and tag number have to
     * match the values that we got from the initiator.  If we have an
     * untagged command to abort, simply abort the first untagged command
     * we come to.  We only allow one untagged command at a time of course.
     */
    for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
         xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {

        if ((targ_port == UINT32_MAX ||
            targ_port == xio->io_hdr.nexus.targ_port) &&
            (init_id == UINT32_MAX ||
            init_id == xio->io_hdr.nexus.initid.id)) {
            if (targ_port != xio->io_hdr.nexus.targ_port ||
                init_id != xio->io_hdr.nexus.initid.id)
                xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS;
            xio->io_hdr.flags |= CTL_FLAG_ABORT;
            if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) {
                union ctl_ha_msg msg_info;

                msg_info.hdr.nexus = xio->io_hdr.nexus;
                msg_info.task.task_action = CTL_TASK_ABORT_TASK;
                msg_info.task.tag_num = xio->scsiio.tag_num;
                msg_info.task.tag_type = xio->scsiio.tag_type;
                msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
                msg_info.hdr.original_sc = NULL;
                msg_info.hdr.serializing_sc = NULL;
                ctl_ha_msg_send(CTL_HA_CHAN_CTL,
                    (void *)&msg_info, sizeof(msg_info), 0);
            }
        }
    }
}

/*
 * Handle ABORT TASK SET / CLEAR TASK SET task management functions for
 * the LUN named in the I/O's nexus.  Returns 0 on success, 1 if the
 * LUN could not be found.
 */
static int
ctl_abort_task_set(union ctl_io *io)
{
    struct ctl_softc *softc = control_softc;
    struct ctl_lun *lun;
    uint32_t targ_lun;

    /*
     * Look up the LUN.
     */
    targ_lun = io->io_hdr.nexus.targ_mapped_lun;
    mtx_lock(&softc->ctl_lock);
    if ((targ_lun < CTL_MAX_LUNS) && (softc->ctl_luns[targ_lun] != NULL))
        lun = softc->ctl_luns[targ_lun];
    else {
        mtx_unlock(&softc->ctl_lock);
        return (1);
    }

    /* Take the LUN lock before dropping the softc lock. */
    mtx_lock(&lun->lun_lock);
    mtx_unlock(&softc->ctl_lock);
    if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) {
        ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
            io->io_hdr.nexus.initid.id,
            (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
    } else { /* CTL_TASK_CLEAR_TASK_SET */
        ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX,
            (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
    }
    mtx_unlock(&lun->lun_lock);
    return (0);
}

/*
 * Handle an I_T NEXUS RESET: on every LUN, abort this nexus's tasks,
 * drop any RESERVE/RELEASE reservation it holds, and establish an
 * "I_T nexus loss" unit attention for the initiator.
 */
static int
ctl_i_t_nexus_reset(union ctl_io *io)
{
    struct ctl_softc *softc = control_softc;
    struct ctl_lun *lun;
    uint32_t initidx, residx;

    initidx = ctl_get_initindex(&io->io_hdr.nexus);
    residx = ctl_get_resindex(&io->io_hdr.nexus);
    mtx_lock(&softc->ctl_lock);
    STAILQ_FOREACH(lun, &softc->lun_list, links) {
        mtx_lock(&lun->lun_lock);
        ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
            io->io_hdr.nexus.initid.id,
            (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
#ifdef CTL_WITH_CA
        ctl_clear_mask(lun->have_ca, initidx);
#endif
        if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx))
            lun->flags &= ~CTL_LUN_RESERVED;
        ctl_est_ua(lun, initidx, CTL_UA_I_T_NEXUS_LOSS);
        mtx_unlock(&lun->lun_lock);
    }
    mtx_unlock(&softc->ctl_lock);
    return (0);
}

/*
 * Handle an ABORT TASK function: find the I/O on the LUN's OOA queue
 * by nexus and tag number and mark it aborted, forwarding the abort to
 * the other controller when we are secondary for the LUN.  Returns 0
 * even if no matching task was found (abort/completion can cross on
 * the wire); returns 1 only if the LUN does not exist.
 */
static int
ctl_abort_task(union ctl_io *io)
{
    union ctl_io *xio;
    struct ctl_lun *lun;
    struct ctl_softc *softc;
#if 0
    struct sbuf sb;
    char printbuf[128];
#endif
    int found;
    uint32_t targ_lun;

    softc = control_softc;
    found = 0;

    /*
     * Look up the LUN.
     */
    targ_lun = io->io_hdr.nexus.targ_mapped_lun;
    mtx_lock(&softc->ctl_lock);
    if ((targ_lun < CTL_MAX_LUNS)
     && (softc->ctl_luns[targ_lun] != NULL))
        lun = softc->ctl_luns[targ_lun];
    else {
        mtx_unlock(&softc->ctl_lock);
        return (1);
    }

#if 0
    printf("ctl_abort_task: called for lun %lld, tag %d type %d\n",
           lun->lun, io->taskio.tag_num, io->taskio.tag_type);
#endif

    mtx_lock(&lun->lun_lock);
    mtx_unlock(&softc->ctl_lock);
    /*
     * Run through the OOA queue and attempt to find the given I/O.
     * The target port, initiator ID, tag type and tag number have to
     * match the values that we got from the initiator.  If we have an
     * untagged command to abort, simply abort the first untagged command
     * we come to.  We only allow one untagged command at a time of course.
     */
    for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
         xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
#if 0
        sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN);

        sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ",
                    lun->lun, xio->scsiio.tag_num,
                    xio->scsiio.tag_type,
                    (xio->io_hdr.blocked_links.tqe_prev
                    == NULL) ? "" : " BLOCKED",
                    (xio->io_hdr.flags &
                    CTL_FLAG_DMA_INPROG) ? " DMA" : "",
                    (xio->io_hdr.flags &
                    CTL_FLAG_ABORT) ? " ABORT" : "",
                    (xio->io_hdr.flags &
                    CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : ""));
        ctl_scsi_command_string(&xio->scsiio, NULL, &sb);
        sbuf_finish(&sb);
        printf("%s\n", sbuf_data(&sb));
#endif

        if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port)
         || (xio->io_hdr.nexus.initid.id != io->io_hdr.nexus.initid.id)
         || (xio->io_hdr.flags & CTL_FLAG_ABORT))
            continue;

        /*
         * If the abort says that the task is untagged, the
         * task in the queue must be untagged.  Otherwise,
         * we just check to see whether the tag numbers
         * match.  This is because the QLogic firmware
         * doesn't pass back the tag type in an abort
         * request.
         */
#if 0
        if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED)
          && (io->taskio.tag_type == CTL_TAG_UNTAGGED))
         || (xio->scsiio.tag_num == io->taskio.tag_num))
#endif
        /*
         * XXX KDM we've got problems with FC, because it
         * doesn't send down a tag type with aborts.  So we
         * can only really go by the tag number...
         * This may cause problems with parallel SCSI.
         * Need to figure that out!!
         */
        if (xio->scsiio.tag_num == io->taskio.tag_num) {
            xio->io_hdr.flags |= CTL_FLAG_ABORT;
            found = 1;
            if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 &&
                !(lun->flags & CTL_LUN_PRIMARY_SC)) {
                union ctl_ha_msg msg_info;

                io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
                msg_info.hdr.nexus = io->io_hdr.nexus;
                msg_info.task.task_action = CTL_TASK_ABORT_TASK;
                msg_info.task.tag_num = io->taskio.tag_num;
                msg_info.task.tag_type = io->taskio.tag_type;
                msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
                msg_info.hdr.original_sc = NULL;
                msg_info.hdr.serializing_sc = NULL;
#if 0
                printf("Sent Abort to other side\n");
#endif
                /* NOTE(review): HA send failure ignored. */
                if (ctl_ha_msg_send(CTL_HA_CHAN_CTL,
                    (void *)&msg_info, sizeof(msg_info), 0) !=
                    CTL_HA_STATUS_SUCCESS) {
                }
            }
#if 0
            printf("ctl_abort_task: found I/O to abort\n");
#endif
        }
    }
    mtx_unlock(&lun->lun_lock);

    if (found == 0) {
        /*
         * This isn't really an error.  It's entirely possible for
         * the abort and command completion to cross on the wire.
         * This is more of an informative/diagnostic error.
         */
#if 0
        printf("ctl_abort_task: ABORT sent for nonexistent I/O: "
               "%d:%d:%d:%d tag %d type %d\n",
               io->io_hdr.nexus.initid.id,
               io->io_hdr.nexus.targ_port,
               io->io_hdr.nexus.targ_target.id,
               io->io_hdr.nexus.targ_lun, io->taskio.tag_num,
               io->taskio.tag_type);
#endif
    }
    return (0);
}

/*
 * Dispatch a task management function (abort, reset, etc.) to its
 * handler, then complete the I/O with CTL_SUCCESS or CTL_ERROR.
 */
static void
ctl_run_task(union ctl_io *io)
{
    struct ctl_softc *softc = control_softc;
    int retval = 1;
    const char *task_desc;

    CTL_DEBUG_PRINT(("ctl_run_task\n"));

    KASSERT(io->io_hdr.io_type == CTL_IO_TASK,
        ("ctl_run_task: Unextected io_type %d\n",
         io->io_hdr.io_type));

    task_desc = ctl_scsi_task_string(&io->taskio);
    if (task_desc != NULL) {
#ifdef NEEDTOPORT
        csevent_log(CSC_CTL | CSC_SHELF_SW |
                    CTL_TASK_REPORT,
                    csevent_LogType_Trace,
                    csevent_Severity_Information,
                    csevent_AlertLevel_Green,
                    csevent_FRU_Firmware,
                    csevent_FRU_Unknown,
                    "CTL: received task: %s",task_desc);
#endif
    } else {
#ifdef NEEDTOPORT
        csevent_log(CSC_CTL | CSC_SHELF_SW |
                    CTL_TASK_REPORT,
                    csevent_LogType_Trace,
                    csevent_Severity_Information,
                    csevent_AlertLevel_Green,
                    csevent_FRU_Firmware,
                    csevent_FRU_Unknown,
                    "CTL: received unknown task "
                    "type: %d (%#x)",
                    io->taskio.task_action,
                    io->taskio.task_action);
#endif
    }
    switch (io->taskio.task_action) {
    case CTL_TASK_ABORT_TASK:
        retval = ctl_abort_task(io);
        break;
    case CTL_TASK_ABORT_TASK_SET:
    case CTL_TASK_CLEAR_TASK_SET:
        retval = ctl_abort_task_set(io);
        break;
    case CTL_TASK_CLEAR_ACA:
        break;
    case CTL_TASK_I_T_NEXUS_RESET:
        retval = ctl_i_t_nexus_reset(io);
        break;
    case CTL_TASK_LUN_RESET: {
        struct ctl_lun *lun;
        uint32_t targ_lun;

        targ_lun = io->io_hdr.nexus.targ_mapped_lun;
        mtx_lock(&softc->ctl_lock);
        if ((targ_lun < CTL_MAX_LUNS)
         && (softc->ctl_luns[targ_lun] != NULL))
            lun = softc->ctl_luns[targ_lun];
        else {
            mtx_unlock(&softc->ctl_lock);
            retval = 1;
            break;
        }

        /* Tell the other controller about the reset first. */
        if (!(io->io_hdr.flags &
            CTL_FLAG_FROM_OTHER_SC)) {
            union ctl_ha_msg msg_info;

            io->io_hdr.flags |=
                CTL_FLAG_SENT_2OTHER_SC;
            msg_info.hdr.msg_type =
                CTL_MSG_MANAGE_TASKS;
            msg_info.hdr.nexus = io->io_hdr.nexus;
            msg_info.task.task_action =
                CTL_TASK_LUN_RESET;
            msg_info.hdr.original_sc = NULL;
            msg_info.hdr.serializing_sc = NULL;
            if (CTL_HA_STATUS_SUCCESS !=
                ctl_ha_msg_send(CTL_HA_CHAN_CTL,
                (void *)&msg_info,
                sizeof(msg_info), 0)) {
            }
        }

        retval = ctl_lun_reset(lun, io,
                               CTL_UA_LUN_RESET);
        mtx_unlock(&softc->ctl_lock);
        break;
    }
    case CTL_TASK_TARGET_RESET:
        retval = ctl_target_reset(softc, io, CTL_UA_TARG_RESET);
        break;
    case CTL_TASK_BUS_RESET:
        retval = ctl_bus_reset(softc, io);
        break;
    case CTL_TASK_PORT_LOGIN:
        break;
    case CTL_TASK_PORT_LOGOUT:
        break;
    default:
        printf("ctl_run_task: got unknown task management event %d\n",
               io->taskio.task_action);
        break;
    }
    if (retval == 0)
        io->io_hdr.status = CTL_SUCCESS;
    else
        io->io_hdr.status = CTL_ERROR;
    ctl_done(io);
}

/*
 * For HA operation.  Handle commands that come in from the other
 * controller.
 */
/*
 * Dispatch one inter-controller (ISC) message based on msg_type.  Each case
 * decides whether the ctl_io wrapper is freed here (free_io) or handed off
 * to another completion path.
 *
 * NOTE(review): targ_mapped_lun is used to index ctl_luns without a range
 * or NULL check; lun is dereferenced in the R2R and FINISH_IO cases —
 * presumably the peer only sends these for valid LUNs; verify.
 */
static void
ctl_handle_isc(union ctl_io *io)
{
	int free_io;		/* nonzero => ctl_free_io() at the end */
	struct ctl_lun *lun;
	struct ctl_softc *softc;
	uint32_t targ_lun;

	softc = control_softc;

	targ_lun = io->io_hdr.nexus.targ_mapped_lun;
	lun = softc->ctl_luns[targ_lun];

	switch (io->io_hdr.msg_type) {
	case CTL_MSG_SERIALIZE:
		/* Queue the peer's command through our serializer. */
		free_io = ctl_serialize_other_sc_cmd(&io->scsiio);
		break;
	case CTL_MSG_R2R: {
		const struct ctl_cmd_entry *entry;

		/*
		 * This is only used in SER_ONLY mode.
		 */
		free_io = 0;
		entry = ctl_get_cmd_entry(&io->scsiio, NULL);
		mtx_lock(&lun->lun_lock);
		if (ctl_scsiio_lun_check(lun,
		    entry, (struct ctl_scsiio *)io) != 0) {
			mtx_unlock(&lun->lun_lock);
			ctl_done(io);
			break;
		}
		io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
		mtx_unlock(&lun->lun_lock);
		ctl_enqueue_rtr(io);
		break;
	}
	case CTL_MSG_FINISH_IO:
		if (softc->ha_mode == CTL_HA_MODE_XFER) {
			/* XFER mode: run full completion locally. */
			free_io = 0;
			ctl_done(io);
		} else {
			/* SER_ONLY: just drop it from the OOA queue. */
			free_io = 1;
			mtx_lock(&lun->lun_lock);
			TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr,
				     ooa_links);
			ctl_check_blocked(lun);
			mtx_unlock(&lun->lun_lock);
		}
		break;
	case CTL_MSG_PERS_ACTION:
		/* Persistent reservation update from the peer. */
		ctl_hndl_per_res_out_on_other_sc(
		    (union ctl_ha_msg *)&io->presio.pr_msg);
		free_io = 1;
		break;
	case CTL_MSG_BAD_JUJU:
		/* Peer reported an error; complete the I/O with it. */
		free_io = 0;
		ctl_done(io);
		break;
	case CTL_MSG_DATAMOVE:
		/* Only used in XFER mode */
		free_io = 0;
		ctl_datamove_remote(io);
		break;
	case CTL_MSG_DATAMOVE_DONE:
		/* Only used in XFER mode */
		free_io = 0;
		io->scsiio.be_move_done(io);
		break;
	default:
		free_io = 1;
		printf("%s: Invalid message type %d\n",
		       __func__, io->io_hdr.msg_type);
		break;
	}
	if (free_io)
		ctl_free_io(io);

}
12014 12015 12016/* 12017 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 12018 * there is no match. 12019 */ 12020static ctl_lun_error_pattern 12021ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) 12022{ 12023 const struct ctl_cmd_entry *entry; 12024 ctl_lun_error_pattern filtered_pattern, pattern; 12025 12026 pattern = desc->error_pattern; 12027 12028 /* 12029 * XXX KDM we need more data passed into this function to match a 12030 * custom pattern, and we actually need to implement custom pattern 12031 * matching. 12032 */ 12033 if (pattern & CTL_LUN_PAT_CMD) 12034 return (CTL_LUN_PAT_CMD); 12035 12036 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) 12037 return (CTL_LUN_PAT_ANY); 12038 12039 entry = ctl_get_cmd_entry(ctsio, NULL); 12040 12041 filtered_pattern = entry->pattern & pattern; 12042 12043 /* 12044 * If the user requested specific flags in the pattern (e.g. 12045 * CTL_LUN_PAT_RANGE), make sure the command supports all of those 12046 * flags. 12047 * 12048 * If the user did not specify any flags, it doesn't matter whether 12049 * or not the command supports the flags. 12050 */ 12051 if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != 12052 (pattern & ~CTL_LUN_PAT_MASK)) 12053 return (CTL_LUN_PAT_NONE); 12054 12055 /* 12056 * If the user asked for a range check, see if the requested LBA 12057 * range overlaps with this command's LBA range. 12058 */ 12059 if (filtered_pattern & CTL_LUN_PAT_RANGE) { 12060 uint64_t lba1; 12061 uint64_t len1; 12062 ctl_action action; 12063 int retval; 12064 12065 retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); 12066 if (retval != 0) 12067 return (CTL_LUN_PAT_NONE); 12068 12069 action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, 12070 desc->lba_range.len, FALSE); 12071 /* 12072 * A "pass" means that the LBA ranges don't overlap, so 12073 * this doesn't match the user's range criteria. 
12074 */ 12075 if (action == CTL_ACTION_PASS) 12076 return (CTL_LUN_PAT_NONE); 12077 } 12078 12079 return (filtered_pattern); 12080} 12081 12082static void 12083ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) 12084{ 12085 struct ctl_error_desc *desc, *desc2; 12086 12087 mtx_assert(&lun->lun_lock, MA_OWNED); 12088 12089 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 12090 ctl_lun_error_pattern pattern; 12091 /* 12092 * Check to see whether this particular command matches 12093 * the pattern in the descriptor. 12094 */ 12095 pattern = ctl_cmd_pattern_match(&io->scsiio, desc); 12096 if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) 12097 continue; 12098 12099 switch (desc->lun_error & CTL_LUN_INJ_TYPE) { 12100 case CTL_LUN_INJ_ABORTED: 12101 ctl_set_aborted(&io->scsiio); 12102 break; 12103 case CTL_LUN_INJ_MEDIUM_ERR: 12104 ctl_set_medium_error(&io->scsiio); 12105 break; 12106 case CTL_LUN_INJ_UA: 12107 /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET 12108 * OCCURRED */ 12109 ctl_set_ua(&io->scsiio, 0x29, 0x00); 12110 break; 12111 case CTL_LUN_INJ_CUSTOM: 12112 /* 12113 * We're assuming the user knows what he is doing. 12114 * Just copy the sense information without doing 12115 * checks. 12116 */ 12117 bcopy(&desc->custom_sense, &io->scsiio.sense_data, 12118 MIN(sizeof(desc->custom_sense), 12119 sizeof(io->scsiio.sense_data))); 12120 io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; 12121 io->scsiio.sense_len = SSD_FULL_SIZE; 12122 io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 12123 break; 12124 case CTL_LUN_INJ_NONE: 12125 default: 12126 /* 12127 * If this is an error injection type we don't know 12128 * about, clear the continuous flag (if it is set) 12129 * so it will get deleted below. 
12130 */ 12131 desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; 12132 break; 12133 } 12134 /* 12135 * By default, each error injection action is a one-shot 12136 */ 12137 if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) 12138 continue; 12139 12140 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); 12141 12142 free(desc, M_CTL); 12143 } 12144} 12145 12146#ifdef CTL_IO_DELAY 12147static void 12148ctl_datamove_timer_wakeup(void *arg) 12149{ 12150 union ctl_io *io; 12151 12152 io = (union ctl_io *)arg; 12153 12154 ctl_datamove(io); 12155} 12156#endif /* CTL_IO_DELAY */ 12157 12158void 12159ctl_datamove(union ctl_io *io) 12160{ 12161 void (*fe_datamove)(union ctl_io *io); 12162 12163 mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED); 12164 12165 CTL_DEBUG_PRINT(("ctl_datamove\n")); 12166 12167#ifdef CTL_TIME_IO 12168 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 12169 char str[256]; 12170 char path_str[64]; 12171 struct sbuf sb; 12172 12173 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 12174 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12175 12176 sbuf_cat(&sb, path_str); 12177 switch (io->io_hdr.io_type) { 12178 case CTL_IO_SCSI: 12179 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 12180 sbuf_printf(&sb, "\n"); 12181 sbuf_cat(&sb, path_str); 12182 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12183 io->scsiio.tag_num, io->scsiio.tag_type); 12184 break; 12185 case CTL_IO_TASK: 12186 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 12187 "Tag Type: %d\n", io->taskio.task_action, 12188 io->taskio.tag_num, io->taskio.tag_type); 12189 break; 12190 default: 12191 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12192 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 12193 break; 12194 } 12195 sbuf_cat(&sb, path_str); 12196 sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", 12197 (intmax_t)time_uptime - io->io_hdr.start_time); 12198 sbuf_finish(&sb); 12199 printf("%s", sbuf_data(&sb)); 12200 } 12201#endif /* CTL_TIME_IO */ 12202 
12203#ifdef CTL_IO_DELAY 12204 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 12205 struct ctl_lun *lun; 12206 12207 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12208 12209 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 12210 } else { 12211 struct ctl_lun *lun; 12212 12213 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 12214 if ((lun != NULL) 12215 && (lun->delay_info.datamove_delay > 0)) { 12216 struct callout *callout; 12217 12218 callout = (struct callout *)&io->io_hdr.timer_bytes; 12219 callout_init(callout, /*mpsafe*/ 1); 12220 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 12221 callout_reset(callout, 12222 lun->delay_info.datamove_delay * hz, 12223 ctl_datamove_timer_wakeup, io); 12224 if (lun->delay_info.datamove_type == 12225 CTL_DELAY_TYPE_ONESHOT) 12226 lun->delay_info.datamove_delay = 0; 12227 return; 12228 } 12229 } 12230#endif 12231 12232 /* 12233 * This command has been aborted. Set the port status, so we fail 12234 * the data move. 12235 */ 12236 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 12237 printf("ctl_datamove: tag 0x%04x on (%ju:%d:%ju:%d) aborted\n", 12238 io->scsiio.tag_num,(uintmax_t)io->io_hdr.nexus.initid.id, 12239 io->io_hdr.nexus.targ_port, 12240 (uintmax_t)io->io_hdr.nexus.targ_target.id, 12241 io->io_hdr.nexus.targ_lun); 12242 io->io_hdr.port_status = 31337; 12243 /* 12244 * Note that the backend, in this case, will get the 12245 * callback in its context. In other cases it may get 12246 * called in the frontend's interrupt thread context. 12247 */ 12248 io->scsiio.be_move_done(io); 12249 return; 12250 } 12251 12252 /* Don't confuse frontend with zero length data move. */ 12253 if (io->scsiio.kern_data_len == 0) { 12254 io->scsiio.be_move_done(io); 12255 return; 12256 } 12257 12258 /* 12259 * If we're in XFER mode and this I/O is from the other shelf 12260 * controller, we need to send the DMA to the other side to 12261 * actually transfer the data to/from the host. 
In serialize only 12262 * mode the transfer happens below CTL and ctl_datamove() is only 12263 * called on the machine that originally received the I/O. 12264 */ 12265 if ((control_softc->ha_mode == CTL_HA_MODE_XFER) 12266 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 12267 union ctl_ha_msg msg; 12268 uint32_t sg_entries_sent; 12269 int do_sg_copy; 12270 int i; 12271 12272 memset(&msg, 0, sizeof(msg)); 12273 msg.hdr.msg_type = CTL_MSG_DATAMOVE; 12274 msg.hdr.original_sc = io->io_hdr.original_sc; 12275 msg.hdr.serializing_sc = io; 12276 msg.hdr.nexus = io->io_hdr.nexus; 12277 msg.dt.flags = io->io_hdr.flags; 12278 /* 12279 * We convert everything into a S/G list here. We can't 12280 * pass by reference, only by value between controllers. 12281 * So we can't pass a pointer to the S/G list, only as many 12282 * S/G entries as we can fit in here. If it's possible for 12283 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, 12284 * then we need to break this up into multiple transfers. 12285 */ 12286 if (io->scsiio.kern_sg_entries == 0) { 12287 msg.dt.kern_sg_entries = 1; 12288 /* 12289 * If this is in cached memory, flush the cache 12290 * before we send the DMA request to the other 12291 * controller. We want to do this in either the 12292 * read or the write case. The read case is 12293 * straightforward. In the write case, we want to 12294 * make sure nothing is in the local cache that 12295 * could overwrite the DMAed data. 12296 */ 12297 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 12298 /* 12299 * XXX KDM use bus_dmamap_sync() here. 12300 */ 12301 } 12302 12303 /* 12304 * Convert to a physical address if this is a 12305 * virtual address. 12306 */ 12307 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 12308 msg.dt.sg_list[0].addr = 12309 io->scsiio.kern_data_ptr; 12310 } else { 12311 /* 12312 * XXX KDM use busdma here! 
12313 */ 12314#if 0 12315 msg.dt.sg_list[0].addr = (void *) 12316 vtophys(io->scsiio.kern_data_ptr); 12317#endif 12318 } 12319 12320 msg.dt.sg_list[0].len = io->scsiio.kern_data_len; 12321 do_sg_copy = 0; 12322 } else { 12323 struct ctl_sg_entry *sgl; 12324 12325 do_sg_copy = 1; 12326 msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; 12327 sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; 12328 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 12329 /* 12330 * XXX KDM use bus_dmamap_sync() here. 12331 */ 12332 } 12333 } 12334 12335 msg.dt.kern_data_len = io->scsiio.kern_data_len; 12336 msg.dt.kern_total_len = io->scsiio.kern_total_len; 12337 msg.dt.kern_data_resid = io->scsiio.kern_data_resid; 12338 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; 12339 msg.dt.sg_sequence = 0; 12340 12341 /* 12342 * Loop until we've sent all of the S/G entries. On the 12343 * other end, we'll recompose these S/G entries into one 12344 * contiguous list before passing it to the 12345 */ 12346 for (sg_entries_sent = 0; sg_entries_sent < 12347 msg.dt.kern_sg_entries; msg.dt.sg_sequence++) { 12348 msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list)/ 12349 sizeof(msg.dt.sg_list[0])), 12350 msg.dt.kern_sg_entries - sg_entries_sent); 12351 12352 if (do_sg_copy != 0) { 12353 struct ctl_sg_entry *sgl; 12354 int j; 12355 12356 sgl = (struct ctl_sg_entry *) 12357 io->scsiio.kern_data_ptr; 12358 /* 12359 * If this is in cached memory, flush the cache 12360 * before we send the DMA request to the other 12361 * controller. We want to do this in either 12362 * the * read or the write case. The read 12363 * case is straightforward. In the write 12364 * case, we want to make sure nothing is 12365 * in the local cache that could overwrite 12366 * the DMAed data. 
12367 */ 12368 12369 for (i = sg_entries_sent, j = 0; 12370 i < msg.dt.cur_sg_entries; i++, j++) { 12371 if ((io->io_hdr.flags & 12372 CTL_FLAG_NO_DATASYNC) == 0) { 12373 /* 12374 * XXX KDM use bus_dmamap_sync() 12375 */ 12376 } 12377 if ((io->io_hdr.flags & 12378 CTL_FLAG_BUS_ADDR) == 0) { 12379 /* 12380 * XXX KDM use busdma. 12381 */ 12382#if 0 12383 msg.dt.sg_list[j].addr =(void *) 12384 vtophys(sgl[i].addr); 12385#endif 12386 } else { 12387 msg.dt.sg_list[j].addr = 12388 sgl[i].addr; 12389 } 12390 msg.dt.sg_list[j].len = sgl[i].len; 12391 } 12392 } 12393 12394 sg_entries_sent += msg.dt.cur_sg_entries; 12395 if (sg_entries_sent >= msg.dt.kern_sg_entries) 12396 msg.dt.sg_last = 1; 12397 else 12398 msg.dt.sg_last = 0; 12399 12400 /* 12401 * XXX KDM drop and reacquire the lock here? 12402 */ 12403 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12404 sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) { 12405 /* 12406 * XXX do something here. 12407 */ 12408 } 12409 12410 msg.dt.sent_sg_entries = sg_entries_sent; 12411 } 12412 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12413 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) 12414 ctl_failover_io(io, /*have_lock*/ 0); 12415 12416 } else { 12417 12418 /* 12419 * Lookup the fe_datamove() function for this particular 12420 * front end. 
12421 */ 12422 fe_datamove = 12423 control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 12424 12425 fe_datamove(io); 12426 } 12427} 12428 12429static void 12430ctl_send_datamove_done(union ctl_io *io, int have_lock) 12431{ 12432 union ctl_ha_msg msg; 12433 int isc_status; 12434 12435 memset(&msg, 0, sizeof(msg)); 12436 12437 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 12438 msg.hdr.original_sc = io; 12439 msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 12440 msg.hdr.nexus = io->io_hdr.nexus; 12441 msg.hdr.status = io->io_hdr.status; 12442 msg.scsi.tag_num = io->scsiio.tag_num; 12443 msg.scsi.tag_type = io->scsiio.tag_type; 12444 msg.scsi.scsi_status = io->scsiio.scsi_status; 12445 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 12446 sizeof(io->scsiio.sense_data)); 12447 msg.scsi.sense_len = io->scsiio.sense_len; 12448 msg.scsi.sense_residual = io->scsiio.sense_residual; 12449 msg.scsi.fetd_status = io->io_hdr.port_status; 12450 msg.scsi.residual = io->scsiio.residual; 12451 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12452 12453 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12454 ctl_failover_io(io, /*have_lock*/ have_lock); 12455 return; 12456 } 12457 12458 isc_status = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0); 12459 if (isc_status > CTL_HA_STATUS_SUCCESS) { 12460 /* XXX do something if this fails */ 12461 } 12462 12463} 12464 12465/* 12466 * The DMA to the remote side is done, now we need to tell the other side 12467 * we're done so it can continue with its data movement. 
12468 */ 12469static void 12470ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) 12471{ 12472 union ctl_io *io; 12473 12474 io = rq->context; 12475 12476 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12477 printf("%s: ISC DMA write failed with error %d", __func__, 12478 rq->ret); 12479 ctl_set_internal_failure(&io->scsiio, 12480 /*sks_valid*/ 1, 12481 /*retry_count*/ rq->ret); 12482 } 12483 12484 ctl_dt_req_free(rq); 12485 12486 /* 12487 * In this case, we had to malloc the memory locally. Free it. 12488 */ 12489 if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) { 12490 int i; 12491 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12492 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12493 } 12494 /* 12495 * The data is in local and remote memory, so now we need to send 12496 * status (good or back) back to the other side. 12497 */ 12498 ctl_send_datamove_done(io, /*have_lock*/ 0); 12499} 12500 12501/* 12502 * We've moved the data from the host/controller into local memory. Now we 12503 * need to push it over to the remote controller's memory. 12504 */ 12505static int 12506ctl_datamove_remote_dm_write_cb(union ctl_io *io) 12507{ 12508 int retval; 12509 12510 retval = 0; 12511 12512 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, 12513 ctl_datamove_remote_write_cb); 12514 12515 return (retval); 12516} 12517 12518static void 12519ctl_datamove_remote_write(union ctl_io *io) 12520{ 12521 int retval; 12522 void (*fe_datamove)(union ctl_io *io); 12523 12524 /* 12525 * - Get the data from the host/HBA into local memory. 12526 * - DMA memory from the local controller to the remote controller. 12527 * - Send status back to the remote controller. 
12528 */ 12529 12530 retval = ctl_datamove_remote_sgl_setup(io); 12531 if (retval != 0) 12532 return; 12533 12534 /* Switch the pointer over so the FETD knows what to do */ 12535 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12536 12537 /* 12538 * Use a custom move done callback, since we need to send completion 12539 * back to the other controller, not to the backend on this side. 12540 */ 12541 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 12542 12543 fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 12544 12545 fe_datamove(io); 12546 12547 return; 12548 12549} 12550 12551static int 12552ctl_datamove_remote_dm_read_cb(union ctl_io *io) 12553{ 12554#if 0 12555 char str[256]; 12556 char path_str[64]; 12557 struct sbuf sb; 12558#endif 12559 12560 /* 12561 * In this case, we had to malloc the memory locally. Free it. 12562 */ 12563 if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) { 12564 int i; 12565 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12566 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12567 } 12568 12569#if 0 12570 scsi_path_string(io, path_str, sizeof(path_str)); 12571 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12572 sbuf_cat(&sb, path_str); 12573 scsi_command_string(&io->scsiio, NULL, &sb); 12574 sbuf_printf(&sb, "\n"); 12575 sbuf_cat(&sb, path_str); 12576 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12577 io->scsiio.tag_num, io->scsiio.tag_type); 12578 sbuf_cat(&sb, path_str); 12579 sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__, 12580 io->io_hdr.flags, io->io_hdr.status); 12581 sbuf_finish(&sb); 12582 printk("%s", sbuf_data(&sb)); 12583#endif 12584 12585 12586 /* 12587 * The read is done, now we need to send status (good or bad) back 12588 * to the other side. 
12589 */ 12590 ctl_send_datamove_done(io, /*have_lock*/ 0); 12591 12592 return (0); 12593} 12594 12595static void 12596ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 12597{ 12598 union ctl_io *io; 12599 void (*fe_datamove)(union ctl_io *io); 12600 12601 io = rq->context; 12602 12603 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12604 printf("%s: ISC DMA read failed with error %d", __func__, 12605 rq->ret); 12606 ctl_set_internal_failure(&io->scsiio, 12607 /*sks_valid*/ 1, 12608 /*retry_count*/ rq->ret); 12609 } 12610 12611 ctl_dt_req_free(rq); 12612 12613 /* Switch the pointer over so the FETD knows what to do */ 12614 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12615 12616 /* 12617 * Use a custom move done callback, since we need to send completion 12618 * back to the other controller, not to the backend on this side. 12619 */ 12620 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 12621 12622 /* XXX KDM add checks like the ones in ctl_datamove? */ 12623 12624 fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 12625 12626 fe_datamove(io); 12627} 12628 12629static int 12630ctl_datamove_remote_sgl_setup(union ctl_io *io) 12631{ 12632 struct ctl_sg_entry *local_sglist, *remote_sglist; 12633 struct ctl_sg_entry *local_dma_sglist, *remote_dma_sglist; 12634 struct ctl_softc *softc; 12635 int retval; 12636 int i; 12637 12638 retval = 0; 12639 softc = control_softc; 12640 12641 local_sglist = io->io_hdr.local_sglist; 12642 local_dma_sglist = io->io_hdr.local_dma_sglist; 12643 remote_sglist = io->io_hdr.remote_sglist; 12644 remote_dma_sglist = io->io_hdr.remote_dma_sglist; 12645 12646 if (io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) { 12647 for (i = 0; i < io->scsiio.kern_sg_entries; i++) { 12648 local_sglist[i].len = remote_sglist[i].len; 12649 12650 /* 12651 * XXX Detect the situation where the RS-level I/O 12652 * redirector on the other side has already read the 12653 * data off of the AOR RS on this 
side, and 12654 * transferred it to remote (mirror) memory on the 12655 * other side. Since we already have the data in 12656 * memory here, we just need to use it. 12657 * 12658 * XXX KDM this can probably be removed once we 12659 * get the cache device code in and take the 12660 * current AOR implementation out. 12661 */ 12662#ifdef NEEDTOPORT 12663 if ((remote_sglist[i].addr >= 12664 (void *)vtophys(softc->mirr->addr)) 12665 && (remote_sglist[i].addr < 12666 ((void *)vtophys(softc->mirr->addr) + 12667 CacheMirrorOffset))) { 12668 local_sglist[i].addr = remote_sglist[i].addr - 12669 CacheMirrorOffset; 12670 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 12671 CTL_FLAG_DATA_IN) 12672 io->io_hdr.flags |= CTL_FLAG_REDIR_DONE; 12673 } else { 12674 local_sglist[i].addr = remote_sglist[i].addr + 12675 CacheMirrorOffset; 12676 } 12677#endif 12678#if 0 12679 printf("%s: local %p, remote %p, len %d\n", 12680 __func__, local_sglist[i].addr, 12681 remote_sglist[i].addr, local_sglist[i].len); 12682#endif 12683 } 12684 } else { 12685 uint32_t len_to_go; 12686 12687 /* 12688 * In this case, we don't have automatically allocated 12689 * memory for this I/O on this controller. This typically 12690 * happens with internal CTL I/O -- e.g. inquiry, mode 12691 * sense, etc. Anything coming from RAIDCore will have 12692 * a mirror area available. 12693 */ 12694 len_to_go = io->scsiio.kern_data_len; 12695 12696 /* 12697 * Clear the no datasync flag, we have to use malloced 12698 * buffers. 12699 */ 12700 io->io_hdr.flags &= ~CTL_FLAG_NO_DATASYNC; 12701 12702 /* 12703 * The difficult thing here is that the size of the various 12704 * S/G segments may be different than the size from the 12705 * remote controller. That'll make it harder when DMAing 12706 * the data back to the other side. 
12707 */ 12708 for (i = 0; (i < sizeof(io->io_hdr.remote_sglist) / 12709 sizeof(io->io_hdr.remote_sglist[0])) && 12710 (len_to_go > 0); i++) { 12711 local_sglist[i].len = MIN(len_to_go, 131072); 12712 CTL_SIZE_8B(local_dma_sglist[i].len, 12713 local_sglist[i].len); 12714 local_sglist[i].addr = 12715 malloc(local_dma_sglist[i].len, M_CTL,M_WAITOK); 12716 12717 local_dma_sglist[i].addr = local_sglist[i].addr; 12718 12719 if (local_sglist[i].addr == NULL) { 12720 int j; 12721 12722 printf("malloc failed for %zd bytes!", 12723 local_dma_sglist[i].len); 12724 for (j = 0; j < i; j++) { 12725 free(local_sglist[j].addr, M_CTL); 12726 } 12727 ctl_set_internal_failure(&io->scsiio, 12728 /*sks_valid*/ 1, 12729 /*retry_count*/ 4857); 12730 retval = 1; 12731 goto bailout_error; 12732 12733 } 12734 /* XXX KDM do we need a sync here? */ 12735 12736 len_to_go -= local_sglist[i].len; 12737 } 12738 /* 12739 * Reset the number of S/G entries accordingly. The 12740 * original number of S/G entries is available in 12741 * rem_sg_entries. 
12742 */ 12743 io->scsiio.kern_sg_entries = i; 12744 12745#if 0 12746 printf("%s: kern_sg_entries = %d\n", __func__, 12747 io->scsiio.kern_sg_entries); 12748 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12749 printf("%s: sg[%d] = %p, %d (DMA: %d)\n", __func__, i, 12750 local_sglist[i].addr, local_sglist[i].len, 12751 local_dma_sglist[i].len); 12752#endif 12753 } 12754 12755 12756 return (retval); 12757 12758bailout_error: 12759 12760 ctl_send_datamove_done(io, /*have_lock*/ 0); 12761 12762 return (retval); 12763} 12764 12765static int 12766ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 12767 ctl_ha_dt_cb callback) 12768{ 12769 struct ctl_ha_dt_req *rq; 12770 struct ctl_sg_entry *remote_sglist, *local_sglist; 12771 struct ctl_sg_entry *remote_dma_sglist, *local_dma_sglist; 12772 uint32_t local_used, remote_used, total_used; 12773 int retval; 12774 int i, j; 12775 12776 retval = 0; 12777 12778 rq = ctl_dt_req_alloc(); 12779 12780 /* 12781 * If we failed to allocate the request, and if the DMA didn't fail 12782 * anyway, set busy status. This is just a resource allocation 12783 * failure. 12784 */ 12785 if ((rq == NULL) 12786 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) 12787 ctl_set_busy(&io->scsiio); 12788 12789 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) { 12790 12791 if (rq != NULL) 12792 ctl_dt_req_free(rq); 12793 12794 /* 12795 * The data move failed. We need to return status back 12796 * to the other controller. No point in trying to DMA 12797 * data to the remote controller. 
12798 */ 12799 12800 ctl_send_datamove_done(io, /*have_lock*/ 0); 12801 12802 retval = 1; 12803 12804 goto bailout; 12805 } 12806 12807 local_sglist = io->io_hdr.local_sglist; 12808 local_dma_sglist = io->io_hdr.local_dma_sglist; 12809 remote_sglist = io->io_hdr.remote_sglist; 12810 remote_dma_sglist = io->io_hdr.remote_dma_sglist; 12811 local_used = 0; 12812 remote_used = 0; 12813 total_used = 0; 12814 12815 if (io->io_hdr.flags & CTL_FLAG_REDIR_DONE) { 12816 rq->ret = CTL_HA_STATUS_SUCCESS; 12817 rq->context = io; 12818 callback(rq); 12819 goto bailout; 12820 } 12821 12822 /* 12823 * Pull/push the data over the wire from/to the other controller. 12824 * This takes into account the possibility that the local and 12825 * remote sglists may not be identical in terms of the size of 12826 * the elements and the number of elements. 12827 * 12828 * One fundamental assumption here is that the length allocated for 12829 * both the local and remote sglists is identical. Otherwise, we've 12830 * essentially got a coding error of some sort. 12831 */ 12832 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { 12833 int isc_ret; 12834 uint32_t cur_len, dma_length; 12835 uint8_t *tmp_ptr; 12836 12837 rq->id = CTL_HA_DATA_CTL; 12838 rq->command = command; 12839 rq->context = io; 12840 12841 /* 12842 * Both pointers should be aligned. But it is possible 12843 * that the allocation length is not. They should both 12844 * also have enough slack left over at the end, though, 12845 * to round up to the next 8 byte boundary. 12846 */ 12847 cur_len = MIN(local_sglist[i].len - local_used, 12848 remote_sglist[j].len - remote_used); 12849 12850 /* 12851 * In this case, we have a size issue and need to decrease 12852 * the size, except in the case where we actually have less 12853 * than 8 bytes left. In that case, we need to increase 12854 * the DMA length to get the last bit. 
12855 */ 12856 if ((cur_len & 0x7) != 0) { 12857 if (cur_len > 0x7) { 12858 cur_len = cur_len - (cur_len & 0x7); 12859 dma_length = cur_len; 12860 } else { 12861 CTL_SIZE_8B(dma_length, cur_len); 12862 } 12863 12864 } else 12865 dma_length = cur_len; 12866 12867 /* 12868 * If we had to allocate memory for this I/O, instead of using 12869 * the non-cached mirror memory, we'll need to flush the cache 12870 * before trying to DMA to the other controller. 12871 * 12872 * We could end up doing this multiple times for the same 12873 * segment if we have a larger local segment than remote 12874 * segment. That shouldn't be an issue. 12875 */ 12876 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 12877 /* 12878 * XXX KDM use bus_dmamap_sync() here. 12879 */ 12880 } 12881 12882 rq->size = dma_length; 12883 12884 tmp_ptr = (uint8_t *)local_sglist[i].addr; 12885 tmp_ptr += local_used; 12886 12887 /* Use physical addresses when talking to ISC hardware */ 12888 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 12889 /* XXX KDM use busdma */ 12890#if 0 12891 rq->local = vtophys(tmp_ptr); 12892#endif 12893 } else 12894 rq->local = tmp_ptr; 12895 12896 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 12897 tmp_ptr += remote_used; 12898 rq->remote = tmp_ptr; 12899 12900 rq->callback = NULL; 12901 12902 local_used += cur_len; 12903 if (local_used >= local_sglist[i].len) { 12904 i++; 12905 local_used = 0; 12906 } 12907 12908 remote_used += cur_len; 12909 if (remote_used >= remote_sglist[j].len) { 12910 j++; 12911 remote_used = 0; 12912 } 12913 total_used += cur_len; 12914 12915 if (total_used >= io->scsiio.kern_data_len) 12916 rq->callback = callback; 12917 12918 if ((rq->size & 0x7) != 0) { 12919 printf("%s: warning: size %d is not on 8b boundary\n", 12920 __func__, rq->size); 12921 } 12922 if (((uintptr_t)rq->local & 0x7) != 0) { 12923 printf("%s: warning: local %p not on 8b boundary\n", 12924 __func__, rq->local); 12925 } 12926 if (((uintptr_t)rq->remote & 0x7) != 0) { 12927 
printf("%s: warning: remote %p not on 8b boundary\n", 12928 __func__, rq->local); 12929 } 12930#if 0 12931 printf("%s: %s: local %#x remote %#x size %d\n", __func__, 12932 (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ", 12933 rq->local, rq->remote, rq->size); 12934#endif 12935 12936 isc_ret = ctl_dt_single(rq); 12937 if (isc_ret == CTL_HA_STATUS_WAIT) 12938 continue; 12939 12940 if (isc_ret == CTL_HA_STATUS_DISCONNECT) { 12941 rq->ret = CTL_HA_STATUS_SUCCESS; 12942 } else { 12943 rq->ret = isc_ret; 12944 } 12945 callback(rq); 12946 goto bailout; 12947 } 12948 12949bailout: 12950 return (retval); 12951 12952} 12953 12954static void 12955ctl_datamove_remote_read(union ctl_io *io) 12956{ 12957 int retval; 12958 int i; 12959 12960 /* 12961 * This will send an error to the other controller in the case of a 12962 * failure. 12963 */ 12964 retval = ctl_datamove_remote_sgl_setup(io); 12965 if (retval != 0) 12966 return; 12967 12968 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 12969 ctl_datamove_remote_read_cb); 12970 if ((retval != 0) 12971 && ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0)) { 12972 /* 12973 * Make sure we free memory if there was an error.. The 12974 * ctl_datamove_remote_xfer() function will send the 12975 * datamove done message, or call the callback with an 12976 * error if there is a problem. 12977 */ 12978 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12979 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12980 } 12981 12982 return; 12983} 12984 12985/* 12986 * Process a datamove request from the other controller. This is used for 12987 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 12988 * first. Once that is complete, the data gets DMAed into the remote 12989 * controller's memory. For reads, we DMA from the remote controller's 12990 * memory into our memory first, and then move it out to the FETD. 
12991 */ 12992static void 12993ctl_datamove_remote(union ctl_io *io) 12994{ 12995 struct ctl_softc *softc; 12996 12997 softc = control_softc; 12998 12999 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 13000 13001 /* 13002 * Note that we look for an aborted I/O here, but don't do some of 13003 * the other checks that ctl_datamove() normally does. 13004 * We don't need to run the datamove delay code, since that should 13005 * have been done if need be on the other controller. 13006 */ 13007 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 13008 printf("%s: tag 0x%04x on (%d:%d:%d:%d) aborted\n", __func__, 13009 io->scsiio.tag_num, io->io_hdr.nexus.initid.id, 13010 io->io_hdr.nexus.targ_port, 13011 io->io_hdr.nexus.targ_target.id, 13012 io->io_hdr.nexus.targ_lun); 13013 io->io_hdr.port_status = 31338; 13014 ctl_send_datamove_done(io, /*have_lock*/ 0); 13015 return; 13016 } 13017 13018 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) { 13019 ctl_datamove_remote_write(io); 13020 } else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN){ 13021 ctl_datamove_remote_read(io); 13022 } else { 13023 union ctl_ha_msg msg; 13024 struct scsi_sense_data *sense; 13025 uint8_t sks[3]; 13026 int retry_count; 13027 13028 memset(&msg, 0, sizeof(msg)); 13029 13030 msg.hdr.msg_type = CTL_MSG_BAD_JUJU; 13031 msg.hdr.status = CTL_SCSI_ERROR; 13032 msg.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 13033 13034 retry_count = 4243; 13035 13036 sense = &msg.scsi.sense_data; 13037 sks[0] = SSD_SCS_VALID; 13038 sks[1] = (retry_count >> 8) & 0xff; 13039 sks[2] = retry_count & 0xff; 13040 13041 /* "Internal target failure" */ 13042 scsi_set_sense_data(sense, 13043 /*sense_format*/ SSD_TYPE_NONE, 13044 /*current_error*/ 1, 13045 /*sense_key*/ SSD_KEY_HARDWARE_ERROR, 13046 /*asc*/ 0x44, 13047 /*ascq*/ 0x00, 13048 /*type*/ SSD_ELEM_SKS, 13049 /*size*/ sizeof(sks), 13050 /*data*/ sks, 13051 SSD_ELEM_NONE); 13052 13053 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 13054 if (io->io_hdr.flags & 
CTL_FLAG_FAILOVER) { 13055 ctl_failover_io(io, /*have_lock*/ 1); 13056 return; 13057 } 13058 13059 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0) > 13060 CTL_HA_STATUS_SUCCESS) { 13061 /* XXX KDM what to do if this fails? */ 13062 } 13063 return; 13064 } 13065 13066} 13067 13068static int 13069ctl_process_done(union ctl_io *io) 13070{ 13071 struct ctl_lun *lun; 13072 struct ctl_softc *softc = control_softc; 13073 void (*fe_done)(union ctl_io *io); 13074 uint32_t targ_port = ctl_port_idx(io->io_hdr.nexus.targ_port); 13075 13076 CTL_DEBUG_PRINT(("ctl_process_done\n")); 13077 13078 fe_done = softc->ctl_ports[targ_port]->fe_done; 13079 13080#ifdef CTL_TIME_IO 13081 if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { 13082 char str[256]; 13083 char path_str[64]; 13084 struct sbuf sb; 13085 13086 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 13087 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 13088 13089 sbuf_cat(&sb, path_str); 13090 switch (io->io_hdr.io_type) { 13091 case CTL_IO_SCSI: 13092 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 13093 sbuf_printf(&sb, "\n"); 13094 sbuf_cat(&sb, path_str); 13095 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 13096 io->scsiio.tag_num, io->scsiio.tag_type); 13097 break; 13098 case CTL_IO_TASK: 13099 sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " 13100 "Tag Type: %d\n", io->taskio.task_action, 13101 io->taskio.tag_num, io->taskio.tag_type); 13102 break; 13103 default: 13104 printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 13105 panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); 13106 break; 13107 } 13108 sbuf_cat(&sb, path_str); 13109 sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", 13110 (intmax_t)time_uptime - io->io_hdr.start_time); 13111 sbuf_finish(&sb); 13112 printf("%s", sbuf_data(&sb)); 13113 } 13114#endif /* CTL_TIME_IO */ 13115 13116 switch (io->io_hdr.io_type) { 13117 case CTL_IO_SCSI: 13118 break; 13119 case CTL_IO_TASK: 13120 if (ctl_debug & CTL_DEBUG_INFO) 13121 
ctl_io_error_print(io, NULL); 13122 if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) 13123 ctl_free_io(io); 13124 else 13125 fe_done(io); 13126 return (CTL_RETVAL_COMPLETE); 13127 default: 13128 panic("ctl_process_done: invalid io type %d\n", 13129 io->io_hdr.io_type); 13130 break; /* NOTREACHED */ 13131 } 13132 13133 lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13134 if (lun == NULL) { 13135 CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", 13136 io->io_hdr.nexus.targ_mapped_lun)); 13137 goto bailout; 13138 } 13139 13140 mtx_lock(&lun->lun_lock); 13141 13142 /* 13143 * Check to see if we have any errors to inject here. We only 13144 * inject errors for commands that don't already have errors set. 13145 */ 13146 if ((STAILQ_FIRST(&lun->error_list) != NULL) && 13147 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) && 13148 ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0)) 13149 ctl_inject_error(lun, io); 13150 13151 /* 13152 * XXX KDM how do we treat commands that aren't completed 13153 * successfully? 13154 * 13155 * XXX KDM should we also track I/O latency? 
13156 */ 13157 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS && 13158 io->io_hdr.io_type == CTL_IO_SCSI) { 13159#ifdef CTL_TIME_IO 13160 struct bintime cur_bt; 13161#endif 13162 int type; 13163 13164 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 13165 CTL_FLAG_DATA_IN) 13166 type = CTL_STATS_READ; 13167 else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == 13168 CTL_FLAG_DATA_OUT) 13169 type = CTL_STATS_WRITE; 13170 else 13171 type = CTL_STATS_NO_IO; 13172 13173 lun->stats.ports[targ_port].bytes[type] += 13174 io->scsiio.kern_total_len; 13175 lun->stats.ports[targ_port].operations[type]++; 13176#ifdef CTL_TIME_IO 13177 bintime_add(&lun->stats.ports[targ_port].dma_time[type], 13178 &io->io_hdr.dma_bt); 13179 lun->stats.ports[targ_port].num_dmas[type] += 13180 io->io_hdr.num_dmas; 13181 getbintime(&cur_bt); 13182 bintime_sub(&cur_bt, &io->io_hdr.start_bt); 13183 bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt); 13184#endif 13185 } 13186 13187 /* 13188 * Remove this from the OOA queue. 13189 */ 13190 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); 13191#ifdef CTL_TIME_IO 13192 if (TAILQ_EMPTY(&lun->ooa_queue)) 13193 lun->last_busy = getsbinuptime(); 13194#endif 13195 13196 /* 13197 * Run through the blocked queue on this LUN and see if anything 13198 * has become unblocked, now that this transaction is done. 13199 */ 13200 ctl_check_blocked(lun); 13201 13202 /* 13203 * If the LUN has been invalidated, free it if there is nothing 13204 * left on its OOA queue. 13205 */ 13206 if ((lun->flags & CTL_LUN_INVALID) 13207 && TAILQ_EMPTY(&lun->ooa_queue)) { 13208 mtx_unlock(&lun->lun_lock); 13209 mtx_lock(&softc->ctl_lock); 13210 ctl_free_lun(lun); 13211 mtx_unlock(&softc->ctl_lock); 13212 } else 13213 mtx_unlock(&lun->lun_lock); 13214 13215bailout: 13216 13217 /* 13218 * If this command has been aborted, make sure we set the status 13219 * properly. 
The FETD is responsible for freeing the I/O and doing 13220 * whatever it needs to do to clean up its state. 13221 */ 13222 if (io->io_hdr.flags & CTL_FLAG_ABORT) 13223 ctl_set_task_aborted(&io->scsiio); 13224 13225 /* 13226 * If enabled, print command error status. 13227 */ 13228 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS && 13229 (ctl_debug & CTL_DEBUG_INFO) != 0) 13230 ctl_io_error_print(io, NULL); 13231 13232 /* 13233 * Tell the FETD or the other shelf controller we're done with this 13234 * command. Note that only SCSI commands get to this point. Task 13235 * management commands are completed above. 13236 * 13237 * We only send status to the other controller if we're in XFER 13238 * mode. In SER_ONLY mode, the I/O is done on the controller that 13239 * received the I/O (from CTL's perspective), and so the status is 13240 * generated there. 13241 * 13242 * XXX KDM if we hold the lock here, we could cause a deadlock 13243 * if the frontend comes back in in this context to queue 13244 * something. 13245 */ 13246 if ((softc->ha_mode == CTL_HA_MODE_XFER) 13247 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 13248 union ctl_ha_msg msg; 13249 13250 memset(&msg, 0, sizeof(msg)); 13251 msg.hdr.msg_type = CTL_MSG_FINISH_IO; 13252 msg.hdr.original_sc = io->io_hdr.original_sc; 13253 msg.hdr.nexus = io->io_hdr.nexus; 13254 msg.hdr.status = io->io_hdr.status; 13255 msg.scsi.scsi_status = io->scsiio.scsi_status; 13256 msg.scsi.tag_num = io->scsiio.tag_num; 13257 msg.scsi.tag_type = io->scsiio.tag_type; 13258 msg.scsi.sense_len = io->scsiio.sense_len; 13259 msg.scsi.sense_residual = io->scsiio.sense_residual; 13260 msg.scsi.residual = io->scsiio.residual; 13261 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 13262 sizeof(io->scsiio.sense_data)); 13263 /* 13264 * We copy this whether or not this is an I/O-related 13265 * command. Otherwise, we'd have to go and check to see 13266 * whether it's a read/write command, and it really isn't 13267 * worth it. 
13268 */ 13269 memcpy(&msg.scsi.lbalen, 13270 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 13271 sizeof(msg.scsi.lbalen)); 13272 13273 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 13274 sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) { 13275 /* XXX do something here */ 13276 } 13277 13278 ctl_free_io(io); 13279 } else 13280 fe_done(io); 13281 13282 return (CTL_RETVAL_COMPLETE); 13283} 13284 13285#ifdef CTL_WITH_CA 13286/* 13287 * Front end should call this if it doesn't do autosense. When the request 13288 * sense comes back in from the initiator, we'll dequeue this and send it. 13289 */ 13290int 13291ctl_queue_sense(union ctl_io *io) 13292{ 13293 struct ctl_lun *lun; 13294 struct ctl_port *port; 13295 struct ctl_softc *softc; 13296 uint32_t initidx, targ_lun; 13297 13298 softc = control_softc; 13299 13300 CTL_DEBUG_PRINT(("ctl_queue_sense\n")); 13301 13302 /* 13303 * LUN lookup will likely move to the ctl_work_thread() once we 13304 * have our new queueing infrastructure (that doesn't put things on 13305 * a per-LUN queue initially). That is so that we can handle 13306 * things like an INQUIRY to a LUN that we don't have enabled. We 13307 * can't deal with that right now. 13308 */ 13309 mtx_lock(&softc->ctl_lock); 13310 13311 /* 13312 * If we don't have a LUN for this, just toss the sense 13313 * information. 13314 */ 13315 port = ctl_io_port(&ctsio->io_hdr); 13316 targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); 13317 if ((targ_lun < CTL_MAX_LUNS) 13318 && (softc->ctl_luns[targ_lun] != NULL)) 13319 lun = softc->ctl_luns[targ_lun]; 13320 else 13321 goto bailout; 13322 13323 initidx = ctl_get_initindex(&io->io_hdr.nexus); 13324 13325 mtx_lock(&lun->lun_lock); 13326 /* 13327 * Already have CA set for this LUN...toss the sense information. 
13328 */ 13329 if (ctl_is_set(lun->have_ca, initidx)) { 13330 mtx_unlock(&lun->lun_lock); 13331 goto bailout; 13332 } 13333 13334 memcpy(&lun->pending_sense[initidx], &io->scsiio.sense_data, 13335 MIN(sizeof(lun->pending_sense[initidx]), 13336 sizeof(io->scsiio.sense_data))); 13337 ctl_set_mask(lun->have_ca, initidx); 13338 mtx_unlock(&lun->lun_lock); 13339 13340bailout: 13341 mtx_unlock(&softc->ctl_lock); 13342 13343 ctl_free_io(io); 13344 13345 return (CTL_RETVAL_COMPLETE); 13346} 13347#endif 13348 13349/* 13350 * Primary command inlet from frontend ports. All SCSI and task I/O 13351 * requests must go through this function. 13352 */ 13353int 13354ctl_queue(union ctl_io *io) 13355{ 13356 struct ctl_port *port; 13357 13358 CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0])); 13359 13360#ifdef CTL_TIME_IO 13361 io->io_hdr.start_time = time_uptime; 13362 getbintime(&io->io_hdr.start_bt); 13363#endif /* CTL_TIME_IO */ 13364 13365 /* Map FE-specific LUN ID into global one. */ 13366 port = ctl_io_port(&io->io_hdr); 13367 io->io_hdr.nexus.targ_mapped_lun = 13368 ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); 13369 13370 switch (io->io_hdr.io_type) { 13371 case CTL_IO_SCSI: 13372 case CTL_IO_TASK: 13373 if (ctl_debug & CTL_DEBUG_CDB) 13374 ctl_io_print(io); 13375 ctl_enqueue_incoming(io); 13376 break; 13377 default: 13378 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type); 13379 return (EINVAL); 13380 } 13381 13382 return (CTL_RETVAL_COMPLETE); 13383} 13384 13385#ifdef CTL_IO_DELAY 13386static void 13387ctl_done_timer_wakeup(void *arg) 13388{ 13389 union ctl_io *io; 13390 13391 io = (union ctl_io *)arg; 13392 ctl_done(io); 13393} 13394#endif /* CTL_IO_DELAY */ 13395 13396void 13397ctl_done(union ctl_io *io) 13398{ 13399 13400 /* 13401 * Enable this to catch duplicate completion issues. 
	 */
#if 0
	if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) {
		printf("%s: type %d msg %d cdb %x iptl: "
		       "%d:%d:%d:%d tag 0x%04x "
		       "flag %#x status %x\n",
		       __func__,
		       io->io_hdr.io_type,
		       io->io_hdr.msg_type,
		       io->scsiio.cdb[0],
		       io->io_hdr.nexus.initid.id,
		       io->io_hdr.nexus.targ_port,
		       io->io_hdr.nexus.targ_target.id,
		       io->io_hdr.nexus.targ_lun,
		       (io->io_hdr.io_type ==
			CTL_IO_TASK) ?
		       io->taskio.tag_num :
		       io->scsiio.tag_num,
		       io->io_hdr.flags,
		       io->io_hdr.status);
	} else
		io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE;
#endif

	/*
	 * This is an internal copy of an I/O, and should not go through
	 * the normal done processing logic.
	 */
	if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
		return;

	/*
	 * We need to send a msg to the serializing shelf to finish the IO
	 * as well.  We don't send a finish message to the other shelf if
	 * this is a task management command.  Task management commands
	 * aren't serialized in the OOA queue, but rather just executed on
	 * both shelf controllers for commands that originated on that
	 * controller.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)
	 && (io->io_hdr.io_type != CTL_IO_TASK)) {
		union ctl_ha_msg msg_io;

		msg_io.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg_io.hdr.serializing_sc = io->io_hdr.serializing_sc;
		/* Send failure is deliberately ignored (best effort). */
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_io,
				    sizeof(msg_io), 0) != CTL_HA_STATUS_SUCCESS) {
		}
		/* continue on to finish IO */
	}
#ifdef CTL_IO_DELAY
	if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
		/* Second pass after the delay callout fired: just clear
		 * the flag and fall through to normal completion. */
		struct ctl_lun *lun;

		lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

		io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
	} else {
		struct ctl_lun *lun;

		lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

		if ((lun != NULL)
		 && (lun->delay_info.done_delay > 0)) {
			struct callout *callout;

			/* The callout lives in scratch space in the header. */
			callout = (struct callout *)&io->io_hdr.timer_bytes;
			callout_init(callout, /*mpsafe*/ 1);
			io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
			callout_reset(callout,
				      lun->delay_info.done_delay * hz,
				      ctl_done_timer_wakeup, io);
			if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
				lun->delay_info.done_delay = 0;
			return;
		}
	}
#endif /* CTL_IO_DELAY */

	ctl_enqueue_done(io);
}

/*
 * Forward an inter-shelf-controller SCSI I/O to the LUN's backend.
 * Returns the backend's data_submit() status.
 */
int
ctl_isc(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	int retval;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	CTL_DEBUG_PRINT(("ctl_isc: command: %02x\n", ctsio->cdb[0]));

	CTL_DEBUG_PRINT(("ctl_isc: calling data_submit()\n"));

	retval = lun->backend->data_submit((union ctl_io *)ctsio);

	return (retval);
}


/*
 * Main worker thread loop: drains the per-thread ISC, done, incoming and
 * RtR queues (in that priority order) and sleeps when all are empty.
 */
static void
ctl_work_thread(void *arg)
{
	struct ctl_thread *thr = (struct ctl_thread *)arg;
	struct ctl_softc *softc = thr->ctl_softc;
	union ctl_io *io;
	int retval;

	CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));

	for (;;) {
		retval = 0;

		/*
		 * We handle the queues in this order:
		 * - ISC
		 * - done queue (to free up resources, unblock other commands)
		 * - RtR queue
		 * - incoming queue
		 *
		 * If those queues are empty, we break out of the loop and
		 * go to sleep.
		 */
		mtx_lock(&thr->queue_lock);
		io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->isc_queue, links);
			mtx_unlock(&thr->queue_lock);
			ctl_handle_isc(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->done_queue, links);
			/* clear any blocked commands, call fe_done */
			mtx_unlock(&thr->queue_lock);
			retval = ctl_process_done(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->incoming_queue, links);
			mtx_unlock(&thr->queue_lock);
			if (io->io_hdr.io_type == CTL_IO_TASK)
				ctl_run_task(io);
			else
				ctl_scsiio_precheck(softc, &io->scsiio);
			continue;
		}
		/* RtR processing can be paused globally (e.g. during HA
		 * quiesce via ctl_isc_quiesce()). */
		if (!ctl_pause_rtr) {
			io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
			if (io != NULL) {
				STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
				mtx_unlock(&thr->queue_lock);
				retval = ctl_scsiio(&io->scsiio);
				if (retval != CTL_RETVAL_COMPLETE)
					CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
				continue;
			}
		}

		/* Sleep until we have something to do.  PDROP releases
		 * queue_lock, which is still held on this path. */
		mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0);
	}
}

/*
 * LUN creation thread: drains the pending-LUN queue, instantiating each
 * backend LUN via ctl_create_lun(), then sleeps for more work.
 */
static void
ctl_lun_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_be_lun *be_lun;
	int retval;

	CTL_DEBUG_PRINT(("ctl_lun_thread starting\n"));

	for (;;) {
		retval = 0;
		mtx_lock(&softc->ctl_lock);
		be_lun = STAILQ_FIRST(&softc->pending_lun_queue);
		if (be_lun != NULL) {
			STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links);
			mtx_unlock(&softc->ctl_lock);
			ctl_create_lun(be_lun);
			continue;
		}

		/* Sleep until we have something to do.  PDROP drops
		 * ctl_lock, held on this path. */
		mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock,
		    PDROP | PRIBIO, "-", 0);
	}
}

/*
 * Thin-provisioning threshold monitor: periodically polls each LUN's
 * backend resource counters against the mode-page thresholds and raises
 * or clears the THIN PROVISIONING SOFT THRESHOLD REACHED unit attention.
 */
static void
ctl_thresh_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_lun *lun;
	struct ctl_be_lun *be_lun;
	struct scsi_da_rw_recovery_page *rwpage;
	struct ctl_logical_block_provisioning_page *page;
	const char *attr;
	uint64_t thres, val;
	int i, e;

	CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));

	for (;;) {
		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(lun, &softc->lun_list, links) {
			be_lun = lun->be_lun;
			/* Skip LUNs that can't or shouldn't report. */
			if ((lun->flags & CTL_LUN_DISABLED) ||
			    (lun->flags & CTL_LUN_OFFLINE) ||
			    lun->backend->lun_attr == NULL)
				continue;
			rwpage = &lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT];
			if ((rwpage->byte8 & SMS_RWER_LBPERE) == 0)
				continue;
			e = 0;
			page = &lun->mode_pages.lbp_page[CTL_PAGE_CURRENT];
			for (i = 0; i < CTL_NUM_LBP_THRESH; i++) {
				if ((page->descr[i].flags & SLBPPD_ENABLED) == 0)
					continue;
				thres = scsi_4btoul(page->descr[i].count);
				thres <<= CTL_LBP_EXPONENT;
				/* Map the descriptor's resource code to the
				 * backend attribute name to query. */
				switch (page->descr[i].resource) {
				case 0x01:
					attr = "blocksavail";
					break;
				case 0x02:
					attr = "blocksused";
					break;
				case 0xf1:
					attr = "poolblocksavail";
					break;
				case 0xf2:
					attr = "poolblocksused";
					break;
				default:
					continue;
				}
				mtx_unlock(&softc->ctl_lock); // XXX
				val = lun->backend->lun_attr(
				    lun->be_lun->be_lun, attr);
				mtx_lock(&softc->ctl_lock);
				/* UINT64_MAX means the backend couldn't
				 * report this attribute. */
				if (val == UINT64_MAX)
					continue;
				if ((page->descr[i].flags & SLBPPD_ARMING_MASK)
				    == SLBPPD_ARMING_INC)
					e |= (val >= thres);
				else
					e |= (val <= thres);
			}
			mtx_lock(&lun->lun_lock);
			if (e) {
				/* Re-arm the UA at most once per period. */
				if (lun->lasttpt == 0 ||
				    time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) {
					lun->lasttpt = time_uptime;
					ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
				}
			} else {
				lun->lasttpt = 0;
				ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
			}
			mtx_unlock(&lun->lun_lock);
		}
		mtx_unlock(&softc->ctl_lock);
		pause("-", CTL_LBP_PERIOD * hz);
	}
}

/*
 * Queue a new I/O to a worker thread.  The thread is chosen by hashing
 * port and initiator so a nexus always maps to the same thread.
 */
static void
ctl_enqueue_incoming(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;
	u_int idx;

	idx = (io->io_hdr.nexus.targ_port * 127 +
	       io->io_hdr.nexus.initid.id) % worker_threads;
	thr = &softc->threads[idx];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/*
 * Queue a ready-to-run I/O; keyed by mapped LUN so all I/O for a LUN is
 * serviced by the same worker thread.
 */
static void
ctl_enqueue_rtr(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/*
 * Queue a completed I/O for final processing (ctl_process_done()); same
 * per-LUN thread selection as ctl_enqueue_rtr().
 */
static void
ctl_enqueue_done(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

#ifdef notyet
/* Queue an inter-shelf-controller I/O to its per-LUN worker thread. */
static void
ctl_enqueue_isc(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/* Initialization and failover */

void
ctl_init_isc_msg(void)
{
	printf("CTL: Still calling this thing\n");
}

/*
 * Init component
 * 	Initializes component into configuration defined by bootMode
 *	(see hasc-sv.c)
 *	returns hasc_Status:
 *		OK
 *		ERROR - fatal error
 */
static ctl_ha_comp_status
ctl_isc_init(struct ctl_ha_component *c)
{
	ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;

	c->status = ret;
	return ret;
}

/* Start component
 * 	Starts component in state requested. If component starts successfully,
 *	it must set its own state to the requestrd state
 *	When requested state is HASC_STATE_HA, the component may refine it
 * 	by adding _SLAVE or _MASTER flags.
 *	Currently allowed state transitions are:
 *	UNKNOWN->HA		- initial startup
 *	UNKNOWN->SINGLE - initial startup when no parter detected
 *	HA->SINGLE		- failover
 * returns ctl_ha_comp_status:
 * 		OK	- component successfully started in requested state
 *		FAILED  - could not start the requested state, failover may
 * 			  be possible
 *		ERROR	- fatal error detected, no future startup possible
 */
static ctl_ha_comp_status
ctl_isc_start(struct ctl_ha_component *c, ctl_ha_state state)
{
	ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;

	printf("%s: go\n", __func__);

	// UNKNOWN->HA or UNKNOWN->SINGLE (bootstrap)
	if (c->state == CTL_HA_STATE_UNKNOWN ) {
		control_softc->is_single = 0;
		if (ctl_ha_msg_create(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
		    != CTL_HA_STATUS_SUCCESS) {
			printf("ctl_isc_start: ctl_ha_msg_create failed.\n");
			ret = CTL_HA_COMP_STATUS_ERROR;
		}
	} else if (CTL_HA_STATE_IS_HA(c->state)
		&& CTL_HA_STATE_IS_SINGLE(state)){
		// HA->SINGLE transition
		ctl_failover();
		control_softc->is_single = 1;
	} else {
		printf("ctl_isc_start:Invalid state transition %X->%X\n",
		       c->state, state);
		ret = CTL_HA_COMP_STATUS_ERROR;
	}
	if (CTL_HA_STATE_IS_SINGLE(state))
		control_softc->is_single = 1;

	c->state = state;
	c->status = ret;
	return ret;
}

/*
 * Quiesce component
 * The component must clear any error conditions (set status to OK) and
 * prepare itself to another Start call
 * returns ctl_ha_comp_status:
 * 	OK
 *	ERROR
 */
static ctl_ha_comp_status
ctl_isc_quiesce(struct ctl_ha_component *c)
{
	int ret = CTL_HA_COMP_STATUS_OK;

	/* Pausing RtR processing idles the worker threads' RtR queues. */
	ctl_pause_rtr = 1;
	c->status = ret;
	return ret;
}

struct ctl_ha_component ctl_ha_component_ctlisc =
{
	.name = "CTL ISC",
	.state = CTL_HA_STATE_UNKNOWN,
	.init = ctl_isc_init,
	.start = ctl_isc_start,
	.quiesce = ctl_isc_quiesce
};
#endif

/*
 * vim: ts=8
 */