ctl.c revision 288745
1/*- 2 * Copyright (c) 2003-2009 Silicon Graphics International Corp. 3 * Copyright (c) 2012 The FreeBSD Foundation 4 * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org> 5 * All rights reserved. 6 * 7 * Portions of this software were developed by Edward Tomasz Napierala 8 * under sponsorship from the FreeBSD Foundation. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions, and the following disclaimer, 15 * without modification. 16 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 17 * substantially similar to the "NO WARRANTY" disclaimer below 18 * ("Disclaimer") and any redistribution must be conditioned upon 19 * including a substantially similar Disclaimer requirement for further 20 * binary redistribution. 21 * 22 * NO WARRANTY 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 27 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 32 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 * POSSIBILITY OF SUCH DAMAGES. 34 * 35 * $Id$ 36 */ 37/* 38 * CAM Target Layer, a SCSI device emulation subsystem. 
39 * 40 * Author: Ken Merry <ken@FreeBSD.org> 41 */ 42 43#define _CTL_C 44 45#include <sys/cdefs.h> 46__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl.c 288745 2015-10-05 09:08:58Z mav $"); 47 48#include <sys/param.h> 49#include <sys/systm.h> 50#include <sys/ctype.h> 51#include <sys/kernel.h> 52#include <sys/types.h> 53#include <sys/kthread.h> 54#include <sys/bio.h> 55#include <sys/fcntl.h> 56#include <sys/lock.h> 57#include <sys/module.h> 58#include <sys/mutex.h> 59#include <sys/condvar.h> 60#include <sys/malloc.h> 61#include <sys/conf.h> 62#include <sys/ioccom.h> 63#include <sys/queue.h> 64#include <sys/sbuf.h> 65#include <sys/smp.h> 66#include <sys/endian.h> 67#include <sys/sysctl.h> 68#include <vm/uma.h> 69 70#include <cam/cam.h> 71#include <cam/scsi/scsi_all.h> 72#include <cam/scsi/scsi_da.h> 73#include <cam/ctl/ctl_io.h> 74#include <cam/ctl/ctl.h> 75#include <cam/ctl/ctl_frontend.h> 76#include <cam/ctl/ctl_util.h> 77#include <cam/ctl/ctl_backend.h> 78#include <cam/ctl/ctl_ioctl.h> 79#include <cam/ctl/ctl_ha.h> 80#include <cam/ctl/ctl_private.h> 81#include <cam/ctl/ctl_debug.h> 82#include <cam/ctl/ctl_scsi_all.h> 83#include <cam/ctl/ctl_error.h> 84 85struct ctl_softc *control_softc = NULL; 86 87/* 88 * Template mode pages. 89 */ 90 91/* 92 * Note that these are default values only. The actual values will be 93 * filled in when the user does a mode sense. 
 */
/*
 * COPAN vendor-specific debug configuration subpage: defaults reported
 * by MODE SENSE.  ctl_time_io_secs defaults to CTL_TIME_IO_DEFAULT_SECS.
 */
const static struct copan_debugconf_subpage debugconf_page_default = {
	DBGCNF_PAGE_CODE | SMPH_SPF,	/* page_code */
	DBGCNF_SUBPAGE_CODE,		/* subpage */
	{(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
	 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */
	DBGCNF_VERSION,			/* page_version */
	{CTL_TIME_IO_DEFAULT_SECS>>8,
	 CTL_TIME_IO_DEFAULT_SECS>>0},	/* ctl_time_io_secs */
};

/*
 * Debug configuration subpage: changeable-bits mask.  0xff bytes mark
 * the fields an initiator may modify via MODE SELECT.
 */
const static struct copan_debugconf_subpage debugconf_page_changeable = {
	DBGCNF_PAGE_CODE | SMPH_SPF,	/* page_code */
	DBGCNF_SUBPAGE_CODE,		/* subpage */
	{(sizeof(struct copan_debugconf_subpage) - 4) >> 8,
	 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */
	0,				/* page_version */
	{0xff,0xff},			/* ctl_time_io_secs */
};

/*
 * Read/write error recovery page: defaults.  Automatic write/read
 * reallocation (AWRE/ARRE) and LBPERE are enabled.
 */
const static struct scsi_da_rw_recovery_page rw_er_page_default = {
	/*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
	/*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE,
	/*read_retry_count*/0,
	/*correction_span*/0,
	/*head_offset_count*/0,
	/*data_strobe_offset_cnt*/0,
	/*byte8*/SMS_RWER_LBPERE,
	/*write_retry_count*/0,
	/*reserved2*/0,
	/*recovery_time_limit*/{0, 0},
};

/* Read/write error recovery page: changeable-bits mask (nothing changeable). */
const static struct scsi_da_rw_recovery_page rw_er_page_changeable = {
	/*page_code*/SMS_RW_ERROR_RECOVERY_PAGE,
	/*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2,
	/*byte3*/0,
	/*read_retry_count*/0,
	/*correction_span*/0,
	/*head_offset_count*/0,
	/*data_strobe_offset_cnt*/0,
	/*byte8*/0,
	/*write_retry_count*/0,
	/*reserved2*/0,
	/*recovery_time_limit*/{0, 0},
};

/* Format device page: defaults (legacy geometry reporting). */
const static struct scsi_format_page format_page_default = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff,
			       CTL_DEFAULT_SECTORS_PER_TRACK & 0xff},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ SFP_HSEC,
	/*reserved*/ {0, 0, 0}
};

/* Format device page: changeable-bits mask (nothing changeable). */
const static struct scsi_format_page format_page_changeable = {
	/*page_code*/SMS_FORMAT_DEVICE_PAGE,
	/*page_length*/sizeof(struct scsi_format_page) - 2,
	/*tracks_per_zone*/ {0, 0},
	/*alt_sectors_per_zone*/ {0, 0},
	/*alt_tracks_per_zone*/ {0, 0},
	/*alt_tracks_per_lun*/ {0, 0},
	/*sectors_per_track*/ {0, 0},
	/*bytes_per_sector*/ {0, 0},
	/*interleave*/ {0, 0},
	/*track_skew*/ {0, 0},
	/*cylinder_skew*/ {0, 0},
	/*flags*/ 0,
	/*reserved*/ {0, 0, 0}
};

/* Rigid disk geometry page: defaults (synthetic geometry constants). */
const static struct scsi_rigid_disk_page rigid_disk_page_default = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ CTL_DEFAULT_HEADS,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ SRDP_RPL_DISABLED,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff,
			   CTL_DEFAULT_ROTATION_RATE & 0xff},
	/*reserved2*/ {0, 0}
};

/* Rigid disk geometry page: changeable-bits mask (nothing changeable). */
const static struct scsi_rigid_disk_page rigid_disk_page_changeable = {
	/*page_code*/SMS_RIGID_DISK_PAGE,
	/*page_length*/sizeof(struct scsi_rigid_disk_page) - 2,
	/*cylinders*/ {0, 0, 0},
	/*heads*/ 0,
	/*start_write_precomp*/ {0, 0, 0},
	/*start_reduced_current*/ {0, 0, 0},
	/*step_rate*/ {0, 0},
	/*landing_zone_cylinder*/ {0, 0, 0},
	/*rpl*/ 0,
	/*rotational_offset*/ 0,
	/*reserved1*/ 0,
	/*rotation_rate*/ {0, 0},
	/*reserved2*/ {0, 0}
};

/* Caching page: defaults (write cache enabled, discontinuity bit set). */
const static struct scsi_caching_page caching_page_default = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_DISC | SCP_WCE,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0xff, 0xff},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0xff, 0xff},
	/*max_pf_ceiling*/ {0xff, 0xff},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

/* Caching page: changeable-bits mask (WCE and RCD may be toggled). */
const static struct scsi_caching_page caching_page_changeable = {
	/*page_code*/SMS_CACHING_PAGE,
	/*page_length*/sizeof(struct scsi_caching_page) - 2,
	/*flags1*/ SCP_WCE | SCP_RCD,
	/*ret_priority*/ 0,
	/*disable_pf_transfer_len*/ {0, 0},
	/*min_prefetch*/ {0, 0},
	/*max_prefetch*/ {0, 0},
	/*max_pf_ceiling*/ {0, 0},
	/*flags2*/ 0,
	/*cache_segments*/ 0,
	/*cache_seg_size*/ {0, 0},
	/*reserved*/ 0,
	/*non_cache_seg_size*/ {0, 0, 0}
};

/* Control mode page: defaults (restricted queue algorithm, TAS set). */
const static struct scsi_control_page control_page_default = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/0,
	/*queue_flags*/SCP_QUEUE_ALG_RESTRICTED,
	/*eca_and_aen*/0,
	/*flags4*/SCP_TAS,
	/*aen_holdoff_period*/{0, 0},
	/*busy_timeout_period*/{0, 0},
	/*extended_selftest_completion_time*/{0, 0}
};

/* Control mode page: changeable-bits mask (DSENSE, queue alg, SWP). */
const static struct scsi_control_page control_page_changeable = {
	/*page_code*/SMS_CONTROL_MODE_PAGE,
	/*page_length*/sizeof(struct scsi_control_page) - 2,
	/*rlec*/SCP_DSENSE,
	/*queue_flags*/SCP_QUEUE_ALG_MASK,
	/*eca_and_aen*/SCP_SWP,
	/*flags4*/0,
	/*aen_holdoff_period*/{0, 0},
	/*busy_timeout_period*/{0, 0},
	/*extended_selftest_completion_time*/{0, 0}
};

/* Informational exceptions page: defaults (exceptions disabled, DEXCPT). */
const static struct scsi_info_exceptions_page ie_page_default = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
	/*info_flags*/SIEP_FLAGS_DEXCPT,
	/*mrie*/0,
	/*interval_timer*/{0, 0, 0, 0},
	/*report_count*/{0, 0, 0, 0}
};

/* Informational exceptions page: changeable-bits mask (nothing changeable). */
const static struct scsi_info_exceptions_page ie_page_changeable = {
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE,
	/*page_length*/sizeof(struct scsi_info_exceptions_page) - 2,
	/*info_flags*/0,
	/*mrie*/0,
	/*interval_timer*/{0, 0, 0, 0},
	/*report_count*/{0, 0, 0, 0}
};

/* Payload length of the Logical Block Provisioning mode subpage. */
#define CTL_LBPM_LEN	(sizeof(struct ctl_logical_block_provisioning_page) - 4)

/*
 * Logical Block Provisioning subpage (0x1C/0x02): defaults, with four
 * threshold descriptors (resources 0x01, 0x02, 0xf1, 0xf2).
 */
const static struct ctl_logical_block_provisioning_page lbp_page_default = {{
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
	/*flags*/0,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}},
	{{/*flags*/0,
	  /*resource*/0x01,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0x02,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0xf1,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0xf2,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

/* Logical Block Provisioning subpage: changeable-bits mask. */
const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{
	/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
	/*subpage_code*/0x02,
	/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
	/*flags*/0,
	/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
	/*descr*/{}},
	{{/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0,
	  /*resource*/0,
	  /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
/*
 * Worker thread count; -1 by default.  NOTE(review): presumably a
 * sentinel meaning "choose automatically" -- confirm in ctl_init().
 */
static int worker_threads = -1;
TUNABLE_INT("kern.cam.ctl.worker_threads", &worker_threads);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
    &worker_threads, 1, "Number of worker threads");
/* Debug flag bitmask; runtime-tunable via kern.cam.ctl.debug. */
static int ctl_debug = CTL_DEBUG_NONE;
TUNABLE_INT("kern.cam.ctl.debug", &ctl_debug);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN,
    &ctl_debug, 0, "Enabled debug flags");

/*
 * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87),
 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0),
 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2)
 */
#define SCSI_EVPD_NUM_SUPPORTED_PAGES	10

/* Forward declarations for this translation unit. */
static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event,
				  int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest);
static int ctl_init(void);
void ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
			      struct ctl_ooa *ooa_hdr,
			      struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
		     struct thread *td);
static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
			 struct ctl_be_lun *be_lun);
static int ctl_free_lun(struct ctl_lun *lun);
static void ctl_create_lun(struct ctl_be_lun *be_lun);
static struct ctl_port * ctl_io_port(struct ctl_io_hdr *io_hdr);

static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
			   uint64_t res_key, uint64_t sa_res_key,
			   uint8_t type, uint32_t residx,
			   struct ctl_scsiio *ctsio,
			   struct scsi_per_res_out *cdb,
			   struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
				  union ctl_ha_msg *msg);
static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
				       int alloc_len);
static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
					 int alloc_len);
static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len);
static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2,
    bool seq);
static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(struct ctl_lun *lun,
    union ctl_io *pending_io, union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
    union ctl_io *starting_io);
static int ctl_check_blocked(struct ctl_lun *lun);
static int ctl_scsiio_lun_check(struct ctl_lun *lun,
    const struct ctl_cmd_entry *entry,
    struct ctl_scsiio *ctsio);
static void ctl_failover_lun(struct ctl_lun *lun);
static void ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua);
static void ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua);
static void ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua);
static void ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua);
static void ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx,
    ctl_ua_type ua_type);
static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
    struct ctl_scsiio *ctsio);
static int ctl_scsiio(struct ctl_scsiio *ctsio);

static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
    ctl_ua_type ua_type);
static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io,
    ctl_ua_type ua_type);
static int ctl_abort_task(union ctl_io *io);
static int ctl_abort_task_set(union ctl_io *io);
static int ctl_i_t_nexus_reset(union ctl_io *io);
static void ctl_run_task(union ctl_io *io);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
static void ctl_done_timer_wakeup(void *arg);
#endif /* CTL_IO_DELAY */

static void ctl_send_datamove_done(union ctl_io *io, int have_lock);
static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_dm_write_cb(union ctl_io *io);
static void ctl_datamove_remote_write(union ctl_io *io);
static int ctl_datamove_remote_dm_read_cb(union ctl_io *io);
static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq);
static int ctl_datamove_remote_sgl_setup(union ctl_io *io);
static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
    ctl_ha_dt_cb callback);
static void ctl_datamove_remote_read(union ctl_io *io);
static void ctl_datamove_remote(union ctl_io *io);
static int ctl_process_done(union ctl_io *io);
static void ctl_lun_thread(void *arg);
static void ctl_thresh_thread(void *arg);
static void ctl_work_thread(void *arg);
static void ctl_enqueue_incoming(union ctl_io *io);
static void ctl_enqueue_rtr(union ctl_io *io);
static void ctl_enqueue_done(union ctl_io *io);
static void ctl_enqueue_isc(union ctl_io *io);
static const struct ctl_cmd_entry *
		ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa);
static const struct ctl_cmd_entry *
		ctl_validate_command(struct ctl_scsiio *ctsio);
static int ctl_cmd_applicable(uint8_t lun_type,
    const struct ctl_cmd_entry *entry);

static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx);
static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key);

/*
 * Load the serialization table.  This isn't very pretty, but is probably
 * the easiest way to do it.
 */
#include "ctl_ser_table.c"

/*
 * We only need to define open, close and ioctl routines for this driver.
 */
static struct cdevsw ctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	ctl_open,
	.d_close =	ctl_close,
	.d_ioctl =	ctl_ioctl,
	.d_name =	"ctl",
};


MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL");

static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t ctl_moduledata = {
	"ctl",
	ctl_module_event_handler,
	NULL
};

DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD);
MODULE_VERSION(ctl, 1);

/* Pseudo-frontend representing ports learned from the HA peer. */
static struct ctl_frontend ha_frontend =
{
	.name = "ha",
};

/*
 * HA completion for XFER mode: copy status and sense data from the peer's
 * CTL_MSG_FINISH_IO message back into the original I/O and queue it to
 * the ISC thread for final processing.
 */
static void
ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
			    union ctl_ha_msg *msg_info)
{
	struct ctl_scsiio *ctsio;

	if (msg_info->hdr.original_sc == NULL) {
		printf("%s: original_sc == NULL!\n", __func__);
		/* XXX KDM now what? */
		return;
	}

	ctsio = &msg_info->hdr.original_sc->scsiio;
	ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
	ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO;
	ctsio->io_hdr.status = msg_info->hdr.status;
	ctsio->scsi_status = msg_info->scsi.scsi_status;
	ctsio->sense_len = msg_info->scsi.sense_len;
	ctsio->sense_residual = msg_info->scsi.sense_residual;
	ctsio->residual = msg_info->scsi.residual;
	memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
	       msg_info->scsi.sense_len);
	memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
	       &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen));
	ctl_enqueue_isc((union ctl_io *)ctsio);
}
*/ 511 return; 512 } 513 514 ctsio = &msg_info->hdr.original_sc->scsiio; 515 ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 516 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 517 ctsio->io_hdr.status = msg_info->hdr.status; 518 ctsio->scsi_status = msg_info->scsi.scsi_status; 519 ctsio->sense_len = msg_info->scsi.sense_len; 520 ctsio->sense_residual = msg_info->scsi.sense_residual; 521 ctsio->residual = msg_info->scsi.residual; 522 memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data, 523 msg_info->scsi.sense_len); 524 memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 525 &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen)); 526 ctl_enqueue_isc((union ctl_io *)ctsio); 527} 528 529static void 530ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc, 531 union ctl_ha_msg *msg_info) 532{ 533 struct ctl_scsiio *ctsio; 534 535 if (msg_info->hdr.serializing_sc == NULL) { 536 printf("%s: serializing_sc == NULL!\n", __func__); 537 /* XXX KDM now what? */ 538 return; 539 } 540 541 ctsio = &msg_info->hdr.serializing_sc->scsiio; 542 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 543 ctl_enqueue_isc((union ctl_io *)ctsio); 544} 545 546void 547ctl_isc_announce_lun(struct ctl_lun *lun) 548{ 549 struct ctl_softc *softc = lun->ctl_softc; 550 union ctl_ha_msg *msg; 551 struct ctl_ha_msg_lun_pr_key pr_key; 552 int i, k; 553 554 if (softc->ha_link != CTL_HA_LINK_ONLINE) 555 return; 556 mtx_lock(&lun->lun_lock); 557 i = sizeof(msg->lun); 558 if (lun->lun_devid) 559 i += lun->lun_devid->len; 560 i += sizeof(pr_key) * lun->pr_key_count; 561alloc: 562 mtx_unlock(&lun->lun_lock); 563 msg = malloc(i, M_CTL, M_WAITOK); 564 mtx_lock(&lun->lun_lock); 565 k = sizeof(msg->lun); 566 if (lun->lun_devid) 567 k += lun->lun_devid->len; 568 k += sizeof(pr_key) * lun->pr_key_count; 569 if (i < k) { 570 free(msg, M_CTL); 571 i = k; 572 goto alloc; 573 } 574 bzero(&msg->lun, sizeof(msg->lun)); 575 msg->hdr.msg_type = CTL_MSG_LUN_SYNC; 576 msg->hdr.nexus.targ_lun = lun->lun; 577 
msg->hdr.nexus.targ_mapped_lun = lun->lun; 578 msg->lun.flags = lun->flags; 579 msg->lun.pr_generation = lun->PRGeneration; 580 msg->lun.pr_res_idx = lun->pr_res_idx; 581 msg->lun.pr_res_type = lun->res_type; 582 msg->lun.pr_key_count = lun->pr_key_count; 583 i = 0; 584 if (lun->lun_devid) { 585 msg->lun.lun_devid_len = lun->lun_devid->len; 586 memcpy(&msg->lun.data[i], lun->lun_devid->data, 587 msg->lun.lun_devid_len); 588 i += msg->lun.lun_devid_len; 589 } 590 for (k = 0; k < CTL_MAX_INITIATORS; k++) { 591 if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0) 592 continue; 593 pr_key.pr_iid = k; 594 memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key)); 595 i += sizeof(pr_key); 596 } 597 mtx_unlock(&lun->lun_lock); 598 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, 599 M_WAITOK); 600 free(msg, M_CTL); 601} 602 603void 604ctl_isc_announce_port(struct ctl_port *port) 605{ 606 struct ctl_softc *softc = control_softc; 607 union ctl_ha_msg *msg; 608 int i; 609 610 if (port->targ_port < softc->port_min || 611 port->targ_port >= softc->port_max || 612 softc->ha_link != CTL_HA_LINK_ONLINE) 613 return; 614 i = sizeof(msg->port) + strlen(port->port_name) + 1; 615 if (port->lun_map) 616 i += sizeof(uint32_t) * CTL_MAX_LUNS; 617 if (port->port_devid) 618 i += port->port_devid->len; 619 if (port->target_devid) 620 i += port->target_devid->len; 621 msg = malloc(i, M_CTL, M_WAITOK); 622 bzero(&msg->port, sizeof(msg->port)); 623 msg->hdr.msg_type = CTL_MSG_PORT_SYNC; 624 msg->hdr.nexus.targ_port = port->targ_port; 625 msg->port.port_type = port->port_type; 626 msg->port.physical_port = port->physical_port; 627 msg->port.virtual_port = port->virtual_port; 628 msg->port.status = port->status; 629 i = 0; 630 msg->port.name_len = sprintf(&msg->port.data[i], 631 "%d:%s", softc->ha_id, port->port_name) + 1; 632 i += msg->port.name_len; 633 if (port->lun_map) { 634 msg->port.lun_map_len = sizeof(uint32_t) * CTL_MAX_LUNS; 635 memcpy(&msg->port.data[i], port->lun_map, 636 
msg->port.lun_map_len); 637 i += msg->port.lun_map_len; 638 } 639 if (port->port_devid) { 640 msg->port.port_devid_len = port->port_devid->len; 641 memcpy(&msg->port.data[i], port->port_devid->data, 642 msg->port.port_devid_len); 643 i += msg->port.port_devid_len; 644 } 645 if (port->target_devid) { 646 msg->port.target_devid_len = port->target_devid->len; 647 memcpy(&msg->port.data[i], port->target_devid->data, 648 msg->port.target_devid_len); 649 i += msg->port.target_devid_len; 650 } 651 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, 652 M_WAITOK); 653 free(msg, M_CTL); 654} 655 656static void 657ctl_isc_ha_link_up(struct ctl_softc *softc) 658{ 659 struct ctl_port *port; 660 struct ctl_lun *lun; 661 662 STAILQ_FOREACH(port, &softc->port_list, links) 663 ctl_isc_announce_port(port); 664 STAILQ_FOREACH(lun, &softc->lun_list, links) 665 ctl_isc_announce_lun(lun); 666} 667 668static void 669ctl_isc_ha_link_down(struct ctl_softc *softc) 670{ 671 struct ctl_port *port; 672 struct ctl_lun *lun; 673 union ctl_io *io; 674 675 mtx_lock(&softc->ctl_lock); 676 STAILQ_FOREACH(lun, &softc->lun_list, links) { 677 mtx_lock(&lun->lun_lock); 678 if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) { 679 lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; 680 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 681 } 682 mtx_unlock(&lun->lun_lock); 683 684 mtx_unlock(&softc->ctl_lock); 685 io = ctl_alloc_io(softc->othersc_pool); 686 mtx_lock(&softc->ctl_lock); 687 ctl_zero_io(io); 688 io->io_hdr.msg_type = CTL_MSG_FAILOVER; 689 io->io_hdr.nexus.targ_mapped_lun = lun->lun; 690 ctl_enqueue_isc(io); 691 } 692 693 STAILQ_FOREACH(port, &softc->port_list, links) { 694 if (port->targ_port >= softc->port_min && 695 port->targ_port < softc->port_max) 696 continue; 697 port->status &= ~CTL_PORT_STATUS_ONLINE; 698 } 699 mtx_unlock(&softc->ctl_lock); 700} 701 702static void 703ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 704{ 705 struct ctl_lun *lun; 706 uint32_t iid = 
/*
 * Process a CTL_MSG_LUN_SYNC message from the HA peer: verify the LUN
 * identity (device ID must match ours), record whether the peer is
 * primary for this LUN, and -- if the peer is primary and we are not --
 * adopt its persistent-reservation state and keys.  Announces the LUN
 * back if we are primary and the peer does not yet know it.
 */
static void
ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
	struct ctl_lun *lun;
	struct ctl_ha_msg_lun_pr_key pr_key;
	int i, k;
	ctl_lun_flags oflags;
	uint32_t targ_lun;

	targ_lun = msg->hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if ((targ_lun >= CTL_MAX_LUNS) ||
	    ((lun = softc->ctl_luns[targ_lun]) == NULL)) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	/* Take lun_lock before dropping ctl_lock so the LUN can't go away. */
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (lun->flags & CTL_LUN_DISABLED) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	/* The peer's device ID must match ours byte-for-byte. */
	i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0;
	if (msg->lun.lun_devid_len != i || (i > 0 &&
	    memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) {
		mtx_unlock(&lun->lun_lock);
		printf("%s: Received conflicting HA LUN %d\n",
		    __func__, msg->hdr.nexus.targ_lun);
		return;
	} else {
		/* Record whether peer is primary. */
		oflags = lun->flags;
		if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) &&
		    (msg->lun.flags & CTL_LUN_DISABLED) == 0)
			lun->flags |= CTL_LUN_PEER_SC_PRIMARY;
		else
			lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
		if (oflags != lun->flags)
			ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);

		/* If peer is primary and we are not -- use data */
		if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
		    (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) {
			lun->PRGeneration = msg->lun.pr_generation;
			lun->pr_res_idx = msg->lun.pr_res_idx;
			lun->res_type = msg->lun.pr_res_type;
			lun->pr_key_count = msg->lun.pr_key_count;
			/* Replace all local PR keys with the peer's set. */
			for (k = 0; k < CTL_MAX_INITIATORS; k++)
				ctl_clr_prkey(lun, k);
			/* i still indexes past the devid in msg->lun.data. */
			for (k = 0; k < msg->lun.pr_key_count; k++) {
				memcpy(&pr_key, &msg->lun.data[i],
				    sizeof(pr_key));
				ctl_alloc_prkey(lun, pr_key.pr_iid);
				ctl_set_prkey(lun, pr_key.pr_iid,
				    pr_key.pr_key);
				i += sizeof(pr_key);
			}
		}

		mtx_unlock(&lun->lun_lock);
		CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n",
		    __func__, msg->hdr.nexus.targ_lun,
		    (msg->lun.flags & CTL_LUN_PRIMARY_SC) ?
		    "primary" : "secondary"));

		/* If we are primary but peer doesn't know -- notify */
		if ((lun->flags & CTL_LUN_PRIMARY_SC) &&
		    (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0)
			ctl_isc_announce_lun(lun);
	}
}
792 "primary" : "secondary")); 793 794 /* If we are primary but peer doesn't know -- notify */ 795 if ((lun->flags & CTL_LUN_PRIMARY_SC) && 796 (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0) 797 ctl_isc_announce_lun(lun); 798 } 799} 800 801static void 802ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) 803{ 804 struct ctl_port *port; 805 int i, new; 806 807 port = softc->ctl_ports[msg->hdr.nexus.targ_port]; 808 if (port == NULL) { 809 CTL_DEBUG_PRINT(("%s: New port %d\n", __func__, 810 msg->hdr.nexus.targ_port)); 811 new = 1; 812 port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO); 813 port->frontend = &ha_frontend; 814 port->targ_port = msg->hdr.nexus.targ_port; 815 } else if (port->frontend == &ha_frontend) { 816 CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__, 817 msg->hdr.nexus.targ_port)); 818 new = 0; 819 } else { 820 printf("%s: Received conflicting HA port %d\n", 821 __func__, msg->hdr.nexus.targ_port); 822 return; 823 } 824 port->port_type = msg->port.port_type; 825 port->physical_port = msg->port.physical_port; 826 port->virtual_port = msg->port.virtual_port; 827 port->status = msg->port.status; 828 i = 0; 829 free(port->port_name, M_CTL); 830 port->port_name = strndup(&msg->port.data[i], msg->port.name_len, 831 M_CTL); 832 i += msg->port.name_len; 833 if (msg->port.lun_map_len != 0) { 834 if (port->lun_map == NULL) 835 port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS, 836 M_CTL, M_WAITOK); 837 memcpy(port->lun_map, &msg->port.data[i], 838 sizeof(uint32_t) * CTL_MAX_LUNS); 839 i += msg->port.lun_map_len; 840 } else { 841 free(port->lun_map, M_CTL); 842 port->lun_map = NULL; 843 } 844 if (msg->port.port_devid_len != 0) { 845 if (port->port_devid == NULL || 846 port->port_devid->len != msg->port.port_devid_len) { 847 free(port->port_devid, M_CTL); 848 port->port_devid = malloc(sizeof(struct ctl_devid) + 849 msg->port.port_devid_len, M_CTL, M_WAITOK); 850 } 851 memcpy(port->port_devid->data, &msg->port.data[i], 852 
msg->port.port_devid_len); 853 port->port_devid->len = msg->port.port_devid_len; 854 i += msg->port.port_devid_len; 855 } else { 856 free(port->port_devid, M_CTL); 857 port->port_devid = NULL; 858 } 859 if (msg->port.target_devid_len != 0) { 860 if (port->target_devid == NULL || 861 port->target_devid->len != msg->port.target_devid_len) { 862 free(port->target_devid, M_CTL); 863 port->target_devid = malloc(sizeof(struct ctl_devid) + 864 msg->port.target_devid_len, M_CTL, M_WAITOK); 865 } 866 memcpy(port->target_devid->data, &msg->port.data[i], 867 msg->port.target_devid_len); 868 port->target_devid->len = msg->port.target_devid_len; 869 i += msg->port.target_devid_len; 870 } else { 871 free(port->port_devid, M_CTL); 872 port->port_devid = NULL; 873 } 874 if (new) { 875 if (ctl_port_register(port) != 0) { 876 printf("%s: ctl_port_register() failed with error\n", 877 __func__); 878 } 879 } 880} 881 882/* 883 * ISC (Inter Shelf Communication) event handler. Events from the HA 884 * subsystem come in here. 
 */
static void
ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
{
	struct ctl_softc *softc;
	union ctl_io *io;
	struct ctl_prio *presio;
	ctl_ha_status isc_status;

	softc = control_softc;
	CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event));
	if (event == CTL_HA_EVT_MSG_RECV) {
		union ctl_ha_msg *msg, msgbuf;

		/*
		 * Messages larger than the on-stack buffer get a temporary
		 * heap allocation; the common (small) case avoids malloc.
		 */
		if (param > sizeof(msgbuf))
			msg = malloc(param, M_CTL, M_WAITOK);
		else
			msg = &msgbuf;
		isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param,
		    M_WAITOK);
		if (isc_status != CTL_HA_STATUS_SUCCESS) {
			printf("%s: Error receiving message: %d\n",
			    __func__, isc_status);
			if (msg != &msgbuf)
				free(msg, M_CTL);
			return;
		}

		CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->msg_type));
		switch (msg->hdr.msg_type) {
		/*
		 * A command arriving from the peer SC that we must
		 * serialize (and, in XFER mode, execute) locally.
		 */
		case CTL_MSG_SERIALIZE:
			io = ctl_alloc_io(softc->othersc_pool);
			ctl_zero_io(io);
			// populate ctsio from msg
			io->io_hdr.io_type = CTL_IO_SCSI;
			io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
			io->io_hdr.original_sc = msg->hdr.original_sc;
			io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
					    CTL_FLAG_IO_ACTIVE;
			/*
			 * If we're in serialization-only mode, we don't
			 * want to go through full done processing.  Thus
			 * the COPY flag.
			 *
			 * XXX KDM add another flag that is more specific.
			 */
			if (softc->ha_mode != CTL_HA_MODE_XFER)
				io->io_hdr.flags |= CTL_FLAG_INT_COPY;
			io->io_hdr.nexus = msg->hdr.nexus;
#if 0
			printf("port %u, iid %u, lun %u\n",
			       io->io_hdr.nexus.targ_port,
			       io->io_hdr.nexus.initid,
			       io->io_hdr.nexus.targ_lun);
#endif
			io->scsiio.tag_num = msg->scsi.tag_num;
			io->scsiio.tag_type = msg->scsi.tag_type;
#ifdef CTL_TIME_IO
			io->io_hdr.start_time = time_uptime;
			getbintime(&io->io_hdr.start_bt);
#endif /* CTL_TIME_IO */
			io->scsiio.cdb_len = msg->scsi.cdb_len;
			/* Copies the full CDB buffer, not just cdb_len bytes. */
			memcpy(io->scsiio.cdb, msg->scsi.cdb,
			       CTL_MAX_CDBLEN);
			if (softc->ha_mode == CTL_HA_MODE_XFER) {
				const struct ctl_cmd_entry *entry;

				/*
				 * In XFER mode refresh the data-direction
				 * flags from our own command table.
				 */
				entry = ctl_get_cmd_entry(&io->scsiio, NULL);
				io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
				io->io_hdr.flags |=
					entry->flags & CTL_FLAG_DATA_MASK;
			}
			ctl_enqueue_isc(io);
			break;

		/* Performed on the Originating SC, XFER mode only */
		case CTL_MSG_DATAMOVE: {
			struct ctl_sg_entry *sgl;
			int i, j;

			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n", __func__);
				/* XXX KDM do something here */
				break;
			}
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			/*
			 * Keep track of this, we need to send it back over
			 * when the datamove is complete.
			 */
			io->io_hdr.serializing_sc = msg->hdr.serializing_sc;

			/*
			 * First fragment of a (possibly multi-part) S/G
			 * list: allocate one array holding both the remote
			 * list and room for a local shadow list, then copy
			 * the transfer geometry out of the message.
			 */
			if (msg->dt.sg_sequence == 0) {
				i = msg->dt.kern_sg_entries +
				    io->scsiio.kern_data_len /
				    CTL_HA_DATAMOVE_SEGMENT + 1;
				sgl = malloc(sizeof(*sgl) * i, M_CTL,
				    M_WAITOK | M_ZERO);
				io->io_hdr.remote_sglist = sgl;
				io->io_hdr.local_sglist =
				    &sgl[msg->dt.kern_sg_entries];

				io->scsiio.kern_data_ptr = (uint8_t *)sgl;

				io->scsiio.kern_sg_entries =
					msg->dt.kern_sg_entries;
				io->scsiio.rem_sg_entries =
					msg->dt.kern_sg_entries;
				io->scsiio.kern_data_len =
					msg->dt.kern_data_len;
				io->scsiio.kern_total_len =
					msg->dt.kern_total_len;
				io->scsiio.kern_data_resid =
					msg->dt.kern_data_resid;
				io->scsiio.kern_rel_offset =
					msg->dt.kern_rel_offset;
				io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR;
				io->io_hdr.flags |= msg->dt.flags &
				    CTL_FLAG_BUS_ADDR;
			} else
				sgl = (struct ctl_sg_entry *)
					io->scsiio.kern_data_ptr;

			/* Append this fragment's entries to the list. */
			for (i = msg->dt.sent_sg_entries, j = 0;
			     i < (msg->dt.sent_sg_entries +
			     msg->dt.cur_sg_entries); i++, j++) {
				sgl[i].addr = msg->dt.sg_list[j].addr;
				sgl[i].len = msg->dt.sg_list[j].len;

#if 0
				printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n",
				       __func__,
				       msg->dt.sg_list[j].addr,
				       msg->dt.sg_list[j].len,
				       sgl[i].addr, sgl[i].len, j, i);
#endif
			}

			/*
			 * If this is the last piece of the I/O, we've got
			 * the full S/G list.  Queue processing in the thread.
			 * Otherwise wait for the next piece.
			 */
			if (msg->dt.sg_last != 0)
				ctl_enqueue_isc(io);
			break;
		}
		/* Performed on the Serializing (primary) SC, XFER mode only */
		case CTL_MSG_DATAMOVE_DONE: {
			if (msg->hdr.serializing_sc == NULL) {
				printf("%s: serializing_sc == NULL!\n",
				       __func__);
				/* XXX KDM now what? */
				break;
			}
			/*
			 * We grab the sense information here in case
			 * there was a failure, so we can return status
			 * back to the initiator.
			 */
			io = msg->hdr.serializing_sc;
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			io->io_hdr.port_status = msg->scsi.fetd_status;
			io->scsiio.residual = msg->scsi.residual;
			if (msg->hdr.status != CTL_STATUS_NONE) {
				io->io_hdr.status = msg->hdr.status;
				io->scsiio.scsi_status = msg->scsi.scsi_status;
				io->scsiio.sense_len = msg->scsi.sense_len;
				io->scsiio.sense_residual =msg->scsi.sense_residual;
				memcpy(&io->scsiio.sense_data,
				    &msg->scsi.sense_data,
				    msg->scsi.sense_len);
			}
			ctl_enqueue_isc(io);
			break;
		}

		/* Performed on Originating SC, SER_ONLY mode */
		case CTL_MSG_R2R:
			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n",
				    __func__);
				break;
			}
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			io->io_hdr.msg_type = CTL_MSG_R2R;
			io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
			ctl_enqueue_isc(io);
			break;

		/*
		 * Performed on Serializing(i.e. primary SC) SC in SER_ONLY
		 * mode.
		 * Performed on the Originating (i.e. secondary) SC in XFER
		 * mode
		 */
		case CTL_MSG_FINISH_IO:
			if (softc->ha_mode == CTL_HA_MODE_XFER)
				ctl_isc_handler_finish_xfer(softc, msg);
			else
				ctl_isc_handler_finish_ser_only(softc, msg);
			break;

		/* Performed on Originating SC */
		case CTL_MSG_BAD_JUJU:
			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: Bad JUJU!, original_sc is NULL!\n",
				       __func__);
				break;
			}
			ctl_copy_sense_data(msg, io);
			/*
			 * IO should have already been cleaned up on other
			 * SC so clear this flag so we won't send a message
			 * back to finish the IO there.
			 */
			io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;

			/* io = msg->hdr.serializing_sc; */
			io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
			ctl_enqueue_isc(io);
			break;

		/* Handle resets sent from the other side */
		case CTL_MSG_MANAGE_TASKS: {
			struct ctl_taskio *taskio;
			taskio = (struct ctl_taskio *)ctl_alloc_io(
			    softc->othersc_pool);
			ctl_zero_io((union ctl_io *)taskio);
			taskio->io_hdr.io_type = CTL_IO_TASK;
			taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
			taskio->io_hdr.nexus = msg->hdr.nexus;
			taskio->task_action = msg->task.task_action;
			taskio->tag_num = msg->task.tag_num;
			taskio->tag_type = msg->task.tag_type;
#ifdef CTL_TIME_IO
			taskio->io_hdr.start_time = time_uptime;
			getbintime(&taskio->io_hdr.start_bt);
#endif /* CTL_TIME_IO */
			/* Task management runs synchronously, not queued. */
			ctl_run_task((union ctl_io *)taskio);
			break;
		}
		/* Persistent Reserve action which needs attention */
		case CTL_MSG_PERS_ACTION:
			presio = (struct ctl_prio *)ctl_alloc_io(
			    softc->othersc_pool);
			ctl_zero_io((union ctl_io *)presio);
			presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
			presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
			presio->io_hdr.nexus = msg->hdr.nexus;
			presio->pr_msg = msg->pr;
			ctl_enqueue_isc((union ctl_io *)presio);
			break;
		case CTL_MSG_UA:
			ctl_isc_ua(softc, msg, param);
			break;
		case CTL_MSG_PORT_SYNC:
			ctl_isc_port_sync(softc, msg, param);
			break;
		case CTL_MSG_LUN_SYNC:
			ctl_isc_lun_sync(softc, msg, param);
			break;
		default:
			printf("Received HA message of unknown type %d\n",
			    msg->hdr.msg_type);
			break;
		}
		if (msg != &msgbuf)
			free(msg, M_CTL);
	} else if (event == CTL_HA_EVT_LINK_CHANGE) {
		printf("CTL: HA link status changed from %d to %d\n",
		    softc->ha_link, param);
		if (param == softc->ha_link)
			return;
		/*
		 * Going away from ONLINE counts as link-down; arriving at
		 * ONLINE counts as link-up.  Transitions between the other
		 * states just record the new state.
		 */
		if (softc->ha_link == CTL_HA_LINK_ONLINE) {
			softc->ha_link = param;
			ctl_isc_ha_link_down(softc);
		} else {
			softc->ha_link = param;
			if (softc->ha_link == CTL_HA_LINK_ONLINE)
				ctl_isc_ha_link_up(softc);
		}
		return;
	} else {
		printf("ctl_isc_event_handler: Unknown event %d\n", event);
		return;
	}
}

/*
 * Copy SCSI status and sense carried in an HA message into a local I/O,
 * so the status can be returned to the initiator.
 */
static void
ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
{

	memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data,
	    src->scsi.sense_len);
	dest->scsiio.scsi_status = src->scsi.scsi_status;
	dest->scsiio.sense_len = src->scsi.sense_len;
	dest->io_hdr.status = src->hdr.status;
}

/*
 * Inverse of ctl_copy_sense_data(): stage a local I/O's status and sense
 * into an HA message for transmission to the peer SC.
 */
static void
ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest)
{

	memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data,
	    src->scsiio.sense_len);
	dest->scsi.scsi_status = src->scsiio.scsi_status;
	dest->scsi.sense_len = src->scsiio.sense_len;
	dest->hdr.status = src->io_hdr.status;
}

/*
 * Establish a pending Unit Attention for a single initiator index.
 * Caller must hold lun_lock.  Indices outside this head's initiator
 * range, or ports with no pending_ua array, are silently ignored.
 */
static void
ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
	struct ctl_softc *softc = lun->ctl_softc;
	ctl_ua_type *pu;

	if (initidx < softc->init_min || initidx >= softc->init_max)
		return;
	mtx_assert(&lun->lun_lock, MA_OWNED);
	pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
	if (pu == NULL)
		return;
	pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua;
}

/*
 * Establish a pending Unit Attention for every initiator on this head's
 * ports, except the one identified by 'except'.  Caller holds lun_lock.
 */
static void
ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
	struct ctl_softc *softc = lun->ctl_softc;
	int i, j;

	mtx_assert(&lun->lun_lock, MA_OWNED);
	for (i = softc->port_min; i < softc->port_max; i++) {
		if (lun->pending_ua[i] == NULL)
			continue;
		for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
			if (i * CTL_MAX_INIT_PER_PORT + j == except)
				continue;
			lun->pending_ua[i][j] |= ua;
		}
	}
}

static void
1236ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 1237{ 1238 struct ctl_softc *softc = lun->ctl_softc; 1239 ctl_ua_type *pu; 1240 1241 if (initidx < softc->init_min || initidx >= softc->init_max) 1242 return; 1243 mtx_assert(&lun->lun_lock, MA_OWNED); 1244 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1245 if (pu == NULL) 1246 return; 1247 pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua; 1248} 1249 1250static void 1251ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1252{ 1253 struct ctl_softc *softc = lun->ctl_softc; 1254 int i, j; 1255 1256 mtx_assert(&lun->lun_lock, MA_OWNED); 1257 for (i = softc->port_min; i < softc->port_max; i++) { 1258 if (lun->pending_ua[i] == NULL) 1259 continue; 1260 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 1261 if (i * CTL_MAX_INIT_PER_PORT + j == except) 1262 continue; 1263 lun->pending_ua[i][j] &= ~ua; 1264 } 1265 } 1266} 1267 1268static void 1269ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx, 1270 ctl_ua_type ua_type) 1271{ 1272 struct ctl_lun *lun; 1273 1274 mtx_assert(&ctl_softc->ctl_lock, MA_OWNED); 1275 STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) { 1276 mtx_lock(&lun->lun_lock); 1277 ctl_clr_ua(lun, initidx, ua_type); 1278 mtx_unlock(&lun->lun_lock); 1279 } 1280} 1281 1282static int 1283ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS) 1284{ 1285 struct ctl_softc *softc = (struct ctl_softc *)arg1; 1286 struct ctl_lun *lun; 1287 struct ctl_lun_req ireq; 1288 int error, value; 1289 1290 value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 
	    0 : 1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if ((error != 0) || (req->newptr == NULL))
		return (error);

	mtx_lock(&softc->ctl_lock);
	if (value == 0)
		softc->flags |= CTL_FLAG_ACTIVE_SHELF;
	else
		softc->flags &= ~CTL_FLAG_ACTIVE_SHELF;
	STAILQ_FOREACH(lun, &softc->lun_list, links) {
		/*
		 * The backend ioctl may sleep, so drop ctl_lock around it.
		 * NOTE(review): the list is walked without the lock held
		 * during the call — presumably safe because LUNs are not
		 * removed concurrently here; confirm against callers.
		 */
		mtx_unlock(&softc->ctl_lock);
		bzero(&ireq, sizeof(ireq));
		ireq.reqtype = CTL_LUNREQ_MODIFY;
		ireq.reqdata.modify.lun_id = lun->lun;
		lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0,
		    curthread);
		if (ireq.status != CTL_LUN_OK) {
			printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n",
			    __func__, ireq.status, ireq.error_str);
		}
		mtx_lock(&softc->ctl_lock);
	}
	mtx_unlock(&softc->ctl_lock);
	return (0);
}

/*
 * One-time subsystem initialization, called at module load: creates the
 * /dev/cam/ctl device, sysctl tree, I/O UMA zone, HA port/initiator
 * ranges, worker/lun/threshold kernel threads, and (if HA is configured)
 * registers with the HA messaging layer.  Returns 0 or an errno.
 */
static int
ctl_init(void)
{
	struct ctl_softc *softc;
	void *other_pool;
	int i, error, retval;

	retval = 0;
	control_softc = malloc(sizeof(*control_softc), M_DEVBUF,
			       M_WAITOK | M_ZERO);
	softc = control_softc;

	softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600,
			      "cam/ctl");

	softc->dev->si_drv1 = softc;

	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl",
		CTLFLAG_RD, 0, "CAM Target Layer");

	if (softc->sysctl_tree == NULL) {
		printf("%s: unable to allocate sysctl tree\n", __func__);
		destroy_dev(softc->dev);
		free(control_softc, M_DEVBUF);
		control_softc = NULL;
		return (ENOMEM);
	}

	mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF);
	softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	softc->open_count = 0;

	/*
	 * Default to actually sending a SYNCHRONIZE CACHE command down to
	 * the drive.
	 */
	softc->flags = CTL_FLAG_REAL_SYNC;

	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0,
	    "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)");

	/*
	 * In Copan's HA scheme, the "master" and "slave" roles are
	 * figured out through the slot the controller is in.  Although it
	 * is an active/active system, someone has to be in charge.
	 */
	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0,
	    "HA head ID (0 - no HA)");
	/*
	 * Carve the global port/initiator index space between HA heads:
	 * a single (non-HA) head owns all ports, otherwise each head owns
	 * an equal contiguous slice selected by its ha_id.
	 */
	if (softc->ha_id == 0 || softc->ha_id > NUM_TARGET_PORT_GROUPS) {
		softc->flags |= CTL_FLAG_ACTIVE_SHELF;
		softc->is_single = 1;
		softc->port_cnt = CTL_MAX_PORTS;
		softc->port_min = 0;
	} else {
		softc->port_cnt = CTL_MAX_PORTS / NUM_TARGET_PORT_GROUPS;
		softc->port_min = (softc->ha_id - 1) * softc->port_cnt;
	}
	softc->port_max = softc->port_min + softc->port_cnt;
	softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT;
	softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT;

	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0,
	    "HA link state (0 - offline, 1 - unknown, 2 - online)");

	STAILQ_INIT(&softc->lun_list);
	STAILQ_INIT(&softc->pending_lun_queue);
	STAILQ_INIT(&softc->fe_list);
	STAILQ_INIT(&softc->port_list);
	STAILQ_INIT(&softc->be_list);
	ctl_tpc_init(softc);

	if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC,
	                    &other_pool) != 0)
	{
		printf("ctl: can't allocate %d entry other SC pool, "
		       "exiting\n", CTL_POOL_ENTRIES_OTHER_SC);
		return (ENOMEM);
	}
	softc->othersc_pool = other_pool;

	/* Scale worker threads with CPU count, clamped to the static max. */
	if (worker_threads <= 0)
		worker_threads = max(1, mp_ncpus / 4);
	if (worker_threads > CTL_MAX_THREADS)
		worker_threads = CTL_MAX_THREADS;

	for (i = 0; i < worker_threads; i++) {
		struct ctl_thread *thr = &softc->threads[i];

		mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF);
		thr->ctl_softc = softc;
		STAILQ_INIT(&thr->incoming_queue);
		STAILQ_INIT(&thr->rtr_queue);
		STAILQ_INIT(&thr->done_queue);
		STAILQ_INIT(&thr->isc_queue);

		error = kproc_kthread_add(ctl_work_thread, thr,
		    &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i);
		if (error != 0) {
			printf("error creating CTL work thread!\n");
			ctl_pool_free(other_pool);
			return (error);
		}
	}
	error = kproc_kthread_add(ctl_lun_thread, softc,
	    &softc->ctl_proc, NULL, 0, 0, "ctl", "lun");
	if (error != 0) {
		printf("error creating CTL lun thread!\n");
		ctl_pool_free(other_pool);
		return (error);
	}
	error = kproc_kthread_add(ctl_thresh_thread, softc,
	    &softc->ctl_proc, NULL, 0, 0, "ctl", "thresh");
	if (error != 0) {
		printf("error creating CTL threshold thread!\n");
		ctl_pool_free(other_pool);
		return (error);
	}

	SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "ha_role", CTLTYPE_INT | CTLFLAG_RWTUN,
	    softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head");

	/*
	 * HA configured: register the internal HA frontend and hook this
	 * file's ISC event handler into the HA messaging channel.  Any
	 * failure falls back to single-head operation.
	 */
	if (softc->is_single == 0) {
		ctl_frontend_register(&ha_frontend);
		if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) {
			printf("ctl_init: ctl_ha_msg_init failed.\n");
			softc->is_single = 1;
		} else
		if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler)
		    != CTL_HA_STATUS_SUCCESS) {
			printf("ctl_init: ctl_ha_msg_register failed.\n");
			softc->is_single = 1;
		}
	}
	return (0);
}

/*
 * Tear down the subsystem: deregister from HA, free every LUN, and
 * release the resources created in ctl_init() in reverse order.
 */
void
ctl_shutdown(void)
{
	struct ctl_softc *softc;
	struct ctl_lun *lun, *next_lun;

	softc = (struct ctl_softc *)control_softc;

	if (softc->is_single
== 0) { 1469 if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) 1470 != CTL_HA_STATUS_SUCCESS) { 1471 printf("ctl_shutdown: ctl_ha_msg_deregister failed.\n"); 1472 } 1473 if (ctl_ha_msg_shutdown(softc) != CTL_HA_STATUS_SUCCESS) { 1474 printf("ctl_shutdown: ctl_ha_msg_shutdown failed.\n"); 1475 } 1476 ctl_frontend_deregister(&ha_frontend); 1477 } 1478 1479 mtx_lock(&softc->ctl_lock); 1480 1481 /* 1482 * Free up each LUN. 1483 */ 1484 for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){ 1485 next_lun = STAILQ_NEXT(lun, links); 1486 ctl_free_lun(lun); 1487 } 1488 1489 mtx_unlock(&softc->ctl_lock); 1490 1491#if 0 1492 ctl_shutdown_thread(softc->work_thread); 1493 mtx_destroy(&softc->queue_lock); 1494#endif 1495 1496 ctl_tpc_shutdown(softc); 1497 uma_zdestroy(softc->io_zone); 1498 mtx_destroy(&softc->ctl_lock); 1499 1500 destroy_dev(softc->dev); 1501 1502 sysctl_ctx_free(&softc->sysctl_ctx); 1503 1504 free(control_softc, M_DEVBUF); 1505 control_softc = NULL; 1506} 1507 1508static int 1509ctl_module_event_handler(module_t mod, int what, void *arg) 1510{ 1511 1512 switch (what) { 1513 case MOD_LOAD: 1514 return (ctl_init()); 1515 case MOD_UNLOAD: 1516 return (EBUSY); 1517 default: 1518 return (EOPNOTSUPP); 1519 } 1520} 1521 1522/* 1523 * XXX KDM should we do some access checks here? Bump a reference count to 1524 * prevent a CTL module from being unloaded while someone has it open? 1525 */ 1526static int 1527ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td) 1528{ 1529 return (0); 1530} 1531 1532static int 1533ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) 1534{ 1535 return (0); 1536} 1537 1538/* 1539 * Remove an initiator by port number and initiator ID. 1540 * Returns 0 for success, -1 for failure. 
1541 */ 1542int 1543ctl_remove_initiator(struct ctl_port *port, int iid) 1544{ 1545 struct ctl_softc *softc = control_softc; 1546 1547 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 1548 1549 if (iid > CTL_MAX_INIT_PER_PORT) { 1550 printf("%s: initiator ID %u > maximun %u!\n", 1551 __func__, iid, CTL_MAX_INIT_PER_PORT); 1552 return (-1); 1553 } 1554 1555 mtx_lock(&softc->ctl_lock); 1556 port->wwpn_iid[iid].in_use--; 1557 port->wwpn_iid[iid].last_use = time_uptime; 1558 mtx_unlock(&softc->ctl_lock); 1559 1560 return (0); 1561} 1562 1563/* 1564 * Add an initiator to the initiator map. 1565 * Returns iid for success, < 0 for failure. 1566 */ 1567int 1568ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name) 1569{ 1570 struct ctl_softc *softc = control_softc; 1571 time_t best_time; 1572 int i, best; 1573 1574 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 1575 1576 if (iid >= CTL_MAX_INIT_PER_PORT) { 1577 printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n", 1578 __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT); 1579 free(name, M_CTL); 1580 return (-1); 1581 } 1582 1583 mtx_lock(&softc->ctl_lock); 1584 1585 if (iid < 0 && (wwpn != 0 || name != NULL)) { 1586 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1587 if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) { 1588 iid = i; 1589 break; 1590 } 1591 if (name != NULL && port->wwpn_iid[i].name != NULL && 1592 strcmp(name, port->wwpn_iid[i].name) == 0) { 1593 iid = i; 1594 break; 1595 } 1596 } 1597 } 1598 1599 if (iid < 0) { 1600 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1601 if (port->wwpn_iid[i].in_use == 0 && 1602 port->wwpn_iid[i].wwpn == 0 && 1603 port->wwpn_iid[i].name == NULL) { 1604 iid = i; 1605 break; 1606 } 1607 } 1608 } 1609 1610 if (iid < 0) { 1611 best = -1; 1612 best_time = INT32_MAX; 1613 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1614 if (port->wwpn_iid[i].in_use == 0) { 1615 if (port->wwpn_iid[i].last_use < best_time) { 1616 best = i; 1617 best_time = port->wwpn_iid[i].last_use; 1618 } 1619 
} 1620 } 1621 iid = best; 1622 } 1623 1624 if (iid < 0) { 1625 mtx_unlock(&softc->ctl_lock); 1626 free(name, M_CTL); 1627 return (-2); 1628 } 1629 1630 if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) { 1631 /* 1632 * This is not an error yet. 1633 */ 1634 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { 1635#if 0 1636 printf("%s: port %d iid %u WWPN %#jx arrived" 1637 " again\n", __func__, port->targ_port, 1638 iid, (uintmax_t)wwpn); 1639#endif 1640 goto take; 1641 } 1642 if (name != NULL && port->wwpn_iid[iid].name != NULL && 1643 strcmp(name, port->wwpn_iid[iid].name) == 0) { 1644#if 0 1645 printf("%s: port %d iid %u name '%s' arrived" 1646 " again\n", __func__, port->targ_port, 1647 iid, name); 1648#endif 1649 goto take; 1650 } 1651 1652 /* 1653 * This is an error, but what do we do about it? The 1654 * driver is telling us we have a new WWPN for this 1655 * initiator ID, so we pretty much need to use it. 1656 */ 1657 printf("%s: port %d iid %u WWPN %#jx '%s' arrived," 1658 " but WWPN %#jx '%s' is still at that address\n", 1659 __func__, port->targ_port, iid, wwpn, name, 1660 (uintmax_t)port->wwpn_iid[iid].wwpn, 1661 port->wwpn_iid[iid].name); 1662 1663 /* 1664 * XXX KDM clear have_ca and ua_pending on each LUN for 1665 * this initiator. 
1666 */ 1667 } 1668take: 1669 free(port->wwpn_iid[iid].name, M_CTL); 1670 port->wwpn_iid[iid].name = name; 1671 port->wwpn_iid[iid].wwpn = wwpn; 1672 port->wwpn_iid[iid].in_use++; 1673 mtx_unlock(&softc->ctl_lock); 1674 1675 return (iid); 1676} 1677 1678static int 1679ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf) 1680{ 1681 int len; 1682 1683 switch (port->port_type) { 1684 case CTL_PORT_FC: 1685 { 1686 struct scsi_transportid_fcp *id = 1687 (struct scsi_transportid_fcp *)buf; 1688 if (port->wwpn_iid[iid].wwpn == 0) 1689 return (0); 1690 memset(id, 0, sizeof(*id)); 1691 id->format_protocol = SCSI_PROTO_FC; 1692 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name); 1693 return (sizeof(*id)); 1694 } 1695 case CTL_PORT_ISCSI: 1696 { 1697 struct scsi_transportid_iscsi_port *id = 1698 (struct scsi_transportid_iscsi_port *)buf; 1699 if (port->wwpn_iid[iid].name == NULL) 1700 return (0); 1701 memset(id, 0, 256); 1702 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT | 1703 SCSI_PROTO_ISCSI; 1704 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1; 1705 len = roundup2(min(len, 252), 4); 1706 scsi_ulto2b(len, id->additional_length); 1707 return (sizeof(*id) + len); 1708 } 1709 case CTL_PORT_SAS: 1710 { 1711 struct scsi_transportid_sas *id = 1712 (struct scsi_transportid_sas *)buf; 1713 if (port->wwpn_iid[iid].wwpn == 0) 1714 return (0); 1715 memset(id, 0, sizeof(*id)); 1716 id->format_protocol = SCSI_PROTO_SAS; 1717 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address); 1718 return (sizeof(*id)); 1719 } 1720 default: 1721 { 1722 struct scsi_transportid_spi *id = 1723 (struct scsi_transportid_spi *)buf; 1724 memset(id, 0, sizeof(*id)); 1725 id->format_protocol = SCSI_PROTO_SPI; 1726 scsi_ulto2b(iid, id->scsi_addr); 1727 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id); 1728 return (sizeof(*id)); 1729 } 1730 } 1731} 1732 1733/* 1734 * Serialize a command that went down the "wrong" side, and so was sent to 1735 * this controller for execution. 
  The logic is a little different than the
 * standard case in ctl_scsiio_precheck().  Errors in this case need to get
 * sent back to the other side, but in the success case, we execute the
 * command on this side (XFER mode) or tell the other side to execute it
 * (SER_ONLY mode).
 *
 * Returns 0 when the command was accepted (queued, blocked, or handed to
 * the run-to-completion queue); 1 when an error reply was sent back to
 * the peer SC.
 */
static int
ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc;
	union ctl_ha_msg msg_info;
	struct ctl_lun *lun;
	const struct ctl_cmd_entry *entry;
	int retval = 0;
	uint32_t targ_lun;

	softc = control_softc;

	targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	/* Take lun_lock before dropping ctl_lock so the LUN can't vanish. */
	if ((targ_lun < CTL_MAX_LUNS) &&
	    ((lun = softc->ctl_luns[targ_lun]) != NULL)) {
		mtx_lock(&lun->lun_lock);
		mtx_unlock(&softc->ctl_lock);
		/*
		 * If the LUN is invalid, pretend that it doesn't exist.
		 * It will go away as soon as all pending I/O has been
		 * completed.
		 */
		if (lun->flags & CTL_LUN_DISABLED) {
			mtx_unlock(&lun->lun_lock);
			lun = NULL;
		}
	} else {
		mtx_unlock(&softc->ctl_lock);
		lun = NULL;
	}
	if (lun == NULL) {
		/*
		 * The other node would not send this request to us unless
		 * received announce that we are primary node for this LUN.
		 * If this LUN does not exist now, it is probably result of
		 * a race, so respond to initiator in the most opaque way.
		 */
		ctl_set_busy(ctsio);
		ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.scsi), M_WAITOK);
		return(1);
	}

	/* Per-LUN reservation/UA/power checks; errors go back to the peer. */
	entry = ctl_get_cmd_entry(ctsio, NULL);
	if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
		mtx_unlock(&lun->lun_lock);
		ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.scsi), M_WAITOK);
		return(1);
	}

	ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
	ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = lun->be_lun;

	/*
	 * Every I/O goes into the OOA queue for a
	 * particular LUN, and stays there until completion.
	 */
#ifdef CTL_TIME_IO
	if (TAILQ_EMPTY(&lun->ooa_queue))
		lun->idle_time += getsbinuptime() - lun->last_busy;
#endif
	TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);

	/* Check ordering against the I/O queued just before this one. */
	switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
		(union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq,
		ooa_links))) {
	case CTL_ACTION_BLOCK:
		ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
		TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
				  blocked_links);
		mtx_unlock(&lun->lun_lock);
		break;
	case CTL_ACTION_PASS:
	case CTL_ACTION_SKIP:
		if (softc->ha_mode == CTL_HA_MODE_XFER) {
			/* XFER mode: we execute the command locally. */
			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
			ctl_enqueue_rtr((union ctl_io *)ctsio);
			mtx_unlock(&lun->lun_lock);
		} else {
			ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
			mtx_unlock(&lun->lun_lock);

			/* send msg back to other side */
			msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
			msg_info.hdr.serializing_sc = (union ctl_io *)ctsio;
			msg_info.hdr.msg_type = CTL_MSG_R2R;
			ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
			    sizeof(msg_info.hdr), M_WAITOK);
		}
		break;
	case CTL_ACTION_OVERLAP:
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		mtx_unlock(&lun->lun_lock);
		retval = 1;

		ctl_set_overlapped_cmd(ctsio);
		ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.scsi), M_WAITOK);
		break;
	case CTL_ACTION_OVERLAP_TAG:
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		mtx_unlock(&lun->lun_lock);
		retval = 1;
		ctl_set_overlapped_tag(ctsio, ctsio->tag_num);
		ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.scsi), M_WAITOK);
		break;
	case CTL_ACTION_ERROR:
	default:
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		mtx_unlock(&lun->lun_lock);
		retval = 1;

		ctl_set_internal_failure(ctsio, /*sks_valid*/ 0,
					 /*retry_count*/ 0);
		ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.scsi), M_WAITOK);
		break;
	}
	return (retval);
}

/*
 * Fill kern_entries with a snapshot of this LUN's outstanding (OOA)
 * I/Os, advancing *cur_fill_num for every I/O seen (even those that no
 * longer fit, so the caller can report how many were dropped).
 * Returns 0 for success, errno for failure.
 */
static int
ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
		   struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries)
{
	union ctl_io *io;
	int retval;

	retval = 0;

	mtx_lock(&lun->lun_lock);
	for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL);
	     (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
	     ooa_links)) {
		struct ctl_ooa_entry *entry;

		/*
		 * If we've got more than we can fit, just count the
		 * remaining entries.
		 */
		if (*cur_fill_num >= ooa_hdr->alloc_num)
			continue;

		entry = &kern_entries[*cur_fill_num];

		entry->tag_num = io->scsiio.tag_num;
		entry->lun_num = lun->lun;
#ifdef CTL_TIME_IO
		entry->start_bt = io->io_hdr.start_bt;
#endif
		bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len);
		entry->cdb_len = io->scsiio.cdb_len;
		/* Translate internal I/O state flags into OOA report flags. */
		if (io->io_hdr.flags & CTL_FLAG_BLOCKED)
			entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED;

		if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG)
			entry->cmd_flags |= CTL_OOACMD_FLAG_DMA;

		if (io->io_hdr.flags & CTL_FLAG_ABORT)
			entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT;

		if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR)
			entry->cmd_flags |= CTL_OOACMD_FLAG_RTR;

		if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
			entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED;
	}
	mtx_unlock(&lun->lun_lock);

	return (retval);
}

/*
 * Allocate a zeroed kernel buffer of 'len' bytes and copy user data into
 * it.  On copyin failure, formats a message into error_str, frees the
 * buffer, and returns NULL.  Caller owns (and must free) the result.
 */
static void *
ctl_copyin_alloc(void *user_addr, int len, char *error_str,
		 size_t error_str_len)
{
	void *kptr;

	kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO);

	if (copyin(user_addr, kptr, len) != 0) {
		snprintf(error_str, error_str_len, "Error copying %d bytes "
			 "from user address %p to kernel address %p", len,
			 user_addr, kptr);
		free(kptr, M_CTL);
		return (NULL);
	}

	return (kptr);
}

/*
 * Release the kernel-side copies made by ctl_copyin_args().  Safe to
 * call with a partially-initialized array (kname/kvalue may be NULL).
 */
static void
ctl_free_args(int num_args, struct ctl_be_arg *args)
{
	int i;

	if (args == NULL)
		return;

	for (i = 0; i < num_args; i++) {
		free(args[i].kname, M_CTL);
		free(args[i].kvalue, M_CTL);
	}

	free(args, M_CTL);
}

/*
 * Copy a user-supplied backend-argument array into the kernel, including
 * each argument's name and (for CTL_BEARG_RD) its value, with
 * NUL-termination checks.  Returns the kernel array, or NULL with an
 * error message in error_str; on failure everything copied so far is
 * freed.
 *
 * NOTE(review): a zero namelen/vallen would index [len - 1] out of
 * bounds below — presumably callers validate lengths; confirm.
 */
static struct ctl_be_arg *
ctl_copyin_args(int num_args, struct ctl_be_arg *uargs,
		char *error_str, size_t error_str_len)
{
	struct ctl_be_arg *args;
	int i;

	args = ctl_copyin_alloc(uargs, num_args * sizeof(*args),
				error_str, error_str_len);

	if (args == NULL)
		goto bailout;

	/* Clear kernel pointers first so a bailout never frees junk. */
	for (i = 0; i < num_args; i++) {
		args[i].kname = NULL;
		args[i].kvalue = NULL;
	}

	for (i = 0; i < num_args; i++) {
		uint8_t *tmpptr;

		args[i].kname = ctl_copyin_alloc(args[i].name,
			args[i].namelen, error_str, error_str_len);
		if (args[i].kname == NULL)
			goto bailout;

		if (args[i].kname[args[i].namelen - 1] != '\0') {
			snprintf(error_str, error_str_len, "Argument %d "
				 "name is not NUL-terminated", i);
			goto bailout;
		}

		if (args[i].flags & CTL_BEARG_RD) {
			tmpptr = ctl_copyin_alloc(args[i].value,
				args[i].vallen, error_str, error_str_len);
			if (tmpptr == NULL)
				goto bailout;
			if ((args[i].flags & CTL_BEARG_ASCII)
			 && (tmpptr[args[i].vallen - 1] != '\0')) {
				snprintf(error_str, error_str_len, "Argument "
				    "%d value is not NUL-terminated", i);
				goto bailout;
			}
			args[i].kvalue = tmpptr;
		} else {
			/* Write-only argument: backend fills this buffer. */
			args[i].kvalue = malloc(args[i].vallen,
			    M_CTL, M_WAITOK | M_ZERO);
		}
	}

	return (args);
bailout:

	ctl_free_args(num_args, args);

	return (NULL);
}

/*
 * Copy CTL_BEARG_WR argument values back out to the user's buffers.
 * NOTE(review): copyout errors are ignored here — best-effort by design,
 * apparently; confirm callers don't need the failure.
 */
static void
ctl_copyout_args(int num_args, struct ctl_be_arg *args)
{
	int i;

	for (i = 0; i < num_args; i++) {
		if (args[i].flags & CTL_BEARG_WR)
			copyout(args[i].kvalue, args[i].value, args[i].vallen);
	}
}

/*
 * Escape characters that are illegal or not recommended in XML.
2045 */ 2046int 2047ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size) 2048{ 2049 char *end = str + size; 2050 int retval; 2051 2052 retval = 0; 2053 2054 for (; *str && str < end; str++) { 2055 switch (*str) { 2056 case '&': 2057 retval = sbuf_printf(sb, "&"); 2058 break; 2059 case '>': 2060 retval = sbuf_printf(sb, ">"); 2061 break; 2062 case '<': 2063 retval = sbuf_printf(sb, "<"); 2064 break; 2065 default: 2066 retval = sbuf_putc(sb, *str); 2067 break; 2068 } 2069 2070 if (retval != 0) 2071 break; 2072 2073 } 2074 2075 return (retval); 2076} 2077 2078static void 2079ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb) 2080{ 2081 struct scsi_vpd_id_descriptor *desc; 2082 int i; 2083 2084 if (id == NULL || id->len < 4) 2085 return; 2086 desc = (struct scsi_vpd_id_descriptor *)id->data; 2087 switch (desc->id_type & SVPD_ID_TYPE_MASK) { 2088 case SVPD_ID_TYPE_T10: 2089 sbuf_printf(sb, "t10."); 2090 break; 2091 case SVPD_ID_TYPE_EUI64: 2092 sbuf_printf(sb, "eui."); 2093 break; 2094 case SVPD_ID_TYPE_NAA: 2095 sbuf_printf(sb, "naa."); 2096 break; 2097 case SVPD_ID_TYPE_SCSI_NAME: 2098 break; 2099 } 2100 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { 2101 case SVPD_ID_CODESET_BINARY: 2102 for (i = 0; i < desc->length; i++) 2103 sbuf_printf(sb, "%02x", desc->identifier[i]); 2104 break; 2105 case SVPD_ID_CODESET_ASCII: 2106 sbuf_printf(sb, "%.*s", (int)desc->length, 2107 (char *)desc->identifier); 2108 break; 2109 case SVPD_ID_CODESET_UTF8: 2110 sbuf_printf(sb, "%s", (char *)desc->identifier); 2111 break; 2112 } 2113} 2114 2115static int 2116ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 2117 struct thread *td) 2118{ 2119 struct ctl_softc *softc; 2120 int retval; 2121 2122 softc = control_softc; 2123 2124 retval = 0; 2125 2126 switch (cmd) { 2127 case CTL_IO: 2128 retval = ctl_ioctl_io(dev, cmd, addr, flag, td); 2129 break; 2130 case CTL_ENABLE_PORT: 2131 case CTL_DISABLE_PORT: 2132 case CTL_SET_PORT_WWNS: { 2133 struct ctl_port *port; 2134 
		struct ctl_port_entry *entry;

		entry = (struct ctl_port_entry *)addr;

		/*
		 * Walk every port this instance owns and apply the
		 * enable/disable/set-WWNs request to each matching one.
		 */
		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(port, &softc->port_list, links) {
			int action, done;

			/* Skip ports outside this softc's port range. */
			if (port->targ_port < softc->port_min ||
			    port->targ_port >= softc->port_max)
				continue;

			action = 0;
			done = 0;
			if ((entry->port_type == CTL_PORT_NONE)
			 && (entry->targ_port == port->targ_port)) {
				/*
				 * If the user only wants to enable or
				 * disable or set WWNs on a specific port,
				 * do the operation and we're done.
				 */
				action = 1;
				done = 1;
			} else if (entry->port_type & port->port_type) {
				/*
				 * Compare the user's type mask with the
				 * particular frontend type to see if we
				 * have a match.
				 */
				action = 1;
				done = 0;

				/*
				 * Make sure the user isn't trying to set
				 * WWNs on multiple ports at the same time.
				 */
				if (cmd == CTL_SET_PORT_WWNS) {
					printf("%s: Can't set WWNs on "
					       "multiple ports\n", __func__);
					retval = EINVAL;
					break;
				}
			}
			if (action == 0)
				continue;

			/*
			 * XXX KDM we have to drop the lock here, because
			 * the online/offline operations can potentially
			 * block.  We need to reference count the frontends
			 * so they can't go away,
			 */
			if (cmd == CTL_ENABLE_PORT) {
				mtx_unlock(&softc->ctl_lock);
				ctl_port_online(port);
				mtx_lock(&softc->ctl_lock);
			} else if (cmd == CTL_DISABLE_PORT) {
				mtx_unlock(&softc->ctl_lock);
				ctl_port_offline(port);
				mtx_lock(&softc->ctl_lock);
			} else if (cmd == CTL_SET_PORT_WWNS) {
				/* Only update the WWNs the caller marked valid. */
				ctl_port_set_wwns(port,
				    (entry->flags & CTL_PORT_WWNN_VALID) ?
				    1 : 0, entry->wwnn,
				    (entry->flags & CTL_PORT_WWPN_VALID) ?
				    1 : 0, entry->wwpn);
			}
			if (done != 0)
				break;
		}
		mtx_unlock(&softc->ctl_lock);
		break;
	}
	case CTL_GET_PORT_LIST: {
		struct ctl_port *port;
		struct ctl_port_list *list;
		int i;

		list = (struct ctl_port_list *)addr;

		/* The user's buffer must be an exact multiple of entries. */
		if (list->alloc_len != (list->alloc_num *
		    sizeof(struct ctl_port_entry))) {
			printf("%s: CTL_GET_PORT_LIST: alloc_len %u != "
			       "alloc_num %u * sizeof(struct ctl_port_entry) "
			       "%zu\n", __func__, list->alloc_len,
			       list->alloc_num, sizeof(struct ctl_port_entry));
			retval = EINVAL;
			break;
		}
		list->fill_len = 0;
		list->fill_num = 0;
		list->dropped_num = 0;
		i = 0;
		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(port, &softc->port_list, links) {
			struct ctl_port_entry entry, *list_entry;

			/* Out of user space: count the port as dropped. */
			if (list->fill_num >= list->alloc_num) {
				list->dropped_num++;
				continue;
			}

			/* Snapshot the port into a stack entry, then copy out. */
			entry.port_type = port->port_type;
			strlcpy(entry.port_name, port->port_name,
			    sizeof(entry.port_name));
			entry.targ_port = port->targ_port;
			entry.physical_port = port->physical_port;
			entry.virtual_port = port->virtual_port;
			entry.wwnn = port->wwnn;
			entry.wwpn = port->wwpn;
			if (port->status & CTL_PORT_STATUS_ONLINE)
				entry.online = 1;
			else
				entry.online = 0;

			list_entry = &list->entries[i];

			/*
			 * NOTE(review): copyout() while holding ctl_lock can
			 * fault/sleep; presumably tolerated here as elsewhere
			 * in this handler -- confirm against locking rules.
			 */
			retval = copyout(&entry, list_entry, sizeof(entry));
			if (retval != 0) {
				printf("%s: CTL_GET_PORT_LIST: copyout "
				       "returned %d\n", __func__, retval);
				break;
			}
			i++;
			list->fill_num++;
			list->fill_len += sizeof(entry);
		}
		mtx_unlock(&softc->ctl_lock);

		/*
		 * If this is non-zero, we had a copyout fault, so there's
		 * probably no point in attempting to set the status inside
		 * the structure.
2267 */ 2268 if (retval != 0) 2269 break; 2270 2271 if (list->dropped_num > 0) 2272 list->status = CTL_PORT_LIST_NEED_MORE_SPACE; 2273 else 2274 list->status = CTL_PORT_LIST_OK; 2275 break; 2276 } 2277 case CTL_DUMP_OOA: { 2278 struct ctl_lun *lun; 2279 union ctl_io *io; 2280 char printbuf[128]; 2281 struct sbuf sb; 2282 2283 mtx_lock(&softc->ctl_lock); 2284 printf("Dumping OOA queues:\n"); 2285 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2286 mtx_lock(&lun->lun_lock); 2287 for (io = (union ctl_io *)TAILQ_FIRST( 2288 &lun->ooa_queue); io != NULL; 2289 io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2290 ooa_links)) { 2291 sbuf_new(&sb, printbuf, sizeof(printbuf), 2292 SBUF_FIXEDLEN); 2293 sbuf_printf(&sb, "LUN %jd tag 0x%04x%s%s%s%s: ", 2294 (intmax_t)lun->lun, 2295 io->scsiio.tag_num, 2296 (io->io_hdr.flags & 2297 CTL_FLAG_BLOCKED) ? "" : " BLOCKED", 2298 (io->io_hdr.flags & 2299 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 2300 (io->io_hdr.flags & 2301 CTL_FLAG_ABORT) ? " ABORT" : "", 2302 (io->io_hdr.flags & 2303 CTL_FLAG_IS_WAS_ON_RTR) ? 
" RTR" : ""); 2304 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 2305 sbuf_finish(&sb); 2306 printf("%s\n", sbuf_data(&sb)); 2307 } 2308 mtx_unlock(&lun->lun_lock); 2309 } 2310 printf("OOA queues dump done\n"); 2311 mtx_unlock(&softc->ctl_lock); 2312 break; 2313 } 2314 case CTL_GET_OOA: { 2315 struct ctl_lun *lun; 2316 struct ctl_ooa *ooa_hdr; 2317 struct ctl_ooa_entry *entries; 2318 uint32_t cur_fill_num; 2319 2320 ooa_hdr = (struct ctl_ooa *)addr; 2321 2322 if ((ooa_hdr->alloc_len == 0) 2323 || (ooa_hdr->alloc_num == 0)) { 2324 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2325 "must be non-zero\n", __func__, 2326 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2327 retval = EINVAL; 2328 break; 2329 } 2330 2331 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2332 sizeof(struct ctl_ooa_entry))) { 2333 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2334 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2335 __func__, ooa_hdr->alloc_len, 2336 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2337 retval = EINVAL; 2338 break; 2339 } 2340 2341 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2342 if (entries == NULL) { 2343 printf("%s: could not allocate %d bytes for OOA " 2344 "dump\n", __func__, ooa_hdr->alloc_len); 2345 retval = ENOMEM; 2346 break; 2347 } 2348 2349 mtx_lock(&softc->ctl_lock); 2350 if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0) 2351 && ((ooa_hdr->lun_num >= CTL_MAX_LUNS) 2352 || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) { 2353 mtx_unlock(&softc->ctl_lock); 2354 free(entries, M_CTL); 2355 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2356 __func__, (uintmax_t)ooa_hdr->lun_num); 2357 retval = EINVAL; 2358 break; 2359 } 2360 2361 cur_fill_num = 0; 2362 2363 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2364 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2365 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2366 ooa_hdr, entries); 2367 if (retval != 0) 2368 break; 2369 } 2370 if (retval != 0) { 2371 
				mtx_unlock(&softc->ctl_lock);
				free(entries, M_CTL);
				break;
			}
		} else {
			/* Single-LUN request; lun_num was validated above. */
			lun = softc->ctl_luns[ooa_hdr->lun_num];

			retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,ooa_hdr,
			    entries);
		}
		mtx_unlock(&softc->ctl_lock);

		/* Copy out at most alloc_num entries; the rest are "dropped". */
		ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num);
		ooa_hdr->fill_len = ooa_hdr->fill_num *
			sizeof(struct ctl_ooa_entry);
		retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len);
		if (retval != 0) {
			printf("%s: error copying out %d bytes for OOA dump\n",
			       __func__, ooa_hdr->fill_len);
		}

		/* Timestamp the snapshot for the caller. */
		getbintime(&ooa_hdr->cur_bt);

		if (cur_fill_num > ooa_hdr->alloc_num) {
			ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num;
			ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE;
		} else {
			ooa_hdr->dropped_num = 0;
			ooa_hdr->status = CTL_OOA_OK;
		}

		free(entries, M_CTL);
		break;
	}
	case CTL_CHECK_OOA: {
		/* Report how many I/Os are queued on one LUN's OOA queue. */
		union ctl_io *io;
		struct ctl_lun *lun;
		struct ctl_ooa_info *ooa_info;


		ooa_info = (struct ctl_ooa_info *)addr;

		if (ooa_info->lun_id >= CTL_MAX_LUNS) {
			ooa_info->status = CTL_OOA_INVALID_LUN;
			break;
		}
		mtx_lock(&softc->ctl_lock);
		lun = softc->ctl_luns[ooa_info->lun_id];
		if (lun == NULL) {
			mtx_unlock(&softc->ctl_lock);
			ooa_info->status = CTL_OOA_INVALID_LUN;
			break;
		}
		/* Hand-over-hand: take the lun lock, then drop the list lock. */
		mtx_lock(&lun->lun_lock);
		mtx_unlock(&softc->ctl_lock);
		ooa_info->num_entries = 0;
		for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
		     io != NULL; io = (union ctl_io *)TAILQ_NEXT(
		     &io->io_hdr, ooa_links)) {
			ooa_info->num_entries++;
		}
		mtx_unlock(&lun->lun_lock);

		ooa_info->status = CTL_OOA_SUCCESS;

		break;
	}
	case CTL_DELAY_IO: {
		/*
		 * Configure artificial datamove/done delays on a LUN for
		 * testing; only compiled in with CTL_IO_DELAY.
		 */
		struct ctl_io_delay_info *delay_info;
#ifdef CTL_IO_DELAY
		struct ctl_lun *lun;
#endif /* CTL_IO_DELAY */

		delay_info = (struct ctl_io_delay_info *)addr;

#ifdef CTL_IO_DELAY
		mtx_lock(&softc->ctl_lock);

		if ((delay_info->lun_id >= CTL_MAX_LUNS)
		 || (softc->ctl_luns[delay_info->lun_id] == NULL)) {
			delay_info->status = CTL_DELAY_STATUS_INVALID_LUN;
		} else {
			lun = softc->ctl_luns[delay_info->lun_id];
			mtx_lock(&lun->lun_lock);

			delay_info->status = CTL_DELAY_STATUS_OK;

			/* Validate the delay type first ... */
			switch (delay_info->delay_type) {
			case CTL_DELAY_TYPE_CONT:
				break;
			case CTL_DELAY_TYPE_ONESHOT:
				break;
			default:
				delay_info->status =
					CTL_DELAY_STATUS_INVALID_TYPE;
				break;
			}

			/* ... then install it at the requested location. */
			switch (delay_info->delay_loc) {
			case CTL_DELAY_LOC_DATAMOVE:
				lun->delay_info.datamove_type =
					delay_info->delay_type;
				lun->delay_info.datamove_delay =
					delay_info->delay_secs;
				break;
			case CTL_DELAY_LOC_DONE:
				lun->delay_info.done_type =
					delay_info->delay_type;
				lun->delay_info.done_delay =
					delay_info->delay_secs;
				break;
			default:
				delay_info->status =
					CTL_DELAY_STATUS_INVALID_LOC;
				break;
			}
			mtx_unlock(&lun->lun_lock);
		}

		mtx_unlock(&softc->ctl_lock);
#else
		delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED;
#endif /* CTL_IO_DELAY */
		break;
	}
	case CTL_REALSYNC_SET: {
		/* Set whether SYNCHRONIZE CACHE is really passed down. */
		int *syncstate;

		syncstate = (int *)addr;

		mtx_lock(&softc->ctl_lock);
		switch (*syncstate) {
		case 0:
			softc->flags &= ~CTL_FLAG_REAL_SYNC;
			break;
		case 1:
			softc->flags |= CTL_FLAG_REAL_SYNC;
			break;
		default:
			retval = EINVAL;
			break;
		}
		mtx_unlock(&softc->ctl_lock);
		break;
	}
	case CTL_REALSYNC_GET: {
		/* Query the current REAL_SYNC setting. */
		int *syncstate;

		syncstate = (int*)addr;

		mtx_lock(&softc->ctl_lock);
		if (softc->flags & CTL_FLAG_REAL_SYNC)
			*syncstate = 1;
		else
			*syncstate = 0;
		mtx_unlock(&softc->ctl_lock);

		break;
	}
	case
CTL_SETSYNC: 2531 case CTL_GETSYNC: { 2532 struct ctl_sync_info *sync_info; 2533 struct ctl_lun *lun; 2534 2535 sync_info = (struct ctl_sync_info *)addr; 2536 2537 mtx_lock(&softc->ctl_lock); 2538 lun = softc->ctl_luns[sync_info->lun_id]; 2539 if (lun == NULL) { 2540 mtx_unlock(&softc->ctl_lock); 2541 sync_info->status = CTL_GS_SYNC_NO_LUN; 2542 break; 2543 } 2544 /* 2545 * Get or set the sync interval. We're not bounds checking 2546 * in the set case, hopefully the user won't do something 2547 * silly. 2548 */ 2549 mtx_lock(&lun->lun_lock); 2550 mtx_unlock(&softc->ctl_lock); 2551 if (cmd == CTL_GETSYNC) 2552 sync_info->sync_interval = lun->sync_interval; 2553 else 2554 lun->sync_interval = sync_info->sync_interval; 2555 mtx_unlock(&lun->lun_lock); 2556 2557 sync_info->status = CTL_GS_SYNC_OK; 2558 2559 break; 2560 } 2561 case CTL_GETSTATS: { 2562 struct ctl_stats *stats; 2563 struct ctl_lun *lun; 2564 int i; 2565 2566 stats = (struct ctl_stats *)addr; 2567 2568 if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) > 2569 stats->alloc_len) { 2570 stats->status = CTL_SS_NEED_MORE_SPACE; 2571 stats->num_luns = softc->num_luns; 2572 break; 2573 } 2574 /* 2575 * XXX KDM no locking here. If the LUN list changes, 2576 * things can blow up. 
2577 */ 2578 for (i = 0, lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; 2579 i++, lun = STAILQ_NEXT(lun, links)) { 2580 retval = copyout(&lun->stats, &stats->lun_stats[i], 2581 sizeof(lun->stats)); 2582 if (retval != 0) 2583 break; 2584 } 2585 stats->num_luns = softc->num_luns; 2586 stats->fill_len = sizeof(struct ctl_lun_io_stats) * 2587 softc->num_luns; 2588 stats->status = CTL_SS_OK; 2589#ifdef CTL_TIME_IO 2590 stats->flags = CTL_STATS_FLAG_TIME_VALID; 2591#else 2592 stats->flags = CTL_STATS_FLAG_NONE; 2593#endif 2594 getnanouptime(&stats->timestamp); 2595 break; 2596 } 2597 case CTL_ERROR_INJECT: { 2598 struct ctl_error_desc *err_desc, *new_err_desc; 2599 struct ctl_lun *lun; 2600 2601 err_desc = (struct ctl_error_desc *)addr; 2602 2603 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 2604 M_WAITOK | M_ZERO); 2605 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 2606 2607 mtx_lock(&softc->ctl_lock); 2608 lun = softc->ctl_luns[err_desc->lun_id]; 2609 if (lun == NULL) { 2610 mtx_unlock(&softc->ctl_lock); 2611 free(new_err_desc, M_CTL); 2612 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 2613 __func__, (uintmax_t)err_desc->lun_id); 2614 retval = EINVAL; 2615 break; 2616 } 2617 mtx_lock(&lun->lun_lock); 2618 mtx_unlock(&softc->ctl_lock); 2619 2620 /* 2621 * We could do some checking here to verify the validity 2622 * of the request, but given the complexity of error 2623 * injection requests, the checking logic would be fairly 2624 * complex. 2625 * 2626 * For now, if the request is invalid, it just won't get 2627 * executed and might get deleted. 2628 */ 2629 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 2630 2631 /* 2632 * XXX KDM check to make sure the serial number is unique, 2633 * in case we somehow manage to wrap. That shouldn't 2634 * happen for a very long time, but it's the right thing to 2635 * do. 
2636 */ 2637 new_err_desc->serial = lun->error_serial; 2638 err_desc->serial = lun->error_serial; 2639 lun->error_serial++; 2640 2641 mtx_unlock(&lun->lun_lock); 2642 break; 2643 } 2644 case CTL_ERROR_INJECT_DELETE: { 2645 struct ctl_error_desc *delete_desc, *desc, *desc2; 2646 struct ctl_lun *lun; 2647 int delete_done; 2648 2649 delete_desc = (struct ctl_error_desc *)addr; 2650 delete_done = 0; 2651 2652 mtx_lock(&softc->ctl_lock); 2653 lun = softc->ctl_luns[delete_desc->lun_id]; 2654 if (lun == NULL) { 2655 mtx_unlock(&softc->ctl_lock); 2656 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 2657 __func__, (uintmax_t)delete_desc->lun_id); 2658 retval = EINVAL; 2659 break; 2660 } 2661 mtx_lock(&lun->lun_lock); 2662 mtx_unlock(&softc->ctl_lock); 2663 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 2664 if (desc->serial != delete_desc->serial) 2665 continue; 2666 2667 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 2668 links); 2669 free(desc, M_CTL); 2670 delete_done = 1; 2671 } 2672 mtx_unlock(&lun->lun_lock); 2673 if (delete_done == 0) { 2674 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 2675 "error serial %ju on LUN %u\n", __func__, 2676 delete_desc->serial, delete_desc->lun_id); 2677 retval = EINVAL; 2678 break; 2679 } 2680 break; 2681 } 2682 case CTL_DUMP_STRUCTS: { 2683 int i, j, k; 2684 struct ctl_port *port; 2685 struct ctl_frontend *fe; 2686 2687 mtx_lock(&softc->ctl_lock); 2688 printf("CTL Persistent Reservation information start:\n"); 2689 for (i = 0; i < CTL_MAX_LUNS; i++) { 2690 struct ctl_lun *lun; 2691 2692 lun = softc->ctl_luns[i]; 2693 2694 if ((lun == NULL) 2695 || ((lun->flags & CTL_LUN_DISABLED) != 0)) 2696 continue; 2697 2698 for (j = 0; j < CTL_MAX_PORTS; j++) { 2699 if (lun->pr_keys[j] == NULL) 2700 continue; 2701 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 2702 if (lun->pr_keys[j][k] == 0) 2703 continue; 2704 printf(" LUN %d port %d iid %d key " 2705 "%#jx\n", i, j, k, 2706 (uintmax_t)lun->pr_keys[j][k]); 2707 } 
2708 } 2709 } 2710 printf("CTL Persistent Reservation information end\n"); 2711 printf("CTL Ports:\n"); 2712 STAILQ_FOREACH(port, &softc->port_list, links) { 2713 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " 2714 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, 2715 port->frontend->name, port->port_type, 2716 port->physical_port, port->virtual_port, 2717 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); 2718 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 2719 if (port->wwpn_iid[j].in_use == 0 && 2720 port->wwpn_iid[j].wwpn == 0 && 2721 port->wwpn_iid[j].name == NULL) 2722 continue; 2723 2724 printf(" iid %u use %d WWPN %#jx '%s'\n", 2725 j, port->wwpn_iid[j].in_use, 2726 (uintmax_t)port->wwpn_iid[j].wwpn, 2727 port->wwpn_iid[j].name); 2728 } 2729 } 2730 printf("CTL Port information end\n"); 2731 mtx_unlock(&softc->ctl_lock); 2732 /* 2733 * XXX KDM calling this without a lock. We'd likely want 2734 * to drop the lock before calling the frontend's dump 2735 * routine anyway. 
2736 */ 2737 printf("CTL Frontends:\n"); 2738 STAILQ_FOREACH(fe, &softc->fe_list, links) { 2739 printf(" Frontend '%s'\n", fe->name); 2740 if (fe->fe_dump != NULL) 2741 fe->fe_dump(); 2742 } 2743 printf("CTL Frontend information end\n"); 2744 break; 2745 } 2746 case CTL_LUN_REQ: { 2747 struct ctl_lun_req *lun_req; 2748 struct ctl_backend_driver *backend; 2749 2750 lun_req = (struct ctl_lun_req *)addr; 2751 2752 backend = ctl_backend_find(lun_req->backend); 2753 if (backend == NULL) { 2754 lun_req->status = CTL_LUN_ERROR; 2755 snprintf(lun_req->error_str, 2756 sizeof(lun_req->error_str), 2757 "Backend \"%s\" not found.", 2758 lun_req->backend); 2759 break; 2760 } 2761 if (lun_req->num_be_args > 0) { 2762 lun_req->kern_be_args = ctl_copyin_args( 2763 lun_req->num_be_args, 2764 lun_req->be_args, 2765 lun_req->error_str, 2766 sizeof(lun_req->error_str)); 2767 if (lun_req->kern_be_args == NULL) { 2768 lun_req->status = CTL_LUN_ERROR; 2769 break; 2770 } 2771 } 2772 2773 retval = backend->ioctl(dev, cmd, addr, flag, td); 2774 2775 if (lun_req->num_be_args > 0) { 2776 ctl_copyout_args(lun_req->num_be_args, 2777 lun_req->kern_be_args); 2778 ctl_free_args(lun_req->num_be_args, 2779 lun_req->kern_be_args); 2780 } 2781 break; 2782 } 2783 case CTL_LUN_LIST: { 2784 struct sbuf *sb; 2785 struct ctl_lun *lun; 2786 struct ctl_lun_list *list; 2787 struct ctl_option *opt; 2788 2789 list = (struct ctl_lun_list *)addr; 2790 2791 /* 2792 * Allocate a fixed length sbuf here, based on the length 2793 * of the user's buffer. We could allocate an auto-extending 2794 * buffer, and then tell the user how much larger our 2795 * amount of data is than his buffer, but that presents 2796 * some problems: 2797 * 2798 * 1. The sbuf(9) routines use a blocking malloc, and so 2799 * we can't hold a lock while calling them with an 2800 * auto-extending buffer. 2801 * 2802 * 2. 
There is not currently a LUN reference counting 2803 * mechanism, outside of outstanding transactions on 2804 * the LUN's OOA queue. So a LUN could go away on us 2805 * while we're getting the LUN number, backend-specific 2806 * information, etc. Thus, given the way things 2807 * currently work, we need to hold the CTL lock while 2808 * grabbing LUN information. 2809 * 2810 * So, from the user's standpoint, the best thing to do is 2811 * allocate what he thinks is a reasonable buffer length, 2812 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 2813 * double the buffer length and try again. (And repeat 2814 * that until he succeeds.) 2815 */ 2816 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 2817 if (sb == NULL) { 2818 list->status = CTL_LUN_LIST_ERROR; 2819 snprintf(list->error_str, sizeof(list->error_str), 2820 "Unable to allocate %d bytes for LUN list", 2821 list->alloc_len); 2822 break; 2823 } 2824 2825 sbuf_printf(sb, "<ctllunlist>\n"); 2826 2827 mtx_lock(&softc->ctl_lock); 2828 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2829 mtx_lock(&lun->lun_lock); 2830 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 2831 (uintmax_t)lun->lun); 2832 2833 /* 2834 * Bail out as soon as we see that we've overfilled 2835 * the buffer. 2836 */ 2837 if (retval != 0) 2838 break; 2839 2840 retval = sbuf_printf(sb, "\t<backend_type>%s" 2841 "</backend_type>\n", 2842 (lun->backend == NULL) ? "none" : 2843 lun->backend->name); 2844 2845 if (retval != 0) 2846 break; 2847 2848 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 2849 lun->be_lun->lun_type); 2850 2851 if (retval != 0) 2852 break; 2853 2854 if (lun->backend == NULL) { 2855 retval = sbuf_printf(sb, "</lun>\n"); 2856 if (retval != 0) 2857 break; 2858 continue; 2859 } 2860 2861 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 2862 (lun->be_lun->maxlba > 0) ? 
2863 lun->be_lun->maxlba + 1 : 0); 2864 2865 if (retval != 0) 2866 break; 2867 2868 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 2869 lun->be_lun->blocksize); 2870 2871 if (retval != 0) 2872 break; 2873 2874 retval = sbuf_printf(sb, "\t<serial_number>"); 2875 2876 if (retval != 0) 2877 break; 2878 2879 retval = ctl_sbuf_printf_esc(sb, 2880 lun->be_lun->serial_num, 2881 sizeof(lun->be_lun->serial_num)); 2882 2883 if (retval != 0) 2884 break; 2885 2886 retval = sbuf_printf(sb, "</serial_number>\n"); 2887 2888 if (retval != 0) 2889 break; 2890 2891 retval = sbuf_printf(sb, "\t<device_id>"); 2892 2893 if (retval != 0) 2894 break; 2895 2896 retval = ctl_sbuf_printf_esc(sb, 2897 lun->be_lun->device_id, 2898 sizeof(lun->be_lun->device_id)); 2899 2900 if (retval != 0) 2901 break; 2902 2903 retval = sbuf_printf(sb, "</device_id>\n"); 2904 2905 if (retval != 0) 2906 break; 2907 2908 if (lun->backend->lun_info != NULL) { 2909 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb); 2910 if (retval != 0) 2911 break; 2912 } 2913 STAILQ_FOREACH(opt, &lun->be_lun->options, links) { 2914 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 2915 opt->name, opt->value, opt->name); 2916 if (retval != 0) 2917 break; 2918 } 2919 2920 retval = sbuf_printf(sb, "</lun>\n"); 2921 2922 if (retval != 0) 2923 break; 2924 mtx_unlock(&lun->lun_lock); 2925 } 2926 if (lun != NULL) 2927 mtx_unlock(&lun->lun_lock); 2928 mtx_unlock(&softc->ctl_lock); 2929 2930 if ((retval != 0) 2931 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 2932 retval = 0; 2933 sbuf_delete(sb); 2934 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 2935 snprintf(list->error_str, sizeof(list->error_str), 2936 "Out of space, %d bytes is too small", 2937 list->alloc_len); 2938 break; 2939 } 2940 2941 sbuf_finish(sb); 2942 2943 retval = copyout(sbuf_data(sb), list->lun_xml, 2944 sbuf_len(sb) + 1); 2945 2946 list->fill_len = sbuf_len(sb) + 1; 2947 list->status = CTL_LUN_LIST_OK; 2948 sbuf_delete(sb); 2949 break; 2950 } 
	case CTL_ISCSI: {
		/* Forward iSCSI control requests to the iscsi frontend. */
		struct ctl_iscsi *ci;
		struct ctl_frontend *fe;

		ci = (struct ctl_iscsi *)addr;

		fe = ctl_frontend_find("iscsi");
		if (fe == NULL) {
			ci->status = CTL_ISCSI_ERROR;
			snprintf(ci->error_str, sizeof(ci->error_str),
			    "Frontend \"iscsi\" not found.");
			break;
		}

		retval = fe->ioctl(dev, cmd, addr, flag, td);
		break;
	}
	case CTL_PORT_REQ: {
		/* Forward a port request to the named frontend driver. */
		struct ctl_req *req;
		struct ctl_frontend *fe;

		req = (struct ctl_req *)addr;

		fe = ctl_frontend_find(req->driver);
		if (fe == NULL) {
			req->status = CTL_LUN_ERROR;
			snprintf(req->error_str, sizeof(req->error_str),
			    "Frontend \"%s\" not found.", req->driver);
			break;
		}
		if (req->num_args > 0) {
			req->kern_args = ctl_copyin_args(req->num_args,
			    req->args, req->error_str, sizeof(req->error_str));
			if (req->kern_args == NULL) {
				req->status = CTL_LUN_ERROR;
				break;
			}
		}

		if (fe->ioctl)
			retval = fe->ioctl(dev, cmd, addr, flag, td);
		else
			retval = ENODEV;

		if (req->num_args > 0) {
			ctl_copyout_args(req->num_args, req->kern_args);
			ctl_free_args(req->num_args, req->kern_args);
		}
		break;
	}
	case CTL_PORT_LIST: {
		/* Build an XML description of all ports for userland. */
		struct sbuf *sb;
		struct ctl_port *port;
		struct ctl_lun_list *list;
		struct ctl_option *opt;
		int j;
		uint32_t plun;

		list = (struct ctl_lun_list *)addr;

		/* Fixed-length sbuf; see CTL_LUN_LIST for the rationale. */
		sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN);
		if (sb == NULL) {
			list->status = CTL_LUN_LIST_ERROR;
			snprintf(list->error_str, sizeof(list->error_str),
				 "Unable to allocate %d bytes for LUN list",
				 list->alloc_len);
			break;
		}

		sbuf_printf(sb, "<ctlportlist>\n");

		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(port, &softc->port_list, links) {
			retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n",
					     (uintmax_t)port->targ_port);

			/*
			 * Bail out as soon as we see that we've overfilled
			 * the buffer.
			 */
			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "\t<frontend_type>%s"
			    "</frontend_type>\n", port->frontend->name);
			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n",
			    port->port_type);
			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "\t<online>%s</online>\n",
			    (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO");
			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n",
			    port->port_name);
			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n",
			    port->physical_port);
			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n",
			    port->virtual_port);
			if (retval != 0)
				break;

			/* Target and port identifiers, if the port has them. */
			if (port->target_devid != NULL) {
				sbuf_printf(sb, "\t<target>");
				ctl_id_sbuf(port->target_devid, sb);
				sbuf_printf(sb, "</target>\n");
			}

			if (port->port_devid != NULL) {
				sbuf_printf(sb, "\t<port>");
				ctl_id_sbuf(port->port_devid, sb);
				sbuf_printf(sb, "</port>\n");
			}

			if (port->port_info != NULL) {
				retval = port->port_info(port->onoff_arg, sb);
				if (retval != 0)
					break;
			}
			STAILQ_FOREACH(opt, &port->options, links) {
				retval = sbuf_printf(sb, "\t<%s>%s</%s>\n",
				    opt->name, opt->value, opt->name);
				if (retval != 0)
					break;
			}

			/* Report per-port LUN mapping, if one is active. */
			if (port->lun_map != NULL) {
				sbuf_printf(sb, "\t<lun_map>on</lun_map>\n");
				for (j = 0; j < CTL_MAX_LUNS; j++) {
					plun = ctl_lun_map_from_port(port, j);
					if (plun >= CTL_MAX_LUNS)
						continue;
					sbuf_printf(sb,
					    "\t<lun id=\"%u\">%u</lun>\n",
					    j, plun);
				}
			}

			for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
				if (port->wwpn_iid[j].in_use == 0 ||
				    (port->wwpn_iid[j].wwpn == 0 &&
				     port->wwpn_iid[j].name == NULL))
					continue;

				/* Prefer the symbolic name over the WWPN. */
				if (port->wwpn_iid[j].name != NULL)
					retval = sbuf_printf(sb,
					    "\t<initiator id=\"%u\">%s</initiator>\n",
					    j, port->wwpn_iid[j].name);
				else
					retval = sbuf_printf(sb,
					    "\t<initiator id=\"%u\">naa.%08jx</initiator>\n",
					    j, port->wwpn_iid[j].wwpn);
				if (retval != 0)
					break;
			}
			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "</targ_port>\n");
			if (retval != 0)
				break;
		}
		mtx_unlock(&softc->ctl_lock);

		if ((retval != 0)
		 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) {
			retval = 0;
			sbuf_delete(sb);
			list->status = CTL_LUN_LIST_NEED_MORE_SPACE;
			snprintf(list->error_str, sizeof(list->error_str),
				 "Out of space, %d bytes is too small",
				 list->alloc_len);
			break;
		}

		sbuf_finish(sb);

		/* Include the terminating NUL in the copyout. */
		retval = copyout(sbuf_data(sb), list->lun_xml,
				 sbuf_len(sb) + 1);

		list->fill_len = sbuf_len(sb) + 1;
		list->status = CTL_LUN_LIST_OK;
		sbuf_delete(sb);
		break;
	}
	case CTL_LUN_MAP: {
		/* Install, change or remove a port's LUN mapping. */
		struct ctl_lun_map *lm  = (struct ctl_lun_map *)addr;
		struct ctl_port *port;

		mtx_lock(&softc->ctl_lock);
		if (lm->port < softc->port_min ||
		    lm->port >= softc->port_max ||
		    (port = softc->ctl_ports[lm->port]) == NULL) {
			mtx_unlock(&softc->ctl_lock);
			return (ENXIO);
		}
		mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps
		/*
		 * NOTE(review): ctl_luns[lm->lun] below is read after the
		 * lock is dropped; presumably tolerated like the XXX above,
		 * but confirm the port/LUN can't go away here.
		 */
		if (lm->plun < CTL_MAX_LUNS) {
			/* UINT32_MAX as the target LUN means "unmap". */
			if (lm->lun == UINT32_MAX)
				retval = ctl_lun_map_unset(port, lm->plun);
			else if (lm->lun < CTL_MAX_LUNS &&
			    softc->ctl_luns[lm->lun] != NULL)
				retval = ctl_lun_map_set(port, lm->plun, lm->lun);
			else
				return (ENXIO);
		} else if (lm->plun == UINT32_MAX) {
			/* No port LUN given: init or tear down the whole map. */
			if (lm->lun == UINT32_MAX)
				retval = ctl_lun_map_deinit(port);
			else
				retval = ctl_lun_map_init(port);
		} else
			return (ENXIO);
		break;
	}
	default: {
		/* XXX KDM should we fix this? */
#if 0
		struct ctl_backend_driver *backend;
		unsigned int type;
		int found;

		found = 0;

		/*
		 * We encode the backend type as the ioctl type for backend
		 * ioctls.  So parse it out here, and then search for a
		 * backend of this type.
		 */
		type = _IOC_TYPE(cmd);

		STAILQ_FOREACH(backend, &softc->be_list, links) {
			if (backend->type == type) {
				found = 1;
				break;
			}
		}
		if (found == 0) {
			printf("ctl: unknown ioctl command %#lx or backend "
			       "%d\n", cmd, type);
			retval = EINVAL;
			break;
		}
		retval = backend->ioctl(dev, cmd, addr, flag, td);
#endif
		retval = ENOTTY;
		break;
	}
	}
	return (retval);
}

/*
 * Flatten a nexus (port, initiator id) pair into a single global
 * initiator index.
 */
uint32_t
ctl_get_initindex(struct ctl_nexus *nexus)
{
	return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT));
}

/*
 * Allocate a port's LUN map and initialize every slot to "unmapped"
 * (UINT32_MAX).  If the port is online, disable all LUNs on it (a fresh
 * map exposes nothing) and announce the change.  Returns 0 or ENOMEM.
 */
int
ctl_lun_map_init(struct ctl_port *port)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_lun *lun;
	uint32_t i;

	if (port->lun_map == NULL)
		port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS,
		    M_CTL, M_NOWAIT);
	if (port->lun_map == NULL)
		return (ENOMEM);
	for (i = 0; i < CTL_MAX_LUNS; i++)
		port->lun_map[i] = UINT32_MAX;
	if (port->status & CTL_PORT_STATUS_ONLINE) {
		if (port->lun_disable != NULL) {
			STAILQ_FOREACH(lun, &softc->lun_list, links)
				port->lun_disable(port->targ_lun_arg, lun->lun);
		}
		ctl_isc_announce_port(port);
	}
	return (0);
}

/*
 * Free a port's LUN map.  With no map every LUN is visible again, so if
 * the port is online re-enable all LUNs and announce the change.
 */
int
ctl_lun_map_deinit(struct ctl_port *port)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_lun *lun;

	if (port->lun_map == NULL)
		return (0);
	free(port->lun_map, M_CTL);
	port->lun_map = NULL;
	if (port->status & CTL_PORT_STATUS_ONLINE) {
		if (port->lun_enable != NULL) {
			STAILQ_FOREACH(lun, &softc->lun_list, links)
				port->lun_enable(port->targ_lun_arg, lun->lun);
		}
		ctl_isc_announce_port(port);
	}
	return (0);
}

/*
 * Map port-visible LUN "plun" to global LUN "glun" on "port", creating
 * the map on first use.  If the slot was previously unmapped and the port
 * is online, enable the LUN on the port and announce the change.
 * Returns 0 or an error from ctl_lun_map_init().
 */
int
ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun)
{
	int status;
	uint32_t old;

	if (port->lun_map == NULL) {
		status = ctl_lun_map_init(port);
		if (status != 0)
			return (status);
	}
	old = port->lun_map[plun];
	port->lun_map[plun] = glun;
	/* old >= CTL_MAX_LUNS means the slot was previously unmapped. */
	if ((port->status & CTL_PORT_STATUS_ONLINE) && old >= CTL_MAX_LUNS) {
		if (port->lun_enable != NULL)
			port->lun_enable(port->targ_lun_arg, plun);
		ctl_isc_announce_port(port);
	}
	return (0);
}

/*
 * Remove the mapping for port-visible LUN "plun".  If the slot was mapped
 * and the port is online, disable the LUN and announce the change.
 */
int
ctl_lun_map_unset(struct ctl_port *port, uint32_t plun)
{
	uint32_t old;

	if (port->lun_map == NULL)
		return (0);
	old = port->lun_map[plun];
	port->lun_map[plun] = UINT32_MAX;
	if ((port->status & CTL_PORT_STATUS_ONLINE) && old < CTL_MAX_LUNS) {
		if (port->lun_disable != NULL)
			port->lun_disable(port->targ_lun_arg, plun);
		ctl_isc_announce_port(port);
	}
	return (0);
}

/*
 * Translate a port-visible LUN id into a global LUN id.  Without a map
 * the translation is identity; UINT32_MAX means "no such LUN".
 */
uint32_t
ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id)
{

	if (port == NULL)
		return (UINT32_MAX);
	if (port->lun_map == NULL || lun_id >= CTL_MAX_LUNS)
		return (lun_id);
	return (port->lun_map[lun_id]);
}

/*
 * Translate a global LUN id into the port-visible LUN id, by linear
 * search of the map.  Identity without a map; UINT32_MAX if unmapped.
 */
uint32_t
ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id)
{
	uint32_t i;

	if (port == NULL)
		return (UINT32_MAX);
	if (port->lun_map == NULL)
		return (lun_id);
	for (i = 0; i < CTL_MAX_LUNS; i++) {
		if (port->lun_map[i] == lun_id)
			return (i);
	}
	return (UINT32_MAX);
}

/* Look up the ctl_port an I/O arrived on, from its nexus. */
static struct ctl_port *
ctl_io_port(struct ctl_io_hdr *io_hdr)
{

	return (control_softc->ctl_ports[io_hdr->nexus.targ_port]);
}

/*
 * Find the first zero bit in "mask" within [first, last); -1 if none.
 */
int
ctl_ffz(uint32_t *mask, uint32_t first, uint32_t
last) 3337{ 3338 int i; 3339 3340 for (i = first; i < last; i++) { 3341 if ((mask[i / 32] & (1 << (i % 32))) == 0) 3342 return (i); 3343 } 3344 return (-1); 3345} 3346 3347int 3348ctl_set_mask(uint32_t *mask, uint32_t bit) 3349{ 3350 uint32_t chunk, piece; 3351 3352 chunk = bit >> 5; 3353 piece = bit % (sizeof(uint32_t) * 8); 3354 3355 if ((mask[chunk] & (1 << piece)) != 0) 3356 return (-1); 3357 else 3358 mask[chunk] |= (1 << piece); 3359 3360 return (0); 3361} 3362 3363int 3364ctl_clear_mask(uint32_t *mask, uint32_t bit) 3365{ 3366 uint32_t chunk, piece; 3367 3368 chunk = bit >> 5; 3369 piece = bit % (sizeof(uint32_t) * 8); 3370 3371 if ((mask[chunk] & (1 << piece)) == 0) 3372 return (-1); 3373 else 3374 mask[chunk] &= ~(1 << piece); 3375 3376 return (0); 3377} 3378 3379int 3380ctl_is_set(uint32_t *mask, uint32_t bit) 3381{ 3382 uint32_t chunk, piece; 3383 3384 chunk = bit >> 5; 3385 piece = bit % (sizeof(uint32_t) * 8); 3386 3387 if ((mask[chunk] & (1 << piece)) == 0) 3388 return (0); 3389 else 3390 return (1); 3391} 3392 3393static uint64_t 3394ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) 3395{ 3396 uint64_t *t; 3397 3398 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3399 if (t == NULL) 3400 return (0); 3401 return (t[residx % CTL_MAX_INIT_PER_PORT]); 3402} 3403 3404static void 3405ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) 3406{ 3407 uint64_t *t; 3408 3409 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3410 if (t == NULL) 3411 return; 3412 t[residx % CTL_MAX_INIT_PER_PORT] = 0; 3413} 3414 3415static void 3416ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx) 3417{ 3418 uint64_t *p; 3419 u_int i; 3420 3421 i = residx/CTL_MAX_INIT_PER_PORT; 3422 if (lun->pr_keys[i] != NULL) 3423 return; 3424 mtx_unlock(&lun->lun_lock); 3425 p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL, 3426 M_WAITOK | M_ZERO); 3427 mtx_lock(&lun->lun_lock); 3428 if (lun->pr_keys[i] == NULL) 3429 lun->pr_keys[i] = p; 3430 else 3431 free(p, M_CTL); 3432} 3433 
3434static void 3435ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key) 3436{ 3437 uint64_t *t; 3438 3439 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3440 KASSERT(t != NULL, ("prkey %d is not allocated", residx)); 3441 t[residx % CTL_MAX_INIT_PER_PORT] = key; 3442} 3443 3444/* 3445 * ctl_softc, pool_name, total_ctl_io are passed in. 3446 * npool is passed out. 3447 */ 3448int 3449ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, 3450 uint32_t total_ctl_io, void **npool) 3451{ 3452#ifdef IO_POOLS 3453 struct ctl_io_pool *pool; 3454 3455 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3456 M_NOWAIT | M_ZERO); 3457 if (pool == NULL) 3458 return (ENOMEM); 3459 3460 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); 3461 pool->ctl_softc = ctl_softc; 3462 pool->zone = uma_zsecond_create(pool->name, NULL, 3463 NULL, NULL, NULL, ctl_softc->io_zone); 3464 /* uma_prealloc(pool->zone, total_ctl_io); */ 3465 3466 *npool = pool; 3467#else 3468 *npool = ctl_softc->io_zone; 3469#endif 3470 return (0); 3471} 3472 3473void 3474ctl_pool_free(struct ctl_io_pool *pool) 3475{ 3476 3477 if (pool == NULL) 3478 return; 3479 3480#ifdef IO_POOLS 3481 uma_zdestroy(pool->zone); 3482 free(pool, M_CTL); 3483#endif 3484} 3485 3486union ctl_io * 3487ctl_alloc_io(void *pool_ref) 3488{ 3489 union ctl_io *io; 3490#ifdef IO_POOLS 3491 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3492 3493 io = uma_zalloc(pool->zone, M_WAITOK); 3494#else 3495 io = uma_zalloc((uma_zone_t)pool_ref, M_WAITOK); 3496#endif 3497 if (io != NULL) 3498 io->io_hdr.pool = pool_ref; 3499 return (io); 3500} 3501 3502union ctl_io * 3503ctl_alloc_io_nowait(void *pool_ref) 3504{ 3505 union ctl_io *io; 3506#ifdef IO_POOLS 3507 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3508 3509 io = uma_zalloc(pool->zone, M_NOWAIT); 3510#else 3511 io = uma_zalloc((uma_zone_t)pool_ref, M_NOWAIT); 3512#endif 3513 if (io != NULL) 3514 io->io_hdr.pool = pool_ref; 
3515 return (io); 3516} 3517 3518void 3519ctl_free_io(union ctl_io *io) 3520{ 3521#ifdef IO_POOLS 3522 struct ctl_io_pool *pool; 3523#endif 3524 3525 if (io == NULL) 3526 return; 3527 3528#ifdef IO_POOLS 3529 pool = (struct ctl_io_pool *)io->io_hdr.pool; 3530 uma_zfree(pool->zone, io); 3531#else 3532 uma_zfree((uma_zone_t)io->io_hdr.pool, io); 3533#endif 3534} 3535 3536void 3537ctl_zero_io(union ctl_io *io) 3538{ 3539 void *pool_ref; 3540 3541 if (io == NULL) 3542 return; 3543 3544 /* 3545 * May need to preserve linked list pointers at some point too. 3546 */ 3547 pool_ref = io->io_hdr.pool; 3548 memset(io, 0, sizeof(*io)); 3549 io->io_hdr.pool = pool_ref; 3550} 3551 3552/* 3553 * This routine is currently used for internal copies of ctl_ios that need 3554 * to persist for some reason after we've already returned status to the 3555 * FETD. (Thus the flag set.) 3556 * 3557 * XXX XXX 3558 * Note that this makes a blind copy of all fields in the ctl_io, except 3559 * for the pool reference. This includes any memory that has been 3560 * allocated! That memory will no longer be valid after done has been 3561 * called, so this would be VERY DANGEROUS for command that actually does 3562 * any reads or writes. Right now (11/7/2005), this is only used for immediate 3563 * start and stop commands, which don't transfer any data, so this is not a 3564 * problem. If it is used for anything else, the caller would also need to 3565 * allocate data buffer space and this routine would need to be modified to 3566 * copy the data buffer(s) as well. 3567 */ 3568void 3569ctl_copy_io(union ctl_io *src, union ctl_io *dest) 3570{ 3571 void *pool_ref; 3572 3573 if ((src == NULL) 3574 || (dest == NULL)) 3575 return; 3576 3577 /* 3578 * May need to preserve linked list pointers at some point too. 
3579 */ 3580 pool_ref = dest->io_hdr.pool; 3581 3582 memcpy(dest, src, MIN(sizeof(*src), sizeof(*dest))); 3583 3584 dest->io_hdr.pool = pool_ref; 3585 /* 3586 * We need to know that this is an internal copy, and doesn't need 3587 * to get passed back to the FETD that allocated it. 3588 */ 3589 dest->io_hdr.flags |= CTL_FLAG_INT_COPY; 3590} 3591 3592int 3593ctl_expand_number(const char *buf, uint64_t *num) 3594{ 3595 char *endptr; 3596 uint64_t number; 3597 unsigned shift; 3598 3599 number = strtoq(buf, &endptr, 0); 3600 3601 switch (tolower((unsigned char)*endptr)) { 3602 case 'e': 3603 shift = 60; 3604 break; 3605 case 'p': 3606 shift = 50; 3607 break; 3608 case 't': 3609 shift = 40; 3610 break; 3611 case 'g': 3612 shift = 30; 3613 break; 3614 case 'm': 3615 shift = 20; 3616 break; 3617 case 'k': 3618 shift = 10; 3619 break; 3620 case 'b': 3621 case '\0': /* No unit. */ 3622 *num = number; 3623 return (0); 3624 default: 3625 /* Unrecognized unit. */ 3626 return (-1); 3627 } 3628 3629 if ((number << shift) >> shift != number) { 3630 /* Overflow */ 3631 return (-1); 3632 } 3633 *num = number << shift; 3634 return (0); 3635} 3636 3637 3638/* 3639 * This routine could be used in the future to load default and/or saved 3640 * mode page parameters for a particuar lun. 3641 */ 3642static int 3643ctl_init_page_index(struct ctl_lun *lun) 3644{ 3645 int i; 3646 struct ctl_page_index *page_index; 3647 const char *value; 3648 uint64_t ival; 3649 3650 memcpy(&lun->mode_pages.index, page_index_template, 3651 sizeof(page_index_template)); 3652 3653 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 3654 3655 page_index = &lun->mode_pages.index[i]; 3656 /* 3657 * If this is a disk-only mode page, there's no point in 3658 * setting it up. For some pages, we have to have some 3659 * basic information about the disk in order to calculate the 3660 * mode page data. 
3661 */ 3662 if ((lun->be_lun->lun_type != T_DIRECT) 3663 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) 3664 continue; 3665 3666 switch (page_index->page_code & SMPH_PC_MASK) { 3667 case SMS_RW_ERROR_RECOVERY_PAGE: { 3668 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3669 panic("subpage is incorrect!"); 3670 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT], 3671 &rw_er_page_default, 3672 sizeof(rw_er_page_default)); 3673 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE], 3674 &rw_er_page_changeable, 3675 sizeof(rw_er_page_changeable)); 3676 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT], 3677 &rw_er_page_default, 3678 sizeof(rw_er_page_default)); 3679 memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED], 3680 &rw_er_page_default, 3681 sizeof(rw_er_page_default)); 3682 page_index->page_data = 3683 (uint8_t *)lun->mode_pages.rw_er_page; 3684 break; 3685 } 3686 case SMS_FORMAT_DEVICE_PAGE: { 3687 struct scsi_format_page *format_page; 3688 3689 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3690 panic("subpage is incorrect!"); 3691 3692 /* 3693 * Sectors per track are set above. Bytes per 3694 * sector need to be set here on a per-LUN basis. 
3695 */ 3696 memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], 3697 &format_page_default, 3698 sizeof(format_page_default)); 3699 memcpy(&lun->mode_pages.format_page[ 3700 CTL_PAGE_CHANGEABLE], &format_page_changeable, 3701 sizeof(format_page_changeable)); 3702 memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], 3703 &format_page_default, 3704 sizeof(format_page_default)); 3705 memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], 3706 &format_page_default, 3707 sizeof(format_page_default)); 3708 3709 format_page = &lun->mode_pages.format_page[ 3710 CTL_PAGE_CURRENT]; 3711 scsi_ulto2b(lun->be_lun->blocksize, 3712 format_page->bytes_per_sector); 3713 3714 format_page = &lun->mode_pages.format_page[ 3715 CTL_PAGE_DEFAULT]; 3716 scsi_ulto2b(lun->be_lun->blocksize, 3717 format_page->bytes_per_sector); 3718 3719 format_page = &lun->mode_pages.format_page[ 3720 CTL_PAGE_SAVED]; 3721 scsi_ulto2b(lun->be_lun->blocksize, 3722 format_page->bytes_per_sector); 3723 3724 page_index->page_data = 3725 (uint8_t *)lun->mode_pages.format_page; 3726 break; 3727 } 3728 case SMS_RIGID_DISK_PAGE: { 3729 struct scsi_rigid_disk_page *rigid_disk_page; 3730 uint32_t sectors_per_cylinder; 3731 uint64_t cylinders; 3732#ifndef __XSCALE__ 3733 int shift; 3734#endif /* !__XSCALE__ */ 3735 3736 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3737 panic("invalid subpage value %d", 3738 page_index->subpage); 3739 3740 /* 3741 * Rotation rate and sectors per track are set 3742 * above. We calculate the cylinders here based on 3743 * capacity. Due to the number of heads and 3744 * sectors per track we're using, smaller arrays 3745 * may turn out to have 0 cylinders. Linux and 3746 * FreeBSD don't pay attention to these mode pages 3747 * to figure out capacity, but Solaris does. It 3748 * seems to deal with 0 cylinders just fine, and 3749 * works out a fake geometry based on the capacity. 
3750 */ 3751 memcpy(&lun->mode_pages.rigid_disk_page[ 3752 CTL_PAGE_DEFAULT], &rigid_disk_page_default, 3753 sizeof(rigid_disk_page_default)); 3754 memcpy(&lun->mode_pages.rigid_disk_page[ 3755 CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, 3756 sizeof(rigid_disk_page_changeable)); 3757 3758 sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * 3759 CTL_DEFAULT_HEADS; 3760 3761 /* 3762 * The divide method here will be more accurate, 3763 * probably, but results in floating point being 3764 * used in the kernel on i386 (__udivdi3()). On the 3765 * XScale, though, __udivdi3() is implemented in 3766 * software. 3767 * 3768 * The shift method for cylinder calculation is 3769 * accurate if sectors_per_cylinder is a power of 3770 * 2. Otherwise it might be slightly off -- you 3771 * might have a bit of a truncation problem. 3772 */ 3773#ifdef __XSCALE__ 3774 cylinders = (lun->be_lun->maxlba + 1) / 3775 sectors_per_cylinder; 3776#else 3777 for (shift = 31; shift > 0; shift--) { 3778 if (sectors_per_cylinder & (1 << shift)) 3779 break; 3780 } 3781 cylinders = (lun->be_lun->maxlba + 1) >> shift; 3782#endif 3783 3784 /* 3785 * We've basically got 3 bytes, or 24 bits for the 3786 * cylinder size in the mode page. If we're over, 3787 * just round down to 2^24. 
3788 */ 3789 if (cylinders > 0xffffff) 3790 cylinders = 0xffffff; 3791 3792 rigid_disk_page = &lun->mode_pages.rigid_disk_page[ 3793 CTL_PAGE_DEFAULT]; 3794 scsi_ulto3b(cylinders, rigid_disk_page->cylinders); 3795 3796 if ((value = ctl_get_opt(&lun->be_lun->options, 3797 "rpm")) != NULL) { 3798 scsi_ulto2b(strtol(value, NULL, 0), 3799 rigid_disk_page->rotation_rate); 3800 } 3801 3802 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT], 3803 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 3804 sizeof(rigid_disk_page_default)); 3805 memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED], 3806 &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], 3807 sizeof(rigid_disk_page_default)); 3808 3809 page_index->page_data = 3810 (uint8_t *)lun->mode_pages.rigid_disk_page; 3811 break; 3812 } 3813 case SMS_CACHING_PAGE: { 3814 struct scsi_caching_page *caching_page; 3815 3816 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3817 panic("invalid subpage value %d", 3818 page_index->subpage); 3819 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], 3820 &caching_page_default, 3821 sizeof(caching_page_default)); 3822 memcpy(&lun->mode_pages.caching_page[ 3823 CTL_PAGE_CHANGEABLE], &caching_page_changeable, 3824 sizeof(caching_page_changeable)); 3825 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], 3826 &caching_page_default, 3827 sizeof(caching_page_default)); 3828 caching_page = &lun->mode_pages.caching_page[ 3829 CTL_PAGE_SAVED]; 3830 value = ctl_get_opt(&lun->be_lun->options, "writecache"); 3831 if (value != NULL && strcmp(value, "off") == 0) 3832 caching_page->flags1 &= ~SCP_WCE; 3833 value = ctl_get_opt(&lun->be_lun->options, "readcache"); 3834 if (value != NULL && strcmp(value, "off") == 0) 3835 caching_page->flags1 |= SCP_RCD; 3836 memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], 3837 &lun->mode_pages.caching_page[CTL_PAGE_SAVED], 3838 sizeof(caching_page_default)); 3839 page_index->page_data = 3840 (uint8_t *)lun->mode_pages.caching_page; 3841 break; 
3842 } 3843 case SMS_CONTROL_MODE_PAGE: { 3844 struct scsi_control_page *control_page; 3845 3846 if (page_index->subpage != SMS_SUBPAGE_PAGE_0) 3847 panic("invalid subpage value %d", 3848 page_index->subpage); 3849 3850 memcpy(&lun->mode_pages.control_page[CTL_PAGE_DEFAULT], 3851 &control_page_default, 3852 sizeof(control_page_default)); 3853 memcpy(&lun->mode_pages.control_page[ 3854 CTL_PAGE_CHANGEABLE], &control_page_changeable, 3855 sizeof(control_page_changeable)); 3856 memcpy(&lun->mode_pages.control_page[CTL_PAGE_SAVED], 3857 &control_page_default, 3858 sizeof(control_page_default)); 3859 control_page = &lun->mode_pages.control_page[ 3860 CTL_PAGE_SAVED]; 3861 value = ctl_get_opt(&lun->be_lun->options, "reordering"); 3862 if (value != NULL && strcmp(value, "unrestricted") == 0) { 3863 control_page->queue_flags &= ~SCP_QUEUE_ALG_MASK; 3864 control_page->queue_flags |= SCP_QUEUE_ALG_UNRESTRICTED; 3865 } 3866 memcpy(&lun->mode_pages.control_page[CTL_PAGE_CURRENT], 3867 &lun->mode_pages.control_page[CTL_PAGE_SAVED], 3868 sizeof(control_page_default)); 3869 page_index->page_data = 3870 (uint8_t *)lun->mode_pages.control_page; 3871 break; 3872 3873 } 3874 case SMS_INFO_EXCEPTIONS_PAGE: { 3875 switch (page_index->subpage) { 3876 case SMS_SUBPAGE_PAGE_0: 3877 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], 3878 &ie_page_default, 3879 sizeof(ie_page_default)); 3880 memcpy(&lun->mode_pages.ie_page[ 3881 CTL_PAGE_CHANGEABLE], &ie_page_changeable, 3882 sizeof(ie_page_changeable)); 3883 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], 3884 &ie_page_default, 3885 sizeof(ie_page_default)); 3886 memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], 3887 &ie_page_default, 3888 sizeof(ie_page_default)); 3889 page_index->page_data = 3890 (uint8_t *)lun->mode_pages.ie_page; 3891 break; 3892 case 0x02: { 3893 struct ctl_logical_block_provisioning_page *page; 3894 3895 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], 3896 &lbp_page_default, 3897 sizeof(lbp_page_default)); 3898 
memcpy(&lun->mode_pages.lbp_page[ 3899 CTL_PAGE_CHANGEABLE], &lbp_page_changeable, 3900 sizeof(lbp_page_changeable)); 3901 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 3902 &lbp_page_default, 3903 sizeof(lbp_page_default)); 3904 page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; 3905 value = ctl_get_opt(&lun->be_lun->options, 3906 "avail-threshold"); 3907 if (value != NULL && 3908 ctl_expand_number(value, &ival) == 0) { 3909 page->descr[0].flags |= SLBPPD_ENABLED | 3910 SLBPPD_ARMING_DEC; 3911 if (lun->be_lun->blocksize) 3912 ival /= lun->be_lun->blocksize; 3913 else 3914 ival /= 512; 3915 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 3916 page->descr[0].count); 3917 } 3918 value = ctl_get_opt(&lun->be_lun->options, 3919 "used-threshold"); 3920 if (value != NULL && 3921 ctl_expand_number(value, &ival) == 0) { 3922 page->descr[1].flags |= SLBPPD_ENABLED | 3923 SLBPPD_ARMING_INC; 3924 if (lun->be_lun->blocksize) 3925 ival /= lun->be_lun->blocksize; 3926 else 3927 ival /= 512; 3928 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 3929 page->descr[1].count); 3930 } 3931 value = ctl_get_opt(&lun->be_lun->options, 3932 "pool-avail-threshold"); 3933 if (value != NULL && 3934 ctl_expand_number(value, &ival) == 0) { 3935 page->descr[2].flags |= SLBPPD_ENABLED | 3936 SLBPPD_ARMING_DEC; 3937 if (lun->be_lun->blocksize) 3938 ival /= lun->be_lun->blocksize; 3939 else 3940 ival /= 512; 3941 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 3942 page->descr[2].count); 3943 } 3944 value = ctl_get_opt(&lun->be_lun->options, 3945 "pool-used-threshold"); 3946 if (value != NULL && 3947 ctl_expand_number(value, &ival) == 0) { 3948 page->descr[3].flags |= SLBPPD_ENABLED | 3949 SLBPPD_ARMING_INC; 3950 if (lun->be_lun->blocksize) 3951 ival /= lun->be_lun->blocksize; 3952 else 3953 ival /= 512; 3954 scsi_ulto4b(ival >> CTL_LBP_EXPONENT, 3955 page->descr[3].count); 3956 } 3957 memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], 3958 &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], 3959 sizeof(lbp_page_default)); 3960 
page_index->page_data = 3961 (uint8_t *)lun->mode_pages.lbp_page; 3962 }} 3963 break; 3964 } 3965 case SMS_VENDOR_SPECIFIC_PAGE:{ 3966 switch (page_index->subpage) { 3967 case DBGCNF_SUBPAGE_CODE: { 3968 struct copan_debugconf_subpage *current_page, 3969 *saved_page; 3970 3971 memcpy(&lun->mode_pages.debugconf_subpage[ 3972 CTL_PAGE_CURRENT], 3973 &debugconf_page_default, 3974 sizeof(debugconf_page_default)); 3975 memcpy(&lun->mode_pages.debugconf_subpage[ 3976 CTL_PAGE_CHANGEABLE], 3977 &debugconf_page_changeable, 3978 sizeof(debugconf_page_changeable)); 3979 memcpy(&lun->mode_pages.debugconf_subpage[ 3980 CTL_PAGE_DEFAULT], 3981 &debugconf_page_default, 3982 sizeof(debugconf_page_default)); 3983 memcpy(&lun->mode_pages.debugconf_subpage[ 3984 CTL_PAGE_SAVED], 3985 &debugconf_page_default, 3986 sizeof(debugconf_page_default)); 3987 page_index->page_data = 3988 (uint8_t *)lun->mode_pages.debugconf_subpage; 3989 3990 current_page = (struct copan_debugconf_subpage *) 3991 (page_index->page_data + 3992 (page_index->page_len * 3993 CTL_PAGE_CURRENT)); 3994 saved_page = (struct copan_debugconf_subpage *) 3995 (page_index->page_data + 3996 (page_index->page_len * 3997 CTL_PAGE_SAVED)); 3998 break; 3999 } 4000 default: 4001 panic("invalid subpage value %d", 4002 page_index->subpage); 4003 break; 4004 } 4005 break; 4006 } 4007 default: 4008 panic("invalid page value %d", 4009 page_index->page_code & SMPH_PC_MASK); 4010 break; 4011 } 4012 } 4013 4014 return (CTL_RETVAL_COMPLETE); 4015} 4016 4017static int 4018ctl_init_log_page_index(struct ctl_lun *lun) 4019{ 4020 struct ctl_page_index *page_index; 4021 int i, j, k, prev; 4022 4023 memcpy(&lun->log_pages.index, log_page_index_template, 4024 sizeof(log_page_index_template)); 4025 4026 prev = -1; 4027 for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) { 4028 4029 page_index = &lun->log_pages.index[i]; 4030 /* 4031 * If this is a disk-only mode page, there's no point in 4032 * setting it up. 
For some pages, we have to have some 4033 * basic information about the disk in order to calculate the 4034 * mode page data. 4035 */ 4036 if ((lun->be_lun->lun_type != T_DIRECT) 4037 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) 4038 continue; 4039 4040 if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING && 4041 lun->backend->lun_attr == NULL) 4042 continue; 4043 4044 if (page_index->page_code != prev) { 4045 lun->log_pages.pages_page[j] = page_index->page_code; 4046 prev = page_index->page_code; 4047 j++; 4048 } 4049 lun->log_pages.subpages_page[k*2] = page_index->page_code; 4050 lun->log_pages.subpages_page[k*2+1] = page_index->subpage; 4051 k++; 4052 } 4053 lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0]; 4054 lun->log_pages.index[0].page_len = j; 4055 lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0]; 4056 lun->log_pages.index[1].page_len = k * 2; 4057 lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0]; 4058 lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS; 4059 lun->log_pages.index[3].page_data = (uint8_t *)&lun->log_pages.stat_page; 4060 lun->log_pages.index[3].page_len = sizeof(lun->log_pages.stat_page); 4061 4062 return (CTL_RETVAL_COMPLETE); 4063} 4064 4065static int 4066hex2bin(const char *str, uint8_t *buf, int buf_size) 4067{ 4068 int i; 4069 u_char c; 4070 4071 memset(buf, 0, buf_size); 4072 while (isspace(str[0])) 4073 str++; 4074 if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) 4075 str += 2; 4076 buf_size *= 2; 4077 for (i = 0; str[i] != 0 && i < buf_size; i++) { 4078 c = str[i]; 4079 if (isdigit(c)) 4080 c -= '0'; 4081 else if (isalpha(c)) 4082 c -= isupper(c) ? 'A' - 10 : 'a' - 10; 4083 else 4084 break; 4085 if (c >= 16) 4086 break; 4087 if ((i & 1) == 0) 4088 buf[i / 2] |= (c << 4); 4089 else 4090 buf[i / 2] |= c; 4091 } 4092 return ((i + 1) / 2); 4093} 4094 4095/* 4096 * LUN allocation. 
4097 * 4098 * Requirements: 4099 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he 4100 * wants us to allocate the LUN and he can block. 4101 * - ctl_softc is always set 4102 * - be_lun is set if the LUN has a backend (needed for disk LUNs) 4103 * 4104 * Returns 0 for success, non-zero (errno) for failure. 4105 */ 4106static int 4107ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun, 4108 struct ctl_be_lun *const be_lun) 4109{ 4110 struct ctl_lun *nlun, *lun; 4111 struct scsi_vpd_id_descriptor *desc; 4112 struct scsi_vpd_id_t10 *t10id; 4113 const char *eui, *naa, *scsiname, *vendor; 4114 int lun_number, i, lun_malloced; 4115 int devidlen, idlen1, idlen2 = 0, len; 4116 4117 if (be_lun == NULL) 4118 return (EINVAL); 4119 4120 /* 4121 * We currently only support Direct Access or Processor LUN types. 4122 */ 4123 switch (be_lun->lun_type) { 4124 case T_DIRECT: 4125 break; 4126 case T_PROCESSOR: 4127 break; 4128 case T_SEQUENTIAL: 4129 case T_CHANGER: 4130 default: 4131 be_lun->lun_config_status(be_lun->be_lun, 4132 CTL_LUN_CONFIG_FAILURE); 4133 break; 4134 } 4135 if (ctl_lun == NULL) { 4136 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK); 4137 lun_malloced = 1; 4138 } else { 4139 lun_malloced = 0; 4140 lun = ctl_lun; 4141 } 4142 4143 memset(lun, 0, sizeof(*lun)); 4144 if (lun_malloced) 4145 lun->flags = CTL_LUN_MALLOCED; 4146 4147 /* Generate LUN ID. 
*/ 4148 devidlen = max(CTL_DEVID_MIN_LEN, 4149 strnlen(be_lun->device_id, CTL_DEVID_LEN)); 4150 idlen1 = sizeof(*t10id) + devidlen; 4151 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; 4152 scsiname = ctl_get_opt(&be_lun->options, "scsiname"); 4153 if (scsiname != NULL) { 4154 idlen2 = roundup2(strlen(scsiname) + 1, 4); 4155 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; 4156 } 4157 eui = ctl_get_opt(&be_lun->options, "eui"); 4158 if (eui != NULL) { 4159 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4160 } 4161 naa = ctl_get_opt(&be_lun->options, "naa"); 4162 if (naa != NULL) { 4163 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4164 } 4165 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, 4166 M_CTL, M_WAITOK | M_ZERO); 4167 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; 4168 desc->proto_codeset = SVPD_ID_CODESET_ASCII; 4169 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 4170 desc->length = idlen1; 4171 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 4172 memset(t10id->vendor, ' ', sizeof(t10id->vendor)); 4173 if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) { 4174 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 4175 } else { 4176 strncpy(t10id->vendor, vendor, 4177 min(sizeof(t10id->vendor), strlen(vendor))); 4178 } 4179 strncpy((char *)t10id->vendor_spec_id, 4180 (char *)be_lun->device_id, devidlen); 4181 if (scsiname != NULL) { 4182 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4183 desc->length); 4184 desc->proto_codeset = SVPD_ID_CODESET_UTF8; 4185 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4186 SVPD_ID_TYPE_SCSI_NAME; 4187 desc->length = idlen2; 4188 strlcpy(desc->identifier, scsiname, idlen2); 4189 } 4190 if (eui != NULL) { 4191 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4192 desc->length); 4193 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4194 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4195 
SVPD_ID_TYPE_EUI64; 4196 desc->length = hex2bin(eui, desc->identifier, 16); 4197 desc->length = desc->length > 12 ? 16 : 4198 (desc->length > 8 ? 12 : 8); 4199 len -= 16 - desc->length; 4200 } 4201 if (naa != NULL) { 4202 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4203 desc->length); 4204 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4205 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4206 SVPD_ID_TYPE_NAA; 4207 desc->length = hex2bin(naa, desc->identifier, 16); 4208 desc->length = desc->length > 8 ? 16 : 8; 4209 len -= 16 - desc->length; 4210 } 4211 lun->lun_devid->len = len; 4212 4213 mtx_lock(&ctl_softc->ctl_lock); 4214 /* 4215 * See if the caller requested a particular LUN number. If so, see 4216 * if it is available. Otherwise, allocate the first available LUN. 4217 */ 4218 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4219 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) 4220 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4221 mtx_unlock(&ctl_softc->ctl_lock); 4222 if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) { 4223 printf("ctl: requested LUN ID %d is higher " 4224 "than CTL_MAX_LUNS - 1 (%d)\n", 4225 be_lun->req_lun_id, CTL_MAX_LUNS - 1); 4226 } else { 4227 /* 4228 * XXX KDM return an error, or just assign 4229 * another LUN ID in this case?? 
4230 */ 4231 printf("ctl: requested LUN ID %d is already " 4232 "in use\n", be_lun->req_lun_id); 4233 } 4234 if (lun->flags & CTL_LUN_MALLOCED) 4235 free(lun, M_CTL); 4236 be_lun->lun_config_status(be_lun->be_lun, 4237 CTL_LUN_CONFIG_FAILURE); 4238 return (ENOSPC); 4239 } 4240 lun_number = be_lun->req_lun_id; 4241 } else { 4242 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, CTL_MAX_LUNS); 4243 if (lun_number == -1) { 4244 mtx_unlock(&ctl_softc->ctl_lock); 4245 printf("ctl: can't allocate LUN, out of LUNs\n"); 4246 if (lun->flags & CTL_LUN_MALLOCED) 4247 free(lun, M_CTL); 4248 be_lun->lun_config_status(be_lun->be_lun, 4249 CTL_LUN_CONFIG_FAILURE); 4250 return (ENOSPC); 4251 } 4252 } 4253 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4254 4255 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4256 lun->lun = lun_number; 4257 lun->be_lun = be_lun; 4258 /* 4259 * The processor LUN is always enabled. Disk LUNs come on line 4260 * disabled, and must be enabled by the backend. 4261 */ 4262 lun->flags |= CTL_LUN_DISABLED; 4263 lun->backend = be_lun->be; 4264 be_lun->ctl_lun = lun; 4265 be_lun->lun_id = lun_number; 4266 atomic_add_int(&be_lun->be->num_luns, 1); 4267 if (be_lun->flags & CTL_LUN_FLAG_OFFLINE) 4268 lun->flags |= CTL_LUN_OFFLINE; 4269 4270 if (be_lun->flags & CTL_LUN_FLAG_POWERED_OFF) 4271 lun->flags |= CTL_LUN_STOPPED; 4272 4273 if (be_lun->flags & CTL_LUN_FLAG_INOPERABLE) 4274 lun->flags |= CTL_LUN_INOPERABLE; 4275 4276 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4277 lun->flags |= CTL_LUN_PRIMARY_SC; 4278 4279 lun->ctl_softc = ctl_softc; 4280#ifdef CTL_TIME_IO 4281 lun->last_busy = getsbinuptime(); 4282#endif 4283 TAILQ_INIT(&lun->ooa_queue); 4284 TAILQ_INIT(&lun->blocked_queue); 4285 STAILQ_INIT(&lun->error_list); 4286 ctl_tpc_lun_init(lun); 4287 4288 /* 4289 * Initialize the mode and log page index. 
4290 */ 4291 ctl_init_page_index(lun); 4292 ctl_init_log_page_index(lun); 4293 4294 /* 4295 * Now, before we insert this lun on the lun list, set the lun 4296 * inventory changed UA for all other luns. 4297 */ 4298 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4299 mtx_lock(&nlun->lun_lock); 4300 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4301 mtx_unlock(&nlun->lun_lock); 4302 } 4303 4304 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4305 4306 ctl_softc->ctl_luns[lun_number] = lun; 4307 4308 ctl_softc->num_luns++; 4309 4310 /* Setup statistics gathering */ 4311 lun->stats.device_type = be_lun->lun_type; 4312 lun->stats.lun_number = lun_number; 4313 if (lun->stats.device_type == T_DIRECT) 4314 lun->stats.blocksize = be_lun->blocksize; 4315 else 4316 lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE; 4317 for (i = 0;i < CTL_MAX_PORTS;i++) 4318 lun->stats.ports[i].targ_port = i; 4319 4320 mtx_unlock(&ctl_softc->ctl_lock); 4321 4322 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); 4323 return (0); 4324} 4325 4326/* 4327 * Delete a LUN. 4328 * Assumptions: 4329 * - LUN has already been marked invalid and any pending I/O has been taken 4330 * care of. 4331 */ 4332static int 4333ctl_free_lun(struct ctl_lun *lun) 4334{ 4335 struct ctl_softc *softc; 4336 struct ctl_lun *nlun; 4337 int i; 4338 4339 softc = lun->ctl_softc; 4340 4341 mtx_assert(&softc->ctl_lock, MA_OWNED); 4342 4343 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4344 4345 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4346 4347 softc->ctl_luns[lun->lun] = NULL; 4348 4349 if (!TAILQ_EMPTY(&lun->ooa_queue)) 4350 panic("Freeing a LUN %p with outstanding I/O!!\n", lun); 4351 4352 softc->num_luns--; 4353 4354 /* 4355 * Tell the backend to free resources, if this LUN has a backend. 
 */
	atomic_subtract_int(&lun->be_lun->be->num_luns, 1);
	lun->be_lun->lun_shutdown(lun->be_lun->be_lun);

	ctl_tpc_lun_shutdown(lun);
	mtx_destroy(&lun->lun_lock);
	free(lun->lun_devid, M_CTL);
	for (i = 0; i < CTL_MAX_PORTS; i++)
		free(lun->pending_ua[i], M_CTL);
	for (i = 0; i < CTL_MAX_PORTS; i++)
		free(lun->pr_keys[i], M_CTL);
	free(lun->write_buffer, M_CTL);
	if (lun->flags & CTL_LUN_MALLOCED)
		free(lun, M_CTL);

	/* Report the changed LUN inventory to initiators on every other LUN. */
	STAILQ_FOREACH(nlun, &softc->lun_list, links) {
		mtx_lock(&nlun->lun_lock);
		ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE);
		mtx_unlock(&nlun->lun_lock);
	}

	return (0);
}

/*
 * Instantiate a LUN for the given backend LUN description.  All of the
 * real work (and all failure handling) is delegated to ctl_alloc_lun().
 */
static void
ctl_create_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_softc *softc;

	softc = control_softc;

	/*
	 * ctl_alloc_lun() should handle all potential failure cases.
	 */
	ctl_alloc_lun(softc, NULL, be_lun);
}

/*
 * Backend entry point: queue a new LUN for creation and wake the thread
 * that services the pending LUN queue.  Always returns 0.
 */
int
ctl_add_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_softc *softc = control_softc;

	mtx_lock(&softc->ctl_lock);
	STAILQ_INSERT_TAIL(&softc->pending_lun_queue, be_lun, links);
	mtx_unlock(&softc->ctl_lock);
	wakeup(&softc->pending_lun_queue);

	return (0);
}

/*
 * Clear the DISABLED flag on a LUN and ask every online frontend port
 * (that doesn't use a LUN map) to enable it.  Per-port enable errors are
 * logged but do not fail the call.  Always returns 0.
 */
int
ctl_enable_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_softc *softc;
	struct ctl_port *port, *nport;
	struct ctl_lun *lun;
	int retval;

	lun = (struct ctl_lun *)be_lun->ctl_lun;
	softc = lun->ctl_softc;

	mtx_lock(&softc->ctl_lock);
	mtx_lock(&lun->lun_lock);
	if ((lun->flags & CTL_LUN_DISABLED) == 0) {
		/*
		 * eh?  Why did we get called if the LUN is already
		 * enabled?
		 */
		mtx_unlock(&lun->lun_lock);
		mtx_unlock(&softc->ctl_lock);
		return (0);
	}
	lun->flags &= ~CTL_LUN_DISABLED;
	mtx_unlock(&lun->lun_lock);

	/*
	 * Walk with an explicit "next" pointer since the lock is dropped
	 * inside the loop and the current element could otherwise go away.
	 */
	for (port = STAILQ_FIRST(&softc->port_list); port != NULL; port = nport) {
		nport = STAILQ_NEXT(port, links);
		if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 ||
		    port->lun_map != NULL || port->lun_enable == NULL)
			continue;

		/*
		 * Drop the lock while we call the FETD's enable routine.
		 * This can lead to a callback into CTL (at least in the
		 * case of the internal initiator frontend.
		 */
		mtx_unlock(&softc->ctl_lock);
		retval = port->lun_enable(port->targ_lun_arg, lun->lun);
		mtx_lock(&softc->ctl_lock);
		if (retval != 0) {
			printf("%s: FETD %s port %d returned error "
			       "%d for lun_enable on lun %jd\n",
			       __func__, port->port_name, port->targ_port,
			       retval, (intmax_t)lun->lun);
		}
	}

	mtx_unlock(&softc->ctl_lock);
	ctl_isc_announce_lun(lun);

	return (0);
}

/*
 * Set the DISABLED flag on a LUN and ask every online frontend port
 * (that doesn't use a LUN map) to disable it.  Per-port disable errors
 * are logged but do not fail the call.  Always returns 0.
 */
int
ctl_disable_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_softc *softc;
	struct ctl_port *port;
	struct ctl_lun *lun;
	int retval;

	lun = (struct ctl_lun *)be_lun->ctl_lun;
	softc = lun->ctl_softc;

	mtx_lock(&softc->ctl_lock);
	mtx_lock(&lun->lun_lock);
	if (lun->flags & CTL_LUN_DISABLED) {
		/* Already disabled; nothing to do. */
		mtx_unlock(&lun->lun_lock);
		mtx_unlock(&softc->ctl_lock);
		return (0);
	}
	lun->flags |= CTL_LUN_DISABLED;
	mtx_unlock(&lun->lun_lock);

	STAILQ_FOREACH(port, &softc->port_list, links) {
		if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 ||
		    port->lun_map != NULL || port->lun_disable == NULL)
			continue;

		/*
		 * Drop the lock before we call the frontend's disable
		 * routine, to avoid lock order reversals.
		 *
		 * XXX KDM what happens if the frontend list changes while
		 * we're traversing it?  It's unlikely, but should be handled.
		 */
		mtx_unlock(&softc->ctl_lock);
		retval = port->lun_disable(port->targ_lun_arg, lun->lun);
		mtx_lock(&softc->ctl_lock);
		if (retval != 0) {
			printf("%s: FETD %s port %d returned error "
			       "%d for lun_disable on lun %jd\n",
			       __func__, port->port_name, port->targ_port,
			       retval, (intmax_t)lun->lun);
		}
	}

	mtx_unlock(&softc->ctl_lock);
	ctl_isc_announce_lun(lun);

	return (0);
}

/* Clear the STOPPED flag (backend reports the LUN as started). */
int
ctl_start_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags &= ~CTL_LUN_STOPPED;
	mtx_unlock(&lun->lun_lock);
	return (0);
}

/* Set the STOPPED flag (backend reports the LUN as stopped). */
int
ctl_stop_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags |= CTL_LUN_STOPPED;
	mtx_unlock(&lun->lun_lock);
	return (0);
}

/* Mark the LUN offline on behalf of the backend. */
int
ctl_lun_offline(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags |= CTL_LUN_OFFLINE;
	mtx_unlock(&lun->lun_lock);
	return (0);
}

/* Mark the LUN online on behalf of the backend. */
int
ctl_lun_online(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags &= ~CTL_LUN_OFFLINE;
	mtx_unlock(&lun->lun_lock);
	return (0);
}

/*
 * Make this HA node the primary service controller for the LUN.
 * Initiators are told via an asymmetric-access-change unit attention,
 * and the peer node is notified.
 */
int
ctl_lun_primary(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags |= CTL_LUN_PRIMARY_SC;
	ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
	mtx_unlock(&lun->lun_lock);
	ctl_isc_announce_lun(lun);
	return (0);
}

/*
 * Make this HA node a secondary service controller for the LUN; the
 * converse of ctl_lun_primary() above.
 */
int
ctl_lun_secondary(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags &= ~CTL_LUN_PRIMARY_SC;
	ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
	mtx_unlock(&lun->lun_lock);
	ctl_isc_announce_lun(lun);
	return (0);
}

/*
 * Mark a LUN invalid, freeing it immediately when it is idle.  The LUN
 * must already be disabled; returns -1 if it isn't, 0 otherwise.
 */
int
ctl_invalidate_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_softc *softc;
	struct ctl_lun *lun;

	lun = (struct ctl_lun *)be_lun->ctl_lun;
	softc = lun->ctl_softc;

	mtx_lock(&lun->lun_lock);

	/*
	 * The LUN needs to be disabled before it can be marked invalid.
	 */
	if ((lun->flags & CTL_LUN_DISABLED) == 0) {
		mtx_unlock(&lun->lun_lock);
		return (-1);
	}
	/*
	 * Mark the LUN invalid.
	 */
	lun->flags |= CTL_LUN_INVALID;

	/*
	 * If there is nothing in the OOA queue, go ahead and free the LUN.
	 * If we have something in the OOA queue, we'll free it when the
	 * last I/O completes.
	 */
	if (TAILQ_EMPTY(&lun->ooa_queue)) {
		mtx_unlock(&lun->lun_lock);
		mtx_lock(&softc->ctl_lock);
		ctl_free_lun(lun);
		mtx_unlock(&softc->ctl_lock);
	} else
		mtx_unlock(&lun->lun_lock);

	return (0);
}

/* Set the INOPERABLE flag on behalf of the backend. */
int
ctl_lun_inoperable(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags |= CTL_LUN_INOPERABLE;
	mtx_unlock(&lun->lun_lock);
	return (0);
}

/* Clear the INOPERABLE flag on behalf of the backend. */
int
ctl_lun_operable(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags &= ~CTL_LUN_INOPERABLE;
	mtx_unlock(&lun->lun_lock);
	return (0);
}

/*
 * Backend notification that the LUN's capacity changed: establish a
 * CAPACITY DATA HAS CHANGED unit attention for all initiators, and in
 * XFER HA mode forward the same UA to the peer node.
 */
void
ctl_lun_capacity_changed(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
	union ctl_ha_msg msg;

	mtx_lock(&lun->lun_lock);
	ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGED);
	mtx_unlock(&lun->lun_lock);
	if (lun->ctl_softc->ha_mode
	    == CTL_HA_MODE_XFER) {
		/* Send msg to other side. */
		bzero(&msg.ua, sizeof(msg.ua));
		msg.hdr.msg_type = CTL_MSG_UA;
		msg.hdr.nexus.initid = -1;
		msg.hdr.nexus.targ_port = -1;
		msg.hdr.nexus.targ_lun = lun->lun;
		msg.hdr.nexus.targ_mapped_lun = lun->lun;
		msg.ua.ua_all = 1;
		msg.ua.ua_set = 1;
		msg.ua.ua_type = CTL_UA_CAPACITY_CHANGED;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua),
		    M_WAITOK);
	}
}

/*
 * Backend "memory move is complete" callback for requests that never
 * make it down to say RAIDCore's configuration code.
 */
int
ctl_config_move_done(union ctl_io *io)
{
	int retval;

	CTL_DEBUG_PRINT(("ctl_config_move_done\n"));
	KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
	    ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type));

	/*
	 * A frontend transport error on an otherwise-successful I/O is
	 * reported as an internal failure, stashing the FETD's error code
	 * in the sense-key-specific field.
	 */
	if ((io->io_hdr.port_status != 0) &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
		/*
		 * For hardware error sense keys, the sense key
		 * specific value is defined to be a retry count,
		 * but we use it to pass back an internal FETD
		 * error code.  XXX KDM Hopefully the FETD is only
		 * using 16 bits for an error code, since that's
		 * all the space we have in the sks field.
		 */
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/
					 io->io_hdr.port_status);
	}

	if (ctl_debug & CTL_DEBUG_CDB_DATA)
		ctl_data_print(io);
	/*
	 * Finish now if the data went initiator-ward (a read), the I/O
	 * already failed, or it was aborted; otherwise this was a config
	 * write whose data just arrived, so keep processing.
	 */
	if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) ||
	    ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
	     (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) ||
	    ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) {
		/*
		 * XXX KDM just assuming a single pointer here, and not a
		 * S/G list.  If we start using S/G lists for config data,
		 * we'll need to know how to clean them up here as well.
		 */
		if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
			free(io->scsiio.kern_data_ptr, M_CTL);
		ctl_done(io);
		retval = CTL_RETVAL_COMPLETE;
	} else {
		/*
		 * XXX KDM now we need to continue data movement.  Some
		 * options:
		 * - call ctl_scsiio() again?  We don't do this for data
		 *   writes, because for those at least we know ahead of
		 *   time where the write will go and how long it is.  For
		 *   config writes, though, that information is largely
		 *   contained within the write itself, thus we need to
		 *   parse out the data again.
		 *
		 * - Call some other function once the data is in?
		 */

		/*
		 * XXX KDM call ctl_scsiio() again for now, and check flag
		 * bits to see whether we're allocated or not.
		 */
		retval = ctl_scsiio(&io->scsiio);
	}
	return (retval);
}

/*
 * This gets called by a backend driver when it is done with a
 * data_submit method.
 */
void
ctl_data_submit_done(union ctl_io *io)
{
	/*
	 * If the IO_CONT flag is set, we need to call the supplied
	 * function to continue processing the I/O, instead of completing
	 * the I/O just yet.
	 *
	 * If there is an error, though, we don't want to keep processing.
	 * Instead, just send status back to the initiator.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) &&
	    (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
		io->scsiio.io_cont(io);
		return;
	}
	ctl_done(io);
}

/*
 * This gets called by a backend driver when it is done with a
 * configuration write.
 */
void
ctl_config_write_done(union ctl_io *io)
{
	uint8_t *buf;

	/*
	 * If the IO_CONT flag is set, we need to call the supplied
	 * function to continue processing the I/O, instead of completing
	 * the I/O just yet.
	 *
	 * If there is an error, though, we don't want to keep processing.
	 * Instead, just send status back to the initiator.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) &&
	    (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
		io->scsiio.io_cont(io);
		return;
	}
	/*
	 * Since a configuration write can be done for commands that actually
	 * have data allocated, like write buffer, and commands that have
	 * no data, like start/stop unit, we need to check here.
	 * Save the pointer before ctl_done() so the buffer is freed only
	 * after the I/O has been completed.
	 */
	if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
		buf = io->scsiio.kern_data_ptr;
	else
		buf = NULL;
	ctl_done(io);
	if (buf)
		free(buf, M_CTL);
}

/*
 * Backend callback for a completed configuration read: on error free any
 * allocated buffer and finish; otherwise either continue via io_cont or
 * move the data to the initiator.
 */
void
ctl_config_read_done(union ctl_io *io)
{
	uint8_t *buf;

	/*
	 * If there is some error -- we are done, skip data transfer.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 ||
	    ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
	     (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) {
		if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
			buf = io->scsiio.kern_data_ptr;
		else
			buf = NULL;
		ctl_done(io);
		if (buf)
			free(buf, M_CTL);
		return;
	}

	/*
	 * If the IO_CONT flag is set, we need to call the supplied
	 * function to continue processing the I/O, instead of completing
	 * the I/O just yet.
	 */
	if (io->io_hdr.flags & CTL_FLAG_IO_CONT) {
		io->scsiio.io_cont(io);
		return;
	}

	ctl_datamove(io);
}

/*
 * SCSI release command.
 */
int
ctl_scsi_release(struct ctl_scsiio *ctsio)
{
	int length, longid, thirdparty_id, resv_id;
	struct ctl_lun *lun;
	uint32_t residx;

	length = 0;
	resv_id = 0;

	CTL_DEBUG_PRINT(("ctl_scsi_release\n"));

	residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	switch (ctsio->cdb[0]) {
	case RELEASE_10: {
		struct scsi_release_10 *cdb;

		cdb = (struct scsi_release_10 *)ctsio->cdb;

		/*
		 * NOTE(review): longid/thirdparty_id are decoded here but
		 * never consulted afterwards -- only plain LUN reservations
		 * are supported below.
		 */
		if (cdb->byte2 & SR10_LONGID)
			longid = 1;
		else
			thirdparty_id = cdb->thirdparty_id;

		resv_id = cdb->resv_id;
		length = scsi_2btoul(cdb->length);
		break;
	}
	}


	/*
	 * XXX KDM right now, we only support LUN reservation.  We don't
	 * support 3rd party reservations, or extent reservations, which
	 * might actually need the parameter list.  If we've gotten this
	 * far, we've got a LUN reservation.  Anything else got kicked out
	 * above.  So, according to SPC, ignore the length.
	 */
	length = 0;

	/*
	 * NOTE(review): length was just forced to 0, so the two
	 * "length > 0" paths below (parameter-list fetch and third-party
	 * id decode) are currently dead code, retained against future
	 * extent/3rd-party support.
	 */
	if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
	 && (length > 0)) {
		ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
		ctsio->kern_data_len = length;
		ctsio->kern_total_len = length;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	if (length > 0)
		thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);

	mtx_lock(&lun->lun_lock);

	/*
	 * According to SPC, it is not an error for an intiator to attempt
	 * to release a reservation on a LUN that isn't reserved, or that
	 * is reserved by another initiator.  The reservation can only be
	 * released, though, by the initiator who made it or by one of
	 * several reset type events.
	 */
	if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx))
		lun->flags &= ~CTL_LUN_RESERVED;

	mtx_unlock(&lun->lun_lock);

	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}

	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * SCSI reserve command: establish a (LUN-scope only) SPC-2 reservation
 * for the requesting initiator, or return RESERVATION CONFLICT if some
 * other initiator already holds one.
 */
int
ctl_scsi_reserve(struct ctl_scsiio *ctsio)
{
	int extent, thirdparty, longid;
	int resv_id, length;
	uint64_t thirdparty_id;
	struct ctl_lun *lun;
	uint32_t residx;

	extent = 0;
	thirdparty = 0;
	longid = 0;
	resv_id = 0;
	length = 0;
	thirdparty_id = 0;

	CTL_DEBUG_PRINT(("ctl_reserve\n"));

	residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	switch (ctsio->cdb[0]) {
	case RESERVE_10: {
		struct scsi_reserve_10 *cdb;

		cdb = (struct scsi_reserve_10 *)ctsio->cdb;

		if (cdb->byte2 & SR10_LONGID)
			longid = 1;
		else
			thirdparty_id = cdb->thirdparty_id;

		resv_id = cdb->resv_id;
		length = scsi_2btoul(cdb->length);
		break;
	}
	}

	/*
	 * XXX KDM right now, we only support LUN reservation.  We don't
	 * support 3rd party reservations, or extent reservations, which
	 * might actually need the parameter list.  If we've gotten this
	 * far, we've got a LUN reservation.  Anything else got kicked out
	 * above.  So, according to SPC, ignore the length.
	 */
	length = 0;

	/*
	 * NOTE(review): as in ctl_scsi_release(), length is forced to 0
	 * above, so both "length > 0" paths below are currently dead code.
	 */
	if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
	 && (length > 0)) {
		ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
		ctsio->kern_data_len = length;
		ctsio->kern_total_len = length;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	if (length > 0)
		thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);

	mtx_lock(&lun->lun_lock);
	if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) {
		ctl_set_reservation_conflict(ctsio);
		goto bailout;
	}

	lun->flags |= CTL_LUN_RESERVED;
	lun->res_idx = residx;

	ctl_set_success(ctsio);

bailout:
	mtx_unlock(&lun->lun_lock);

	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}

	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * SCSI START STOP UNIT: dispatch start/stop requests to the backend,
 * optionally completing immediately (SSS_IMMED) on a copy of the I/O.
 */
int
ctl_start_stop(struct ctl_scsiio *ctsio)
{
	struct scsi_start_stop_unit *cdb;
	struct ctl_lun *lun;
	int retval;

	CTL_DEBUG_PRINT(("ctl_start_stop\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	retval = 0;

	cdb = (struct scsi_start_stop_unit *)ctsio->cdb;

	/*
	 * XXX KDM
	 * We don't support the immediate bit on a stop unit.  In order to
	 * do that, we would need to code up a way to know that a stop is
	 * pending, and hold off any new commands until it completes, one
	 * way or another.  Then we could accept or reject those commands
	 * depending on its status.
	 * We would almost need to do the reverse
	 * of what we do below for an immediate start -- return the copy of
	 * the ctl_io to the FETD with status to send to the host (and to
	 * free the copy!) and then free the original I/O once the stop
	 * actually completes.  That way, the OOA queue mechanism can work
	 * to block commands that shouldn't proceed.  Another alternative
	 * would be to put the copy in the queue in place of the original,
	 * and return the original back to the caller.  That could be
	 * slightly safer..
	 */
	if ((cdb->byte2 & SSS_IMMED)
	 && ((cdb->how & SSS_START) == 0)) {
		/* Reject IMMED on a stop: see the comment above. */
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 1,
				      /*bit_valid*/ 1,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * A stop conflicts with a persistent reservation unless this
	 * initiator holds a key and the reservation type permits it.
	 */
	if ((lun->flags & CTL_LUN_PR_RESERVED)
	 && ((cdb->how & SSS_START)==0)) {
		uint32_t residx;

		residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
		if (ctl_get_prkey(lun, residx) == 0
		 || (lun->pr_res_idx!=residx && lun->res_type < 4)) {

			ctl_set_reservation_conflict(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
	}

	/*
	 * If there is no backend on this device, we can't start or stop
	 * it.  In theory we shouldn't get any start/stop commands in the
	 * first place at this level if the LUN doesn't have a backend.
	 * That should get stopped by the command decode code.
	 */
	if (lun->backend == NULL) {
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * XXX KDM Copan-specific offline behavior.
	 * Figure out a reasonable way to port this?
	 */
#ifdef NEEDTOPORT
	mtx_lock(&lun->lun_lock);

	if (((cdb->byte2 & SSS_ONOFFLINE) == 0)
	 && (lun->flags & CTL_LUN_OFFLINE)) {
		/*
		 * If the LUN is offline, and the on/offline bit isn't set,
		 * reject the start or stop.  Otherwise, let it through.
		 */
		mtx_unlock(&lun->lun_lock);
		ctl_set_lun_not_ready(ctsio);
		ctl_done((union ctl_io *)ctsio);
	} else {
		mtx_unlock(&lun->lun_lock);
#endif /* NEEDTOPORT */
		/*
		 * This could be a start or a stop when we're online,
		 * or a stop/offline or start/online.  A start or stop when
		 * we're offline is covered in the case above.
		 */
		/*
		 * In the non-immediate case, we send the request to
		 * the backend and return status to the user when
		 * it is done.
		 *
		 * In the immediate case, we allocate a new ctl_io
		 * to hold a copy of the request, and send that to
		 * the backend.  We then set good status on the
		 * user's request and return it immediately.
		 */
		if (cdb->byte2 & SSS_IMMED) {
			union ctl_io *new_io;

			new_io = ctl_alloc_io(ctsio->io_hdr.pool);
			ctl_copy_io((union ctl_io *)ctsio, new_io);
			retval = lun->backend->config_write(new_io);
			ctl_set_success(ctsio);
			ctl_done((union ctl_io *)ctsio);
		} else {
			retval = lun->backend->config_write(
				(union ctl_io *)ctsio);
		}
#ifdef NEEDTOPORT
	}
#endif
	return (retval);
}

/*
 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but
 * we don't really do anything with the LBA and length fields if the user
 * passes them in.  Instead we'll just flush out the cache for the entire
 * LUN.
 */
int
ctl_sync_cache(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct ctl_softc *softc;
	struct ctl_lba_len_flags *lbalen;
	uint64_t starting_lba;
	uint32_t block_count;
	int retval;
	uint8_t byte2;

	CTL_DEBUG_PRINT(("ctl_sync_cache\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	softc = lun->ctl_softc;
	retval = 0;

	/* Decode the 10- or 16-byte CDB variant. */
	switch (ctsio->cdb[0]) {
	case SYNCHRONIZE_CACHE: {
		struct scsi_sync_cache *cdb;
		cdb = (struct scsi_sync_cache *)ctsio->cdb;

		starting_lba = scsi_4btoul(cdb->begin_lba);
		block_count = scsi_2btoul(cdb->lb_count);
		byte2 = cdb->byte2;
		break;
	}
	case SYNCHRONIZE_CACHE_16: {
		struct scsi_sync_cache_16 *cdb;
		cdb = (struct scsi_sync_cache_16 *)ctsio->cdb;

		starting_lba = scsi_8btou64(cdb->begin_lba);
		block_count = scsi_4btoul(cdb->lb_count);
		byte2 = cdb->byte2;
		break;
	}
	default:
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		goto bailout;
		break; /* NOTREACHED */
	}

	/*
	 * We check the LBA and length, but don't do anything with them.
	 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to
	 * get flushed.  This check will just help satisfy anyone who wants
	 * to see an error for an out of range LBA.
	 */
	if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) {
		ctl_set_lba_out_of_range(ctsio);
		ctl_done((union ctl_io *)ctsio);
		goto bailout;
	}

	/*
	 * If this LUN has no backend, we can't flush the cache anyway.
	 */
	if (lun->backend == NULL) {
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		goto bailout;
	}

	lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	lbalen->lba = starting_lba;
	lbalen->len = block_count;
	lbalen->flags = byte2;

	/*
	 * Check to see whether we're configured to send the SYNCHRONIZE
	 * CACHE command directly to the back end.
	 */
	mtx_lock(&lun->lun_lock);
	if ((softc->flags & CTL_FLAG_REAL_SYNC)
	 && (++(lun->sync_count) >= lun->sync_interval)) {
		lun->sync_count = 0;
		mtx_unlock(&lun->lun_lock);
		retval = lun->backend->config_write((union ctl_io *)ctsio);
	} else {
		/* Not forwarding to the backend; complete successfully. */
		mtx_unlock(&lun->lun_lock);
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
	}

bailout:

	return (retval);
}

/*
 * SCSI FORMAT UNIT: accept an (optionally present, empty) defect list
 * and clear the INOPERABLE ("medium format corrupted") state.
 */
int
ctl_format(struct ctl_scsiio *ctsio)
{
	struct scsi_format *cdb;
	struct ctl_lun *lun;
	int length, defect_list_len;

	CTL_DEBUG_PRINT(("ctl_format\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	cdb = (struct scsi_format *)ctsio->cdb;

	/* Parameter list length depends on the FMTDATA/LONGLIST bits. */
	length = 0;
	if (cdb->byte2 & SF_FMTDATA) {
		if (cdb->byte2 & SF_LONGLIST)
			length = sizeof(struct scsi_format_header_long);
		else
			length = sizeof(struct scsi_format_header_short);
	}

	/*
	 * First pass: allocate a buffer and fetch the parameter list from
	 * the initiator; we re-enter via ctl_config_move_done().
	 */
	if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
	 && (length > 0)) {
		ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
		ctsio->kern_data_len = length;
		ctsio->kern_total_len = length;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	defect_list_len = 0;

	if (cdb->byte2 &
	    SF_FMTDATA) {
		/* A non-empty defect list is not supported; reject it. */
		if (cdb->byte2 & SF_LONGLIST) {
			struct scsi_format_header_long *header;

			header = (struct scsi_format_header_long *)
				ctsio->kern_data_ptr;

			defect_list_len = scsi_4btoul(header->defect_list_len);
			if (defect_list_len != 0) {
				ctl_set_invalid_field(ctsio,
						      /*sks_valid*/ 1,
						      /*command*/ 0,
						      /*field*/ 2,
						      /*bit_valid*/ 0,
						      /*bit*/ 0);
				goto bailout;
			}
		} else {
			struct scsi_format_header_short *header;

			header = (struct scsi_format_header_short *)
				ctsio->kern_data_ptr;

			defect_list_len = scsi_2btoul(header->defect_list_len);
			if (defect_list_len != 0) {
				ctl_set_invalid_field(ctsio,
						      /*sks_valid*/ 1,
						      /*command*/ 0,
						      /*field*/ 2,
						      /*bit_valid*/ 0,
						      /*bit*/ 0);
				goto bailout;
			}
		}
	}

	/*
	 * The format command will clear out the "Medium format corrupted"
	 * status if set by the configuration code.  That status is really
	 * just a way to notify the host that we have lost the media, and
	 * get them to issue a command that will basically make them think
	 * they're blowing away the media.
	 */
	mtx_lock(&lun->lun_lock);
	lun->flags &= ~CTL_LUN_INOPERABLE;
	mtx_unlock(&lun->lun_lock);

	ctl_set_success(ctsio);
bailout:

	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}

	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * SCSI READ BUFFER: return the descriptor, the echo-buffer descriptor,
 * or a slice of the per-LUN write buffer, depending on the mode bits.
 */
int
ctl_read_buffer(struct ctl_scsiio *ctsio)
{
	struct scsi_read_buffer *cdb;
	struct ctl_lun *lun;
	int buffer_offset, len;
	/*
	 * NOTE(review): descr is static and rewritten on each call with
	 * the same contents; shared across all LUNs -- presumably benign,
	 * but worth confirming for concurrent READ BUFFER callers.
	 */
	static uint8_t descr[4];
	static uint8_t echo_descr[4] = { 0 };

	CTL_DEBUG_PRINT(("ctl_read_buffer\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_read_buffer *)ctsio->cdb;

	if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA &&
	    (cdb->byte2 & RWB_MODE) != RWB_MODE_ECHO_DESCR &&
	    (cdb->byte2 & RWB_MODE) != RWB_MODE_DESCR) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 1,
				      /*bit_valid*/ 1,
				      /*bit*/ 4);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	len = scsi_3btoul(cdb->length);
	buffer_offset = scsi_3btoul(cdb->offset);

	if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 6,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	if ((cdb->byte2 & RWB_MODE) == RWB_MODE_DESCR) {
		descr[0] = 0;
		scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]);
		ctsio->kern_data_ptr = descr;
		len = min(len, sizeof(descr));
	} else if ((cdb->byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) {
		ctsio->kern_data_ptr = echo_descr;
		len = min(len, sizeof(echo_descr));
	} else {
		/* Data mode: lazily allocate the per-LUN write buffer. */
		if (lun->write_buffer == NULL) {
			lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE,
			    M_CTL, M_WAITOK);
		}
		ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
	}
	ctsio->kern_data_len = len;
	ctsio->kern_total_len = len;
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;
	ctl_set_success(ctsio);
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * SCSI WRITE BUFFER: accept data-mode writes into the per-LUN write
 * buffer (allocated lazily).  Other modes are rejected.
 */
int
ctl_write_buffer(struct ctl_scsiio *ctsio)
{
	struct scsi_write_buffer *cdb;
	struct ctl_lun *lun;
	int buffer_offset, len;

	CTL_DEBUG_PRINT(("ctl_write_buffer\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_write_buffer *)ctsio->cdb;

	if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 1,
				      /*bit_valid*/ 1,
				      /*bit*/ 4);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	len = scsi_3btoul(cdb->length);
	buffer_offset = scsi_3btoul(cdb->offset);

	if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 6,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		if (lun->write_buffer == NULL) {
			lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE,
			    M_CTL, M_WAITOK);
		}
		ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	/* Data landed directly in the write buffer; nothing more to do. */
	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * SCSI WRITE SAME (10/16): validate the LBA range and flag combinations,
 * fetch one block of data from the initiator if needed, then hand the
 * request to the backend as a config write.
 */
int
ctl_write_same(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct ctl_lba_len_flags *lbalen;
	uint64_t lba;
	uint32_t num_blocks;
	int len, retval;
	uint8_t byte2;

	retval = CTL_RETVAL_COMPLETE;

	CTL_DEBUG_PRINT(("ctl_write_same\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	switch (ctsio->cdb[0]) {
	case WRITE_SAME_10: {
		struct scsi_write_same_10 *cdb;

		cdb = (struct scsi_write_same_10 *)ctsio->cdb;

		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_2btoul(cdb->length);
		byte2 = cdb->byte2;
		break;
	}
	case WRITE_SAME_16: {
		struct scsi_write_same_16 *cdb;

		cdb = (struct scsi_write_same_16 *)ctsio->cdb;

		lba = scsi_8btou64(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		byte2 = cdb->byte2;
		break;
	}
	default:
		/*
		 * We got a command we don't support.  This shouldn't
		 * happen, commands should be filtered out above us.
		 */
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
		break; /* NOTREACHED */
	}

	/* NDOB and ANCHOR flags can be used only together with UNMAP */
	if ((byte2 & SWS_UNMAP) == 0 &&
	    (byte2 & (SWS_NDOB | SWS_ANCHOR)) != 0) {
		ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
		    /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * The first check is to make sure we're in bounds, the second
	 * check is to catch wrap-around problems.  If the lba + num blocks
	 * is less than the lba, then we've wrapped around and the block
	 * range is invalid anyway.
	 */
	if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
	 || ((lba + num_blocks) < lba)) {
		ctl_set_lba_out_of_range(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/* Zero number of blocks means "to the last logical block" */
	if (num_blocks == 0) {
		if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) {
			ctl_set_invalid_field(ctsio,
					      /*sks_valid*/ 0,
					      /*command*/ 1,
					      /*field*/ 0,
					      /*bit_valid*/ 0,
					      /*bit*/ 0);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
		num_blocks = (lun->be_lun->maxlba + 1) - lba;
	}

	/* One logical block of data to replicate across the range. */
	len = lun->be_lun->blocksize;

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
5540 */ 5541 if ((byte2 & SWS_NDOB) == 0 && 5542 (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5543 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);; 5544 ctsio->kern_data_len = len; 5545 ctsio->kern_total_len = len; 5546 ctsio->kern_data_resid = 0; 5547 ctsio->kern_rel_offset = 0; 5548 ctsio->kern_sg_entries = 0; 5549 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5550 ctsio->be_move_done = ctl_config_move_done; 5551 ctl_datamove((union ctl_io *)ctsio); 5552 5553 return (CTL_RETVAL_COMPLETE); 5554 } 5555 5556 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5557 lbalen->lba = lba; 5558 lbalen->len = num_blocks; 5559 lbalen->flags = byte2; 5560 retval = lun->backend->config_write((union ctl_io *)ctsio); 5561 5562 return (retval); 5563} 5564 5565int 5566ctl_unmap(struct ctl_scsiio *ctsio) 5567{ 5568 struct ctl_lun *lun; 5569 struct scsi_unmap *cdb; 5570 struct ctl_ptr_len_flags *ptrlen; 5571 struct scsi_unmap_header *hdr; 5572 struct scsi_unmap_desc *buf, *end, *endnz, *range; 5573 uint64_t lba; 5574 uint32_t num_blocks; 5575 int len, retval; 5576 uint8_t byte2; 5577 5578 retval = CTL_RETVAL_COMPLETE; 5579 5580 CTL_DEBUG_PRINT(("ctl_unmap\n")); 5581 5582 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5583 cdb = (struct scsi_unmap *)ctsio->cdb; 5584 5585 len = scsi_2btoul(cdb->length); 5586 byte2 = cdb->byte2; 5587 5588 /* 5589 * If we've got a kernel request that hasn't been malloced yet, 5590 * malloc it and tell the caller the data buffer is here. 
5591 */ 5592 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5593 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);; 5594 ctsio->kern_data_len = len; 5595 ctsio->kern_total_len = len; 5596 ctsio->kern_data_resid = 0; 5597 ctsio->kern_rel_offset = 0; 5598 ctsio->kern_sg_entries = 0; 5599 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5600 ctsio->be_move_done = ctl_config_move_done; 5601 ctl_datamove((union ctl_io *)ctsio); 5602 5603 return (CTL_RETVAL_COMPLETE); 5604 } 5605 5606 len = ctsio->kern_total_len - ctsio->kern_data_resid; 5607 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; 5608 if (len < sizeof (*hdr) || 5609 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || 5610 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || 5611 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { 5612 ctl_set_invalid_field(ctsio, 5613 /*sks_valid*/ 0, 5614 /*command*/ 0, 5615 /*field*/ 0, 5616 /*bit_valid*/ 0, 5617 /*bit*/ 0); 5618 goto done; 5619 } 5620 len = scsi_2btoul(hdr->desc_length); 5621 buf = (struct scsi_unmap_desc *)(hdr + 1); 5622 end = buf + len / sizeof(*buf); 5623 5624 endnz = buf; 5625 for (range = buf; range < end; range++) { 5626 lba = scsi_8btou64(range->lba); 5627 num_blocks = scsi_4btoul(range->length); 5628 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5629 || ((lba + num_blocks) < lba)) { 5630 ctl_set_lba_out_of_range(ctsio); 5631 ctl_done((union ctl_io *)ctsio); 5632 return (CTL_RETVAL_COMPLETE); 5633 } 5634 if (num_blocks != 0) 5635 endnz = range + 1; 5636 } 5637 5638 /* 5639 * Block backend can not handle zero last range. 5640 * Filter it out and return if there is nothing left. 
5641 */ 5642 len = (uint8_t *)endnz - (uint8_t *)buf; 5643 if (len == 0) { 5644 ctl_set_success(ctsio); 5645 goto done; 5646 } 5647 5648 mtx_lock(&lun->lun_lock); 5649 ptrlen = (struct ctl_ptr_len_flags *) 5650 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5651 ptrlen->ptr = (void *)buf; 5652 ptrlen->len = len; 5653 ptrlen->flags = byte2; 5654 ctl_check_blocked(lun); 5655 mtx_unlock(&lun->lun_lock); 5656 5657 retval = lun->backend->config_write((union ctl_io *)ctsio); 5658 return (retval); 5659 5660done: 5661 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5662 free(ctsio->kern_data_ptr, M_CTL); 5663 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5664 } 5665 ctl_done((union ctl_io *)ctsio); 5666 return (CTL_RETVAL_COMPLETE); 5667} 5668 5669/* 5670 * Note that this function currently doesn't actually do anything inside 5671 * CTL to enforce things if the DQue bit is turned on. 5672 * 5673 * Also note that this function can't be used in the default case, because 5674 * the DQue bit isn't set in the changeable mask for the control mode page 5675 * anyway. This is just here as an example for how to implement a page 5676 * handler, and a placeholder in case we want to allow the user to turn 5677 * tagged queueing on and off. 5678 * 5679 * The D_SENSE bit handling is functional, however, and will turn 5680 * descriptor sense on and off for a given LUN. 
5681 */ 5682int 5683ctl_control_page_handler(struct ctl_scsiio *ctsio, 5684 struct ctl_page_index *page_index, uint8_t *page_ptr) 5685{ 5686 struct scsi_control_page *current_cp, *saved_cp, *user_cp; 5687 struct ctl_lun *lun; 5688 int set_ua; 5689 uint32_t initidx; 5690 5691 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5692 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5693 set_ua = 0; 5694 5695 user_cp = (struct scsi_control_page *)page_ptr; 5696 current_cp = (struct scsi_control_page *) 5697 (page_index->page_data + (page_index->page_len * 5698 CTL_PAGE_CURRENT)); 5699 saved_cp = (struct scsi_control_page *) 5700 (page_index->page_data + (page_index->page_len * 5701 CTL_PAGE_SAVED)); 5702 5703 mtx_lock(&lun->lun_lock); 5704 if (((current_cp->rlec & SCP_DSENSE) == 0) 5705 && ((user_cp->rlec & SCP_DSENSE) != 0)) { 5706 /* 5707 * Descriptor sense is currently turned off and the user 5708 * wants to turn it on. 5709 */ 5710 current_cp->rlec |= SCP_DSENSE; 5711 saved_cp->rlec |= SCP_DSENSE; 5712 lun->flags |= CTL_LUN_SENSE_DESC; 5713 set_ua = 1; 5714 } else if (((current_cp->rlec & SCP_DSENSE) != 0) 5715 && ((user_cp->rlec & SCP_DSENSE) == 0)) { 5716 /* 5717 * Descriptor sense is currently turned on, and the user 5718 * wants to turn it off. 
5719 */ 5720 current_cp->rlec &= ~SCP_DSENSE; 5721 saved_cp->rlec &= ~SCP_DSENSE; 5722 lun->flags &= ~CTL_LUN_SENSE_DESC; 5723 set_ua = 1; 5724 } 5725 if ((current_cp->queue_flags & SCP_QUEUE_ALG_MASK) != 5726 (user_cp->queue_flags & SCP_QUEUE_ALG_MASK)) { 5727 current_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK; 5728 current_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK; 5729 saved_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK; 5730 saved_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK; 5731 set_ua = 1; 5732 } 5733 if ((current_cp->eca_and_aen & SCP_SWP) != 5734 (user_cp->eca_and_aen & SCP_SWP)) { 5735 current_cp->eca_and_aen &= ~SCP_SWP; 5736 current_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP; 5737 saved_cp->eca_and_aen &= ~SCP_SWP; 5738 saved_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP; 5739 set_ua = 1; 5740 } 5741 if (set_ua != 0) 5742 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 5743 mtx_unlock(&lun->lun_lock); 5744 5745 return (0); 5746} 5747 5748int 5749ctl_caching_sp_handler(struct ctl_scsiio *ctsio, 5750 struct ctl_page_index *page_index, uint8_t *page_ptr) 5751{ 5752 struct scsi_caching_page *current_cp, *saved_cp, *user_cp; 5753 struct ctl_lun *lun; 5754 int set_ua; 5755 uint32_t initidx; 5756 5757 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5758 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 5759 set_ua = 0; 5760 5761 user_cp = (struct scsi_caching_page *)page_ptr; 5762 current_cp = (struct scsi_caching_page *) 5763 (page_index->page_data + (page_index->page_len * 5764 CTL_PAGE_CURRENT)); 5765 saved_cp = (struct scsi_caching_page *) 5766 (page_index->page_data + (page_index->page_len * 5767 CTL_PAGE_SAVED)); 5768 5769 mtx_lock(&lun->lun_lock); 5770 if ((current_cp->flags1 & (SCP_WCE | SCP_RCD)) != 5771 (user_cp->flags1 & (SCP_WCE | SCP_RCD))) { 5772 current_cp->flags1 &= ~(SCP_WCE | SCP_RCD); 5773 current_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD); 5774 saved_cp->flags1 &= ~(SCP_WCE 
| SCP_RCD); 5775 saved_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD); 5776 set_ua = 1; 5777 } 5778 if (set_ua != 0) 5779 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 5780 mtx_unlock(&lun->lun_lock); 5781 5782 return (0); 5783} 5784 5785int 5786ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio, 5787 struct ctl_page_index *page_index, 5788 uint8_t *page_ptr) 5789{ 5790 uint8_t *c; 5791 int i; 5792 5793 c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs; 5794 ctl_time_io_secs = 5795 (c[0] << 8) | 5796 (c[1] << 0) | 5797 0; 5798 CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs)); 5799 printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs); 5800 printf("page data:"); 5801 for (i=0; i<8; i++) 5802 printf(" %.2x",page_ptr[i]); 5803 printf("\n"); 5804 return (0); 5805} 5806 5807int 5808ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio, 5809 struct ctl_page_index *page_index, 5810 int pc) 5811{ 5812 struct copan_debugconf_subpage *page; 5813 5814 page = (struct copan_debugconf_subpage *)page_index->page_data + 5815 (page_index->page_len * pc); 5816 5817 switch (pc) { 5818 case SMS_PAGE_CTRL_CHANGEABLE >> 6: 5819 case SMS_PAGE_CTRL_DEFAULT >> 6: 5820 case SMS_PAGE_CTRL_SAVED >> 6: 5821 /* 5822 * We don't update the changable or default bits for this page. 
5823 */ 5824 break; 5825 case SMS_PAGE_CTRL_CURRENT >> 6: 5826 page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8; 5827 page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0; 5828 break; 5829 default: 5830#ifdef NEEDTOPORT 5831 EPRINT(0, "Invalid PC %d!!", pc); 5832#endif /* NEEDTOPORT */ 5833 break; 5834 } 5835 return (0); 5836} 5837 5838 5839static int 5840ctl_do_mode_select(union ctl_io *io) 5841{ 5842 struct scsi_mode_page_header *page_header; 5843 struct ctl_page_index *page_index; 5844 struct ctl_scsiio *ctsio; 5845 int control_dev, page_len; 5846 int page_len_offset, page_len_size; 5847 union ctl_modepage_info *modepage_info; 5848 struct ctl_lun *lun; 5849 int *len_left, *len_used; 5850 int retval, i; 5851 5852 ctsio = &io->scsiio; 5853 page_index = NULL; 5854 page_len = 0; 5855 retval = CTL_RETVAL_COMPLETE; 5856 5857 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5858 5859 if (lun->be_lun->lun_type != T_DIRECT) 5860 control_dev = 1; 5861 else 5862 control_dev = 0; 5863 5864 modepage_info = (union ctl_modepage_info *) 5865 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 5866 len_left = &modepage_info->header.len_left; 5867 len_used = &modepage_info->header.len_used; 5868 5869do_next_page: 5870 5871 page_header = (struct scsi_mode_page_header *) 5872 (ctsio->kern_data_ptr + *len_used); 5873 5874 if (*len_left == 0) { 5875 free(ctsio->kern_data_ptr, M_CTL); 5876 ctl_set_success(ctsio); 5877 ctl_done((union ctl_io *)ctsio); 5878 return (CTL_RETVAL_COMPLETE); 5879 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 5880 5881 free(ctsio->kern_data_ptr, M_CTL); 5882 ctl_set_param_len_error(ctsio); 5883 ctl_done((union ctl_io *)ctsio); 5884 return (CTL_RETVAL_COMPLETE); 5885 5886 } else if ((page_header->page_code & SMPH_SPF) 5887 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 5888 5889 free(ctsio->kern_data_ptr, M_CTL); 5890 ctl_set_param_len_error(ctsio); 5891 ctl_done((union ctl_io *)ctsio); 5892 return 
(CTL_RETVAL_COMPLETE); 5893 } 5894 5895 5896 /* 5897 * XXX KDM should we do something with the block descriptor? 5898 */ 5899 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 5900 5901 if ((control_dev != 0) 5902 && (lun->mode_pages.index[i].page_flags & 5903 CTL_PAGE_FLAG_DISK_ONLY)) 5904 continue; 5905 5906 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) != 5907 (page_header->page_code & SMPH_PC_MASK)) 5908 continue; 5909 5910 /* 5911 * If neither page has a subpage code, then we've got a 5912 * match. 5913 */ 5914 if (((lun->mode_pages.index[i].page_code & SMPH_SPF) == 0) 5915 && ((page_header->page_code & SMPH_SPF) == 0)) { 5916 page_index = &lun->mode_pages.index[i]; 5917 page_len = page_header->page_length; 5918 break; 5919 } 5920 5921 /* 5922 * If both pages have subpages, then the subpage numbers 5923 * have to match. 5924 */ 5925 if ((lun->mode_pages.index[i].page_code & SMPH_SPF) 5926 && (page_header->page_code & SMPH_SPF)) { 5927 struct scsi_mode_page_header_sp *sph; 5928 5929 sph = (struct scsi_mode_page_header_sp *)page_header; 5930 5931 if (lun->mode_pages.index[i].subpage == 5932 sph->subpage) { 5933 page_index = &lun->mode_pages.index[i]; 5934 page_len = scsi_2btoul(sph->page_length); 5935 break; 5936 } 5937 } 5938 } 5939 5940 /* 5941 * If we couldn't find the page, or if we don't have a mode select 5942 * handler for it, send back an error to the user. 
5943 */ 5944 if ((page_index == NULL) 5945 || (page_index->select_handler == NULL)) { 5946 ctl_set_invalid_field(ctsio, 5947 /*sks_valid*/ 1, 5948 /*command*/ 0, 5949 /*field*/ *len_used, 5950 /*bit_valid*/ 0, 5951 /*bit*/ 0); 5952 free(ctsio->kern_data_ptr, M_CTL); 5953 ctl_done((union ctl_io *)ctsio); 5954 return (CTL_RETVAL_COMPLETE); 5955 } 5956 5957 if (page_index->page_code & SMPH_SPF) { 5958 page_len_offset = 2; 5959 page_len_size = 2; 5960 } else { 5961 page_len_size = 1; 5962 page_len_offset = 1; 5963 } 5964 5965 /* 5966 * If the length the initiator gives us isn't the one we specify in 5967 * the mode page header, or if they didn't specify enough data in 5968 * the CDB to avoid truncating this page, kick out the request. 5969 */ 5970 if ((page_len != (page_index->page_len - page_len_offset - 5971 page_len_size)) 5972 || (*len_left < page_index->page_len)) { 5973 5974 5975 ctl_set_invalid_field(ctsio, 5976 /*sks_valid*/ 1, 5977 /*command*/ 0, 5978 /*field*/ *len_used + page_len_offset, 5979 /*bit_valid*/ 0, 5980 /*bit*/ 0); 5981 free(ctsio->kern_data_ptr, M_CTL); 5982 ctl_done((union ctl_io *)ctsio); 5983 return (CTL_RETVAL_COMPLETE); 5984 } 5985 5986 /* 5987 * Run through the mode page, checking to make sure that the bits 5988 * the user changed are actually legal for him to change. 5989 */ 5990 for (i = 0; i < page_index->page_len; i++) { 5991 uint8_t *user_byte, *change_mask, *current_byte; 5992 int bad_bit; 5993 int j; 5994 5995 user_byte = (uint8_t *)page_header + i; 5996 change_mask = page_index->page_data + 5997 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; 5998 current_byte = page_index->page_data + 5999 (page_index->page_len * CTL_PAGE_CURRENT) + i; 6000 6001 /* 6002 * Check to see whether the user set any bits in this byte 6003 * that he is not allowed to set. 6004 */ 6005 if ((*user_byte & ~(*change_mask)) == 6006 (*current_byte & ~(*change_mask))) 6007 continue; 6008 6009 /* 6010 * Go through bit by bit to determine which one is illegal. 
6011 */ 6012 bad_bit = 0; 6013 for (j = 7; j >= 0; j--) { 6014 if ((((1 << i) & ~(*change_mask)) & *user_byte) != 6015 (((1 << i) & ~(*change_mask)) & *current_byte)) { 6016 bad_bit = i; 6017 break; 6018 } 6019 } 6020 ctl_set_invalid_field(ctsio, 6021 /*sks_valid*/ 1, 6022 /*command*/ 0, 6023 /*field*/ *len_used + i, 6024 /*bit_valid*/ 1, 6025 /*bit*/ bad_bit); 6026 free(ctsio->kern_data_ptr, M_CTL); 6027 ctl_done((union ctl_io *)ctsio); 6028 return (CTL_RETVAL_COMPLETE); 6029 } 6030 6031 /* 6032 * Decrement these before we call the page handler, since we may 6033 * end up getting called back one way or another before the handler 6034 * returns to this context. 6035 */ 6036 *len_left -= page_index->page_len; 6037 *len_used += page_index->page_len; 6038 6039 retval = page_index->select_handler(ctsio, page_index, 6040 (uint8_t *)page_header); 6041 6042 /* 6043 * If the page handler returns CTL_RETVAL_QUEUED, then we need to 6044 * wait until this queued command completes to finish processing 6045 * the mode page. If it returns anything other than 6046 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have 6047 * already set the sense information, freed the data pointer, and 6048 * completed the io for us. 6049 */ 6050 if (retval != CTL_RETVAL_COMPLETE) 6051 goto bailout_no_done; 6052 6053 /* 6054 * If the initiator sent us more than one page, parse the next one. 
6055 */ 6056 if (*len_left > 0) 6057 goto do_next_page; 6058 6059 ctl_set_success(ctsio); 6060 free(ctsio->kern_data_ptr, M_CTL); 6061 ctl_done((union ctl_io *)ctsio); 6062 6063bailout_no_done: 6064 6065 return (CTL_RETVAL_COMPLETE); 6066 6067} 6068 6069int 6070ctl_mode_select(struct ctl_scsiio *ctsio) 6071{ 6072 int param_len, pf, sp; 6073 int header_size, bd_len; 6074 int len_left, len_used; 6075 struct ctl_page_index *page_index; 6076 struct ctl_lun *lun; 6077 int control_dev, page_len; 6078 union ctl_modepage_info *modepage_info; 6079 int retval; 6080 6081 pf = 0; 6082 sp = 0; 6083 page_len = 0; 6084 len_used = 0; 6085 len_left = 0; 6086 retval = 0; 6087 bd_len = 0; 6088 page_index = NULL; 6089 6090 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6091 6092 if (lun->be_lun->lun_type != T_DIRECT) 6093 control_dev = 1; 6094 else 6095 control_dev = 0; 6096 6097 switch (ctsio->cdb[0]) { 6098 case MODE_SELECT_6: { 6099 struct scsi_mode_select_6 *cdb; 6100 6101 cdb = (struct scsi_mode_select_6 *)ctsio->cdb; 6102 6103 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6104 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6105 6106 param_len = cdb->length; 6107 header_size = sizeof(struct scsi_mode_header_6); 6108 break; 6109 } 6110 case MODE_SELECT_10: { 6111 struct scsi_mode_select_10 *cdb; 6112 6113 cdb = (struct scsi_mode_select_10 *)ctsio->cdb; 6114 6115 pf = (cdb->byte2 & SMS_PF) ? 1 : 0; 6116 sp = (cdb->byte2 & SMS_SP) ? 1 : 0; 6117 6118 param_len = scsi_2btoul(cdb->length); 6119 header_size = sizeof(struct scsi_mode_header_10); 6120 break; 6121 } 6122 default: 6123 ctl_set_invalid_opcode(ctsio); 6124 ctl_done((union ctl_io *)ctsio); 6125 return (CTL_RETVAL_COMPLETE); 6126 break; /* NOTREACHED */ 6127 } 6128 6129 /* 6130 * From SPC-3: 6131 * "A parameter list length of zero indicates that the Data-Out Buffer 6132 * shall be empty. This condition shall not be considered as an error." 
6133 */ 6134 if (param_len == 0) { 6135 ctl_set_success(ctsio); 6136 ctl_done((union ctl_io *)ctsio); 6137 return (CTL_RETVAL_COMPLETE); 6138 } 6139 6140 /* 6141 * Since we'll hit this the first time through, prior to 6142 * allocation, we don't need to free a data buffer here. 6143 */ 6144 if (param_len < header_size) { 6145 ctl_set_param_len_error(ctsio); 6146 ctl_done((union ctl_io *)ctsio); 6147 return (CTL_RETVAL_COMPLETE); 6148 } 6149 6150 /* 6151 * Allocate the data buffer and grab the user's data. In theory, 6152 * we shouldn't have to sanity check the parameter list length here 6153 * because the maximum size is 64K. We should be able to malloc 6154 * that much without too many problems. 6155 */ 6156 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 6157 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 6158 ctsio->kern_data_len = param_len; 6159 ctsio->kern_total_len = param_len; 6160 ctsio->kern_data_resid = 0; 6161 ctsio->kern_rel_offset = 0; 6162 ctsio->kern_sg_entries = 0; 6163 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6164 ctsio->be_move_done = ctl_config_move_done; 6165 ctl_datamove((union ctl_io *)ctsio); 6166 6167 return (CTL_RETVAL_COMPLETE); 6168 } 6169 6170 switch (ctsio->cdb[0]) { 6171 case MODE_SELECT_6: { 6172 struct scsi_mode_header_6 *mh6; 6173 6174 mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; 6175 bd_len = mh6->blk_desc_len; 6176 break; 6177 } 6178 case MODE_SELECT_10: { 6179 struct scsi_mode_header_10 *mh10; 6180 6181 mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; 6182 bd_len = scsi_2btoul(mh10->blk_desc_len); 6183 break; 6184 } 6185 default: 6186 panic("Invalid CDB type %#x", ctsio->cdb[0]); 6187 break; 6188 } 6189 6190 if (param_len < (header_size + bd_len)) { 6191 free(ctsio->kern_data_ptr, M_CTL); 6192 ctl_set_param_len_error(ctsio); 6193 ctl_done((union ctl_io *)ctsio); 6194 return (CTL_RETVAL_COMPLETE); 6195 } 6196 6197 /* 6198 * Set the IO_CONT flag, so that if this I/O gets passed to 6199 * 
ctl_config_write_done(), it'll get passed back to 6200 * ctl_do_mode_select() for further processing, or completion if 6201 * we're all done. 6202 */ 6203 ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; 6204 ctsio->io_cont = ctl_do_mode_select; 6205 6206 modepage_info = (union ctl_modepage_info *) 6207 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6208 6209 memset(modepage_info, 0, sizeof(*modepage_info)); 6210 6211 len_left = param_len - header_size - bd_len; 6212 len_used = header_size + bd_len; 6213 6214 modepage_info->header.len_left = len_left; 6215 modepage_info->header.len_used = len_used; 6216 6217 return (ctl_do_mode_select((union ctl_io *)ctsio)); 6218} 6219 6220int 6221ctl_mode_sense(struct ctl_scsiio *ctsio) 6222{ 6223 struct ctl_lun *lun; 6224 int pc, page_code, dbd, llba, subpage; 6225 int alloc_len, page_len, header_len, total_len; 6226 struct scsi_mode_block_descr *block_desc; 6227 struct ctl_page_index *page_index; 6228 int control_dev; 6229 6230 dbd = 0; 6231 llba = 0; 6232 block_desc = NULL; 6233 page_index = NULL; 6234 6235 CTL_DEBUG_PRINT(("ctl_mode_sense\n")); 6236 6237 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6238 6239 if (lun->be_lun->lun_type != T_DIRECT) 6240 control_dev = 1; 6241 else 6242 control_dev = 0; 6243 6244 switch (ctsio->cdb[0]) { 6245 case MODE_SENSE_6: { 6246 struct scsi_mode_sense_6 *cdb; 6247 6248 cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; 6249 6250 header_len = sizeof(struct scsi_mode_hdr_6); 6251 if (cdb->byte2 & SMS_DBD) 6252 dbd = 1; 6253 else 6254 header_len += sizeof(struct scsi_mode_block_descr); 6255 6256 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6257 page_code = cdb->page & SMS_PAGE_CODE; 6258 subpage = cdb->subpage; 6259 alloc_len = cdb->length; 6260 break; 6261 } 6262 case MODE_SENSE_10: { 6263 struct scsi_mode_sense_10 *cdb; 6264 6265 cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; 6266 6267 header_len = sizeof(struct scsi_mode_hdr_10); 6268 6269 if (cdb->byte2 & SMS_DBD) 6270 dbd = 
1; 6271 else 6272 header_len += sizeof(struct scsi_mode_block_descr); 6273 if (cdb->byte2 & SMS10_LLBAA) 6274 llba = 1; 6275 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6276 page_code = cdb->page & SMS_PAGE_CODE; 6277 subpage = cdb->subpage; 6278 alloc_len = scsi_2btoul(cdb->length); 6279 break; 6280 } 6281 default: 6282 ctl_set_invalid_opcode(ctsio); 6283 ctl_done((union ctl_io *)ctsio); 6284 return (CTL_RETVAL_COMPLETE); 6285 break; /* NOTREACHED */ 6286 } 6287 6288 /* 6289 * We have to make a first pass through to calculate the size of 6290 * the pages that match the user's query. Then we allocate enough 6291 * memory to hold it, and actually copy the data into the buffer. 6292 */ 6293 switch (page_code) { 6294 case SMS_ALL_PAGES_PAGE: { 6295 int i; 6296 6297 page_len = 0; 6298 6299 /* 6300 * At the moment, values other than 0 and 0xff here are 6301 * reserved according to SPC-3. 6302 */ 6303 if ((subpage != SMS_SUBPAGE_PAGE_0) 6304 && (subpage != SMS_SUBPAGE_ALL)) { 6305 ctl_set_invalid_field(ctsio, 6306 /*sks_valid*/ 1, 6307 /*command*/ 1, 6308 /*field*/ 3, 6309 /*bit_valid*/ 0, 6310 /*bit*/ 0); 6311 ctl_done((union ctl_io *)ctsio); 6312 return (CTL_RETVAL_COMPLETE); 6313 } 6314 6315 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6316 if ((control_dev != 0) 6317 && (lun->mode_pages.index[i].page_flags & 6318 CTL_PAGE_FLAG_DISK_ONLY)) 6319 continue; 6320 6321 /* 6322 * We don't use this subpage if the user didn't 6323 * request all subpages. 
6324 */ 6325 if ((lun->mode_pages.index[i].subpage != 0) 6326 && (subpage == SMS_SUBPAGE_PAGE_0)) 6327 continue; 6328 6329#if 0 6330 printf("found page %#x len %d\n", 6331 lun->mode_pages.index[i].page_code & 6332 SMPH_PC_MASK, 6333 lun->mode_pages.index[i].page_len); 6334#endif 6335 page_len += lun->mode_pages.index[i].page_len; 6336 } 6337 break; 6338 } 6339 default: { 6340 int i; 6341 6342 page_len = 0; 6343 6344 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6345 /* Look for the right page code */ 6346 if ((lun->mode_pages.index[i].page_code & 6347 SMPH_PC_MASK) != page_code) 6348 continue; 6349 6350 /* Look for the right subpage or the subpage wildcard*/ 6351 if ((lun->mode_pages.index[i].subpage != subpage) 6352 && (subpage != SMS_SUBPAGE_ALL)) 6353 continue; 6354 6355 /* Make sure the page is supported for this dev type */ 6356 if ((control_dev != 0) 6357 && (lun->mode_pages.index[i].page_flags & 6358 CTL_PAGE_FLAG_DISK_ONLY)) 6359 continue; 6360 6361#if 0 6362 printf("found page %#x len %d\n", 6363 lun->mode_pages.index[i].page_code & 6364 SMPH_PC_MASK, 6365 lun->mode_pages.index[i].page_len); 6366#endif 6367 6368 page_len += lun->mode_pages.index[i].page_len; 6369 } 6370 6371 if (page_len == 0) { 6372 ctl_set_invalid_field(ctsio, 6373 /*sks_valid*/ 1, 6374 /*command*/ 1, 6375 /*field*/ 2, 6376 /*bit_valid*/ 1, 6377 /*bit*/ 5); 6378 ctl_done((union ctl_io *)ctsio); 6379 return (CTL_RETVAL_COMPLETE); 6380 } 6381 break; 6382 } 6383 } 6384 6385 total_len = header_len + page_len; 6386#if 0 6387 printf("header_len = %d, page_len = %d, total_len = %d\n", 6388 header_len, page_len, total_len); 6389#endif 6390 6391 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6392 ctsio->kern_sg_entries = 0; 6393 ctsio->kern_data_resid = 0; 6394 ctsio->kern_rel_offset = 0; 6395 if (total_len < alloc_len) { 6396 ctsio->residual = alloc_len - total_len; 6397 ctsio->kern_data_len = total_len; 6398 ctsio->kern_total_len = total_len; 6399 } else { 6400 ctsio->residual 
= 0; 6401 ctsio->kern_data_len = alloc_len; 6402 ctsio->kern_total_len = alloc_len; 6403 } 6404 6405 switch (ctsio->cdb[0]) { 6406 case MODE_SENSE_6: { 6407 struct scsi_mode_hdr_6 *header; 6408 6409 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6410 6411 header->datalen = MIN(total_len - 1, 254); 6412 if (control_dev == 0) { 6413 header->dev_specific = 0x10; /* DPOFUA */ 6414 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6415 (lun->mode_pages.control_page[CTL_PAGE_CURRENT] 6416 .eca_and_aen & SCP_SWP) != 0) 6417 header->dev_specific |= 0x80; /* WP */ 6418 } 6419 if (dbd) 6420 header->block_descr_len = 0; 6421 else 6422 header->block_descr_len = 6423 sizeof(struct scsi_mode_block_descr); 6424 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6425 break; 6426 } 6427 case MODE_SENSE_10: { 6428 struct scsi_mode_hdr_10 *header; 6429 int datalen; 6430 6431 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6432 6433 datalen = MIN(total_len - 2, 65533); 6434 scsi_ulto2b(datalen, header->datalen); 6435 if (control_dev == 0) { 6436 header->dev_specific = 0x10; /* DPOFUA */ 6437 if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || 6438 (lun->mode_pages.control_page[CTL_PAGE_CURRENT] 6439 .eca_and_aen & SCP_SWP) != 0) 6440 header->dev_specific |= 0x80; /* WP */ 6441 } 6442 if (dbd) 6443 scsi_ulto2b(0, header->block_descr_len); 6444 else 6445 scsi_ulto2b(sizeof(struct scsi_mode_block_descr), 6446 header->block_descr_len); 6447 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6448 break; 6449 } 6450 default: 6451 panic("invalid CDB type %#x", ctsio->cdb[0]); 6452 break; /* NOTREACHED */ 6453 } 6454 6455 /* 6456 * If we've got a disk, use its blocksize in the block 6457 * descriptor. Otherwise, just set it to 0. 
6458 */ 6459 if (dbd == 0) { 6460 if (control_dev == 0) 6461 scsi_ulto3b(lun->be_lun->blocksize, 6462 block_desc->block_len); 6463 else 6464 scsi_ulto3b(0, block_desc->block_len); 6465 } 6466 6467 switch (page_code) { 6468 case SMS_ALL_PAGES_PAGE: { 6469 int i, data_used; 6470 6471 data_used = header_len; 6472 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6473 struct ctl_page_index *page_index; 6474 6475 page_index = &lun->mode_pages.index[i]; 6476 6477 if ((control_dev != 0) 6478 && (page_index->page_flags & 6479 CTL_PAGE_FLAG_DISK_ONLY)) 6480 continue; 6481 6482 /* 6483 * We don't use this subpage if the user didn't 6484 * request all subpages. We already checked (above) 6485 * to make sure the user only specified a subpage 6486 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6487 */ 6488 if ((page_index->subpage != 0) 6489 && (subpage == SMS_SUBPAGE_PAGE_0)) 6490 continue; 6491 6492 /* 6493 * Call the handler, if it exists, to update the 6494 * page to the latest values. 6495 */ 6496 if (page_index->sense_handler != NULL) 6497 page_index->sense_handler(ctsio, page_index,pc); 6498 6499 memcpy(ctsio->kern_data_ptr + data_used, 6500 page_index->page_data + 6501 (page_index->page_len * pc), 6502 page_index->page_len); 6503 data_used += page_index->page_len; 6504 } 6505 break; 6506 } 6507 default: { 6508 int i, data_used; 6509 6510 data_used = header_len; 6511 6512 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6513 struct ctl_page_index *page_index; 6514 6515 page_index = &lun->mode_pages.index[i]; 6516 6517 /* Look for the right page code */ 6518 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6519 continue; 6520 6521 /* Look for the right subpage or the subpage wildcard*/ 6522 if ((page_index->subpage != subpage) 6523 && (subpage != SMS_SUBPAGE_ALL)) 6524 continue; 6525 6526 /* Make sure the page is supported for this dev type */ 6527 if ((control_dev != 0) 6528 && (page_index->page_flags & 6529 CTL_PAGE_FLAG_DISK_ONLY)) 6530 continue; 6531 6532 /* 6533 * Call 
the handler, if it exists, to update the 6534 * page to the latest values. 6535 */ 6536 if (page_index->sense_handler != NULL) 6537 page_index->sense_handler(ctsio, page_index,pc); 6538 6539 memcpy(ctsio->kern_data_ptr + data_used, 6540 page_index->page_data + 6541 (page_index->page_len * pc), 6542 page_index->page_len); 6543 data_used += page_index->page_len; 6544 } 6545 break; 6546 } 6547 } 6548 6549 ctl_set_success(ctsio); 6550 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 6551 ctsio->be_move_done = ctl_config_move_done; 6552 ctl_datamove((union ctl_io *)ctsio); 6553 return (CTL_RETVAL_COMPLETE); 6554} 6555 6556int 6557ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, 6558 struct ctl_page_index *page_index, 6559 int pc) 6560{ 6561 struct ctl_lun *lun; 6562 struct scsi_log_param_header *phdr; 6563 uint8_t *data; 6564 uint64_t val; 6565 6566 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6567 data = page_index->page_data; 6568 6569 if (lun->backend->lun_attr != NULL && 6570 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail")) 6571 != UINT64_MAX) { 6572 phdr = (struct scsi_log_param_header *)data; 6573 scsi_ulto2b(0x0001, phdr->param_code); 6574 phdr->param_control = SLP_LBIN | SLP_LP; 6575 phdr->param_len = 8; 6576 data = (uint8_t *)(phdr + 1); 6577 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6578 data[4] = 0x02; /* per-pool */ 6579 data += phdr->param_len; 6580 } 6581 6582 if (lun->backend->lun_attr != NULL && 6583 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused")) 6584 != UINT64_MAX) { 6585 phdr = (struct scsi_log_param_header *)data; 6586 scsi_ulto2b(0x0002, phdr->param_code); 6587 phdr->param_control = SLP_LBIN | SLP_LP; 6588 phdr->param_len = 8; 6589 data = (uint8_t *)(phdr + 1); 6590 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6591 data[4] = 0x01; /* per-LUN */ 6592 data += phdr->param_len; 6593 } 6594 6595 if (lun->backend->lun_attr != NULL && 6596 (val = lun->backend->lun_attr(lun->be_lun->be_lun, 
"poolblocksavail")) 6597 != UINT64_MAX) { 6598 phdr = (struct scsi_log_param_header *)data; 6599 scsi_ulto2b(0x00f1, phdr->param_code); 6600 phdr->param_control = SLP_LBIN | SLP_LP; 6601 phdr->param_len = 8; 6602 data = (uint8_t *)(phdr + 1); 6603 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6604 data[4] = 0x02; /* per-pool */ 6605 data += phdr->param_len; 6606 } 6607 6608 if (lun->backend->lun_attr != NULL && 6609 (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused")) 6610 != UINT64_MAX) { 6611 phdr = (struct scsi_log_param_header *)data; 6612 scsi_ulto2b(0x00f2, phdr->param_code); 6613 phdr->param_control = SLP_LBIN | SLP_LP; 6614 phdr->param_len = 8; 6615 data = (uint8_t *)(phdr + 1); 6616 scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 6617 data[4] = 0x02; /* per-pool */ 6618 data += phdr->param_len; 6619 } 6620 6621 page_index->page_len = data - page_index->page_data; 6622 return (0); 6623} 6624 6625int 6626ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio, 6627 struct ctl_page_index *page_index, 6628 int pc) 6629{ 6630 struct ctl_lun *lun; 6631 struct stat_page *data; 6632 uint64_t rn, wn, rb, wb; 6633 struct bintime rt, wt; 6634 int i; 6635 6636 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6637 data = (struct stat_page *)page_index->page_data; 6638 6639 scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code); 6640 data->sap.hdr.param_control = SLP_LBIN; 6641 data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) - 6642 sizeof(struct scsi_log_param_header); 6643 rn = wn = rb = wb = 0; 6644 bintime_clear(&rt); 6645 bintime_clear(&wt); 6646 for (i = 0; i < CTL_MAX_PORTS; i++) { 6647 rn += lun->stats.ports[i].operations[CTL_STATS_READ]; 6648 wn += lun->stats.ports[i].operations[CTL_STATS_WRITE]; 6649 rb += lun->stats.ports[i].bytes[CTL_STATS_READ]; 6650 wb += lun->stats.ports[i].bytes[CTL_STATS_WRITE]; 6651 bintime_add(&rt, &lun->stats.ports[i].time[CTL_STATS_READ]); 6652 bintime_add(&wt, 
&lun->stats.ports[i].time[CTL_STATS_WRITE]); 6653 } 6654 scsi_u64to8b(rn, data->sap.read_num); 6655 scsi_u64to8b(wn, data->sap.write_num); 6656 if (lun->stats.blocksize > 0) { 6657 scsi_u64to8b(wb / lun->stats.blocksize, 6658 data->sap.recvieved_lba); 6659 scsi_u64to8b(rb / lun->stats.blocksize, 6660 data->sap.transmitted_lba); 6661 } 6662 scsi_u64to8b((uint64_t)rt.sec * 1000 + rt.frac / (UINT64_MAX / 1000), 6663 data->sap.read_int); 6664 scsi_u64to8b((uint64_t)wt.sec * 1000 + wt.frac / (UINT64_MAX / 1000), 6665 data->sap.write_int); 6666 scsi_u64to8b(0, data->sap.weighted_num); 6667 scsi_u64to8b(0, data->sap.weighted_int); 6668 scsi_ulto2b(SLP_IT, data->it.hdr.param_code); 6669 data->it.hdr.param_control = SLP_LBIN; 6670 data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) - 6671 sizeof(struct scsi_log_param_header); 6672#ifdef CTL_TIME_IO 6673 scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int); 6674#endif 6675 scsi_ulto2b(SLP_TI, data->ti.hdr.param_code); 6676 data->it.hdr.param_control = SLP_LBIN; 6677 data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) - 6678 sizeof(struct scsi_log_param_header); 6679 scsi_ulto4b(3, data->ti.exponent); 6680 scsi_ulto4b(1, data->ti.integer); 6681 6682 page_index->page_len = sizeof(*data); 6683 return (0); 6684} 6685 6686int 6687ctl_log_sense(struct ctl_scsiio *ctsio) 6688{ 6689 struct ctl_lun *lun; 6690 int i, pc, page_code, subpage; 6691 int alloc_len, total_len; 6692 struct ctl_page_index *page_index; 6693 struct scsi_log_sense *cdb; 6694 struct scsi_log_header *header; 6695 6696 CTL_DEBUG_PRINT(("ctl_log_sense\n")); 6697 6698 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6699 cdb = (struct scsi_log_sense *)ctsio->cdb; 6700 pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6; 6701 page_code = cdb->page & SLS_PAGE_CODE; 6702 subpage = cdb->subpage; 6703 alloc_len = scsi_2btoul(cdb->length); 6704 6705 page_index = NULL; 6706 for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { 6707 page_index = 
		    &lun->log_pages.index[i];

		/* Look for the right page code */
		if ((page_index->page_code & SL_PAGE_CODE) != page_code)
			continue;

		/* Look for the right subpage or the subpage wildcard */
		if (page_index->subpage != subpage)
			continue;

		break;
	}
	if (i >= CTL_NUM_LOG_PAGES) {
		/* No matching page/subpage: LOG SENSE CDB byte 2 is invalid. */
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	total_len = sizeof(struct scsi_log_header) + page_index->page_len;

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	/* Never move more data than the initiator's allocation length. */
	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}

	header = (struct scsi_log_header *)ctsio->kern_data_ptr;
	header->page = page_index->page_code;
	if (page_index->subpage) {
		header->page |= SL_SPF;
		header->subpage = page_index->subpage;
	}
	scsi_ulto2b(page_index->page_len, header->datalen);

	/*
	 * Call the handler, if it exists, to update the
	 * page to the latest values.
	 */
	if (page_index->sense_handler != NULL)
		page_index->sense_handler(ctsio, page_index, pc);

	memcpy(header + 1, page_index->page_data, page_index->page_len);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * READ CAPACITY (10): report the LUN's last LBA (capped at 0xffffffff)
 * and block size.
 */
int
ctl_read_capacity(struct ctl_scsiio *ctsio)
{
	struct scsi_read_capacity *cdb;
	struct scsi_read_capacity_data *data;
	struct ctl_lun *lun;
	uint32_t lba;

	CTL_DEBUG_PRINT(("ctl_read_capacity\n"));

	cdb = (struct scsi_read_capacity *)ctsio->cdb;

	lba = scsi_4btoul(cdb->addr);
	/* A non-zero LBA is only valid together with the PMI bit. */
	if (((cdb->pmi & SRC_PMI) == 0)
	 && (lba != 0)) {
		ctl_set_invalid_field(/*ctsio*/ ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
	data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr;
	ctsio->residual = 0;
	ctsio->kern_data_len = sizeof(*data);
	ctsio->kern_total_len = sizeof(*data);
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * If the maximum LBA is greater than 0xfffffffe, the user must
	 * issue a SERVICE ACTION IN (16) command, with the read capacity
	 * service action set.
	 */
	if (lun->be_lun->maxlba > 0xfffffffe)
		scsi_ulto4b(0xffffffff, data->addr);
	else
		scsi_ulto4b(lun->be_lun->maxlba, data->addr);

	/*
	 * XXX KDM this may not be 512 bytes...
	 */
	scsi_ulto4b(lun->be_lun->blocksize, data->length);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * READ CAPACITY (16): report the full 64-bit last LBA, block size,
 * protection/physical-block info, and thin-provisioning bits.
 */
int
ctl_read_capacity_16(struct ctl_scsiio *ctsio)
{
	struct scsi_read_capacity_16 *cdb;
	struct scsi_read_capacity_data_long *data;
	struct ctl_lun *lun;
	uint64_t lba;
	uint32_t alloc_len;

	CTL_DEBUG_PRINT(("ctl_read_capacity_16\n"));

	cdb = (struct scsi_read_capacity_16 *)ctsio->cdb;

	alloc_len = scsi_4btoul(cdb->alloc_len);
	lba = scsi_8btou64(cdb->addr);

	/* With PMI set, a non-zero LBA is invalid here. */
	if ((cdb->reladr & SRC16_PMI)
	 && (lba != 0)) {
		ctl_set_invalid_field(/*ctsio*/ ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
	data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr;

	/* Never move more data than the initiator's allocation length. */
	if (sizeof(*data) < alloc_len) {
		ctsio->residual = alloc_len - sizeof(*data);
		ctsio->kern_data_len = sizeof(*data);
		ctsio->kern_total_len = sizeof(*data);
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	scsi_u64to8b(lun->be_lun->maxlba, data->addr);
	/* XXX KDM this may not be 512 bytes...
	 */
	scsi_ulto4b(lun->be_lun->blocksize, data->length);
	data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE;
	scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp);
	/* Advertise thin provisioning (LBPME) if the backend supports UNMAP. */
	if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP)
		data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * GET LBA STATUS: pre-fill a single "mapped or unknown" descriptor,
 * then hand the request to the backend's config_read to refine it.
 */
int
ctl_get_lba_status(struct ctl_scsiio *ctsio)
{
	struct scsi_get_lba_status *cdb;
	struct scsi_get_lba_status_data *data;
	struct ctl_lun *lun;
	struct ctl_lba_len_flags *lbalen;
	uint64_t lba;
	uint32_t alloc_len, total_len;
	int retval;

	CTL_DEBUG_PRINT(("ctl_get_lba_status\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_get_lba_status *)ctsio->cdb;
	lba = scsi_8btou64(cdb->addr);
	alloc_len = scsi_4btoul(cdb->alloc_len);

	if (lba > lun->be_lun->maxlba) {
		ctl_set_lba_out_of_range(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/* Header plus a single status descriptor. */
	total_len = sizeof(*data) + sizeof(data->descr[0]);
	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/* Fill dummy data in case backend can't tell anything.
	 */
	scsi_ulto4b(4 + sizeof(data->descr[0]), data->length);
	scsi_u64to8b(lba, data->descr[0].addr);
	scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba),
	    data->descr[0].length);
	data->descr[0].status = 0;	/* Mapped or unknown. */

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;

	lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	lbalen->lba = lba;
	lbalen->len = total_len;
	lbalen->flags = 0;
	/* Backend completes the I/O; its return value is not used here. */
	retval = lun->backend->config_read((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * READ DEFECT DATA (10/12): we keep no defect list, so return an
 * empty header of the appropriate format.
 */
int
ctl_read_defect(struct ctl_scsiio *ctsio)
{
	struct scsi_read_defect_data_10 *ccb10;
	struct scsi_read_defect_data_12 *ccb12;
	struct scsi_read_defect_data_hdr_10 *data10;
	struct scsi_read_defect_data_hdr_12 *data12;
	uint32_t alloc_len, data_len;
	uint8_t format;

	CTL_DEBUG_PRINT(("ctl_read_defect\n"));

	/* The 10- and 12-byte CDBs differ in field widths. */
	if (ctsio->cdb[0] == READ_DEFECT_DATA_10) {
		ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb;
		format = ccb10->format;
		alloc_len = scsi_2btoul(ccb10->alloc_length);
		data_len = sizeof(*data10);
	} else {
		ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb;
		format = ccb12->format;
		alloc_len = scsi_4btoul(ccb12->alloc_length);
		data_len = sizeof(*data12);
	}
	if (alloc_len == 0) {
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/* Echo the requested format back with a zero-length defect list. */
	if (ctsio->cdb[0] == READ_DEFECT_DATA_10) {
		data10 = (struct scsi_read_defect_data_hdr_10 *)
		    ctsio->kern_data_ptr;
		data10->format = format;
		scsi_ulto2b(0, data10->length);
	} else {
		data12 = (struct scsi_read_defect_data_hdr_12 *)
		    ctsio->kern_data_ptr;
		data12->format = format;
		scsi_ulto2b(0, data12->generation);
		scsi_ulto4b(0, data12->length);
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * REPORT TARGET PORT GROUPS ("tagret" is a (sic) typo kept for the
 * existing callers): report each port group's asymmetric access state
 * and its online ports mapped to this LUN.
 */
int
ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
{
	struct scsi_maintenance_in *cdb;
	int retval;
	int alloc_len, ext, total_len = 0, g, pc, pg, gs, os;
	int num_target_port_groups, num_target_ports;
	struct ctl_lun *lun;
	struct ctl_softc *softc;
	struct ctl_port *port;
	struct scsi_target_group_data *rtg_ptr;
	struct scsi_target_group_data_extended *rtg_ext_ptr;
	struct scsi_target_port_group_descriptor *tpg_desc;

	CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n"));

	cdb = (struct scsi_maintenance_in *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	softc = lun->ctl_softc;

	retval = CTL_RETVAL_COMPLETE;

	/* Parameter data format: length-only or extended header. */
	switch (cdb->byte2 & STG_PDF_MASK) {
	case STG_PDF_LENGTH:
		ext = 0;
		break;
	case STG_PDF_EXTENDED:
		ext = 1;
		break;
	default:
		ctl_set_invalid_field(/*ctsio*/ ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 1,
				      /*bit*/ 5);
		ctl_done((union ctl_io *)ctsio);
		return(retval);
	}

	if (softc->is_single)
		num_target_port_groups = 1;
	else
		num_target_port_groups = NUM_TARGET_PORT_GROUPS;
	/* Count online ports that have this LUN mapped. */
	num_target_ports = 0;
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(port, &softc->port_list, links) {
		if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
			continue;
		if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
			continue;
		num_target_ports++;
	}
	mtx_unlock(&softc->ctl_lock);

	if (ext)
		total_len = sizeof(struct scsi_target_group_data_extended);
	else
		total_len = sizeof(struct scsi_target_group_data);
	total_len += sizeof(struct scsi_target_port_group_descriptor) *
		num_target_port_groups +
	    sizeof(struct scsi_target_port_descriptor) * num_target_ports;

	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	/* Never move more data than the initiator's allocation length. */
	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	if (ext) {
		rtg_ext_ptr = (struct scsi_target_group_data_extended *)
		    ctsio->kern_data_ptr;
		scsi_ulto4b(total_len - 4, rtg_ext_ptr->length);
		rtg_ext_ptr->format_type = 0x10;
		rtg_ext_ptr->implicit_transition_time = 0;
		tpg_desc = &rtg_ext_ptr->groups[0];
	} else {
		rtg_ptr = (struct scsi_target_group_data *)
		    ctsio->kern_data_ptr;
		scsi_ulto4b(total_len - 4, rtg_ptr->length);
		tpg_desc = &rtg_ptr->groups[0];
	}

	mtx_lock(&softc->ctl_lock);
	/* pg is this controller's own port group index. */
	pg = softc->port_min / softc->port_cnt;
	/* gs: access state for our group, derived from the HA link/mode. */
	if (softc->ha_link == CTL_HA_LINK_OFFLINE)
		gs = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE;
	else if (softc->ha_link == CTL_HA_LINK_UNKNOWN)
		gs = TPG_ASYMMETRIC_ACCESS_TRANSITIONING;
	else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY)
		gs = TPG_ASYMMETRIC_ACCESS_STANDBY;
	else
		gs = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
	/* os: access state for the other (peer) group. */
	if (lun->flags & CTL_LUN_PRIMARY_SC) {
		os = gs;
		gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
	} else
		os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
	for (g = 0; g < num_target_port_groups; g++) {
		tpg_desc->pref_state = (g == pg) ? gs : os;
		tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP |
		    TPG_U_SUP | TPG_T_SUP;
		scsi_ulto2b(g + 1, tpg_desc->target_port_group);
		tpg_desc->status = TPG_IMPLICIT;
		pc = 0;
		STAILQ_FOREACH(port, &softc->port_list, links) {
			/* Only ports belonging to group g... */
			if (port->targ_port < g * softc->port_cnt ||
			    port->targ_port >= (g + 1) * softc->port_cnt)
				continue;
			/* ...that are online and have this LUN mapped. */
			if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
				continue;
			if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
				continue;
			scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc].
			    relative_target_port_identifier);
			pc++;
		}
		tpg_desc->target_port_count = pc;
		/* Advance past this group's variable-length descriptor. */
		tpg_desc = (struct scsi_target_port_group_descriptor *)
		    &tpg_desc->descriptors[pc];
	}
	mtx_unlock(&softc->ctl_lock);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return(retval);
}

/*
 * REPORT SUPPORTED OPERATION CODES: describe which CDB opcodes (and
 * service actions) this LUN type supports, per ctl_cmd_table.
 */
int
ctl_report_supported_opcodes(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_report_supported_opcodes *cdb;
	const struct ctl_cmd_entry *entry, *sentry;
	struct scsi_report_supported_opcodes_all *all;
	struct scsi_report_supported_opcodes_descr *descr;
	struct scsi_report_supported_opcodes_one *one;
	int retval;
	int alloc_len, total_len;
	int opcode, service_action, i, j, num;

	CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n"));

	cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval = CTL_RETVAL_COMPLETE;

	opcode = cdb->requested_opcode;
	service_action = scsi_2btoul(cdb->requested_service_action);
	/* First pass: compute the size of the parameter data. */
	switch (cdb->options & RSO_OPTIONS_MASK) {
	case RSO_OPTIONS_ALL:
		/* Count every applicable opcode/service-action pair. */
		num = 0;
		for (i = 0; i < 256; i++) {
			entry = &ctl_cmd_table[i];
			if (entry->flags & CTL_CMD_FLAG_SA5) {
				/* Opcode dispatches on a service action. */
				for (j = 0; j < 32; j++) {
					sentry = &((const struct ctl_cmd_entry *)
					    entry->execute)[j];
					if (ctl_cmd_applicable(
					    lun->be_lun->lun_type, sentry))
						num++;
				}
			} else {
				if (ctl_cmd_applicable(lun->be_lun->lun_type,
				    entry))
					num++;
			}
		}
		total_len = sizeof(struct scsi_report_supported_opcodes_all) +
		    num * sizeof(struct scsi_report_supported_opcodes_descr);
		break;
	case RSO_OPTIONS_OC:
		/* A service-action opcode cannot be queried without one. */
		if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) {
			ctl_set_invalid_field(/*ctsio*/ ctsio,
					      /*sks_valid*/ 1,
					      /*command*/ 1,
					      /*field*/ 2,
					      /*bit_valid*/ 1,
					      /*bit*/ 2);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
		total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32;
		break;
	case RSO_OPTIONS_OC_SA:
		/* Conversely, a service action needs an SA5 opcode. */
		if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 ||
		    service_action >= 32) {
			ctl_set_invalid_field(/*ctsio*/ ctsio,
					      /*sks_valid*/ 1,
					      /*command*/ 1,
					      /*field*/ 2,
					      /*bit_valid*/ 1,
					      /*bit*/ 2);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
		total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32;
		break;
	default:
		ctl_set_invalid_field(/*ctsio*/ ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 1,
				      /*bit*/ 2);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	/* Never move more data than the initiator's allocation length. */
	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	/* Second pass: fill in the parameter data. */
	switch (cdb->options & RSO_OPTIONS_MASK) {
	case RSO_OPTIONS_ALL:
		all = (struct scsi_report_supported_opcodes_all *)
		    ctsio->kern_data_ptr;
		num = 0;
		for (i = 0; i < 256; i++) {
			entry = &ctl_cmd_table[i];
			if (entry->flags & CTL_CMD_FLAG_SA5) {
				for (j = 0; j < 32; j++) {
					sentry = &((const struct ctl_cmd_entry *)
					    entry->execute)[j];
					if (!ctl_cmd_applicable(
					    lun->be_lun->lun_type, sentry))
						continue;
					descr = &all->descr[num++];
					descr->opcode = i;
					scsi_ulto2b(j, descr->service_action);
					descr->flags = RSO_SERVACTV;
					scsi_ulto2b(sentry->length,
					    descr->cdb_length);
				}
			} else {
				if (!ctl_cmd_applicable(lun->be_lun->lun_type,
				    entry))
					continue;
				descr = &all->descr[num++];
				descr->opcode = i;
				scsi_ulto2b(0, descr->service_action);
				descr->flags = 0;
				scsi_ulto2b(entry->length, descr->cdb_length);
			}
		}
		scsi_ulto4b(
		    num * sizeof(struct scsi_report_supported_opcodes_descr),
		    all->length);
		break;
	case RSO_OPTIONS_OC:
		one = (struct scsi_report_supported_opcodes_one *)
		    ctsio->kern_data_ptr;
		entry = &ctl_cmd_table[opcode];
		goto fill_one;
	case RSO_OPTIONS_OC_SA:
		one = (struct scsi_report_supported_opcodes_one *)
		    ctsio->kern_data_ptr;
		entry = &ctl_cmd_table[opcode];
		/* Descend into the per-service-action sub-table. */
		entry = &((const struct ctl_cmd_entry *)
		    entry->execute)[service_action];
fill_one:
		if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
			one->support = 3;	/* supported per standard */
			scsi_ulto2b(entry->length, one->cdb_length);
			one->cdb_usage[0] = opcode;
			memcpy(&one->cdb_usage[1], entry->usage,
			    entry->length - 1);
		} else
			one->support = 1;	/* not supported */
		break;
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return(retval);
}

/*
 * REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS: static capability bits.
 */
int
ctl_report_supported_tmf(struct ctl_scsiio *ctsio)
{
	struct scsi_report_supported_tmf *cdb;
	struct scsi_report_supported_tmf_data *data;
	int retval;
	int alloc_len, total_len;

	CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n"));

	cdb = (struct scsi_report_supported_tmf *)ctsio->cdb;

	retval = CTL_RETVAL_COMPLETE;

	total_len = sizeof(struct scsi_report_supported_tmf_data);
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	/* Never move more data than the initiator's allocation length. */
	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_report_supported_tmf_data *)ctsio->kern_data_ptr;
	data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_TRS;
	data->byte2 |= RST_ITNRS;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

/*
 * REPORT TIMESTAMP: return the current time as a SCSI timestamp
 * (milliseconds since the epoch, 48 bits).
 */
int
ctl_report_timestamp(struct ctl_scsiio *ctsio)
{
	struct scsi_report_timestamp *cdb;
	struct scsi_report_timestamp_data *data;
	struct timeval tv;
	int64_t timestamp;
	int retval;
	int alloc_len, total_len;

	CTL_DEBUG_PRINT(("ctl_report_timestamp\n"));

	cdb = (struct
	    scsi_report_timestamp *)ctsio->cdb;

	retval = CTL_RETVAL_COMPLETE;

	total_len = sizeof(struct scsi_report_timestamp_data);
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	/* Never move more data than the initiator's allocation length. */
	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr;
	scsi_ulto2b(sizeof(*data) - 2, data->length);
	data->origin = RTS_ORIG_OUTSIDE;
	getmicrotime(&tv);
	/* Wall clock in milliseconds, split into high 32 and low 16 bits. */
	timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000;
	scsi_ulto4b(timestamp >> 16, data->timestamp);
	scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

/*
 * PERSISTENT RESERVE IN: report registration keys, the active
 * reservation, capabilities, or full status.  The buffer is sized
 * under the LUN lock, allocated with the lock dropped, and the size
 * re-checked afterwards (goto retry) in case registrations changed.
 */
int
ctl_persistent_reserve_in(struct ctl_scsiio *ctsio)
{
	struct scsi_per_res_in *cdb;
	int alloc_len, total_len = 0;
	/* struct scsi_per_res_in_rsrv in_data; */
	struct ctl_lun *lun;
	struct ctl_softc *softc;
	uint64_t key;

	CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n"));

	cdb = (struct scsi_per_res_in *)ctsio->cdb;

	alloc_len = scsi_2btoul(cdb->length);

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	softc = lun->ctl_softc;

retry:
	/* Size the response under the lock. */
	mtx_lock(&lun->lun_lock);
	switch (cdb->action) {
	case SPRI_RK: /* read keys */
		total_len = sizeof(struct scsi_per_res_in_keys) +
			lun->pr_key_count *
			sizeof(struct scsi_per_res_key);
		break;
	case SPRI_RR: /* read reservation */
		if (lun->flags & CTL_LUN_PR_RESERVED)
			total_len = sizeof(struct scsi_per_res_in_rsrv);
		else
			total_len = sizeof(struct scsi_per_res_in_header);
		break;
	case SPRI_RC: /* report capabilities */
		total_len = sizeof(struct scsi_per_res_cap);
		break;
	case SPRI_RS: /* read full status */
		/* 256 bytes is the worst-case transport ID per descriptor. */
		total_len = sizeof(struct scsi_per_res_in_header) +
		    (sizeof(struct scsi_per_res_in_full_desc) + 256) *
		    lun->pr_key_count;
		break;
	default:
		panic("Invalid PR type %x", cdb->action);
	}
	mtx_unlock(&lun->lun_lock);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	/* Never move more data than the initiator's allocation length. */
	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}

	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	mtx_lock(&lun->lun_lock);
	switch (cdb->action) {
	case SPRI_RK: { // read keys
		struct scsi_per_res_in_keys *res_keys;
		int i, key_count;

		res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr;

		/*
		 * We had to drop the lock to allocate our buffer, which
		 * leaves time for someone to come in with another
		 * persistent reservation.  (That is unlikely, though,
		 * since this should be the only persistent reservation
		 * command active right now.)
		 */
		if (total_len != (sizeof(struct scsi_per_res_in_keys) +
		    (lun->pr_key_count *
		     sizeof(struct scsi_per_res_key)))){
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			printf("%s: reservation length changed, retrying\n",
			       __func__);
			goto retry;
		}

		scsi_ulto4b(lun->PRGeneration, res_keys->header.generation);

		scsi_ulto4b(sizeof(struct scsi_per_res_key) *
			     lun->pr_key_count, res_keys->header.length);

		for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) {
			if ((key = ctl_get_prkey(lun, i)) == 0)
				continue;

			/*
			 * We used lun->pr_key_count to calculate the
			 * size to allocate.  If it turns out the number of
			 * initiators with the registered flag set is
			 * larger than that (i.e. they haven't been kept in
			 * sync), we've got a problem.
			 */
			if (key_count >= lun->pr_key_count) {
#ifdef NEEDTOPORT
				csevent_log(CSC_CTL | CSC_SHELF_SW |
				    CTL_PR_ERROR,
				    csevent_LogType_Fault,
				    csevent_AlertLevel_Yellow,
				    csevent_FRU_ShelfController,
				    csevent_FRU_Firmware,
				    csevent_FRU_Unknown,
				    "registered keys %d >= key "
				    "count %d", key_count,
				    lun->pr_key_count);
#endif
				key_count++;
				continue;
			}
			scsi_u64to8b(key, res_keys->keys[key_count].key);
			key_count++;
		}
		break;
	}
	case SPRI_RR: { // read reservation
		struct scsi_per_res_in_rsrv *res;
		int tmp_len, header_only;

		res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr;

		scsi_ulto4b(lun->PRGeneration, res->header.generation);

		if (lun->flags & CTL_LUN_PR_RESERVED)
		{
			tmp_len = sizeof(struct scsi_per_res_in_rsrv);
			scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data),
				    res->header.length);
			header_only = 0;
		} else {
			tmp_len = sizeof(struct scsi_per_res_in_header);
			scsi_ulto4b(0, res->header.length);
			header_only = 1;
		}

		/*
		 * We had to drop the lock to allocate our buffer, which
		 * leaves time for someone to come in with another
		 * persistent reservation.  (That is unlikely, though,
		 * since this should be the only persistent reservation
		 * command active right now.)
		 */
		if (tmp_len != total_len) {
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			printf("%s: reservation status changed, retrying\n",
			       __func__);
			goto retry;
		}

		/*
		 * No reservation held, so we're done.
		 */
		if (header_only != 0)
			break;

		/*
		 * If the registration is an All Registrants type, the key
		 * is 0, since it doesn't really matter.
		 */
		if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
			scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx),
			    res->data.reservation);
		}
		res->data.scopetype = lun->res_type;
		break;
	}
	case SPRI_RC:     //report capabilities
	{
		struct scsi_per_res_cap *res_cap;
		uint16_t type_mask;

		res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr;
		scsi_ulto2b(sizeof(*res_cap), res_cap->length);
		res_cap->flags2 |= SPRI_TMV | SPRI_ALLOW_5;
		/* All standard reservation types are supported. */
		type_mask = SPRI_TM_WR_EX_AR |
			    SPRI_TM_EX_AC_RO |
			    SPRI_TM_WR_EX_RO |
			    SPRI_TM_EX_AC |
			    SPRI_TM_WR_EX |
			    SPRI_TM_EX_AC_AR;
		scsi_ulto2b(type_mask, res_cap->type_mask);
		break;
	}
	case SPRI_RS: { // read full status
		struct scsi_per_res_in_full *res_status;
		struct scsi_per_res_in_full_desc *res_desc;
		struct ctl_port *port;
		int i, len;

		res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr;

		/*
		 * We had to drop the lock to allocate our buffer, which
		 * leaves time for someone to come in with another
		 * persistent reservation.  (That is unlikely, though,
		 * since this should be the only persistent reservation
		 * command active right now.)
		 */
		if (total_len < (sizeof(struct scsi_per_res_in_header) +
		    (sizeof(struct scsi_per_res_in_full_desc) + 256) *
		    lun->pr_key_count)){
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			printf("%s: reservation length changed, retrying\n",
			    __func__);
			goto retry;
		}

		scsi_ulto4b(lun->PRGeneration, res_status->header.generation);

		/* One variable-length descriptor per registered initiator. */
		res_desc = &res_status->desc[0];
		for (i = 0; i < CTL_MAX_INITIATORS; i++) {
			if ((key = ctl_get_prkey(lun, i)) == 0)
				continue;

			scsi_u64to8b(key, res_desc->res_key.key);
			if ((lun->flags & CTL_LUN_PR_RESERVED) &&
			    (lun->pr_res_idx == i ||
			     lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) {
				res_desc->flags = SPRI_FULL_R_HOLDER;
				res_desc->scopetype = lun->res_type;
			}
			scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT,
			    res_desc->rel_trgt_port_id);
			len = 0;
			port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT];
			if (port != NULL)
				len = ctl_create_iid(port,
				    i % CTL_MAX_INIT_PER_PORT,
				    res_desc->transport_id);
			scsi_ulto4b(len, res_desc->additional_length);
			/* Advance past the variable-length transport ID. */
			res_desc = (struct scsi_per_res_in_full_desc *)
			    &res_desc->transport_id[len];
		}
		scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0],
		    res_status->header.length);
		break;
	}
	default:
		/*
		 * This is a bug, because we just checked for this above,
		 * and should have returned an error.
		 */
		panic("Invalid PR type %x", cdb->action);
		break; /* NOTREACHED */
	}
	mtx_unlock(&lun->lun_lock);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if
 * it should return.
7682 */ 7683static int 7684ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7685 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7686 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7687 struct scsi_per_res_out_parms* param) 7688{ 7689 union ctl_ha_msg persis_io; 7690 int i; 7691 7692 mtx_lock(&lun->lun_lock); 7693 if (sa_res_key == 0) { 7694 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7695 /* validate scope and type */ 7696 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7697 SPR_LU_SCOPE) { 7698 mtx_unlock(&lun->lun_lock); 7699 ctl_set_invalid_field(/*ctsio*/ ctsio, 7700 /*sks_valid*/ 1, 7701 /*command*/ 1, 7702 /*field*/ 2, 7703 /*bit_valid*/ 1, 7704 /*bit*/ 4); 7705 ctl_done((union ctl_io *)ctsio); 7706 return (1); 7707 } 7708 7709 if (type>8 || type==2 || type==4 || type==0) { 7710 mtx_unlock(&lun->lun_lock); 7711 ctl_set_invalid_field(/*ctsio*/ ctsio, 7712 /*sks_valid*/ 1, 7713 /*command*/ 1, 7714 /*field*/ 2, 7715 /*bit_valid*/ 1, 7716 /*bit*/ 0); 7717 ctl_done((union ctl_io *)ctsio); 7718 return (1); 7719 } 7720 7721 /* 7722 * Unregister everybody else and build UA for 7723 * them 7724 */ 7725 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7726 if (i == residx || ctl_get_prkey(lun, i) == 0) 7727 continue; 7728 7729 ctl_clr_prkey(lun, i); 7730 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7731 } 7732 lun->pr_key_count = 1; 7733 lun->res_type = type; 7734 if (lun->res_type != SPR_TYPE_WR_EX_AR 7735 && lun->res_type != SPR_TYPE_EX_AC_AR) 7736 lun->pr_res_idx = residx; 7737 lun->PRGeneration++; 7738 mtx_unlock(&lun->lun_lock); 7739 7740 /* send msg to other side */ 7741 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7742 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7743 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7744 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7745 persis_io.pr.pr_info.res_type = type; 7746 memcpy(persis_io.pr.pr_info.sa_res_key, 7747 param->serv_act_res_key, 7748 sizeof(param->serv_act_res_key)); 7749 
ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7750 sizeof(persis_io.pr), M_WAITOK); 7751 } else { 7752 /* not all registrants */ 7753 mtx_unlock(&lun->lun_lock); 7754 free(ctsio->kern_data_ptr, M_CTL); 7755 ctl_set_invalid_field(ctsio, 7756 /*sks_valid*/ 1, 7757 /*command*/ 0, 7758 /*field*/ 8, 7759 /*bit_valid*/ 0, 7760 /*bit*/ 0); 7761 ctl_done((union ctl_io *)ctsio); 7762 return (1); 7763 } 7764 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7765 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 7766 int found = 0; 7767 7768 if (res_key == sa_res_key) { 7769 /* special case */ 7770 /* 7771 * The spec implies this is not good but doesn't 7772 * say what to do. There are two choices either 7773 * generate a res conflict or check condition 7774 * with illegal field in parameter data. Since 7775 * that is what is done when the sa_res_key is 7776 * zero I'll take that approach since this has 7777 * to do with the sa_res_key. 7778 */ 7779 mtx_unlock(&lun->lun_lock); 7780 free(ctsio->kern_data_ptr, M_CTL); 7781 ctl_set_invalid_field(ctsio, 7782 /*sks_valid*/ 1, 7783 /*command*/ 0, 7784 /*field*/ 8, 7785 /*bit_valid*/ 0, 7786 /*bit*/ 0); 7787 ctl_done((union ctl_io *)ctsio); 7788 return (1); 7789 } 7790 7791 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7792 if (ctl_get_prkey(lun, i) != sa_res_key) 7793 continue; 7794 7795 found = 1; 7796 ctl_clr_prkey(lun, i); 7797 lun->pr_key_count--; 7798 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7799 } 7800 if (!found) { 7801 mtx_unlock(&lun->lun_lock); 7802 free(ctsio->kern_data_ptr, M_CTL); 7803 ctl_set_reservation_conflict(ctsio); 7804 ctl_done((union ctl_io *)ctsio); 7805 return (CTL_RETVAL_COMPLETE); 7806 } 7807 lun->PRGeneration++; 7808 mtx_unlock(&lun->lun_lock); 7809 7810 /* send msg to other side */ 7811 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7812 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7813 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7814 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7815 persis_io.pr.pr_info.res_type 
= type; 7816 memcpy(persis_io.pr.pr_info.sa_res_key, 7817 param->serv_act_res_key, 7818 sizeof(param->serv_act_res_key)); 7819 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7820 sizeof(persis_io.pr), M_WAITOK); 7821 } else { 7822 /* Reserved but not all registrants */ 7823 /* sa_res_key is res holder */ 7824 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 7825 /* validate scope and type */ 7826 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7827 SPR_LU_SCOPE) { 7828 mtx_unlock(&lun->lun_lock); 7829 ctl_set_invalid_field(/*ctsio*/ ctsio, 7830 /*sks_valid*/ 1, 7831 /*command*/ 1, 7832 /*field*/ 2, 7833 /*bit_valid*/ 1, 7834 /*bit*/ 4); 7835 ctl_done((union ctl_io *)ctsio); 7836 return (1); 7837 } 7838 7839 if (type>8 || type==2 || type==4 || type==0) { 7840 mtx_unlock(&lun->lun_lock); 7841 ctl_set_invalid_field(/*ctsio*/ ctsio, 7842 /*sks_valid*/ 1, 7843 /*command*/ 1, 7844 /*field*/ 2, 7845 /*bit_valid*/ 1, 7846 /*bit*/ 0); 7847 ctl_done((union ctl_io *)ctsio); 7848 return (1); 7849 } 7850 7851 /* 7852 * Do the following: 7853 * if sa_res_key != res_key remove all 7854 * registrants w/sa_res_key and generate UA 7855 * for these registrants(Registrations 7856 * Preempted) if it wasn't an exclusive 7857 * reservation generate UA(Reservations 7858 * Preempted) for all other registered nexuses 7859 * if the type has changed. Establish the new 7860 * reservation and holder. If res_key and 7861 * sa_res_key are the same do the above 7862 * except don't unregister the res holder. 
7863 */ 7864 7865 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7866 if (i == residx || ctl_get_prkey(lun, i) == 0) 7867 continue; 7868 7869 if (sa_res_key == ctl_get_prkey(lun, i)) { 7870 ctl_clr_prkey(lun, i); 7871 lun->pr_key_count--; 7872 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7873 } else if (type != lun->res_type 7874 && (lun->res_type == SPR_TYPE_WR_EX_RO 7875 || lun->res_type ==SPR_TYPE_EX_AC_RO)){ 7876 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 7877 } 7878 } 7879 lun->res_type = type; 7880 if (lun->res_type != SPR_TYPE_WR_EX_AR 7881 && lun->res_type != SPR_TYPE_EX_AC_AR) 7882 lun->pr_res_idx = residx; 7883 else 7884 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 7885 lun->PRGeneration++; 7886 mtx_unlock(&lun->lun_lock); 7887 7888 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7889 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7890 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7891 persis_io.pr.pr_info.residx = lun->pr_res_idx; 7892 persis_io.pr.pr_info.res_type = type; 7893 memcpy(persis_io.pr.pr_info.sa_res_key, 7894 param->serv_act_res_key, 7895 sizeof(param->serv_act_res_key)); 7896 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7897 sizeof(persis_io.pr), M_WAITOK); 7898 } else { 7899 /* 7900 * sa_res_key is not the res holder just 7901 * remove registrants 7902 */ 7903 int found=0; 7904 7905 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7906 if (sa_res_key != ctl_get_prkey(lun, i)) 7907 continue; 7908 7909 found = 1; 7910 ctl_clr_prkey(lun, i); 7911 lun->pr_key_count--; 7912 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7913 } 7914 7915 if (!found) { 7916 mtx_unlock(&lun->lun_lock); 7917 free(ctsio->kern_data_ptr, M_CTL); 7918 ctl_set_reservation_conflict(ctsio); 7919 ctl_done((union ctl_io *)ctsio); 7920 return (1); 7921 } 7922 lun->PRGeneration++; 7923 mtx_unlock(&lun->lun_lock); 7924 7925 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 7926 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 7927 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 7928 persis_io.pr.pr_info.residx = 
lun->pr_res_idx; 7929 persis_io.pr.pr_info.res_type = type; 7930 memcpy(persis_io.pr.pr_info.sa_res_key, 7931 param->serv_act_res_key, 7932 sizeof(param->serv_act_res_key)); 7933 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 7934 sizeof(persis_io.pr), M_WAITOK); 7935 } 7936 } 7937 return (0); 7938} 7939 7940static void 7941ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 7942{ 7943 uint64_t sa_res_key; 7944 int i; 7945 7946 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 7947 7948 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 7949 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 7950 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 7951 if (sa_res_key == 0) { 7952 /* 7953 * Unregister everybody else and build UA for 7954 * them 7955 */ 7956 for(i = 0; i < CTL_MAX_INITIATORS; i++) { 7957 if (i == msg->pr.pr_info.residx || 7958 ctl_get_prkey(lun, i) == 0) 7959 continue; 7960 7961 ctl_clr_prkey(lun, i); 7962 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7963 } 7964 7965 lun->pr_key_count = 1; 7966 lun->res_type = msg->pr.pr_info.res_type; 7967 if (lun->res_type != SPR_TYPE_WR_EX_AR 7968 && lun->res_type != SPR_TYPE_EX_AC_AR) 7969 lun->pr_res_idx = msg->pr.pr_info.residx; 7970 } else { 7971 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7972 if (sa_res_key == ctl_get_prkey(lun, i)) 7973 continue; 7974 7975 ctl_clr_prkey(lun, i); 7976 lun->pr_key_count--; 7977 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7978 } 7979 } 7980 } else { 7981 for (i = 0; i < CTL_MAX_INITIATORS; i++) { 7982 if (i == msg->pr.pr_info.residx || 7983 ctl_get_prkey(lun, i) == 0) 7984 continue; 7985 7986 if (sa_res_key == ctl_get_prkey(lun, i)) { 7987 ctl_clr_prkey(lun, i); 7988 lun->pr_key_count--; 7989 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 7990 } else if (msg->pr.pr_info.res_type != lun->res_type 7991 && (lun->res_type == SPR_TYPE_WR_EX_RO 7992 || lun->res_type == SPR_TYPE_EX_AC_RO)) { 7993 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 7994 } 7995 } 7996 lun->res_type = 
msg->pr.pr_info.res_type; 7997 if (lun->res_type != SPR_TYPE_WR_EX_AR 7998 && lun->res_type != SPR_TYPE_EX_AC_AR) 7999 lun->pr_res_idx = msg->pr.pr_info.residx; 8000 else 8001 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8002 } 8003 lun->PRGeneration++; 8004 8005} 8006 8007 8008int 8009ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 8010{ 8011 int retval; 8012 u_int32_t param_len; 8013 struct scsi_per_res_out *cdb; 8014 struct ctl_lun *lun; 8015 struct scsi_per_res_out_parms* param; 8016 struct ctl_softc *softc; 8017 uint32_t residx; 8018 uint64_t res_key, sa_res_key, key; 8019 uint8_t type; 8020 union ctl_ha_msg persis_io; 8021 int i; 8022 8023 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 8024 8025 retval = CTL_RETVAL_COMPLETE; 8026 8027 cdb = (struct scsi_per_res_out *)ctsio->cdb; 8028 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8029 softc = lun->ctl_softc; 8030 8031 /* 8032 * We only support whole-LUN scope. The scope & type are ignored for 8033 * register, register and ignore existing key and clear. 8034 * We sometimes ignore scope and type on preempts too!! 8035 * Verify reservation type here as well. 
8036 */ 8037 type = cdb->scope_type & SPR_TYPE_MASK; 8038 if ((cdb->action == SPRO_RESERVE) 8039 || (cdb->action == SPRO_RELEASE)) { 8040 if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { 8041 ctl_set_invalid_field(/*ctsio*/ ctsio, 8042 /*sks_valid*/ 1, 8043 /*command*/ 1, 8044 /*field*/ 2, 8045 /*bit_valid*/ 1, 8046 /*bit*/ 4); 8047 ctl_done((union ctl_io *)ctsio); 8048 return (CTL_RETVAL_COMPLETE); 8049 } 8050 8051 if (type>8 || type==2 || type==4 || type==0) { 8052 ctl_set_invalid_field(/*ctsio*/ ctsio, 8053 /*sks_valid*/ 1, 8054 /*command*/ 1, 8055 /*field*/ 2, 8056 /*bit_valid*/ 1, 8057 /*bit*/ 0); 8058 ctl_done((union ctl_io *)ctsio); 8059 return (CTL_RETVAL_COMPLETE); 8060 } 8061 } 8062 8063 param_len = scsi_4btoul(cdb->length); 8064 8065 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 8066 ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); 8067 ctsio->kern_data_len = param_len; 8068 ctsio->kern_total_len = param_len; 8069 ctsio->kern_data_resid = 0; 8070 ctsio->kern_rel_offset = 0; 8071 ctsio->kern_sg_entries = 0; 8072 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 8073 ctsio->be_move_done = ctl_config_move_done; 8074 ctl_datamove((union ctl_io *)ctsio); 8075 8076 return (CTL_RETVAL_COMPLETE); 8077 } 8078 8079 param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; 8080 8081 residx = ctl_get_initindex(&ctsio->io_hdr.nexus); 8082 res_key = scsi_8btou64(param->res_key.key); 8083 sa_res_key = scsi_8btou64(param->serv_act_res_key); 8084 8085 /* 8086 * Validate the reservation key here except for SPRO_REG_IGNO 8087 * This must be done for all other service actions 8088 */ 8089 if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { 8090 mtx_lock(&lun->lun_lock); 8091 if ((key = ctl_get_prkey(lun, residx)) != 0) { 8092 if (res_key != key) { 8093 /* 8094 * The current key passed in doesn't match 8095 * the one the initiator previously 8096 * registered. 
8097 */ 8098 mtx_unlock(&lun->lun_lock); 8099 free(ctsio->kern_data_ptr, M_CTL); 8100 ctl_set_reservation_conflict(ctsio); 8101 ctl_done((union ctl_io *)ctsio); 8102 return (CTL_RETVAL_COMPLETE); 8103 } 8104 } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { 8105 /* 8106 * We are not registered 8107 */ 8108 mtx_unlock(&lun->lun_lock); 8109 free(ctsio->kern_data_ptr, M_CTL); 8110 ctl_set_reservation_conflict(ctsio); 8111 ctl_done((union ctl_io *)ctsio); 8112 return (CTL_RETVAL_COMPLETE); 8113 } else if (res_key != 0) { 8114 /* 8115 * We are not registered and trying to register but 8116 * the register key isn't zero. 8117 */ 8118 mtx_unlock(&lun->lun_lock); 8119 free(ctsio->kern_data_ptr, M_CTL); 8120 ctl_set_reservation_conflict(ctsio); 8121 ctl_done((union ctl_io *)ctsio); 8122 return (CTL_RETVAL_COMPLETE); 8123 } 8124 mtx_unlock(&lun->lun_lock); 8125 } 8126 8127 switch (cdb->action & SPRO_ACTION_MASK) { 8128 case SPRO_REGISTER: 8129 case SPRO_REG_IGNO: { 8130 8131#if 0 8132 printf("Registration received\n"); 8133#endif 8134 8135 /* 8136 * We don't support any of these options, as we report in 8137 * the read capabilities request (see 8138 * ctl_persistent_reserve_in(), above). 8139 */ 8140 if ((param->flags & SPR_SPEC_I_PT) 8141 || (param->flags & SPR_ALL_TG_PT) 8142 || (param->flags & SPR_APTPL)) { 8143 int bit_ptr; 8144 8145 if (param->flags & SPR_APTPL) 8146 bit_ptr = 0; 8147 else if (param->flags & SPR_ALL_TG_PT) 8148 bit_ptr = 2; 8149 else /* SPR_SPEC_I_PT */ 8150 bit_ptr = 3; 8151 8152 free(ctsio->kern_data_ptr, M_CTL); 8153 ctl_set_invalid_field(ctsio, 8154 /*sks_valid*/ 1, 8155 /*command*/ 0, 8156 /*field*/ 20, 8157 /*bit_valid*/ 1, 8158 /*bit*/ bit_ptr); 8159 ctl_done((union ctl_io *)ctsio); 8160 return (CTL_RETVAL_COMPLETE); 8161 } 8162 8163 mtx_lock(&lun->lun_lock); 8164 8165 /* 8166 * The initiator wants to clear the 8167 * key/unregister. 
8168 */ 8169 if (sa_res_key == 0) { 8170 if ((res_key == 0 8171 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) 8172 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO 8173 && ctl_get_prkey(lun, residx) == 0)) { 8174 mtx_unlock(&lun->lun_lock); 8175 goto done; 8176 } 8177 8178 ctl_clr_prkey(lun, residx); 8179 lun->pr_key_count--; 8180 8181 if (residx == lun->pr_res_idx) { 8182 lun->flags &= ~CTL_LUN_PR_RESERVED; 8183 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8184 8185 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8186 || lun->res_type == SPR_TYPE_EX_AC_RO) 8187 && lun->pr_key_count) { 8188 /* 8189 * If the reservation is a registrants 8190 * only type we need to generate a UA 8191 * for other registered inits. The 8192 * sense code should be RESERVATIONS 8193 * RELEASED 8194 */ 8195 8196 for (i = softc->init_min; i < softc->init_max; i++){ 8197 if (ctl_get_prkey(lun, i) == 0) 8198 continue; 8199 ctl_est_ua(lun, i, 8200 CTL_UA_RES_RELEASE); 8201 } 8202 } 8203 lun->res_type = 0; 8204 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8205 if (lun->pr_key_count==0) { 8206 lun->flags &= ~CTL_LUN_PR_RESERVED; 8207 lun->res_type = 0; 8208 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8209 } 8210 } 8211 lun->PRGeneration++; 8212 mtx_unlock(&lun->lun_lock); 8213 8214 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8215 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8216 persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; 8217 persis_io.pr.pr_info.residx = residx; 8218 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8219 sizeof(persis_io.pr), M_WAITOK); 8220 } else /* sa_res_key != 0 */ { 8221 8222 /* 8223 * If we aren't registered currently then increment 8224 * the key count and set the registered flag. 
8225 */ 8226 ctl_alloc_prkey(lun, residx); 8227 if (ctl_get_prkey(lun, residx) == 0) 8228 lun->pr_key_count++; 8229 ctl_set_prkey(lun, residx, sa_res_key); 8230 lun->PRGeneration++; 8231 mtx_unlock(&lun->lun_lock); 8232 8233 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8234 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8235 persis_io.pr.pr_info.action = CTL_PR_REG_KEY; 8236 persis_io.pr.pr_info.residx = residx; 8237 memcpy(persis_io.pr.pr_info.sa_res_key, 8238 param->serv_act_res_key, 8239 sizeof(param->serv_act_res_key)); 8240 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8241 sizeof(persis_io.pr), M_WAITOK); 8242 } 8243 8244 break; 8245 } 8246 case SPRO_RESERVE: 8247#if 0 8248 printf("Reserve executed type %d\n", type); 8249#endif 8250 mtx_lock(&lun->lun_lock); 8251 if (lun->flags & CTL_LUN_PR_RESERVED) { 8252 /* 8253 * if this isn't the reservation holder and it's 8254 * not a "all registrants" type or if the type is 8255 * different then we have a conflict 8256 */ 8257 if ((lun->pr_res_idx != residx 8258 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) 8259 || lun->res_type != type) { 8260 mtx_unlock(&lun->lun_lock); 8261 free(ctsio->kern_data_ptr, M_CTL); 8262 ctl_set_reservation_conflict(ctsio); 8263 ctl_done((union ctl_io *)ctsio); 8264 return (CTL_RETVAL_COMPLETE); 8265 } 8266 mtx_unlock(&lun->lun_lock); 8267 } else /* create a reservation */ { 8268 /* 8269 * If it's not an "all registrants" type record 8270 * reservation holder 8271 */ 8272 if (type != SPR_TYPE_WR_EX_AR 8273 && type != SPR_TYPE_EX_AC_AR) 8274 lun->pr_res_idx = residx; /* Res holder */ 8275 else 8276 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8277 8278 lun->flags |= CTL_LUN_PR_RESERVED; 8279 lun->res_type = type; 8280 8281 mtx_unlock(&lun->lun_lock); 8282 8283 /* send msg to other side */ 8284 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8285 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8286 persis_io.pr.pr_info.action = CTL_PR_RESERVE; 8287 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8288 
persis_io.pr.pr_info.res_type = type; 8289 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8290 sizeof(persis_io.pr), M_WAITOK); 8291 } 8292 break; 8293 8294 case SPRO_RELEASE: 8295 mtx_lock(&lun->lun_lock); 8296 if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { 8297 /* No reservation exists return good status */ 8298 mtx_unlock(&lun->lun_lock); 8299 goto done; 8300 } 8301 /* 8302 * Is this nexus a reservation holder? 8303 */ 8304 if (lun->pr_res_idx != residx 8305 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 8306 /* 8307 * not a res holder return good status but 8308 * do nothing 8309 */ 8310 mtx_unlock(&lun->lun_lock); 8311 goto done; 8312 } 8313 8314 if (lun->res_type != type) { 8315 mtx_unlock(&lun->lun_lock); 8316 free(ctsio->kern_data_ptr, M_CTL); 8317 ctl_set_illegal_pr_release(ctsio); 8318 ctl_done((union ctl_io *)ctsio); 8319 return (CTL_RETVAL_COMPLETE); 8320 } 8321 8322 /* okay to release */ 8323 lun->flags &= ~CTL_LUN_PR_RESERVED; 8324 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8325 lun->res_type = 0; 8326 8327 /* 8328 * if this isn't an exclusive access 8329 * res generate UA for all other 8330 * registrants. 
8331 */ 8332 if (type != SPR_TYPE_EX_AC 8333 && type != SPR_TYPE_WR_EX) { 8334 for (i = softc->init_min; i < softc->init_max; i++) { 8335 if (i == residx || ctl_get_prkey(lun, i) == 0) 8336 continue; 8337 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8338 } 8339 } 8340 mtx_unlock(&lun->lun_lock); 8341 8342 /* Send msg to other side */ 8343 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8344 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8345 persis_io.pr.pr_info.action = CTL_PR_RELEASE; 8346 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8347 sizeof(persis_io.pr), M_WAITOK); 8348 break; 8349 8350 case SPRO_CLEAR: 8351 /* send msg to other side */ 8352 8353 mtx_lock(&lun->lun_lock); 8354 lun->flags &= ~CTL_LUN_PR_RESERVED; 8355 lun->res_type = 0; 8356 lun->pr_key_count = 0; 8357 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8358 8359 ctl_clr_prkey(lun, residx); 8360 for (i = 0; i < CTL_MAX_INITIATORS; i++) 8361 if (ctl_get_prkey(lun, i) != 0) { 8362 ctl_clr_prkey(lun, i); 8363 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8364 } 8365 lun->PRGeneration++; 8366 mtx_unlock(&lun->lun_lock); 8367 8368 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8369 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8370 persis_io.pr.pr_info.action = CTL_PR_CLEAR; 8371 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, 8372 sizeof(persis_io.pr), M_WAITOK); 8373 break; 8374 8375 case SPRO_PREEMPT: 8376 case SPRO_PRE_ABO: { 8377 int nretval; 8378 8379 nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, 8380 residx, ctsio, cdb, param); 8381 if (nretval != 0) 8382 return (CTL_RETVAL_COMPLETE); 8383 break; 8384 } 8385 default: 8386 panic("Invalid PR type %x", cdb->action); 8387 } 8388 8389done: 8390 free(ctsio->kern_data_ptr, M_CTL); 8391 ctl_set_success(ctsio); 8392 ctl_done((union ctl_io *)ctsio); 8393 8394 return (retval); 8395} 8396 8397/* 8398 * This routine is for handling a message from the other SC pertaining to 8399 * persistent reserve out. 
All the error checking will have been done 8400 * so only perorming the action need be done here to keep the two 8401 * in sync. 8402 */ 8403static void 8404ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg) 8405{ 8406 struct ctl_lun *lun; 8407 struct ctl_softc *softc; 8408 int i; 8409 uint32_t residx, targ_lun; 8410 8411 softc = control_softc; 8412 targ_lun = msg->hdr.nexus.targ_mapped_lun; 8413 mtx_lock(&softc->ctl_lock); 8414 if ((targ_lun >= CTL_MAX_LUNS) || 8415 ((lun = softc->ctl_luns[targ_lun]) == NULL)) { 8416 mtx_unlock(&softc->ctl_lock); 8417 return; 8418 } 8419 mtx_lock(&lun->lun_lock); 8420 mtx_unlock(&softc->ctl_lock); 8421 if (lun->flags & CTL_LUN_DISABLED) { 8422 mtx_unlock(&lun->lun_lock); 8423 return; 8424 } 8425 residx = ctl_get_initindex(&msg->hdr.nexus); 8426 switch(msg->pr.pr_info.action) { 8427 case CTL_PR_REG_KEY: 8428 ctl_alloc_prkey(lun, msg->pr.pr_info.residx); 8429 if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) 8430 lun->pr_key_count++; 8431 ctl_set_prkey(lun, msg->pr.pr_info.residx, 8432 scsi_8btou64(msg->pr.pr_info.sa_res_key)); 8433 lun->PRGeneration++; 8434 break; 8435 8436 case CTL_PR_UNREG_KEY: 8437 ctl_clr_prkey(lun, msg->pr.pr_info.residx); 8438 lun->pr_key_count--; 8439 8440 /* XXX Need to see if the reservation has been released */ 8441 /* if so do we need to generate UA? */ 8442 if (msg->pr.pr_info.residx == lun->pr_res_idx) { 8443 lun->flags &= ~CTL_LUN_PR_RESERVED; 8444 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8445 8446 if ((lun->res_type == SPR_TYPE_WR_EX_RO 8447 || lun->res_type == SPR_TYPE_EX_AC_RO) 8448 && lun->pr_key_count) { 8449 /* 8450 * If the reservation is a registrants 8451 * only type we need to generate a UA 8452 * for other registered inits. 
The 8453 * sense code should be RESERVATIONS 8454 * RELEASED 8455 */ 8456 8457 for (i = softc->init_min; i < softc->init_max; i++) { 8458 if (ctl_get_prkey(lun, i) == 0) 8459 continue; 8460 8461 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8462 } 8463 } 8464 lun->res_type = 0; 8465 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 8466 if (lun->pr_key_count==0) { 8467 lun->flags &= ~CTL_LUN_PR_RESERVED; 8468 lun->res_type = 0; 8469 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8470 } 8471 } 8472 lun->PRGeneration++; 8473 break; 8474 8475 case CTL_PR_RESERVE: 8476 lun->flags |= CTL_LUN_PR_RESERVED; 8477 lun->res_type = msg->pr.pr_info.res_type; 8478 lun->pr_res_idx = msg->pr.pr_info.residx; 8479 8480 break; 8481 8482 case CTL_PR_RELEASE: 8483 /* 8484 * if this isn't an exclusive access res generate UA for all 8485 * other registrants. 8486 */ 8487 if (lun->res_type != SPR_TYPE_EX_AC 8488 && lun->res_type != SPR_TYPE_WR_EX) { 8489 for (i = softc->init_min; i < softc->init_max; i++) 8490 if (i == residx || ctl_get_prkey(lun, i) == 0) 8491 continue; 8492 ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); 8493 } 8494 8495 lun->flags &= ~CTL_LUN_PR_RESERVED; 8496 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8497 lun->res_type = 0; 8498 break; 8499 8500 case CTL_PR_PREEMPT: 8501 ctl_pro_preempt_other(lun, msg); 8502 break; 8503 case CTL_PR_CLEAR: 8504 lun->flags &= ~CTL_LUN_PR_RESERVED; 8505 lun->res_type = 0; 8506 lun->pr_key_count = 0; 8507 lun->pr_res_idx = CTL_PR_NO_RESERVATION; 8508 8509 for (i=0; i < CTL_MAX_INITIATORS; i++) { 8510 if (ctl_get_prkey(lun, i) == 0) 8511 continue; 8512 ctl_clr_prkey(lun, i); 8513 ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); 8514 } 8515 lun->PRGeneration++; 8516 break; 8517 } 8518 8519 mtx_unlock(&lun->lun_lock); 8520} 8521 8522int 8523ctl_read_write(struct ctl_scsiio *ctsio) 8524{ 8525 struct ctl_lun *lun; 8526 struct ctl_lba_len_flags *lbalen; 8527 uint64_t lba; 8528 uint32_t num_blocks; 8529 int flags, retval; 8530 int isread; 8531 8532 lun = (struct 
ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8533 8534 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); 8535 8536 flags = 0; 8537 retval = CTL_RETVAL_COMPLETE; 8538 8539 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 8540 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; 8541 switch (ctsio->cdb[0]) { 8542 case READ_6: 8543 case WRITE_6: { 8544 struct scsi_rw_6 *cdb; 8545 8546 cdb = (struct scsi_rw_6 *)ctsio->cdb; 8547 8548 lba = scsi_3btoul(cdb->addr); 8549 /* only 5 bits are valid in the most significant address byte */ 8550 lba &= 0x1fffff; 8551 num_blocks = cdb->length; 8552 /* 8553 * This is correct according to SBC-2. 8554 */ 8555 if (num_blocks == 0) 8556 num_blocks = 256; 8557 break; 8558 } 8559 case READ_10: 8560 case WRITE_10: { 8561 struct scsi_rw_10 *cdb; 8562 8563 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8564 if (cdb->byte2 & SRW10_FUA) 8565 flags |= CTL_LLF_FUA; 8566 if (cdb->byte2 & SRW10_DPO) 8567 flags |= CTL_LLF_DPO; 8568 lba = scsi_4btoul(cdb->addr); 8569 num_blocks = scsi_2btoul(cdb->length); 8570 break; 8571 } 8572 case WRITE_VERIFY_10: { 8573 struct scsi_write_verify_10 *cdb; 8574 8575 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 8576 flags |= CTL_LLF_FUA; 8577 if (cdb->byte2 & SWV_DPO) 8578 flags |= CTL_LLF_DPO; 8579 lba = scsi_4btoul(cdb->addr); 8580 num_blocks = scsi_2btoul(cdb->length); 8581 break; 8582 } 8583 case READ_12: 8584 case WRITE_12: { 8585 struct scsi_rw_12 *cdb; 8586 8587 cdb = (struct scsi_rw_12 *)ctsio->cdb; 8588 if (cdb->byte2 & SRW12_FUA) 8589 flags |= CTL_LLF_FUA; 8590 if (cdb->byte2 & SRW12_DPO) 8591 flags |= CTL_LLF_DPO; 8592 lba = scsi_4btoul(cdb->addr); 8593 num_blocks = scsi_4btoul(cdb->length); 8594 break; 8595 } 8596 case WRITE_VERIFY_12: { 8597 struct scsi_write_verify_12 *cdb; 8598 8599 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 8600 flags |= CTL_LLF_FUA; 8601 if (cdb->byte2 & SWV_DPO) 8602 flags |= CTL_LLF_DPO; 8603 lba = scsi_4btoul(cdb->addr); 8604 
num_blocks = scsi_4btoul(cdb->length); 8605 break; 8606 } 8607 case READ_16: 8608 case WRITE_16: { 8609 struct scsi_rw_16 *cdb; 8610 8611 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8612 if (cdb->byte2 & SRW12_FUA) 8613 flags |= CTL_LLF_FUA; 8614 if (cdb->byte2 & SRW12_DPO) 8615 flags |= CTL_LLF_DPO; 8616 lba = scsi_8btou64(cdb->addr); 8617 num_blocks = scsi_4btoul(cdb->length); 8618 break; 8619 } 8620 case WRITE_ATOMIC_16: { 8621 struct scsi_rw_16 *cdb; 8622 8623 if (lun->be_lun->atomicblock == 0) { 8624 ctl_set_invalid_opcode(ctsio); 8625 ctl_done((union ctl_io *)ctsio); 8626 return (CTL_RETVAL_COMPLETE); 8627 } 8628 8629 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8630 if (cdb->byte2 & SRW12_FUA) 8631 flags |= CTL_LLF_FUA; 8632 if (cdb->byte2 & SRW12_DPO) 8633 flags |= CTL_LLF_DPO; 8634 lba = scsi_8btou64(cdb->addr); 8635 num_blocks = scsi_4btoul(cdb->length); 8636 if (num_blocks > lun->be_lun->atomicblock) { 8637 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 8638 /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, 8639 /*bit*/ 0); 8640 ctl_done((union ctl_io *)ctsio); 8641 return (CTL_RETVAL_COMPLETE); 8642 } 8643 break; 8644 } 8645 case WRITE_VERIFY_16: { 8646 struct scsi_write_verify_16 *cdb; 8647 8648 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 8649 flags |= CTL_LLF_FUA; 8650 if (cdb->byte2 & SWV_DPO) 8651 flags |= CTL_LLF_DPO; 8652 lba = scsi_8btou64(cdb->addr); 8653 num_blocks = scsi_4btoul(cdb->length); 8654 break; 8655 } 8656 default: 8657 /* 8658 * We got a command we don't support. This shouldn't 8659 * happen, commands should be filtered out above us. 8660 */ 8661 ctl_set_invalid_opcode(ctsio); 8662 ctl_done((union ctl_io *)ctsio); 8663 8664 return (CTL_RETVAL_COMPLETE); 8665 break; /* NOTREACHED */ 8666 } 8667 8668 /* 8669 * The first check is to make sure we're in bounds, the second 8670 * check is to catch wrap-around problems. If the lba + num blocks 8671 * is less than the lba, then we've wrapped around and the block 8672 * range is invalid anyway. 
8673 */ 8674 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8675 || ((lba + num_blocks) < lba)) { 8676 ctl_set_lba_out_of_range(ctsio); 8677 ctl_done((union ctl_io *)ctsio); 8678 return (CTL_RETVAL_COMPLETE); 8679 } 8680 8681 /* 8682 * According to SBC-3, a transfer length of 0 is not an error. 8683 * Note that this cannot happen with WRITE(6) or READ(6), since 0 8684 * translates to 256 blocks for those commands. 8685 */ 8686 if (num_blocks == 0) { 8687 ctl_set_success(ctsio); 8688 ctl_done((union ctl_io *)ctsio); 8689 return (CTL_RETVAL_COMPLETE); 8690 } 8691 8692 /* Set FUA and/or DPO if caches are disabled. */ 8693 if (isread) { 8694 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 8695 SCP_RCD) != 0) 8696 flags |= CTL_LLF_FUA | CTL_LLF_DPO; 8697 } else { 8698 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 8699 SCP_WCE) == 0) 8700 flags |= CTL_LLF_FUA; 8701 } 8702 8703 lbalen = (struct ctl_lba_len_flags *) 8704 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8705 lbalen->lba = lba; 8706 lbalen->len = num_blocks; 8707 lbalen->flags = (isread ? 
CTL_LLF_READ : CTL_LLF_WRITE) | flags; 8708 8709 ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; 8710 ctsio->kern_rel_offset = 0; 8711 8712 CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); 8713 8714 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8715 8716 return (retval); 8717} 8718 8719static int 8720ctl_cnw_cont(union ctl_io *io) 8721{ 8722 struct ctl_scsiio *ctsio; 8723 struct ctl_lun *lun; 8724 struct ctl_lba_len_flags *lbalen; 8725 int retval; 8726 8727 ctsio = &io->scsiio; 8728 ctsio->io_hdr.status = CTL_STATUS_NONE; 8729 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; 8730 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8731 lbalen = (struct ctl_lba_len_flags *) 8732 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 8733 lbalen->flags &= ~CTL_LLF_COMPARE; 8734 lbalen->flags |= CTL_LLF_WRITE; 8735 8736 CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); 8737 retval = lun->backend->data_submit((union ctl_io *)ctsio); 8738 return (retval); 8739} 8740 8741int 8742ctl_cnw(struct ctl_scsiio *ctsio) 8743{ 8744 struct ctl_lun *lun; 8745 struct ctl_lba_len_flags *lbalen; 8746 uint64_t lba; 8747 uint32_t num_blocks; 8748 int flags, retval; 8749 8750 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8751 8752 CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); 8753 8754 flags = 0; 8755 retval = CTL_RETVAL_COMPLETE; 8756 8757 switch (ctsio->cdb[0]) { 8758 case COMPARE_AND_WRITE: { 8759 struct scsi_compare_and_write *cdb; 8760 8761 cdb = (struct scsi_compare_and_write *)ctsio->cdb; 8762 if (cdb->byte2 & SRW10_FUA) 8763 flags |= CTL_LLF_FUA; 8764 if (cdb->byte2 & SRW10_DPO) 8765 flags |= CTL_LLF_DPO; 8766 lba = scsi_8btou64(cdb->addr); 8767 num_blocks = cdb->length; 8768 break; 8769 } 8770 default: 8771 /* 8772 * We got a command we don't support. This shouldn't 8773 * happen, commands should be filtered out above us. 
8774 */ 8775 ctl_set_invalid_opcode(ctsio); 8776 ctl_done((union ctl_io *)ctsio); 8777 8778 return (CTL_RETVAL_COMPLETE); 8779 break; /* NOTREACHED */ 8780 } 8781 8782 /* 8783 * The first check is to make sure we're in bounds, the second 8784 * check is to catch wrap-around problems. If the lba + num blocks 8785 * is less than the lba, then we've wrapped around and the block 8786 * range is invalid anyway. 8787 */ 8788 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8789 || ((lba + num_blocks) < lba)) { 8790 ctl_set_lba_out_of_range(ctsio); 8791 ctl_done((union ctl_io *)ctsio); 8792 return (CTL_RETVAL_COMPLETE); 8793 } 8794 8795 /* 8796 * According to SBC-3, a transfer length of 0 is not an error. 8797 */ 8798 if (num_blocks == 0) { 8799 ctl_set_success(ctsio); 8800 ctl_done((union ctl_io *)ctsio); 8801 return (CTL_RETVAL_COMPLETE); 8802 } 8803 8804 /* Set FUA if write cache is disabled. */ 8805 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 8806 SCP_WCE) == 0) 8807 flags |= CTL_LLF_FUA; 8808 8809 ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; 8810 ctsio->kern_rel_offset = 0; 8811 8812 /* 8813 * Set the IO_CONT flag, so that if this I/O gets passed to 8814 * ctl_data_submit_done(), it'll get passed back to 8815 * ctl_ctl_cnw_cont() for further processing. 
/*
 * Handler for the SCSI VERIFY(10), VERIFY(12) and VERIFY(16) commands.
 *
 * Decodes the LBA/transfer length from whichever CDB variant arrived,
 * validates the range against the backing LUN, and hands the request to
 * the backend either as a medium verification (BYTCHK == 0, no data
 * phase) or as a data comparison (BYTCHK == 1, initiator supplies the
 * data to compare against the medium).
 */
int
ctl_verify(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct ctl_lba_len_flags *lbalen;
	uint64_t lba;
	uint32_t num_blocks;
	int bytchk, flags;
	int retval;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0]));

	/* Verify requests always go to the medium, bypassing any cache. */
	bytchk = 0;
	flags = CTL_LLF_FUA;
	retval = CTL_RETVAL_COMPLETE;

	switch (ctsio->cdb[0]) {
	case VERIFY_10: {
		struct scsi_verify_10 *cdb;

		cdb = (struct scsi_verify_10 *)ctsio->cdb;
		if (cdb->byte2 & SVFY_BYTCHK)
			bytchk = 1;
		if (cdb->byte2 & SVFY_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_2btoul(cdb->length);
		break;
	}
	case VERIFY_12: {
		struct scsi_verify_12 *cdb;

		cdb = (struct scsi_verify_12 *)ctsio->cdb;
		if (cdb->byte2 & SVFY_BYTCHK)
			bytchk = 1;
		if (cdb->byte2 & SVFY_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		break;
	}
	case VERIFY_16: {
		/*
		 * NOTE(review): uses struct scsi_rw_16 for the 16-byte
		 * verify CDB; presumably the field layout matches the
		 * verify variant — confirm against scsi_da.h.
		 */
		struct scsi_rw_16 *cdb;

		cdb = (struct scsi_rw_16 *)ctsio->cdb;
		if (cdb->byte2 & SVFY_BYTCHK)
			bytchk = 1;
		if (cdb->byte2 & SVFY_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_8btou64(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		break;
	}
	default:
		/*
		 * We got a command we don't support.  This shouldn't
		 * happen, commands should be filtered out above us.
		 */
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * The first check is to make sure we're in bounds, the second
	 * check is to catch wrap-around problems.  If the lba + num blocks
	 * is less than the lba, then we've wrapped around and the block
	 * range is invalid anyway.
	 */
	if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
	 || ((lba + num_blocks) < lba)) {
		ctl_set_lba_out_of_range(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * According to SBC-3, a transfer length of 0 is not an error.
	 */
	if (num_blocks == 0) {
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	lbalen = (struct ctl_lba_len_flags *)
	    &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	lbalen->lba = lba;
	lbalen->len = num_blocks;
	if (bytchk) {
		/* BYTCHK set: compare initiator data against the medium. */
		lbalen->flags = CTL_LLF_COMPARE | flags;
		ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
	} else {
		/* Plain verification of the medium; no data transfer. */
		lbalen->flags = CTL_LLF_VERIFY | flags;
		ctsio->kern_total_len = 0;
	}
	ctsio->kern_rel_offset = 0;

	CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n"));
	retval = lun->backend->data_submit((union ctl_io *)ctsio);
	return (retval);
}
ctl_io_port(&ctsio->io_hdr); 8954 8955 CTL_DEBUG_PRINT(("ctl_report_luns\n")); 8956 8957 mtx_lock(&softc->ctl_lock); 8958 num_luns = 0; 8959 for (targ_lun_id = 0; targ_lun_id < CTL_MAX_LUNS; targ_lun_id++) { 8960 if (ctl_lun_map_from_port(port, targ_lun_id) < CTL_MAX_LUNS) 8961 num_luns++; 8962 } 8963 mtx_unlock(&softc->ctl_lock); 8964 8965 switch (cdb->select_report) { 8966 case RPL_REPORT_DEFAULT: 8967 case RPL_REPORT_ALL: 8968 break; 8969 case RPL_REPORT_WELLKNOWN: 8970 well_known = 1; 8971 num_luns = 0; 8972 break; 8973 default: 8974 ctl_set_invalid_field(ctsio, 8975 /*sks_valid*/ 1, 8976 /*command*/ 1, 8977 /*field*/ 2, 8978 /*bit_valid*/ 0, 8979 /*bit*/ 0); 8980 ctl_done((union ctl_io *)ctsio); 8981 return (retval); 8982 break; /* NOTREACHED */ 8983 } 8984 8985 alloc_len = scsi_4btoul(cdb->length); 8986 /* 8987 * The initiator has to allocate at least 16 bytes for this request, 8988 * so he can at least get the header and the first LUN. Otherwise 8989 * we reject the request (per SPC-3 rev 14, section 6.21). 
8990 */ 8991 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 8992 sizeof(struct scsi_report_luns_lundata))) { 8993 ctl_set_invalid_field(ctsio, 8994 /*sks_valid*/ 1, 8995 /*command*/ 1, 8996 /*field*/ 6, 8997 /*bit_valid*/ 0, 8998 /*bit*/ 0); 8999 ctl_done((union ctl_io *)ctsio); 9000 return (retval); 9001 } 9002 9003 request_lun = (struct ctl_lun *) 9004 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9005 9006 lun_datalen = sizeof(*lun_data) + 9007 (num_luns * sizeof(struct scsi_report_luns_lundata)); 9008 9009 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 9010 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 9011 ctsio->kern_sg_entries = 0; 9012 9013 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9014 9015 mtx_lock(&softc->ctl_lock); 9016 for (targ_lun_id = 0, num_filled = 0; targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns; targ_lun_id++) { 9017 lun_id = ctl_lun_map_from_port(port, targ_lun_id); 9018 if (lun_id >= CTL_MAX_LUNS) 9019 continue; 9020 lun = softc->ctl_luns[lun_id]; 9021 if (lun == NULL) 9022 continue; 9023 9024 if (targ_lun_id <= 0xff) { 9025 /* 9026 * Peripheral addressing method, bus number 0. 9027 */ 9028 lun_data->luns[num_filled].lundata[0] = 9029 RPL_LUNDATA_ATYP_PERIPH; 9030 lun_data->luns[num_filled].lundata[1] = targ_lun_id; 9031 num_filled++; 9032 } else if (targ_lun_id <= 0x3fff) { 9033 /* 9034 * Flat addressing method. 9035 */ 9036 lun_data->luns[num_filled].lundata[0] = 9037 RPL_LUNDATA_ATYP_FLAT | (targ_lun_id >> 8); 9038 lun_data->luns[num_filled].lundata[1] = 9039 (targ_lun_id & 0xff); 9040 num_filled++; 9041 } else if (targ_lun_id <= 0xffffff) { 9042 /* 9043 * Extended flat addressing method. 
9044 */ 9045 lun_data->luns[num_filled].lundata[0] = 9046 RPL_LUNDATA_ATYP_EXTLUN | 0x12; 9047 scsi_ulto3b(targ_lun_id, 9048 &lun_data->luns[num_filled].lundata[1]); 9049 num_filled++; 9050 } else { 9051 printf("ctl_report_luns: bogus LUN number %jd, " 9052 "skipping\n", (intmax_t)targ_lun_id); 9053 } 9054 /* 9055 * According to SPC-3, rev 14 section 6.21: 9056 * 9057 * "The execution of a REPORT LUNS command to any valid and 9058 * installed logical unit shall clear the REPORTED LUNS DATA 9059 * HAS CHANGED unit attention condition for all logical 9060 * units of that target with respect to the requesting 9061 * initiator. A valid and installed logical unit is one 9062 * having a PERIPHERAL QUALIFIER of 000b in the standard 9063 * INQUIRY data (see 6.4.2)." 9064 * 9065 * If request_lun is NULL, the LUN this report luns command 9066 * was issued to is either disabled or doesn't exist. In that 9067 * case, we shouldn't clear any pending lun change unit 9068 * attention. 9069 */ 9070 if (request_lun != NULL) { 9071 mtx_lock(&lun->lun_lock); 9072 ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE); 9073 mtx_unlock(&lun->lun_lock); 9074 } 9075 } 9076 mtx_unlock(&softc->ctl_lock); 9077 9078 /* 9079 * It's quite possible that we've returned fewer LUNs than we allocated 9080 * space for. Trim it. 9081 */ 9082 lun_datalen = sizeof(*lun_data) + 9083 (num_filled * sizeof(struct scsi_report_luns_lundata)); 9084 9085 if (lun_datalen < alloc_len) { 9086 ctsio->residual = alloc_len - lun_datalen; 9087 ctsio->kern_data_len = lun_datalen; 9088 ctsio->kern_total_len = lun_datalen; 9089 } else { 9090 ctsio->residual = 0; 9091 ctsio->kern_data_len = alloc_len; 9092 ctsio->kern_total_len = alloc_len; 9093 } 9094 ctsio->kern_data_resid = 0; 9095 ctsio->kern_rel_offset = 0; 9096 ctsio->kern_sg_entries = 0; 9097 9098 /* 9099 * We set this to the actual data length, regardless of how much 9100 * space we actually have to return results. 
If the user looks at 9101 * this value, he'll know whether or not he allocated enough space 9102 * and reissue the command if necessary. We don't support well 9103 * known logical units, so if the user asks for that, return none. 9104 */ 9105 scsi_ulto4b(lun_datalen - 8, lun_data->length); 9106 9107 /* 9108 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 9109 * this request. 9110 */ 9111 ctl_set_success(ctsio); 9112 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9113 ctsio->be_move_done = ctl_config_move_done; 9114 ctl_datamove((union ctl_io *)ctsio); 9115 return (retval); 9116} 9117 9118int 9119ctl_request_sense(struct ctl_scsiio *ctsio) 9120{ 9121 struct scsi_request_sense *cdb; 9122 struct scsi_sense_data *sense_ptr; 9123 struct ctl_softc *ctl_softc; 9124 struct ctl_lun *lun; 9125 uint32_t initidx; 9126 int have_error; 9127 scsi_sense_data_type sense_format; 9128 ctl_ua_type ua_type; 9129 9130 cdb = (struct scsi_request_sense *)ctsio->cdb; 9131 9132 ctl_softc = control_softc; 9133 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9134 9135 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 9136 9137 /* 9138 * Determine which sense format the user wants. 9139 */ 9140 if (cdb->byte2 & SRS_DESC) 9141 sense_format = SSD_TYPE_DESC; 9142 else 9143 sense_format = SSD_TYPE_FIXED; 9144 9145 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 9146 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 9147 ctsio->kern_sg_entries = 0; 9148 9149 /* 9150 * struct scsi_sense_data, which is currently set to 256 bytes, is 9151 * larger than the largest allowed value for the length field in the 9152 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 
/*
 * Handler for the SCSI REQUEST SENSE command.
 *
 * Returns stored contingent-allegiance sense (only when compiled with
 * CTL_WITH_CA) or a pending unit attention for the requesting initiator,
 * in fixed or descriptor format as selected by the DESC bit in the CDB.
 * If nothing is pending, NO SENSE data is built.  Sense is always
 * delivered as parameter data with GOOD status — never as autosense.
 */
int
ctl_request_sense(struct ctl_scsiio *ctsio)
{
	struct scsi_request_sense *cdb;
	struct scsi_sense_data *sense_ptr;
	struct ctl_softc *ctl_softc;
	struct ctl_lun *lun;
	uint32_t initidx;
	int have_error;
	scsi_sense_data_type sense_format;
	ctl_ua_type ua_type;

	cdb = (struct scsi_request_sense *)ctsio->cdb;

	ctl_softc = control_softc;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	CTL_DEBUG_PRINT(("ctl_request_sense\n"));

	/*
	 * Determine which sense format the user wants.
	 */
	if (cdb->byte2 & SRS_DESC)
		sense_format = SSD_TYPE_DESC;
	else
		sense_format = SSD_TYPE_FIXED;

	ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK);
	sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	/*
	 * struct scsi_sense_data, which is currently set to 256 bytes, is
	 * larger than the largest allowed value for the length field in the
	 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4.
	 */
	ctsio->residual = 0;
	ctsio->kern_data_len = cdb->length;
	ctsio->kern_total_len = cdb->length;

	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * If we don't have a LUN, we don't have any pending sense.
	 */
	if (lun == NULL)
		goto no_sense;

	have_error = 0;
	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	/*
	 * Check for pending sense, and then for pending unit attentions.
	 * Pending sense gets returned first, then pending unit attentions.
	 */
	mtx_lock(&lun->lun_lock);
#ifdef CTL_WITH_CA
	if (ctl_is_set(lun->have_ca, initidx)) {
		scsi_sense_data_type stored_format;

		/*
		 * Check to see which sense format was used for the stored
		 * sense data.
		 */
		stored_format = scsi_sense_type(&lun->pending_sense[initidx]);

		/*
		 * If the user requested a different sense format than the
		 * one we stored, then we need to convert it to the other
		 * format.  If we're going from descriptor to fixed format
		 * sense data, we may lose things in translation, depending
		 * on what options were used.
		 *
		 * If the stored format is SSD_TYPE_NONE (i.e. invalid),
		 * for some reason we'll just copy it out as-is.
		 */
		if ((stored_format == SSD_TYPE_FIXED)
		 && (sense_format == SSD_TYPE_DESC))
			ctl_sense_to_desc((struct scsi_sense_data_fixed *)
			    &lun->pending_sense[initidx],
			    (struct scsi_sense_data_desc *)sense_ptr);
		else if ((stored_format == SSD_TYPE_DESC)
		 && (sense_format == SSD_TYPE_FIXED))
			ctl_sense_to_fixed((struct scsi_sense_data_desc *)
			    &lun->pending_sense[initidx],
			    (struct scsi_sense_data_fixed *)sense_ptr);
		else
			memcpy(sense_ptr, &lun->pending_sense[initidx],
			       MIN(sizeof(*sense_ptr),
			       sizeof(lun->pending_sense[initidx])));

		ctl_clear_mask(lun->have_ca, initidx);
		have_error = 1;
	} else
#endif
	{
		ua_type = ctl_build_ua(lun, initidx, sense_ptr, sense_format);
		if (ua_type != CTL_UA_NONE)
			have_error = 1;
		if (ua_type == CTL_UA_LUN_CHANGE) {
			/*
			 * Clearing this UA spans all LUNs: drop the per-LUN
			 * lock before taking the softc-wide lock
			 * (presumably to preserve lock ordering — confirm
			 * against the rest of the file).
			 */
			mtx_unlock(&lun->lun_lock);
			mtx_lock(&ctl_softc->ctl_lock);
			ctl_clr_ua_allluns(ctl_softc, initidx, ua_type);
			mtx_unlock(&ctl_softc->ctl_lock);
			mtx_lock(&lun->lun_lock);
		}

	}
	mtx_unlock(&lun->lun_lock);

	/*
	 * We already have a pending error, return it.
	 */
	if (have_error != 0) {
		/*
		 * We report the SCSI status as OK, since the status of the
		 * request sense command itself is OK.
		 * We report 0 for the sense length, because we aren't doing
		 * autosense in this case.  We're reporting sense as
		 * parameter data.
		 */
		ctl_set_success(ctsio);
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

no_sense:

	/*
	 * No sense information to report, so we report that everything is
	 * okay.
	 */
	ctl_set_sense_data(sense_ptr,
			   lun,
			   sense_format,
			   /*current_error*/ 1,
			   /*sense_key*/ SSD_KEY_NO_SENSE,
			   /*asc*/ 0x00,
			   /*ascq*/ 0x00,
			   SSD_ELEM_NONE);

	/*
	 * We report 0 for the sense length, because we aren't doing
	 * autosense in this case.  We're reporting sense as parameter data.
	 */
	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
9252 */ 9253 ctl_set_sense_data(sense_ptr, 9254 lun, 9255 sense_format, 9256 /*current_error*/ 1, 9257 /*sense_key*/ SSD_KEY_NO_SENSE, 9258 /*asc*/ 0x00, 9259 /*ascq*/ 0x00, 9260 SSD_ELEM_NONE); 9261 9262 /* 9263 * We report 0 for the sense length, because we aren't doing 9264 * autosense in this case. We're reporting sense as parameter data. 9265 */ 9266 ctl_set_success(ctsio); 9267 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9268 ctsio->be_move_done = ctl_config_move_done; 9269 ctl_datamove((union ctl_io *)ctsio); 9270 return (CTL_RETVAL_COMPLETE); 9271} 9272 9273int 9274ctl_tur(struct ctl_scsiio *ctsio) 9275{ 9276 9277 CTL_DEBUG_PRINT(("ctl_tur\n")); 9278 9279 ctl_set_success(ctsio); 9280 ctl_done((union ctl_io *)ctsio); 9281 9282 return (CTL_RETVAL_COMPLETE); 9283} 9284 9285/* 9286 * SCSI VPD page 0x00, the Supported VPD Pages page. 9287 */ 9288static int 9289ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9290{ 9291 struct scsi_vpd_supported_pages *pages; 9292 int sup_page_size; 9293 struct ctl_lun *lun; 9294 int p; 9295 9296 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9297 9298 sup_page_size = sizeof(struct scsi_vpd_supported_pages) * 9299 SCSI_EVPD_NUM_SUPPORTED_PAGES; 9300 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 9301 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 9302 ctsio->kern_sg_entries = 0; 9303 9304 if (sup_page_size < alloc_len) { 9305 ctsio->residual = alloc_len - sup_page_size; 9306 ctsio->kern_data_len = sup_page_size; 9307 ctsio->kern_total_len = sup_page_size; 9308 } else { 9309 ctsio->residual = 0; 9310 ctsio->kern_data_len = alloc_len; 9311 ctsio->kern_total_len = alloc_len; 9312 } 9313 ctsio->kern_data_resid = 0; 9314 ctsio->kern_rel_offset = 0; 9315 ctsio->kern_sg_entries = 0; 9316 9317 /* 9318 * The control device is always connected. The disk device, on the 9319 * other hand, may not be online all the time. 
Need to change this 9320 * to figure out whether the disk device is actually online or not. 9321 */ 9322 if (lun != NULL) 9323 pages->device = (SID_QUAL_LU_CONNECTED << 5) | 9324 lun->be_lun->lun_type; 9325 else 9326 pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9327 9328 p = 0; 9329 /* Supported VPD pages */ 9330 pages->page_list[p++] = SVPD_SUPPORTED_PAGES; 9331 /* Serial Number */ 9332 pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; 9333 /* Device Identification */ 9334 pages->page_list[p++] = SVPD_DEVICE_ID; 9335 /* Extended INQUIRY Data */ 9336 pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; 9337 /* Mode Page Policy */ 9338 pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; 9339 /* SCSI Ports */ 9340 pages->page_list[p++] = SVPD_SCSI_PORTS; 9341 /* Third-party Copy */ 9342 pages->page_list[p++] = SVPD_SCSI_TPC; 9343 if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { 9344 /* Block limits */ 9345 pages->page_list[p++] = SVPD_BLOCK_LIMITS; 9346 /* Block Device Characteristics */ 9347 pages->page_list[p++] = SVPD_BDC; 9348 /* Logical Block Provisioning */ 9349 pages->page_list[p++] = SVPD_LBP; 9350 } 9351 pages->length = p; 9352 9353 ctl_set_success(ctsio); 9354 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9355 ctsio->be_move_done = ctl_config_move_done; 9356 ctl_datamove((union ctl_io *)ctsio); 9357 return (CTL_RETVAL_COMPLETE); 9358} 9359 9360/* 9361 * SCSI VPD page 0x80, the Unit Serial Number page. 
9362 */ 9363static int 9364ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 9365{ 9366 struct scsi_vpd_unit_serial_number *sn_ptr; 9367 struct ctl_lun *lun; 9368 int data_len; 9369 9370 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9371 9372 data_len = 4 + CTL_SN_LEN; 9373 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9374 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9375 if (data_len < alloc_len) { 9376 ctsio->residual = alloc_len - data_len; 9377 ctsio->kern_data_len = data_len; 9378 ctsio->kern_total_len = data_len; 9379 } else { 9380 ctsio->residual = 0; 9381 ctsio->kern_data_len = alloc_len; 9382 ctsio->kern_total_len = alloc_len; 9383 } 9384 ctsio->kern_data_resid = 0; 9385 ctsio->kern_rel_offset = 0; 9386 ctsio->kern_sg_entries = 0; 9387 9388 /* 9389 * The control device is always connected. The disk device, on the 9390 * other hand, may not be online all the time. Need to change this 9391 * to figure out whether the disk device is actually online or not. 9392 */ 9393 if (lun != NULL) 9394 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9395 lun->be_lun->lun_type; 9396 else 9397 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9398 9399 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9400 sn_ptr->length = CTL_SN_LEN; 9401 /* 9402 * If we don't have a LUN, we just leave the serial number as 9403 * all spaces. 9404 */ 9405 if (lun != NULL) { 9406 strncpy((char *)sn_ptr->serial_num, 9407 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9408 } else 9409 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); 9410 9411 ctl_set_success(ctsio); 9412 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9413 ctsio->be_move_done = ctl_config_move_done; 9414 ctl_datamove((union ctl_io *)ctsio); 9415 return (CTL_RETVAL_COMPLETE); 9416} 9417 9418 9419/* 9420 * SCSI VPD page 0x86, the Extended INQUIRY Data page. 
9421 */ 9422static int 9423ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) 9424{ 9425 struct scsi_vpd_extended_inquiry_data *eid_ptr; 9426 struct ctl_lun *lun; 9427 int data_len; 9428 9429 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9430 9431 data_len = sizeof(struct scsi_vpd_extended_inquiry_data); 9432 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9433 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; 9434 ctsio->kern_sg_entries = 0; 9435 9436 if (data_len < alloc_len) { 9437 ctsio->residual = alloc_len - data_len; 9438 ctsio->kern_data_len = data_len; 9439 ctsio->kern_total_len = data_len; 9440 } else { 9441 ctsio->residual = 0; 9442 ctsio->kern_data_len = alloc_len; 9443 ctsio->kern_total_len = alloc_len; 9444 } 9445 ctsio->kern_data_resid = 0; 9446 ctsio->kern_rel_offset = 0; 9447 ctsio->kern_sg_entries = 0; 9448 9449 /* 9450 * The control device is always connected. The disk device, on the 9451 * other hand, may not be online all the time. 9452 */ 9453 if (lun != NULL) 9454 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9455 lun->be_lun->lun_type; 9456 else 9457 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9458 eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; 9459 scsi_ulto2b(data_len - 4, eid_ptr->page_length); 9460 /* 9461 * We support head of queue, ordered and simple tags. 9462 */ 9463 eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; 9464 /* 9465 * Volatile cache supported. 9466 */ 9467 eid_ptr->flags3 = SVPD_EID_V_SUP; 9468 9469 /* 9470 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit 9471 * attention for a particular IT nexus on all LUNs once we report 9472 * it to that nexus once. This bit is required as of SPC-4. 
9473 */ 9474 eid_ptr->flags4 = SVPD_EID_LUICLT; 9475 9476 /* 9477 * XXX KDM in order to correctly answer this, we would need 9478 * information from the SIM to determine how much sense data it 9479 * can send. So this would really be a path inquiry field, most 9480 * likely. This can be set to a maximum of 252 according to SPC-4, 9481 * but the hardware may or may not be able to support that much. 9482 * 0 just means that the maximum sense data length is not reported. 9483 */ 9484 eid_ptr->max_sense_length = 0; 9485 9486 ctl_set_success(ctsio); 9487 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9488 ctsio->be_move_done = ctl_config_move_done; 9489 ctl_datamove((union ctl_io *)ctsio); 9490 return (CTL_RETVAL_COMPLETE); 9491} 9492 9493static int 9494ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) 9495{ 9496 struct scsi_vpd_mode_page_policy *mpp_ptr; 9497 struct ctl_lun *lun; 9498 int data_len; 9499 9500 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9501 9502 data_len = sizeof(struct scsi_vpd_mode_page_policy) + 9503 sizeof(struct scsi_vpd_mode_page_policy_descr); 9504 9505 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9506 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; 9507 ctsio->kern_sg_entries = 0; 9508 9509 if (data_len < alloc_len) { 9510 ctsio->residual = alloc_len - data_len; 9511 ctsio->kern_data_len = data_len; 9512 ctsio->kern_total_len = data_len; 9513 } else { 9514 ctsio->residual = 0; 9515 ctsio->kern_data_len = alloc_len; 9516 ctsio->kern_total_len = alloc_len; 9517 } 9518 ctsio->kern_data_resid = 0; 9519 ctsio->kern_rel_offset = 0; 9520 ctsio->kern_sg_entries = 0; 9521 9522 /* 9523 * The control device is always connected. The disk device, on the 9524 * other hand, may not be online all the time. 
/*
 * SCSI VPD page 0x83, the Device Identification page.
 *
 * Emits, in order: the LUN's own designators (if any), the port's WWPN
 * designator (if any), a Relative Target Port identifier, a Target Port
 * Group identifier, and finally the target designator (if any).  The
 * buffer is sized up front from the same set of optional designators, so
 * the sequential pointer-walk below cannot overflow it.
 */
static int
ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct scsi_vpd_device_id *devid_ptr;
	struct scsi_vpd_id_descriptor *desc;
	struct ctl_softc *softc;
	struct ctl_lun *lun;
	struct ctl_port *port;
	int data_len;
	uint8_t proto;

	softc = control_softc;

	port = ctl_io_port(&ctsio->io_hdr);
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	/* Fixed-size descriptors plus whatever designators are registered. */
	data_len = sizeof(struct scsi_vpd_device_id) +
	    sizeof(struct scsi_vpd_id_descriptor) +
		sizeof(struct scsi_vpd_id_rel_trgt_port_id) +
	    sizeof(struct scsi_vpd_id_descriptor) +
		sizeof(struct scsi_vpd_id_trgt_port_grp_id);
	if (lun && lun->lun_devid)
		data_len += lun->lun_devid->len;
	if (port && port->port_devid)
		data_len += port->port_devid->len;
	if (port && port->target_devid)
		data_len += port->target_devid->len;

	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	/* Clamp the transfer to the initiator's allocation length. */
	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.
	 */
	if (lun != NULL)
		devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else
		devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	devid_ptr->page_code = SVPD_DEVICE_ID;
	scsi_ulto2b(data_len - 4, devid_ptr->length);

	/* Protocol identifier for the designators built below. */
	if (port && port->port_type == CTL_PORT_FC)
		proto = SCSI_PROTO_FC << 4;
	else if (port && port->port_type == CTL_PORT_ISCSI)
		proto = SCSI_PROTO_ISCSI << 4;
	else
		proto = SCSI_PROTO_SPI << 4;
	desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list;

	/*
	 * We're using a LUN association here.  i.e., this device ID is a
	 * per-LUN identifier.
	 */
	if (lun && lun->lun_devid) {
		memcpy(desc, lun->lun_devid->data, lun->lun_devid->len);
		desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc +
		    lun->lun_devid->len);
	}

	/*
	 * This is for the WWPN which is a port association.
	 */
	if (port && port->port_devid) {
		memcpy(desc, port->port_devid->data, port->port_devid->len);
		desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc +
		    port->port_devid->len);
	}

	/*
	 * This is for the Relative Target Port(type 4h) identifier
	 */
	desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
	desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
	    SVPD_ID_TYPE_RELTARG;
	desc->length = 4;
	scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]);
	desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
	    sizeof(struct scsi_vpd_id_rel_trgt_port_id));

	/*
	 * This is for the Target Port Group(type 5h) identifier
	 */
	desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY;
	desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT |
	    SVPD_ID_TYPE_TPORTGRP;
	desc->length = 4;
	/* Group number is derived from the port number (one group per
	 * softc->port_cnt ports, 1-based). */
	scsi_ulto2b(ctsio->io_hdr.nexus.targ_port / softc->port_cnt + 1,
	    &desc->identifier[2]);
	desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] +
	    sizeof(struct scsi_vpd_id_trgt_port_grp_id));

	/*
	 * This is for the Target identifier
	 */
	if (port && port->target_devid) {
		memcpy(desc, port->target_devid->data, port->target_devid->len);
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
/*
 * SCSI Ports VPD page (SVPD_SCSI_PORTS).
 *
 * For every online port through which this LUN is visible, reports the
 * relative port ID plus the initiator transport ID and target port
 * designators the port registered, if any.
 */
static int
ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct ctl_softc *softc = control_softc;
	struct scsi_vpd_scsi_ports *sp;
	struct scsi_vpd_port_designation *pd;
	struct scsi_vpd_port_designation_cont *pdc;
	struct ctl_lun *lun;
	struct ctl_port *port;
	int data_len, num_target_ports, iid_len, id_len;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	/*
	 * First pass: size the buffer.  Only online ports that map this
	 * LUN (when a LUN is present) are counted.
	 */
	num_target_ports = 0;
	iid_len = 0;
	id_len = 0;
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(port, &softc->port_list, links) {
		if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
			continue;
		if (lun != NULL &&
		    ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
			continue;
		num_target_ports++;
		if (port->init_devid)
			iid_len += port->init_devid->len;
		if (port->port_devid)
			id_len += port->port_devid->len;
	}
	mtx_unlock(&softc->ctl_lock);

	/*
	 * NOTE(review): ctl_lock is dropped between this sizing pass and
	 * the fill pass below; a port appearing (or growing a designator)
	 * in between would not fit the buffer sized here — confirm that
	 * port changes are excluded elsewhere or that this is acceptable.
	 */
	data_len = sizeof(struct scsi_vpd_scsi_ports) +
	    num_target_ports * (sizeof(struct scsi_vpd_port_designation) +
	    sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len;
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	/* Clamp the transfer to the initiator's allocation length. */
	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.  Need to change this
	 * to figure out whether the disk device is actually online or not.
	 */
	if (lun != NULL)
		sp->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else
		sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;

	sp->page_code = SVPD_SCSI_PORTS;
	scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports),
	    sp->page_length);
	pd = &sp->design[0];

	/* Second pass: emit one designation descriptor per visible port. */
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(port, &softc->port_list, links) {
		if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
			continue;
		if (lun != NULL &&
		    ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
			continue;
		scsi_ulto2b(port->targ_port, pd->relative_port_id);
		if (port->init_devid) {
			iid_len = port->init_devid->len;
			memcpy(pd->initiator_transportid,
			    port->init_devid->data, port->init_devid->len);
		} else
			iid_len = 0;
		scsi_ulto2b(iid_len, pd->initiator_transportid_length);
		/* Continuation part starts right after the transport ID. */
		pdc = (struct scsi_vpd_port_designation_cont *)
		    (&pd->initiator_transportid[iid_len]);
		if (port->port_devid) {
			id_len = port->port_devid->len;
			memcpy(pdc->target_port_descriptors,
			    port->port_devid->data, port->port_devid->len);
		} else
			id_len = 0;
		scsi_ulto2b(id_len, pdc->target_port_descriptors_length);
		pd = (struct scsi_vpd_port_designation *)
		    ((uint8_t *)pdc->target_port_descriptors + id_len);
	}
	mtx_unlock(&softc->ctl_lock);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}
9723 */ 9724 if (lun != NULL) 9725 sp->device = (SID_QUAL_LU_CONNECTED << 5) | 9726 lun->be_lun->lun_type; 9727 else 9728 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9729 9730 sp->page_code = SVPD_SCSI_PORTS; 9731 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 9732 sp->page_length); 9733 pd = &sp->design[0]; 9734 9735 mtx_lock(&softc->ctl_lock); 9736 STAILQ_FOREACH(port, &softc->port_list, links) { 9737 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 9738 continue; 9739 if (lun != NULL && 9740 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 9741 continue; 9742 scsi_ulto2b(port->targ_port, pd->relative_port_id); 9743 if (port->init_devid) { 9744 iid_len = port->init_devid->len; 9745 memcpy(pd->initiator_transportid, 9746 port->init_devid->data, port->init_devid->len); 9747 } else 9748 iid_len = 0; 9749 scsi_ulto2b(iid_len, pd->initiator_transportid_length); 9750 pdc = (struct scsi_vpd_port_designation_cont *) 9751 (&pd->initiator_transportid[iid_len]); 9752 if (port->port_devid) { 9753 id_len = port->port_devid->len; 9754 memcpy(pdc->target_port_descriptors, 9755 port->port_devid->data, port->port_devid->len); 9756 } else 9757 id_len = 0; 9758 scsi_ulto2b(id_len, pdc->target_port_descriptors_length); 9759 pd = (struct scsi_vpd_port_designation *) 9760 ((uint8_t *)pdc->target_port_descriptors + id_len); 9761 } 9762 mtx_unlock(&softc->ctl_lock); 9763 9764 ctl_set_success(ctsio); 9765 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9766 ctsio->be_move_done = ctl_config_move_done; 9767 ctl_datamove((union ctl_io *)ctsio); 9768 return (CTL_RETVAL_COMPLETE); 9769} 9770 9771static int 9772ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) 9773{ 9774 struct scsi_vpd_block_limits *bl_ptr; 9775 struct ctl_lun *lun; 9776 int bs; 9777 9778 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9779 9780 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); 9781 bl_ptr = (struct scsi_vpd_block_limits 
*)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	/* Clamp the data moved to min(page size, allocation length). */
	if (sizeof(*bl_ptr) < alloc_len) {
		ctsio->residual = alloc_len - sizeof(*bl_ptr);
		ctsio->kern_data_len = sizeof(*bl_ptr);
		ctsio->kern_total_len = sizeof(*bl_ptr);
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.  Need to change this
	 * to figure out whether the disk device is actually online or not.
	 */
	if (lun != NULL)
		bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
				  lun->be_lun->lun_type;
	else
		bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;

	bl_ptr->page_code = SVPD_BLOCK_LIMITS;
	scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length);
	bl_ptr->max_cmp_write_len = 0xff;
	scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len);
	if (lun != NULL) {
		/* NOTE(review): bs is not used below in this view -- verify */
		bs = lun->be_lun->blocksize;
		scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len);
		if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
			/* Advertise effectively unlimited UNMAP ranges. */
			scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_lba_cnt);
			scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_blk_cnt);
			if (lun->be_lun->ublockexp != 0) {
				scsi_ulto4b((1 << lun->be_lun->ublockexp),
				    bl_ptr->opt_unmap_grain);
				/* Top bit = UGAVALID (alignment valid). */
				scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff,
				    bl_ptr->unmap_grain_align);
			}
		}
		scsi_ulto4b(lun->be_lun->atomicblock,
		    bl_ptr->max_atomic_transfer_length);
		scsi_ulto4b(0, bl_ptr->atomic_alignment);
		scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity);
	}
	scsi_u64to8b(UINT64_MAX, bl_ptr->max_write_same_length);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Build the Block Device Characteristics VPD page (B1h).  Rotation rate
 * and form factor come from the "rpm" and "formfactor" LUN options when
 * they are present.
 */
static int
ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct scsi_vpd_block_device_characteristics *bdc_ptr;
	struct ctl_lun *lun;
	const char *value;
	u_int i;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO);
	bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	/* Clamp the data moved to min(page size, allocation length). */
	if (sizeof(*bdc_ptr) < alloc_len) {
		ctsio->residual = alloc_len - sizeof(*bdc_ptr);
		ctsio->kern_data_len = sizeof(*bdc_ptr);
		ctsio->kern_total_len = sizeof(*bdc_ptr);
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.  Need to change this
	 * to figure out whether the disk device is actually online or not.
	 */
	if (lun != NULL)
		bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else
		bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	bdc_ptr->page_code = SVPD_BDC;
	scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length);
	if (lun != NULL &&
	    (value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL)
		i = strtol(value, NULL, 0);
	else
		i = CTL_DEFAULT_ROTATION_RATE;
	scsi_ulto2b(i, bdc_ptr->medium_rotation_rate);
	if (lun != NULL &&
	    (value = ctl_get_opt(&lun->be_lun->options, "formfactor")) != NULL)
		i = strtol(value, NULL, 0);
	else
		i = 0;
	/* Only the low nibble of the form factor field is defined. */
	bdc_ptr->wab_wac_ff = (i & 0x0f);
	bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Build the Logical Block Provisioning VPD page (B2h).  Advertises
 * UNMAP / WRITE SAME based unmapping when the backing LUN has
 * CTL_LUN_FLAG_UNMAP set.
 */
static int
ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct scsi_vpd_logical_block_prov *lbp_ptr;
	struct ctl_lun *lun;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO);
	lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	/* Clamp the data moved to min(page size, allocation length). */
	if (sizeof(*lbp_ptr) < alloc_len) {
		ctsio->residual = alloc_len - sizeof(*lbp_ptr);
		ctsio->kern_data_len = sizeof(*lbp_ptr);
		ctsio->kern_total_len = sizeof(*lbp_ptr);
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.  Need to change this
	 * to figure out whether the disk device is actually online or not.
	 */
	if (lun != NULL)
		lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else
		lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;

	lbp_ptr->page_code = SVPD_LBP;
	scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length);
	lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT;
	if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
		lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 |
		    SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP;
		lbp_ptr->prov_type = SVPD_LBP_THIN;
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * INQUIRY with the EVPD bit set.
 */
static int
ctl_inquiry_evpd(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_inquiry *cdb;
	int alloc_len, retval;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_inquiry *)ctsio->cdb;
	alloc_len = scsi_2btoul(cdb->length);

	/* Dispatch on the requested VPD page code. */
	switch (cdb->page_code) {
	case SVPD_SUPPORTED_PAGES:
		retval = ctl_inquiry_evpd_supported(ctsio, alloc_len);
		break;
	case SVPD_UNIT_SERIAL_NUMBER:
		retval = ctl_inquiry_evpd_serial(ctsio, alloc_len);
		break;
	case SVPD_DEVICE_ID:
		retval = ctl_inquiry_evpd_devid(ctsio, alloc_len);
		break;
	case SVPD_EXTENDED_INQUIRY_DATA:
		retval = ctl_inquiry_evpd_eid(ctsio, alloc_len);
		break;
	case SVPD_MODE_PAGE_POLICY:
		retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len);
		break;
	case SVPD_SCSI_PORTS:
		retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len);
		break;
	case SVPD_SCSI_TPC:
		retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len);
		break;
	case SVPD_BLOCK_LIMITS:
		/* Block-device-only page. */
		if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
			goto err;
		retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len);
		break;
	case SVPD_BDC:
		/* Block-device-only page. */
		if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
			goto err;
		retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len);
		break;
	case SVPD_LBP:
		/* Block-device-only page. */
		if (lun == NULL || lun->be_lun->lun_type != T_DIRECT)
			goto err;
		retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len);
		break;
	default:
err:
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}

/*
 * Standard INQUIRY data.
 */
static int
ctl_inquiry_std(struct ctl_scsiio *ctsio)
{
	struct scsi_inquiry_data *inq_ptr;
	struct scsi_inquiry *cdb;
	struct ctl_softc *softc;
	struct ctl_port *port;
	struct ctl_lun *lun;
	char *val;
	uint32_t alloc_len, data_len;
	ctl_port_type port_type;

	softc = control_softc;

	/*
	 * Figure out whether we're talking to a Fibre Channel port or not.
	 * We treat the ioctl front end, and any SCSI adapters, as packetized
	 * SCSI front ends.
	 */
	port = ctl_io_port(&ctsio->io_hdr);
	if (port != NULL)
		port_type = port->port_type;
	else
		port_type = CTL_PORT_SCSI;
	if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL)
		port_type = CTL_PORT_SCSI;

	lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_inquiry *)ctsio->cdb;
	alloc_len = scsi_2btoul(cdb->length);

	/*
	 * We malloc the full inquiry data size here and fill it
	 * in.  If the user only asks for less, we'll give him
	 * that much.
	 */
	data_len = offsetof(struct scsi_inquiry_data, vendor_specific1);
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	/* Clamp the data moved to min(data size, allocation length). */
	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}

	/*
	 * Report the LUN connected only when we are primary for it, or the
	 * HA link state says we may serve it.
	 */
	if (lun != NULL) {
		if ((lun->flags & CTL_LUN_PRIMARY_SC) ||
		    softc->ha_link >= CTL_HA_LINK_UNKNOWN) {
			inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
			    lun->be_lun->lun_type;
		} else {
			inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) |
			    lun->be_lun->lun_type;
		}
	} else
		inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE;

	/* RMB in byte 2 is 0 */
	inq_ptr->version = SCSI_REV_SPC4;

	/*
	 * According to SAM-3, even if a device only supports a single
	 * level of LUN addressing, it should still set the HISUP bit:
	 *
	 * 4.9.1 Logical unit numbers overview
	 *
	 * All logical unit number formats described in this standard are
	 * hierarchical in structure even when only a single level in that
	 * hierarchy is used.  The HISUP bit shall be set to one in the
	 * standard INQUIRY data (see SPC-2) when any logical unit number
	 * format described in this standard is used.  Non-hierarchical
	 * formats are outside the scope of this standard.
	 *
	 * Therefore we set the HiSup bit here.
	 *
	 * The reponse format is 2, per SPC-3.
	 */
	inq_ptr->response_format = SID_HiSup | 2;

	inq_ptr->additional_length = data_len -
	    (offsetof(struct scsi_inquiry_data, additional_length) + 1);
	CTL_DEBUG_PRINT(("additional_length = %d\n",
			 inq_ptr->additional_length));

	inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT;
	/* 16 bit addressing */
	if (port_type == CTL_PORT_SCSI)
		inq_ptr->spc2_flags = SPC2_SID_ADDR16;
	/* XXX set the SID_MultiP bit here if we're actually going to
	   respond on multiple ports */
	inq_ptr->spc2_flags |= SPC2_SID_MultiP;

	/* 16 bit data bus, synchronous transfers */
	if (port_type == CTL_PORT_SCSI)
		inq_ptr->flags = SID_WBus16 | SID_Sync;
	/*
	 * XXX KDM do we want to support tagged queueing on the control
	 * device at all?
	 */
	if ((lun == NULL)
	 || (lun->be_lun->lun_type != T_PROCESSOR))
		inq_ptr->flags |= SID_CmdQue;
	/*
	 * Per SPC-3, unused bytes in ASCII strings are filled with spaces.
	 * We have 8 bytes for the vendor name, and 16 bytes for the device
	 * name and 4 bytes for the revision.
	 *
	 * strncpy() is intentional here: INQUIRY string fields are
	 * fixed-width, space padded and not NUL terminated, hence the
	 * memset(' ') followed by an unterminated copy.
	 */
	if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
	    "vendor")) == NULL) {
		strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor));
	} else {
		memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor));
		strncpy(inq_ptr->vendor, val,
		    min(sizeof(inq_ptr->vendor), strlen(val)));
	}
	if (lun == NULL) {
		strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
		    sizeof(inq_ptr->product));
	} else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) {
		switch (lun->be_lun->lun_type) {
		case T_DIRECT:
			strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
			    sizeof(inq_ptr->product));
			break;
		case T_PROCESSOR:
			strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT,
			    sizeof(inq_ptr->product));
			break;
		default:
			strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT,
			    sizeof(inq_ptr->product));
			break;
		}
	} else {
		memset(inq_ptr->product, ' ', sizeof(inq_ptr->product));
		strncpy(inq_ptr->product, val,
		    min(sizeof(inq_ptr->product), strlen(val)));
	}

	/*
	 * XXX make this a macro somewhere so it automatically gets
	 * incremented when we make changes.
	 */
	if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
	    "revision")) == NULL) {
		strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision));
	} else {
		memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision));
		strncpy(inq_ptr->revision, val,
		    min(sizeof(inq_ptr->revision), strlen(val)));
	}

	/*
	 * For parallel SCSI, we support double transition and single
	 * transition clocking.  We also support QAS (Quick Arbitration
	 * and Selection) and Information Unit transfers on both the
	 * control and array devices.
	 */
	if (port_type == CTL_PORT_SCSI)
		inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS |
		    SID_SPI_IUS;

	/* SAM-5 (no version claimed) */
	scsi_ulto2b(0x00A0, inq_ptr->version1);
	/* SPC-4 (no version claimed) */
	scsi_ulto2b(0x0460, inq_ptr->version2);
	/* Third version descriptor depends on the transport. */
	if (port_type == CTL_PORT_FC) {
		/* FCP-2 ANSI INCITS.350:2003 */
		scsi_ulto2b(0x0917, inq_ptr->version3);
	} else if (port_type == CTL_PORT_SCSI) {
		/* SPI-4 ANSI INCITS.362:200x */
		scsi_ulto2b(0x0B56, inq_ptr->version3);
	} else if (port_type == CTL_PORT_ISCSI) {
		/* iSCSI (no version claimed) */
		scsi_ulto2b(0x0960, inq_ptr->version3);
	} else if (port_type == CTL_PORT_SAS) {
		/* SAS (no version claimed) */
		scsi_ulto2b(0x0BE0, inq_ptr->version3);
	}

	if (lun == NULL) {
		/* SBC-4 (no version claimed) */
		scsi_ulto2b(0x0600, inq_ptr->version4);
	} else {
		switch (lun->be_lun->lun_type) {
		case T_DIRECT:
			/* SBC-4 (no version claimed) */
			scsi_ulto2b(0x0600, inq_ptr->version4);
			break;
		case T_PROCESSOR:
		default:
			break;
		}
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * INQUIRY entry point: dispatch to the EVPD or standard handler, and
 * reject a non-zero page code when EVPD is not set.
 */
int
ctl_inquiry(struct ctl_scsiio *ctsio)
{
	struct scsi_inquiry *cdb;
	int retval;

	CTL_DEBUG_PRINT(("ctl_inquiry\n"));

	cdb = (struct scsi_inquiry *)ctsio->cdb;
	if (cdb->byte2 & SI_EVPD)
		retval = ctl_inquiry_evpd(ctsio);
	else if (cdb->page_code == 0)
		retval = ctl_inquiry_std(ctsio);
	else {
		/* PAGE CODE must be zero when EVPD is clear (SPC). */
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
10253 } 10254 10255 return (retval); 10256} 10257 10258/* 10259 * For known CDB types, parse the LBA and length. 10260 */ 10261static int 10262ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len) 10263{ 10264 if (io->io_hdr.io_type != CTL_IO_SCSI) 10265 return (1); 10266 10267 switch (io->scsiio.cdb[0]) { 10268 case COMPARE_AND_WRITE: { 10269 struct scsi_compare_and_write *cdb; 10270 10271 cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; 10272 10273 *lba = scsi_8btou64(cdb->addr); 10274 *len = cdb->length; 10275 break; 10276 } 10277 case READ_6: 10278 case WRITE_6: { 10279 struct scsi_rw_6 *cdb; 10280 10281 cdb = (struct scsi_rw_6 *)io->scsiio.cdb; 10282 10283 *lba = scsi_3btoul(cdb->addr); 10284 /* only 5 bits are valid in the most significant address byte */ 10285 *lba &= 0x1fffff; 10286 *len = cdb->length; 10287 break; 10288 } 10289 case READ_10: 10290 case WRITE_10: { 10291 struct scsi_rw_10 *cdb; 10292 10293 cdb = (struct scsi_rw_10 *)io->scsiio.cdb; 10294 10295 *lba = scsi_4btoul(cdb->addr); 10296 *len = scsi_2btoul(cdb->length); 10297 break; 10298 } 10299 case WRITE_VERIFY_10: { 10300 struct scsi_write_verify_10 *cdb; 10301 10302 cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; 10303 10304 *lba = scsi_4btoul(cdb->addr); 10305 *len = scsi_2btoul(cdb->length); 10306 break; 10307 } 10308 case READ_12: 10309 case WRITE_12: { 10310 struct scsi_rw_12 *cdb; 10311 10312 cdb = (struct scsi_rw_12 *)io->scsiio.cdb; 10313 10314 *lba = scsi_4btoul(cdb->addr); 10315 *len = scsi_4btoul(cdb->length); 10316 break; 10317 } 10318 case WRITE_VERIFY_12: { 10319 struct scsi_write_verify_12 *cdb; 10320 10321 cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; 10322 10323 *lba = scsi_4btoul(cdb->addr); 10324 *len = scsi_4btoul(cdb->length); 10325 break; 10326 } 10327 case READ_16: 10328 case WRITE_16: 10329 case WRITE_ATOMIC_16: { 10330 struct scsi_rw_16 *cdb; 10331 10332 cdb = (struct scsi_rw_16 *)io->scsiio.cdb; 10333 10334 *lba = scsi_8btou64(cdb->addr); 
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case WRITE_VERIFY_16: {
		struct scsi_write_verify_16 *cdb;

		cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case WRITE_SAME_10: {
		struct scsi_write_same_10 *cdb;

		cdb = (struct scsi_write_same_10 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case WRITE_SAME_16: {
		struct scsi_write_same_16 *cdb;

		cdb = (struct scsi_write_same_16 *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case VERIFY_10: {
		struct scsi_verify_10 *cdb;

		cdb = (struct scsi_verify_10 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case VERIFY_12: {
		struct scsi_verify_12 *cdb;

		cdb = (struct scsi_verify_12 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case VERIFY_16: {
		struct scsi_verify_16 *cdb;

		cdb = (struct scsi_verify_16 *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case UNMAP: {
		/*
		 * The actual ranges are in the data-out buffer, which may
		 * not have arrived yet; treat UNMAP as covering the whole
		 * LUN for serialization purposes and let
		 * ctl_extent_check_unmap() refine this once data is here.
		 */
		*lba = 0;
		*len = UINT64_MAX;
		break;
	}
	case SERVICE_ACTION_IN: {	/* GET LBA STATUS */
		struct scsi_get_lba_status *cdb;

		cdb = (struct scsi_get_lba_status *)io->scsiio.cdb;
		*lba = scsi_8btou64(cdb->addr);
		*len = UINT32_MAX;
		break;
	}
	default:
		return (1);
		break; /* NOTREACHED */
	}

	return (0);
}

/*
 * Check two LBA extents for overlap.  In "seq" mode the first extent is
 * treated as one block longer, so a range immediately following it is
 * also considered blocking (sequential-write serialization).
 */
static ctl_action
ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2,
    bool seq)
{
	uint64_t endlba1, endlba2;

	endlba1 = lba1 + len1 - (seq ? 0 : 1);
	endlba2 = lba2 + len2 - 1;

	if ((endlba1 < lba2) || (endlba2 < lba1))
		return (CTL_ACTION_PASS);
	else
		return (CTL_ACTION_BLOCK);
}

/*
 * Check an UNMAP command's descriptor list against the extent
 * [lba2, lba2 + len2).  Returns CTL_ACTION_ERROR when io is not an
 * UNMAP, so the caller can fall back to the generic extent check.
 */
static int
ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2)
{
	struct ctl_ptr_len_flags *ptrlen;
	struct scsi_unmap_desc *buf, *end, *range;
	uint64_t lba;
	uint32_t len;

	/* If not UNMAP -- go other way. */
	if (io->io_hdr.io_type != CTL_IO_SCSI ||
	    io->scsiio.cdb[0] != UNMAP)
		return (CTL_ACTION_ERROR);

	/* If UNMAP without data -- block and wait for data. */
	ptrlen = (struct ctl_ptr_len_flags *)
	    &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 ||
	    ptrlen->ptr == NULL)
		return (CTL_ACTION_BLOCK);

	/* UNMAP with data -- check for collision. */
	buf = (struct scsi_unmap_desc *)ptrlen->ptr;
	end = buf + ptrlen->len / sizeof(*buf);
	for (range = buf; range < end; range++) {
		lba = scsi_8btou64(range->lba);
		len = scsi_4btoul(range->length);
		if ((lba < lba2 + len2) && (lba + len > lba2))
			return (CTL_ACTION_BLOCK);
	}
	return (CTL_ACTION_PASS);
}

/*
 * Check whether io1's extent blocks io2's.  io1 may be an UNMAP with a
 * descriptor list, in which case the per-range check is used instead of
 * the single-extent check.
 */
static ctl_action
ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq)
{
	uint64_t lba1, lba2;
	uint64_t len1, len2;
	int retval;

	if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
		return (CTL_ACTION_ERROR);

	retval = ctl_extent_check_unmap(io1, lba2, len2);
	if (retval != CTL_ACTION_ERROR)
		return (retval);

	if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
		return (CTL_ACTION_ERROR);

	return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq));
}

/*
 * Block io2 only when it starts exactly where io1 ends (strictly
 * sequential ordering).
 */
static ctl_action
ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2)
{
	uint64_t lba1, lba2;
	uint64_t len1, len2;

	if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
		return (CTL_ACTION_ERROR);
	if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
		return (CTL_ACTION_ERROR);

	if (lba1 + len1 == lba2)
		return (CTL_ACTION_BLOCK);
	return (CTL_ACTION_PASS);
}

/*
 * Decide how pending_io must be ordered relative to ooa_io, an I/O ahead
 * of it in the OOA queue: overlap errors, tag-type ordering rules, then
 * the per-command serialization table.
 */
static ctl_action
ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io,
    union ctl_io *ooa_io)
{
	const struct ctl_cmd_entry *pending_entry, *ooa_entry;
	ctl_serialize_action *serialize_row;

	/*
	 * The initiator attempted multiple untagged commands at the same
	 * time.  Can't do that.
	 */
	if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
	 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
	 && ((pending_io->io_hdr.nexus.targ_port ==
	      ooa_io->io_hdr.nexus.targ_port)
	  && (pending_io->io_hdr.nexus.initid ==
	      ooa_io->io_hdr.nexus.initid))
	 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT |
	      CTL_FLAG_STATUS_SENT)) == 0))
		return (CTL_ACTION_OVERLAP);

	/*
	 * The initiator attempted to send multiple tagged commands with
	 * the same ID.  (It's fine if different initiators have the same
	 * tag ID.)
	 *
	 * Even if all of those conditions are true, we don't kill the I/O
	 * if the command ahead of us has been aborted.  We won't end up
	 * sending it to the FETD, and it's perfectly legal to resend a
	 * command with the same tag number as long as the previous
	 * instance of this tag number has been aborted somehow.
	 */
	if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
	 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
	 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num)
	 && ((pending_io->io_hdr.nexus.targ_port ==
	      ooa_io->io_hdr.nexus.targ_port)
	  && (pending_io->io_hdr.nexus.initid ==
	      ooa_io->io_hdr.nexus.initid))
	 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT |
	      CTL_FLAG_STATUS_SENT)) == 0))
		return (CTL_ACTION_OVERLAP_TAG);

	/*
	 * If we get a head of queue tag, SAM-3 says that we should
	 * immediately execute it.
	 *
	 * What happens if this command would normally block for some other
	 * reason?  e.g. a request sense with a head of queue tag
	 * immediately after a write.  Normally that would block, but this
	 * will result in its getting executed immediately...
	 *
	 * We currently return "pass" instead of "skip", so we'll end up
	 * going through the rest of the queue to check for overlapped tags.
	 *
	 * XXX KDM check for other types of blockage first??
	 */
	if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
		return (CTL_ACTION_PASS);

	/*
	 * Ordered tags have to block until all items ahead of them
	 * have completed.  If we get called with an ordered tag, we always
	 * block, if something else is ahead of us in the queue.
	 */
	if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED)
		return (CTL_ACTION_BLOCK);

	/*
	 * Simple tags get blocked until all head of queue and ordered tags
	 * ahead of them have completed.  I'm lumping untagged commands in
	 * with simple tags here.  XXX KDM is that the right thing to do?
	 */
	if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
	  || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE))
	 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
	  || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED)))
		return (CTL_ACTION_BLOCK);

	pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL);
	ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL);

	/* Look up the pair of commands in the serialization table. */
	serialize_row = ctl_serialize_table[ooa_entry->seridx];

	switch (serialize_row[pending_entry->seridx]) {
	case CTL_SER_BLOCK:
		return (CTL_ACTION_BLOCK);
	case CTL_SER_EXTENT:
		return (ctl_extent_check(ooa_io, pending_io,
		    (lun->be_lun && lun->be_lun->serseq == CTL_LUN_SERSEQ_ON)));
	case CTL_SER_EXTENTOPT:
		/* Extent check only when queueing is restricted. */
		if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags
		    & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED)
			return (ctl_extent_check(ooa_io, pending_io,
			    (lun->be_lun &&
			    lun->be_lun->serseq == CTL_LUN_SERSEQ_ON)));
		return (CTL_ACTION_PASS);
	case CTL_SER_EXTENTSEQ:
		if (lun->be_lun && lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF)
			return (ctl_extent_check_seq(ooa_io, pending_io));
		return (CTL_ACTION_PASS);
	case CTL_SER_PASS:
		return (CTL_ACTION_PASS);
	case CTL_SER_BLOCKOPT:
		/* Block only when queueing is restricted. */
		if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags
		    & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED)
			return (CTL_ACTION_BLOCK);
		return (CTL_ACTION_PASS);
	case CTL_SER_SKIP:
		return (CTL_ACTION_SKIP);
	default:
		panic("invalid serialization value %d",
		    serialize_row[pending_entry->seridx]);
	}

	return (CTL_ACTION_ERROR);
}

/*
 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue.
 * Assumptions:
 * - pending_io is generally either incoming, or on the blocked queue
 * - starting I/O is the I/O we want to start the check with.
 */
static ctl_action
ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
	      union ctl_io *starting_io)
{
	union ctl_io *ooa_io;
	ctl_action action;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	/*
	 * Run back along the OOA queue, starting with the current
	 * blocked I/O and going through every I/O before it on the
	 * queue.  If starting_io is NULL, we'll just end up returning
	 * CTL_ACTION_PASS.
	 */
	for (ooa_io = starting_io; ooa_io != NULL;
	     ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq,
	     ooa_links)){

		/*
		 * This routine just checks to see whether
		 * cur_blocked is blocked by ooa_io, which is ahead
		 * of it in the queue.  It doesn't queue/dequeue
		 * cur_blocked.
		 */
		action = ctl_check_for_blockage(lun, pending_io, ooa_io);
		switch (action) {
		case CTL_ACTION_BLOCK:
		case CTL_ACTION_OVERLAP:
		case CTL_ACTION_OVERLAP_TAG:
		case CTL_ACTION_SKIP:
		case CTL_ACTION_ERROR:
			return (action);
			break; /* NOTREACHED */
		case CTL_ACTION_PASS:
			break;
		default:
			panic("invalid action %d", action);
			break; /* NOTREACHED */
		}
	}

	return (CTL_ACTION_PASS);
}

/*
 * Assumptions:
 * - An I/O has just completed, and has been removed from the per-LUN OOA
 *   queue, so some items on the blocked queue may now be unblocked.
 */
static int
ctl_check_blocked(struct ctl_lun *lun)
{
	struct ctl_softc *softc = lun->ctl_softc;
	union ctl_io *cur_blocked, *next_blocked;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	/*
	 * Run forward from the head of the blocked queue, checking each
	 * entry against the I/Os prior to it on the OOA queue to see if
	 * there is still any blockage.
	 *
	 * We cannot use the TAILQ_FOREACH() macro, because it can't deal
	 * with our removing a variable on it while it is traversing the
	 * list.
	 */
	for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue);
	     cur_blocked != NULL; cur_blocked = next_blocked) {
		union ctl_io *prev_ooa;
		ctl_action action;

		next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr,
							  blocked_links);

		prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr,
						      ctl_ooaq, ooa_links);

		/*
		 * If cur_blocked happens to be the first item in the OOA
		 * queue now, prev_ooa will be NULL, and the action
		 * returned will just be CTL_ACTION_PASS.
		 */
		action = ctl_check_ooa(lun, cur_blocked, prev_ooa);

		switch (action) {
		case CTL_ACTION_BLOCK:
			/* Nothing to do here, still blocked */
			break;
		case CTL_ACTION_OVERLAP:
		case CTL_ACTION_OVERLAP_TAG:
			/*
			 * This shouldn't happen!  In theory we've already
			 * checked this command for overlap...
			 */
			break;
		case CTL_ACTION_PASS:
		case CTL_ACTION_SKIP: {
			const struct ctl_cmd_entry *entry;

			/*
			 * The skip case shouldn't happen, this transaction
			 * should have never made it onto the blocked queue.
			 */
			/*
			 * This I/O is no longer blocked, we can remove it
			 * from the blocked queue.  Since this is a TAILQ
			 * (doubly linked list), we can do O(1) removals
			 * from any place on the list.
			 */
			TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr,
				     blocked_links);
			cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED;

			if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
			    (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)){
				/*
				 * Need to send IO back to original side to
				 * run
				 */
				union ctl_ha_msg msg_info;

				cur_blocked->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
				msg_info.hdr.original_sc =
					cur_blocked->io_hdr.original_sc;
				msg_info.hdr.serializing_sc = cur_blocked;
				msg_info.hdr.msg_type = CTL_MSG_R2R;
				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
				    sizeof(msg_info.hdr), M_NOWAIT);
				break;
			}
			entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL);

			/*
			 * Check this I/O for LUN state changes that may
			 * have happened while this command was blocked.
			 * The LUN state may have been changed by a command
			 * ahead of us in the queue, so we need to re-check
			 * for any states that can be caused by SCSI
			 * commands.
			 */
			if (ctl_scsiio_lun_check(lun, entry,
			    &cur_blocked->scsiio) == 0) {
				cur_blocked->io_hdr.flags |=
				    CTL_FLAG_IS_WAS_ON_RTR;
				ctl_enqueue_rtr(cur_blocked);
			} else
				ctl_done(cur_blocked);
			break;
		}
		default:
			/*
			 * This probably shouldn't happen -- we shouldn't
			 * get CTL_ACTION_ERROR, or anything else.
			 */
			break;
		}
	}

	return (CTL_RETVAL_COMPLETE);
}

/*
 * This routine (with one exception) checks LUN flags that can be set by
 * commands ahead of us in the OOA queue.  These flags have to be checked
 * when a command initially comes in, and when we pull a command off the
 * blocked queue and are preparing to execute it.  The reason we have to
 * check these flags for commands on the blocked queue is that the LUN
 * state may have been changed by a command ahead of us while we're on the
 * blocked queue.
 *
 * Ordering is somewhat important with these checks, so please pay
 * careful attention to the placement of any new checks.
 *
 * Returns 0 when the command may proceed; returns 1 (after setting the
 * appropriate sense data on ctsio) when it must be failed.
 */
static int
ctl_scsiio_lun_check(struct ctl_lun *lun,
    const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = lun->ctl_softc;
	int retval;
	uint32_t residx;

	retval = 0;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	/*
	 * If this shelf is a secondary shelf controller, we may have to
	 * reject some commands disallowed by HA mode and link state.
	 */
	if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) {
		if (softc->ha_link == CTL_HA_LINK_OFFLINE &&
		    (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) {
			ctl_set_lun_unavail(ctsio);
			retval = 1;
			goto bailout;
		}
		if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 &&
		    (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) {
			ctl_set_lun_transit(ctsio);
			retval = 1;
			goto bailout;
		}
		if (softc->ha_mode == CTL_HA_MODE_ACT_STBY &&
		    (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) {
			ctl_set_lun_standby(ctsio);
			retval = 1;
			goto bailout;
		}

		/* The rest of checks are only done on executing side */
		if (softc->ha_mode == CTL_HA_MODE_XFER)
			goto bailout;
	}

	/* Reject writes to read-only or software-write-protected LUNs. */
	if (entry->pattern & CTL_LUN_PAT_WRITE) {
		if (lun->be_lun &&
		    lun->be_lun->flags & CTL_LUN_FLAG_READONLY) {
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_DATA_PROTECT,
			    /*asc*/ 0x27, /*ascq*/ 0x01, SSD_ELEM_NONE);
			retval = 1;
			goto bailout;
		}
		if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT]
		    .eca_and_aen & SCP_SWP) != 0) {
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_DATA_PROTECT,
			    /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE);
			retval = 1;
			goto bailout;
		}
	}

	/*
	 * Check for a reservation conflict.  If this command isn't allowed
	 * even on reserved LUNs, and if this initiator isn't the one who
	 * reserved us, reject the command with a reservation conflict.
	 */
	residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	if ((lun->flags & CTL_LUN_RESERVED)
	 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) {
		if (lun->res_idx != residx) {
			ctl_set_reservation_conflict(ctsio);
			retval = 1;
			goto bailout;
		}
	}

	/* Persistent reservation checks. */
	if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 ||
	    (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) {
		/* No reservation or command is allowed. */;
	} else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) &&
	    (lun->res_type == SPR_TYPE_WR_EX ||
	     lun->res_type == SPR_TYPE_WR_EX_RO ||
	     lun->res_type == SPR_TYPE_WR_EX_AR)) {
		/* The command is allowed for Write Exclusive resv. */;
	} else {
		/*
		 * if we aren't registered or it's a res holder type
		 * reservation and this isn't the res holder then set a
		 * conflict.
		 */
		if (ctl_get_prkey(lun, residx) == 0
		 || (residx != lun->pr_res_idx && lun->res_type < 4)) {
			ctl_set_reservation_conflict(ctsio);
			retval = 1;
			goto bailout;
		}
	}

	if ((lun->flags & CTL_LUN_OFFLINE)
	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0)) {
		ctl_set_lun_not_ready(ctsio);
		retval = 1;
		goto bailout;
	}

	if ((lun->flags & CTL_LUN_STOPPED)
	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) {
		/* "Logical unit not ready, initializing cmd. required" */
		ctl_set_lun_stopped(ctsio);
		retval = 1;
		goto bailout;
	}

	if ((lun->flags & CTL_LUN_INOPERABLE)
	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) {
		/* "Medium format corrupted" */
		ctl_set_medium_format_corrupted(ctsio);
		retval = 1;
		goto bailout;
	}

bailout:
	return (retval);
}

/*
 * Fail an I/O with BUSY status during failover.
 * NOTE(review): have_lock is unused in this function -- verify callers.
 */
static void
ctl_failover_io(union ctl_io *io, int have_lock)
{
	ctl_set_busy(&io->scsiio);
	ctl_done(io);
}

/*
 * Clean up a LUN's queues after the peer storage controller has failed
 * over; behavior depends on whether the HA mode is XFER or one of the
 * SERIALIZE modes.  (Function continues beyond this view.)
 */
static void
ctl_failover_lun(struct ctl_lun *lun)
{
	struct ctl_softc *softc = lun->ctl_softc;
	struct ctl_io_hdr *io, *next_io;

	CTL_DEBUG_PRINT(("FAILOVER for lun %ju\n", lun->lun));
	if (softc->ha_mode == CTL_HA_MODE_XFER) {
		TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) {
			/* We are master */
			if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
				if (io->flags & CTL_FLAG_IO_ACTIVE) {
					io->flags |= CTL_FLAG_ABORT;
				} else { /* This can be only due to DATAMOVE */
					io->msg_type = CTL_MSG_DATAMOVE_DONE;
					io->flags |= CTL_FLAG_IO_ACTIVE;
					io->port_status = 31340;
					ctl_enqueue_isc((union ctl_io *)io);
				}
			}
			/* We are slave */
			if (io->flags & CTL_FLAG_SENT_2OTHER_SC) {
				io->flags &= ~CTL_FLAG_SENT_2OTHER_SC;
				if (io->flags & CTL_FLAG_IO_ACTIVE) {
					io->flags |= CTL_FLAG_FAILOVER;
				} else {
					ctl_set_busy(&((union ctl_io *)io)->
					    scsiio);
					ctl_done((union ctl_io *)io);
				}
			}
		}
	} else { /* SERIALIZE modes */
		TAILQ_FOREACH_SAFE(io, &lun->blocked_queue, blocked_links,
		    next_io) {
			/* We are master */
			if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
				TAILQ_REMOVE(&lun->blocked_queue, io,
				    blocked_links);
				io->flags &= ~CTL_FLAG_BLOCKED;
				TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links);
				ctl_free_io((union ctl_io *)io);
			}
		}
TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { 10970 /* We are master */ 10971 if (io->flags & CTL_FLAG_FROM_OTHER_SC) { 10972 TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links); 10973 ctl_free_io((union ctl_io *)io); 10974 } 10975 /* We are slave */ 10976 if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { 10977 io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; 10978 if (!(io->flags & CTL_FLAG_IO_ACTIVE)) { 10979 ctl_set_busy(&((union ctl_io *)io)-> 10980 scsiio); 10981 ctl_done((union ctl_io *)io); 10982 } 10983 } 10984 } 10985 ctl_check_blocked(lun); 10986 } 10987} 10988 10989static int 10990ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio) 10991{ 10992 struct ctl_lun *lun; 10993 const struct ctl_cmd_entry *entry; 10994 uint32_t initidx, targ_lun; 10995 int retval; 10996 10997 retval = 0; 10998 10999 lun = NULL; 11000 11001 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 11002 if ((targ_lun < CTL_MAX_LUNS) 11003 && ((lun = softc->ctl_luns[targ_lun]) != NULL)) { 11004 /* 11005 * If the LUN is invalid, pretend that it doesn't exist. 11006 * It will go away as soon as all pending I/O has been 11007 * completed. 11008 */ 11009 mtx_lock(&lun->lun_lock); 11010 if (lun->flags & CTL_LUN_DISABLED) { 11011 mtx_unlock(&lun->lun_lock); 11012 lun = NULL; 11013 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL; 11014 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL; 11015 } else { 11016 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun; 11017 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = 11018 lun->be_lun; 11019 11020 /* 11021 * Every I/O goes into the OOA queue for a 11022 * particular LUN, and stays there until completion. 
11023 */ 11024#ifdef CTL_TIME_IO 11025 if (TAILQ_EMPTY(&lun->ooa_queue)) { 11026 lun->idle_time += getsbinuptime() - 11027 lun->last_busy; 11028 } 11029#endif 11030 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, 11031 ooa_links); 11032 } 11033 } else { 11034 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL; 11035 ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL; 11036 } 11037 11038 /* Get command entry and return error if it is unsuppotyed. */ 11039 entry = ctl_validate_command(ctsio); 11040 if (entry == NULL) { 11041 if (lun) 11042 mtx_unlock(&lun->lun_lock); 11043 return (retval); 11044 } 11045 11046 ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 11047 ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; 11048 11049 /* 11050 * Check to see whether we can send this command to LUNs that don't 11051 * exist. This should pretty much only be the case for inquiry 11052 * and request sense. Further checks, below, really require having 11053 * a LUN, so we can't really check the command anymore. Just put 11054 * it on the rtr queue. 11055 */ 11056 if (lun == NULL) { 11057 if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) { 11058 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11059 ctl_enqueue_rtr((union ctl_io *)ctsio); 11060 return (retval); 11061 } 11062 11063 ctl_set_unsupported_lun(ctsio); 11064 ctl_done((union ctl_io *)ctsio); 11065 CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n")); 11066 return (retval); 11067 } else { 11068 /* 11069 * Make sure we support this particular command on this LUN. 11070 * e.g., we don't support writes to the control LUN. 
11071 */ 11072 if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { 11073 mtx_unlock(&lun->lun_lock); 11074 ctl_set_invalid_opcode(ctsio); 11075 ctl_done((union ctl_io *)ctsio); 11076 return (retval); 11077 } 11078 } 11079 11080 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 11081 11082#ifdef CTL_WITH_CA 11083 /* 11084 * If we've got a request sense, it'll clear the contingent 11085 * allegiance condition. Otherwise, if we have a CA condition for 11086 * this initiator, clear it, because it sent down a command other 11087 * than request sense. 11088 */ 11089 if ((ctsio->cdb[0] != REQUEST_SENSE) 11090 && (ctl_is_set(lun->have_ca, initidx))) 11091 ctl_clear_mask(lun->have_ca, initidx); 11092#endif 11093 11094 /* 11095 * If the command has this flag set, it handles its own unit 11096 * attention reporting, we shouldn't do anything. Otherwise we 11097 * check for any pending unit attentions, and send them back to the 11098 * initiator. We only do this when a command initially comes in, 11099 * not when we pull it off the blocked queue. 11100 * 11101 * According to SAM-3, section 5.3.2, the order that things get 11102 * presented back to the host is basically unit attentions caused 11103 * by some sort of reset event, busy status, reservation conflicts 11104 * or task set full, and finally any other status. 11105 * 11106 * One issue here is that some of the unit attentions we report 11107 * don't fall into the "reset" category (e.g. "reported luns data 11108 * has changed"). So reporting it here, before the reservation 11109 * check, may be technically wrong. I guess the only thing to do 11110 * would be to check for and report the reset events here, and then 11111 * check for the other unit attention types after we check for a 11112 * reservation conflict. 
11113 * 11114 * XXX KDM need to fix this 11115 */ 11116 if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { 11117 ctl_ua_type ua_type; 11118 scsi_sense_data_type sense_format; 11119 11120 if (lun->flags & CTL_LUN_SENSE_DESC) 11121 sense_format = SSD_TYPE_DESC; 11122 else 11123 sense_format = SSD_TYPE_FIXED; 11124 11125 ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data, 11126 sense_format); 11127 if (ua_type != CTL_UA_NONE) { 11128 mtx_unlock(&lun->lun_lock); 11129 ctsio->scsi_status = SCSI_STATUS_CHECK_COND; 11130 ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 11131 ctsio->sense_len = SSD_FULL_SIZE; 11132 ctl_done((union ctl_io *)ctsio); 11133 return (retval); 11134 } 11135 } 11136 11137 11138 if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { 11139 mtx_unlock(&lun->lun_lock); 11140 ctl_done((union ctl_io *)ctsio); 11141 return (retval); 11142 } 11143 11144 /* 11145 * XXX CHD this is where we want to send IO to other side if 11146 * this LUN is secondary on this SC. We will need to make a copy 11147 * of the IO and flag the IO on this side as SENT_2OTHER and the flag 11148 * the copy we send as FROM_OTHER. 11149 * We also need to stuff the address of the original IO so we can 11150 * find it easily. Something similar will need be done on the other 11151 * side so when we are done we can find the copy. 
11152 */ 11153 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 11154 (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0) { 11155 union ctl_ha_msg msg_info; 11156 int isc_retval; 11157 11158 ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 11159 ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 11160 mtx_unlock(&lun->lun_lock); 11161 11162 msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; 11163 msg_info.hdr.original_sc = (union ctl_io *)ctsio; 11164 msg_info.hdr.serializing_sc = NULL; 11165 msg_info.hdr.nexus = ctsio->io_hdr.nexus; 11166 msg_info.scsi.tag_num = ctsio->tag_num; 11167 msg_info.scsi.tag_type = ctsio->tag_type; 11168 msg_info.scsi.cdb_len = ctsio->cdb_len; 11169 memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); 11170 11171 if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11172 sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data), 11173 M_WAITOK)) > CTL_HA_STATUS_SUCCESS) { 11174 ctl_set_busy(ctsio); 11175 ctl_done((union ctl_io *)ctsio); 11176 return (retval); 11177 } 11178 return (retval); 11179 } 11180 11181 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 11182 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, 11183 ctl_ooaq, ooa_links))) { 11184 case CTL_ACTION_BLOCK: 11185 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 11186 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 11187 blocked_links); 11188 mtx_unlock(&lun->lun_lock); 11189 return (retval); 11190 case CTL_ACTION_PASS: 11191 case CTL_ACTION_SKIP: 11192 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11193 mtx_unlock(&lun->lun_lock); 11194 ctl_enqueue_rtr((union ctl_io *)ctsio); 11195 break; 11196 case CTL_ACTION_OVERLAP: 11197 mtx_unlock(&lun->lun_lock); 11198 ctl_set_overlapped_cmd(ctsio); 11199 ctl_done((union ctl_io *)ctsio); 11200 break; 11201 case CTL_ACTION_OVERLAP_TAG: 11202 mtx_unlock(&lun->lun_lock); 11203 ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); 11204 ctl_done((union ctl_io *)ctsio); 11205 break; 11206 case CTL_ACTION_ERROR: 11207 default: 11208 mtx_unlock(&lun->lun_lock); 
11209 ctl_set_internal_failure(ctsio, 11210 /*sks_valid*/ 0, 11211 /*retry_count*/ 0); 11212 ctl_done((union ctl_io *)ctsio); 11213 break; 11214 } 11215 return (retval); 11216} 11217 11218const struct ctl_cmd_entry * 11219ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa) 11220{ 11221 const struct ctl_cmd_entry *entry; 11222 int service_action; 11223 11224 entry = &ctl_cmd_table[ctsio->cdb[0]]; 11225 if (sa) 11226 *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); 11227 if (entry->flags & CTL_CMD_FLAG_SA5) { 11228 service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; 11229 entry = &((const struct ctl_cmd_entry *) 11230 entry->execute)[service_action]; 11231 } 11232 return (entry); 11233} 11234 11235const struct ctl_cmd_entry * 11236ctl_validate_command(struct ctl_scsiio *ctsio) 11237{ 11238 const struct ctl_cmd_entry *entry; 11239 int i, sa; 11240 uint8_t diff; 11241 11242 entry = ctl_get_cmd_entry(ctsio, &sa); 11243 if (entry->execute == NULL) { 11244 if (sa) 11245 ctl_set_invalid_field(ctsio, 11246 /*sks_valid*/ 1, 11247 /*command*/ 1, 11248 /*field*/ 1, 11249 /*bit_valid*/ 1, 11250 /*bit*/ 4); 11251 else 11252 ctl_set_invalid_opcode(ctsio); 11253 ctl_done((union ctl_io *)ctsio); 11254 return (NULL); 11255 } 11256 KASSERT(entry->length > 0, 11257 ("Not defined length for command 0x%02x/0x%02x", 11258 ctsio->cdb[0], ctsio->cdb[1])); 11259 for (i = 1; i < entry->length; i++) { 11260 diff = ctsio->cdb[i] & ~entry->usage[i - 1]; 11261 if (diff == 0) 11262 continue; 11263 ctl_set_invalid_field(ctsio, 11264 /*sks_valid*/ 1, 11265 /*command*/ 1, 11266 /*field*/ i, 11267 /*bit_valid*/ 1, 11268 /*bit*/ fls(diff) - 1); 11269 ctl_done((union ctl_io *)ctsio); 11270 return (NULL); 11271 } 11272 return (entry); 11273} 11274 11275static int 11276ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) 11277{ 11278 11279 switch (lun_type) { 11280 case T_PROCESSOR: 11281 if (((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) && 11282 ((entry->flags & 
CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0)) 11283 return (0); 11284 break; 11285 case T_DIRECT: 11286 if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0) && 11287 ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0)) 11288 return (0); 11289 break; 11290 default: 11291 return (0); 11292 } 11293 return (1); 11294} 11295 11296static int 11297ctl_scsiio(struct ctl_scsiio *ctsio) 11298{ 11299 int retval; 11300 const struct ctl_cmd_entry *entry; 11301 11302 retval = CTL_RETVAL_COMPLETE; 11303 11304 CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); 11305 11306 entry = ctl_get_cmd_entry(ctsio, NULL); 11307 11308 /* 11309 * If this I/O has been aborted, just send it straight to 11310 * ctl_done() without executing it. 11311 */ 11312 if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { 11313 ctl_done((union ctl_io *)ctsio); 11314 goto bailout; 11315 } 11316 11317 /* 11318 * All the checks should have been handled by ctl_scsiio_precheck(). 11319 * We should be clear now to just execute the I/O. 11320 */ 11321 retval = entry->execute(ctsio); 11322 11323bailout: 11324 return (retval); 11325} 11326 11327/* 11328 * Since we only implement one target right now, a bus reset simply resets 11329 * our single target. 
11330 */ 11331static int 11332ctl_bus_reset(struct ctl_softc *softc, union ctl_io *io) 11333{ 11334 return(ctl_target_reset(softc, io, CTL_UA_BUS_RESET)); 11335} 11336 11337static int 11338ctl_target_reset(struct ctl_softc *softc, union ctl_io *io, 11339 ctl_ua_type ua_type) 11340{ 11341 struct ctl_port *port; 11342 struct ctl_lun *lun; 11343 int retval; 11344 11345 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11346 union ctl_ha_msg msg_info; 11347 11348 msg_info.hdr.nexus = io->io_hdr.nexus; 11349 if (ua_type==CTL_UA_TARG_RESET) 11350 msg_info.task.task_action = CTL_TASK_TARGET_RESET; 11351 else 11352 msg_info.task.task_action = CTL_TASK_BUS_RESET; 11353 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11354 msg_info.hdr.original_sc = NULL; 11355 msg_info.hdr.serializing_sc = NULL; 11356 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11357 sizeof(msg_info.task), M_WAITOK); 11358 } 11359 retval = 0; 11360 11361 mtx_lock(&softc->ctl_lock); 11362 port = softc->ctl_ports[io->io_hdr.nexus.targ_port]; 11363 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11364 if (port != NULL && 11365 ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) 11366 continue; 11367 retval += ctl_lun_reset(lun, io, ua_type); 11368 } 11369 mtx_unlock(&softc->ctl_lock); 11370 11371 return (retval); 11372} 11373 11374/* 11375 * The LUN should always be set. The I/O is optional, and is used to 11376 * distinguish between I/Os sent by this initiator, and by other 11377 * initiators. We set unit attention for initiators other than this one. 11378 * SAM-3 is vague on this point. It does say that a unit attention should 11379 * be established for other initiators when a LUN is reset (see section 11380 * 5.7.3), but it doesn't specifically say that the unit attention should 11381 * be established for this particular initiator when a LUN is reset. 
Here 11382 * is the relevant text, from SAM-3 rev 8: 11383 * 11384 * 5.7.2 When a SCSI initiator port aborts its own tasks 11385 * 11386 * When a SCSI initiator port causes its own task(s) to be aborted, no 11387 * notification that the task(s) have been aborted shall be returned to 11388 * the SCSI initiator port other than the completion response for the 11389 * command or task management function action that caused the task(s) to 11390 * be aborted and notification(s) associated with related effects of the 11391 * action (e.g., a reset unit attention condition). 11392 * 11393 * XXX KDM for now, we're setting unit attention for all initiators. 11394 */ 11395static int 11396ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type) 11397{ 11398 union ctl_io *xio; 11399#if 0 11400 uint32_t initidx; 11401#endif 11402#ifdef CTL_WITH_CA 11403 int i; 11404#endif 11405 11406 mtx_lock(&lun->lun_lock); 11407 /* 11408 * Run through the OOA queue and abort each I/O. 11409 */ 11410 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11411 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11412 xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS; 11413 } 11414 11415 /* 11416 * This version sets unit attention for every 11417 */ 11418#if 0 11419 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11420 ctl_est_ua_all(lun, initidx, ua_type); 11421#else 11422 ctl_est_ua_all(lun, -1, ua_type); 11423#endif 11424 11425 /* 11426 * A reset (any kind, really) clears reservations established with 11427 * RESERVE/RELEASE. It does not clear reservations established 11428 * with PERSISTENT RESERVE OUT, but we don't support that at the 11429 * moment anyway. See SPC-2, section 5.6. SPC-3 doesn't address 11430 * reservations made with the RESERVE/RELEASE commands, because 11431 * those commands are obsolete in SPC-3. 
11432 */ 11433 lun->flags &= ~CTL_LUN_RESERVED; 11434 11435#ifdef CTL_WITH_CA 11436 for (i = 0; i < CTL_MAX_INITIATORS; i++) 11437 ctl_clear_mask(lun->have_ca, i); 11438#endif 11439 mtx_unlock(&lun->lun_lock); 11440 11441 return (0); 11442} 11443 11444static void 11445ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id, 11446 int other_sc) 11447{ 11448 union ctl_io *xio; 11449 11450 mtx_assert(&lun->lun_lock, MA_OWNED); 11451 11452 /* 11453 * Run through the OOA queue and attempt to find the given I/O. 11454 * The target port, initiator ID, tag type and tag number have to 11455 * match the values that we got from the initiator. If we have an 11456 * untagged command to abort, simply abort the first untagged command 11457 * we come to. We only allow one untagged command at a time of course. 11458 */ 11459 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11460 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11461 11462 if ((targ_port == UINT32_MAX || 11463 targ_port == xio->io_hdr.nexus.targ_port) && 11464 (init_id == UINT32_MAX || 11465 init_id == xio->io_hdr.nexus.initid)) { 11466 if (targ_port != xio->io_hdr.nexus.targ_port || 11467 init_id != xio->io_hdr.nexus.initid) 11468 xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS; 11469 xio->io_hdr.flags |= CTL_FLAG_ABORT; 11470 if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11471 union ctl_ha_msg msg_info; 11472 11473 msg_info.hdr.nexus = xio->io_hdr.nexus; 11474 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11475 msg_info.task.tag_num = xio->scsiio.tag_num; 11476 msg_info.task.tag_type = xio->scsiio.tag_type; 11477 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11478 msg_info.hdr.original_sc = NULL; 11479 msg_info.hdr.serializing_sc = NULL; 11480 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11481 sizeof(msg_info.task), M_NOWAIT); 11482 } 11483 } 11484 } 11485} 11486 11487static int 11488ctl_abort_task_set(union ctl_io *io) 11489{ 11490 struct ctl_softc 
*softc = control_softc; 11491 struct ctl_lun *lun; 11492 uint32_t targ_lun; 11493 11494 /* 11495 * Look up the LUN. 11496 */ 11497 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11498 mtx_lock(&softc->ctl_lock); 11499 if ((targ_lun < CTL_MAX_LUNS) && (softc->ctl_luns[targ_lun] != NULL)) 11500 lun = softc->ctl_luns[targ_lun]; 11501 else { 11502 mtx_unlock(&softc->ctl_lock); 11503 return (1); 11504 } 11505 11506 mtx_lock(&lun->lun_lock); 11507 mtx_unlock(&softc->ctl_lock); 11508 if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { 11509 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11510 io->io_hdr.nexus.initid, 11511 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11512 } else { /* CTL_TASK_CLEAR_TASK_SET */ 11513 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, 11514 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 11515 } 11516 mtx_unlock(&lun->lun_lock); 11517 return (0); 11518} 11519 11520static int 11521ctl_i_t_nexus_reset(union ctl_io *io) 11522{ 11523 struct ctl_softc *softc = control_softc; 11524 struct ctl_lun *lun; 11525 uint32_t initidx; 11526 11527 if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 11528 union ctl_ha_msg msg_info; 11529 11530 msg_info.hdr.nexus = io->io_hdr.nexus; 11531 msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET; 11532 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11533 msg_info.hdr.original_sc = NULL; 11534 msg_info.hdr.serializing_sc = NULL; 11535 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11536 sizeof(msg_info.task), M_WAITOK); 11537 } 11538 11539 initidx = ctl_get_initindex(&io->io_hdr.nexus); 11540 mtx_lock(&softc->ctl_lock); 11541 STAILQ_FOREACH(lun, &softc->lun_list, links) { 11542 mtx_lock(&lun->lun_lock); 11543 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 11544 io->io_hdr.nexus.initid, 1); 11545#ifdef CTL_WITH_CA 11546 ctl_clear_mask(lun->have_ca, initidx); 11547#endif 11548 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx)) 11549 lun->flags &= ~CTL_LUN_RESERVED; 11550 
ctl_est_ua(lun, initidx, CTL_UA_I_T_NEXUS_LOSS); 11551 mtx_unlock(&lun->lun_lock); 11552 } 11553 mtx_unlock(&softc->ctl_lock); 11554 return (0); 11555} 11556 11557static int 11558ctl_abort_task(union ctl_io *io) 11559{ 11560 union ctl_io *xio; 11561 struct ctl_lun *lun; 11562 struct ctl_softc *softc; 11563#if 0 11564 struct sbuf sb; 11565 char printbuf[128]; 11566#endif 11567 int found; 11568 uint32_t targ_lun; 11569 11570 softc = control_softc; 11571 found = 0; 11572 11573 /* 11574 * Look up the LUN. 11575 */ 11576 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11577 mtx_lock(&softc->ctl_lock); 11578 if ((targ_lun < CTL_MAX_LUNS) 11579 && (softc->ctl_luns[targ_lun] != NULL)) 11580 lun = softc->ctl_luns[targ_lun]; 11581 else { 11582 mtx_unlock(&softc->ctl_lock); 11583 return (1); 11584 } 11585 11586#if 0 11587 printf("ctl_abort_task: called for lun %lld, tag %d type %d\n", 11588 lun->lun, io->taskio.tag_num, io->taskio.tag_type); 11589#endif 11590 11591 mtx_lock(&lun->lun_lock); 11592 mtx_unlock(&softc->ctl_lock); 11593 /* 11594 * Run through the OOA queue and attempt to find the given I/O. 11595 * The target port, initiator ID, tag type and tag number have to 11596 * match the values that we got from the initiator. If we have an 11597 * untagged command to abort, simply abort the first untagged command 11598 * we come to. We only allow one untagged command at a time of course. 11599 */ 11600 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 11601 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 11602#if 0 11603 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN); 11604 11605 sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ", 11606 lun->lun, xio->scsiio.tag_num, 11607 xio->scsiio.tag_type, 11608 (xio->io_hdr.blocked_links.tqe_prev 11609 == NULL) ? "" : " BLOCKED", 11610 (xio->io_hdr.flags & 11611 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 11612 (xio->io_hdr.flags & 11613 CTL_FLAG_ABORT) ? 
" ABORT" : "", 11614 (xio->io_hdr.flags & 11615 CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : "")); 11616 ctl_scsi_command_string(&xio->scsiio, NULL, &sb); 11617 sbuf_finish(&sb); 11618 printf("%s\n", sbuf_data(&sb)); 11619#endif 11620 11621 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 11622 || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) 11623 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 11624 continue; 11625 11626 /* 11627 * If the abort says that the task is untagged, the 11628 * task in the queue must be untagged. Otherwise, 11629 * we just check to see whether the tag numbers 11630 * match. This is because the QLogic firmware 11631 * doesn't pass back the tag type in an abort 11632 * request. 11633 */ 11634#if 0 11635 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) 11636 && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) 11637 || (xio->scsiio.tag_num == io->taskio.tag_num)) 11638#endif 11639 /* 11640 * XXX KDM we've got problems with FC, because it 11641 * doesn't send down a tag type with aborts. So we 11642 * can only really go by the tag number... 11643 * This may cause problems with parallel SCSI. 11644 * Need to figure that out!! 
11645 */ 11646 if (xio->scsiio.tag_num == io->taskio.tag_num) { 11647 xio->io_hdr.flags |= CTL_FLAG_ABORT; 11648 found = 1; 11649 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 && 11650 !(lun->flags & CTL_LUN_PRIMARY_SC)) { 11651 union ctl_ha_msg msg_info; 11652 11653 msg_info.hdr.nexus = io->io_hdr.nexus; 11654 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 11655 msg_info.task.tag_num = io->taskio.tag_num; 11656 msg_info.task.tag_type = io->taskio.tag_type; 11657 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11658 msg_info.hdr.original_sc = NULL; 11659 msg_info.hdr.serializing_sc = NULL; 11660#if 0 11661 printf("Sent Abort to other side\n"); 11662#endif 11663 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11664 sizeof(msg_info.task), M_NOWAIT); 11665 } 11666#if 0 11667 printf("ctl_abort_task: found I/O to abort\n"); 11668#endif 11669 } 11670 } 11671 mtx_unlock(&lun->lun_lock); 11672 11673 if (found == 0) { 11674 /* 11675 * This isn't really an error. It's entirely possible for 11676 * the abort and command completion to cross on the wire. 11677 * This is more of an informative/diagnostic error. 
11678 */ 11679#if 0 11680 printf("ctl_abort_task: ABORT sent for nonexistent I/O: " 11681 "%u:%u:%u tag %d type %d\n", 11682 io->io_hdr.nexus.initid, 11683 io->io_hdr.nexus.targ_port, 11684 io->io_hdr.nexus.targ_lun, io->taskio.tag_num, 11685 io->taskio.tag_type); 11686#endif 11687 } 11688 return (0); 11689} 11690 11691static void 11692ctl_run_task(union ctl_io *io) 11693{ 11694 struct ctl_softc *softc = control_softc; 11695 int retval = 1; 11696 const char *task_desc; 11697 11698 CTL_DEBUG_PRINT(("ctl_run_task\n")); 11699 11700 KASSERT(io->io_hdr.io_type == CTL_IO_TASK, 11701 ("ctl_run_task: Unextected io_type %d\n", 11702 io->io_hdr.io_type)); 11703 11704 task_desc = ctl_scsi_task_string(&io->taskio); 11705 if (task_desc != NULL) { 11706#ifdef NEEDTOPORT 11707 csevent_log(CSC_CTL | CSC_SHELF_SW | 11708 CTL_TASK_REPORT, 11709 csevent_LogType_Trace, 11710 csevent_Severity_Information, 11711 csevent_AlertLevel_Green, 11712 csevent_FRU_Firmware, 11713 csevent_FRU_Unknown, 11714 "CTL: received task: %s",task_desc); 11715#endif 11716 } else { 11717#ifdef NEEDTOPORT 11718 csevent_log(CSC_CTL | CSC_SHELF_SW | 11719 CTL_TASK_REPORT, 11720 csevent_LogType_Trace, 11721 csevent_Severity_Information, 11722 csevent_AlertLevel_Green, 11723 csevent_FRU_Firmware, 11724 csevent_FRU_Unknown, 11725 "CTL: received unknown task " 11726 "type: %d (%#x)", 11727 io->taskio.task_action, 11728 io->taskio.task_action); 11729#endif 11730 } 11731 switch (io->taskio.task_action) { 11732 case CTL_TASK_ABORT_TASK: 11733 retval = ctl_abort_task(io); 11734 break; 11735 case CTL_TASK_ABORT_TASK_SET: 11736 case CTL_TASK_CLEAR_TASK_SET: 11737 retval = ctl_abort_task_set(io); 11738 break; 11739 case CTL_TASK_CLEAR_ACA: 11740 break; 11741 case CTL_TASK_I_T_NEXUS_RESET: 11742 retval = ctl_i_t_nexus_reset(io); 11743 break; 11744 case CTL_TASK_LUN_RESET: { 11745 struct ctl_lun *lun; 11746 uint32_t targ_lun; 11747 11748 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11749 mtx_lock(&softc->ctl_lock); 11750 if 
((targ_lun < CTL_MAX_LUNS) 11751 && (softc->ctl_luns[targ_lun] != NULL)) 11752 lun = softc->ctl_luns[targ_lun]; 11753 else { 11754 mtx_unlock(&softc->ctl_lock); 11755 retval = 1; 11756 break; 11757 } 11758 retval = ctl_lun_reset(lun, io, CTL_UA_LUN_RESET); 11759 mtx_unlock(&softc->ctl_lock); 11760 11761 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) { 11762 union ctl_ha_msg msg_info; 11763 11764 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 11765 msg_info.hdr.nexus = io->io_hdr.nexus; 11766 msg_info.task.task_action = CTL_TASK_LUN_RESET; 11767 msg_info.hdr.original_sc = NULL; 11768 msg_info.hdr.serializing_sc = NULL; 11769 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 11770 sizeof(msg_info.task), M_WAITOK); 11771 } 11772 break; 11773 } 11774 case CTL_TASK_TARGET_RESET: 11775 retval = ctl_target_reset(softc, io, CTL_UA_TARG_RESET); 11776 break; 11777 case CTL_TASK_BUS_RESET: 11778 retval = ctl_bus_reset(softc, io); 11779 break; 11780 case CTL_TASK_PORT_LOGIN: 11781 break; 11782 case CTL_TASK_PORT_LOGOUT: 11783 break; 11784 default: 11785 printf("ctl_run_task: got unknown task management event %d\n", 11786 io->taskio.task_action); 11787 break; 11788 } 11789 if (retval == 0) 11790 io->io_hdr.status = CTL_SUCCESS; 11791 else 11792 io->io_hdr.status = CTL_ERROR; 11793 ctl_done(io); 11794} 11795 11796/* 11797 * For HA operation. Handle commands that come in from the other 11798 * controller. 11799 */ 11800static void 11801ctl_handle_isc(union ctl_io *io) 11802{ 11803 int free_io; 11804 struct ctl_lun *lun; 11805 struct ctl_softc *softc; 11806 uint32_t targ_lun; 11807 11808 softc = control_softc; 11809 11810 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 11811 lun = softc->ctl_luns[targ_lun]; 11812 11813 switch (io->io_hdr.msg_type) { 11814 case CTL_MSG_SERIALIZE: 11815 free_io = ctl_serialize_other_sc_cmd(&io->scsiio); 11816 break; 11817 case CTL_MSG_R2R: { 11818 const struct ctl_cmd_entry *entry; 11819 11820 /* 11821 * This is only used in SER_ONLY mode. 
11822 */ 11823 free_io = 0; 11824 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 11825 mtx_lock(&lun->lun_lock); 11826 if (ctl_scsiio_lun_check(lun, 11827 entry, (struct ctl_scsiio *)io) != 0) { 11828 mtx_unlock(&lun->lun_lock); 11829 ctl_done(io); 11830 break; 11831 } 11832 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 11833 mtx_unlock(&lun->lun_lock); 11834 ctl_enqueue_rtr(io); 11835 break; 11836 } 11837 case CTL_MSG_FINISH_IO: 11838 if (softc->ha_mode == CTL_HA_MODE_XFER) { 11839 free_io = 0; 11840 ctl_done(io); 11841 } else { 11842 free_io = 1; 11843 mtx_lock(&lun->lun_lock); 11844 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, 11845 ooa_links); 11846 ctl_check_blocked(lun); 11847 mtx_unlock(&lun->lun_lock); 11848 } 11849 break; 11850 case CTL_MSG_PERS_ACTION: 11851 ctl_hndl_per_res_out_on_other_sc( 11852 (union ctl_ha_msg *)&io->presio.pr_msg); 11853 free_io = 1; 11854 break; 11855 case CTL_MSG_BAD_JUJU: 11856 free_io = 0; 11857 ctl_done(io); 11858 break; 11859 case CTL_MSG_DATAMOVE: 11860 /* Only used in XFER mode */ 11861 free_io = 0; 11862 ctl_datamove_remote(io); 11863 break; 11864 case CTL_MSG_DATAMOVE_DONE: 11865 /* Only used in XFER mode */ 11866 free_io = 0; 11867 io->scsiio.be_move_done(io); 11868 break; 11869 case CTL_MSG_FAILOVER: 11870 mtx_lock(&lun->lun_lock); 11871 ctl_failover_lun(lun); 11872 mtx_unlock(&lun->lun_lock); 11873 free_io = 1; 11874 break; 11875 default: 11876 free_io = 1; 11877 printf("%s: Invalid message type %d\n", 11878 __func__, io->io_hdr.msg_type); 11879 break; 11880 } 11881 if (free_io) 11882 ctl_free_io(io); 11883 11884} 11885 11886 11887/* 11888 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if 11889 * there is no match. 
 */
static ctl_lun_error_pattern
ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc)
{
	const struct ctl_cmd_entry *entry;
	ctl_lun_error_pattern filtered_pattern, pattern;

	pattern = desc->error_pattern;

	/*
	 * XXX KDM we need more data passed into this function to match a
	 * custom pattern, and we actually need to implement custom pattern
	 * matching.
	 */
	if (pattern & CTL_LUN_PAT_CMD)
		return (CTL_LUN_PAT_CMD);

	if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY)
		return (CTL_LUN_PAT_ANY);

	/* Look up the command entry so we can compare its pattern flags. */
	entry = ctl_get_cmd_entry(ctsio, NULL);

	filtered_pattern = entry->pattern & pattern;

	/*
	 * If the user requested specific flags in the pattern (e.g.
	 * CTL_LUN_PAT_RANGE), make sure the command supports all of those
	 * flags.
	 *
	 * If the user did not specify any flags, it doesn't matter whether
	 * or not the command supports the flags.
	 */
	if ((filtered_pattern & ~CTL_LUN_PAT_MASK) !=
	     (pattern & ~CTL_LUN_PAT_MASK))
		return (CTL_LUN_PAT_NONE);

	/*
	 * If the user asked for a range check, see if the requested LBA
	 * range overlaps with this command's LBA range.
	 */
	if (filtered_pattern & CTL_LUN_PAT_RANGE) {
		uint64_t lba1;
		uint64_t len1;
		ctl_action action;
		int retval;

		retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1);
		if (retval != 0)
			return (CTL_LUN_PAT_NONE);

		action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba,
					      desc->lba_range.len, FALSE);
		/*
		 * A "pass" means that the LBA ranges don't overlap, so
		 * this doesn't match the user's range criteria.
		 */
		if (action == CTL_ACTION_PASS)
			return (CTL_LUN_PAT_NONE);
	}

	return (filtered_pattern);
}

/*
 * Walk the LUN's error injection descriptor list and apply the first
 * matching injections to this I/O (set aborted/medium error/UA/custom
 * sense status as requested).  One-shot descriptors are removed after
 * they fire; descriptors with CTL_LUN_INJ_CONTINUOUS set stay queued.
 * Caller must hold lun->lun_lock (asserted below).
 */
static void
ctl_inject_error(struct ctl_lun *lun, union ctl_io *io)
{
	struct ctl_error_desc *desc, *desc2;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
		ctl_lun_error_pattern pattern;
		/*
		 * Check to see whether this particular command matches
		 * the pattern in the descriptor.
		 */
		pattern = ctl_cmd_pattern_match(&io->scsiio, desc);
		if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE)
			continue;

		switch (desc->lun_error & CTL_LUN_INJ_TYPE) {
		case CTL_LUN_INJ_ABORTED:
			ctl_set_aborted(&io->scsiio);
			break;
		case CTL_LUN_INJ_MEDIUM_ERR:
			ctl_set_medium_error(&io->scsiio);
			break;
		case CTL_LUN_INJ_UA:
			/* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET
			 * OCCURRED */
			ctl_set_ua(&io->scsiio, 0x29, 0x00);
			break;
		case CTL_LUN_INJ_CUSTOM:
			/*
			 * We're assuming the user knows what he is doing.
			 * Just copy the sense information without doing
			 * checks.
			 */
			bcopy(&desc->custom_sense, &io->scsiio.sense_data,
			      MIN(sizeof(desc->custom_sense),
				  sizeof(io->scsiio.sense_data)));
			io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND;
			io->scsiio.sense_len = SSD_FULL_SIZE;
			io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
			break;
		case CTL_LUN_INJ_NONE:
		default:
			/*
			 * If this is an error injection type we don't know
			 * about, clear the continuous flag (if it is set)
			 * so it will get deleted below.
			 */
			desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS;
			break;
		}
		/*
		 * By default, each error injection action is a one-shot
		 */
		if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS)
			continue;

		STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links);

		free(desc, M_CTL);
	}
}

#ifdef CTL_IO_DELAY
/*
 * Callout handler used by the datamove delay injection below: re-enters
 * ctl_datamove() once the configured delay has expired.
 */
static void
ctl_datamove_timer_wakeup(void *arg)
{
	union ctl_io *io;

	io = (union ctl_io *)arg;

	ctl_datamove(io);
}
#endif /* CTL_IO_DELAY */

/*
 * Move data between the backend's buffers and the initiator, by handing
 * the I/O either to this port's fe_datamove callback or (in HA XFER mode,
 * for I/O received on the other controller) to the remote side.  Handles
 * optional delay injection, aborted I/O, and zero-length transfers first.
 */
void
ctl_datamove(union ctl_io *io)
{
	void (*fe_datamove)(union ctl_io *io);

	mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED);

	CTL_DEBUG_PRINT(("ctl_datamove\n"));

#ifdef CTL_TIME_IO
	/* Log a description of any I/O that has been pending too long. */
	if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
		char str[256];
		char path_str[64];
		struct sbuf sb;

		ctl_scsi_path_string(io, path_str, sizeof(path_str));
		sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);

		sbuf_cat(&sb, path_str);
		switch (io->io_hdr.io_type) {
		case CTL_IO_SCSI:
			ctl_scsi_command_string(&io->scsiio, NULL, &sb);
			sbuf_printf(&sb, "\n");
			sbuf_cat(&sb, path_str);
			sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
				    io->scsiio.tag_num, io->scsiio.tag_type);
			break;
		case CTL_IO_TASK:
			sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, "
				    "Tag Type: %d\n", io->taskio.task_action,
				    io->taskio.tag_num, io->taskio.tag_type);
			break;
		default:
			printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
			panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
			break;
		}
		sbuf_cat(&sb, path_str);
		sbuf_printf(&sb, "ctl_datamove: %jd seconds\n",
			    (intmax_t)time_uptime - io->io_hdr.start_time);
		sbuf_finish(&sb);
		printf("%s", sbuf_data(&sb));
	}
#endif /* CTL_TIME_IO */

#ifdef CTL_IO_DELAY
	if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
		/* Second pass after the delay fired; clear and fall through. */
		io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
	} else {
		struct ctl_lun *lun;

		lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
		if ((lun != NULL)
		 && (lun->delay_info.datamove_delay > 0)) {

			callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1);
			io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
			callout_reset(&io->io_hdr.delay_callout,
				      lun->delay_info.datamove_delay * hz,
				      ctl_datamove_timer_wakeup, io);
			if (lun->delay_info.datamove_type ==
			    CTL_DELAY_TYPE_ONESHOT)
				lun->delay_info.datamove_delay = 0;
			return;
		}
	}
#endif

	/*
	 * This command has been aborted.  Set the port status, so we fail
	 * the data move.
	 */
	if (io->io_hdr.flags & CTL_FLAG_ABORT) {
		printf("ctl_datamove: tag 0x%04x on (%u:%u:%u) aborted\n",
		       io->scsiio.tag_num, io->io_hdr.nexus.initid,
		       io->io_hdr.nexus.targ_port,
		       io->io_hdr.nexus.targ_lun);
		/* 31337 looks like a debug sentinel port status -- verify. */
		io->io_hdr.port_status = 31337;
		/*
		 * Note that the backend, in this case, will get the
		 * callback in its context.  In other cases it may get
		 * called in the frontend's interrupt thread context.
		 */
		io->scsiio.be_move_done(io);
		return;
	}

	/* Don't confuse frontend with zero length data move. */
	if (io->scsiio.kern_data_len == 0) {
		io->scsiio.be_move_done(io);
		return;
	}

	/*
	 * If we're in XFER mode and this I/O is from the other shelf
	 * controller, we need to send the DMA to the other side to
	 * actually transfer the data to/from the host.  In serialize only
	 * mode the transfer happens below CTL and ctl_datamove() is only
	 * called on the machine that originally received the I/O.
12128 */ 12129 if ((control_softc->ha_mode == CTL_HA_MODE_XFER) 12130 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { 12131 union ctl_ha_msg msg; 12132 uint32_t sg_entries_sent; 12133 int do_sg_copy; 12134 int i; 12135 12136 memset(&msg, 0, sizeof(msg)); 12137 msg.hdr.msg_type = CTL_MSG_DATAMOVE; 12138 msg.hdr.original_sc = io->io_hdr.original_sc; 12139 msg.hdr.serializing_sc = io; 12140 msg.hdr.nexus = io->io_hdr.nexus; 12141 msg.dt.flags = io->io_hdr.flags; 12142 /* 12143 * We convert everything into a S/G list here. We can't 12144 * pass by reference, only by value between controllers. 12145 * So we can't pass a pointer to the S/G list, only as many 12146 * S/G entries as we can fit in here. If it's possible for 12147 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, 12148 * then we need to break this up into multiple transfers. 12149 */ 12150 if (io->scsiio.kern_sg_entries == 0) { 12151 msg.dt.kern_sg_entries = 1; 12152#if 0 12153 /* 12154 * Convert to a physical address if this is a 12155 * virtual address. 12156 */ 12157 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 12158 msg.dt.sg_list[0].addr = 12159 io->scsiio.kern_data_ptr; 12160 } else { 12161 /* 12162 * XXX KDM use busdma here! 
12163 */ 12164 msg.dt.sg_list[0].addr = (void *) 12165 vtophys(io->scsiio.kern_data_ptr); 12166 } 12167#else 12168 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 12169 ("HA does not support BUS_ADDR")); 12170 msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; 12171#endif 12172 12173 msg.dt.sg_list[0].len = io->scsiio.kern_data_len; 12174 do_sg_copy = 0; 12175 } else { 12176 msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; 12177 do_sg_copy = 1; 12178 } 12179 12180 msg.dt.kern_data_len = io->scsiio.kern_data_len; 12181 msg.dt.kern_total_len = io->scsiio.kern_total_len; 12182 msg.dt.kern_data_resid = io->scsiio.kern_data_resid; 12183 msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; 12184 msg.dt.sg_sequence = 0; 12185 12186 /* 12187 * Loop until we've sent all of the S/G entries. On the 12188 * other end, we'll recompose these S/G entries into one 12189 * contiguous list before passing it to the 12190 */ 12191 for (sg_entries_sent = 0; sg_entries_sent < 12192 msg.dt.kern_sg_entries; msg.dt.sg_sequence++) { 12193 msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list)/ 12194 sizeof(msg.dt.sg_list[0])), 12195 msg.dt.kern_sg_entries - sg_entries_sent); 12196 12197 if (do_sg_copy != 0) { 12198 struct ctl_sg_entry *sgl; 12199 int j; 12200 12201 sgl = (struct ctl_sg_entry *) 12202 io->scsiio.kern_data_ptr; 12203 /* 12204 * If this is in cached memory, flush the cache 12205 * before we send the DMA request to the other 12206 * controller. We want to do this in either 12207 * the * read or the write case. The read 12208 * case is straightforward. In the write 12209 * case, we want to make sure nothing is 12210 * in the local cache that could overwrite 12211 * the DMAed data. 12212 */ 12213 12214 for (i = sg_entries_sent, j = 0; 12215 i < msg.dt.cur_sg_entries; i++, j++) { 12216#if 0 12217 if ((io->io_hdr.flags & 12218 CTL_FLAG_BUS_ADDR) == 0) { 12219 /* 12220 * XXX KDM use busdma. 
12221 */ 12222 msg.dt.sg_list[j].addr =(void *) 12223 vtophys(sgl[i].addr); 12224 } else { 12225 msg.dt.sg_list[j].addr = 12226 sgl[i].addr; 12227 } 12228#else 12229 KASSERT((io->io_hdr.flags & 12230 CTL_FLAG_BUS_ADDR) == 0, 12231 ("HA does not support BUS_ADDR")); 12232 msg.dt.sg_list[j].addr = sgl[i].addr; 12233#endif 12234 msg.dt.sg_list[j].len = sgl[i].len; 12235 } 12236 } 12237 12238 sg_entries_sent += msg.dt.cur_sg_entries; 12239 if (sg_entries_sent >= msg.dt.kern_sg_entries) 12240 msg.dt.sg_last = 1; 12241 else 12242 msg.dt.sg_last = 0; 12243 12244 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12245 sizeof(msg.dt) - sizeof(msg.dt.sg_list) + 12246 sizeof(struct ctl_sg_entry)*msg.dt.cur_sg_entries, 12247 M_WAITOK) > CTL_HA_STATUS_SUCCESS) { 12248 io->io_hdr.port_status = 31341; 12249 io->scsiio.be_move_done(io); 12250 return; 12251 } 12252 12253 msg.dt.sent_sg_entries = sg_entries_sent; 12254 } 12255 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12256 } else { 12257 12258 /* 12259 * Lookup the fe_datamove() function for this particular 12260 * front end. 
12261 */ 12262 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove; 12263 12264 fe_datamove(io); 12265 } 12266} 12267 12268static void 12269ctl_send_datamove_done(union ctl_io *io, int have_lock) 12270{ 12271 union ctl_ha_msg msg; 12272 12273 memset(&msg, 0, sizeof(msg)); 12274 12275 msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 12276 msg.hdr.original_sc = io; 12277 msg.hdr.serializing_sc = io->io_hdr.serializing_sc; 12278 msg.hdr.nexus = io->io_hdr.nexus; 12279 msg.hdr.status = io->io_hdr.status; 12280 msg.scsi.tag_num = io->scsiio.tag_num; 12281 msg.scsi.tag_type = io->scsiio.tag_type; 12282 msg.scsi.scsi_status = io->scsiio.scsi_status; 12283 memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, 12284 io->scsiio.sense_len); 12285 msg.scsi.sense_len = io->scsiio.sense_len; 12286 msg.scsi.sense_residual = io->scsiio.sense_residual; 12287 msg.scsi.fetd_status = io->io_hdr.port_status; 12288 msg.scsi.residual = io->scsiio.residual; 12289 io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; 12290 12291 if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { 12292 ctl_failover_io(io, /*have_lock*/ have_lock); 12293 return; 12294 } 12295 12296 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12297 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 12298 msg.scsi.sense_len, M_WAITOK); 12299} 12300 12301/* 12302 * The DMA to the remote side is done, now we need to tell the other side 12303 * we're done so it can continue with its data movement. 
12304 */ 12305static void 12306ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) 12307{ 12308 union ctl_io *io; 12309 int i; 12310 12311 io = rq->context; 12312 12313 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12314 printf("%s: ISC DMA write failed with error %d", __func__, 12315 rq->ret); 12316 ctl_set_internal_failure(&io->scsiio, 12317 /*sks_valid*/ 1, 12318 /*retry_count*/ rq->ret); 12319 } 12320 12321 ctl_dt_req_free(rq); 12322 12323 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12324 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12325 free(io->io_hdr.remote_sglist, M_CTL); 12326 io->io_hdr.remote_sglist = NULL; 12327 io->io_hdr.local_sglist = NULL; 12328 12329 /* 12330 * The data is in local and remote memory, so now we need to send 12331 * status (good or back) back to the other side. 12332 */ 12333 ctl_send_datamove_done(io, /*have_lock*/ 0); 12334} 12335 12336/* 12337 * We've moved the data from the host/controller into local memory. Now we 12338 * need to push it over to the remote controller's memory. 12339 */ 12340static int 12341ctl_datamove_remote_dm_write_cb(union ctl_io *io) 12342{ 12343 int retval; 12344 12345 retval = 0; 12346 12347 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, 12348 ctl_datamove_remote_write_cb); 12349 12350 return (retval); 12351} 12352 12353static void 12354ctl_datamove_remote_write(union ctl_io *io) 12355{ 12356 int retval; 12357 void (*fe_datamove)(union ctl_io *io); 12358 12359 /* 12360 * - Get the data from the host/HBA into local memory. 12361 * - DMA memory from the local controller to the remote controller. 12362 * - Send status back to the remote controller. 
12363 */ 12364 12365 retval = ctl_datamove_remote_sgl_setup(io); 12366 if (retval != 0) 12367 return; 12368 12369 /* Switch the pointer over so the FETD knows what to do */ 12370 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12371 12372 /* 12373 * Use a custom move done callback, since we need to send completion 12374 * back to the other controller, not to the backend on this side. 12375 */ 12376 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 12377 12378 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove; 12379 12380 fe_datamove(io); 12381 12382 return; 12383 12384} 12385 12386static int 12387ctl_datamove_remote_dm_read_cb(union ctl_io *io) 12388{ 12389#if 0 12390 char str[256]; 12391 char path_str[64]; 12392 struct sbuf sb; 12393#endif 12394 int i; 12395 12396 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12397 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12398 free(io->io_hdr.remote_sglist, M_CTL); 12399 io->io_hdr.remote_sglist = NULL; 12400 io->io_hdr.local_sglist = NULL; 12401 12402#if 0 12403 scsi_path_string(io, path_str, sizeof(path_str)); 12404 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 12405 sbuf_cat(&sb, path_str); 12406 scsi_command_string(&io->scsiio, NULL, &sb); 12407 sbuf_printf(&sb, "\n"); 12408 sbuf_cat(&sb, path_str); 12409 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 12410 io->scsiio.tag_num, io->scsiio.tag_type); 12411 sbuf_cat(&sb, path_str); 12412 sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__, 12413 io->io_hdr.flags, io->io_hdr.status); 12414 sbuf_finish(&sb); 12415 printk("%s", sbuf_data(&sb)); 12416#endif 12417 12418 12419 /* 12420 * The read is done, now we need to send status (good or bad) back 12421 * to the other side. 
12422 */ 12423 ctl_send_datamove_done(io, /*have_lock*/ 0); 12424 12425 return (0); 12426} 12427 12428static void 12429ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 12430{ 12431 union ctl_io *io; 12432 void (*fe_datamove)(union ctl_io *io); 12433 12434 io = rq->context; 12435 12436 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12437 printf("%s: ISC DMA read failed with error %d\n", __func__, 12438 rq->ret); 12439 ctl_set_internal_failure(&io->scsiio, 12440 /*sks_valid*/ 1, 12441 /*retry_count*/ rq->ret); 12442 } 12443 12444 ctl_dt_req_free(rq); 12445 12446 /* Switch the pointer over so the FETD knows what to do */ 12447 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12448 12449 /* 12450 * Use a custom move done callback, since we need to send completion 12451 * back to the other controller, not to the backend on this side. 12452 */ 12453 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 12454 12455 /* XXX KDM add checks like the ones in ctl_datamove? */ 12456 12457 fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove; 12458 12459 fe_datamove(io); 12460} 12461 12462static int 12463ctl_datamove_remote_sgl_setup(union ctl_io *io) 12464{ 12465 struct ctl_sg_entry *local_sglist, *remote_sglist; 12466 struct ctl_softc *softc; 12467 uint32_t len_to_go; 12468 int retval; 12469 int i; 12470 12471 retval = 0; 12472 softc = control_softc; 12473 local_sglist = io->io_hdr.local_sglist; 12474 remote_sglist = io->io_hdr.remote_sglist; 12475 len_to_go = io->scsiio.kern_data_len; 12476 12477 /* 12478 * The difficult thing here is that the size of the various 12479 * S/G segments may be different than the size from the 12480 * remote controller. That'll make it harder when DMAing 12481 * the data back to the other side. 
12482 */ 12483 for (i = 0; len_to_go > 0; i++) { 12484 local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT); 12485 local_sglist[i].addr = 12486 malloc(local_sglist[i].len, M_CTL, M_WAITOK); 12487 12488 len_to_go -= local_sglist[i].len; 12489 } 12490 /* 12491 * Reset the number of S/G entries accordingly. The original 12492 * number of S/G entries is available in rem_sg_entries. 12493 */ 12494 io->scsiio.kern_sg_entries = i; 12495 12496#if 0 12497 printf("%s: kern_sg_entries = %d\n", __func__, 12498 io->scsiio.kern_sg_entries); 12499 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12500 printf("%s: sg[%d] = %p, %d\n", __func__, i, 12501 local_sglist[i].addr, local_sglist[i].len); 12502#endif 12503 12504 return (retval); 12505} 12506 12507static int 12508ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 12509 ctl_ha_dt_cb callback) 12510{ 12511 struct ctl_ha_dt_req *rq; 12512 struct ctl_sg_entry *remote_sglist, *local_sglist; 12513 uint32_t local_used, remote_used, total_used; 12514 int i, j, isc_ret; 12515 12516 rq = ctl_dt_req_alloc(); 12517 12518 /* 12519 * If we failed to allocate the request, and if the DMA didn't fail 12520 * anyway, set busy status. This is just a resource allocation 12521 * failure. 12522 */ 12523 if ((rq == NULL) 12524 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) 12525 ctl_set_busy(&io->scsiio); 12526 12527 if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) { 12528 12529 if (rq != NULL) 12530 ctl_dt_req_free(rq); 12531 12532 /* 12533 * The data move failed. We need to return status back 12534 * to the other controller. No point in trying to DMA 12535 * data to the remote controller. 
12536 */ 12537 12538 ctl_send_datamove_done(io, /*have_lock*/ 0); 12539 12540 return (1); 12541 } 12542 12543 local_sglist = io->io_hdr.local_sglist; 12544 remote_sglist = io->io_hdr.remote_sglist; 12545 local_used = 0; 12546 remote_used = 0; 12547 total_used = 0; 12548 12549 /* 12550 * Pull/push the data over the wire from/to the other controller. 12551 * This takes into account the possibility that the local and 12552 * remote sglists may not be identical in terms of the size of 12553 * the elements and the number of elements. 12554 * 12555 * One fundamental assumption here is that the length allocated for 12556 * both the local and remote sglists is identical. Otherwise, we've 12557 * essentially got a coding error of some sort. 12558 */ 12559 isc_ret = CTL_HA_STATUS_SUCCESS; 12560 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { 12561 uint32_t cur_len; 12562 uint8_t *tmp_ptr; 12563 12564 rq->command = command; 12565 rq->context = io; 12566 12567 /* 12568 * Both pointers should be aligned. But it is possible 12569 * that the allocation length is not. They should both 12570 * also have enough slack left over at the end, though, 12571 * to round up to the next 8 byte boundary. 
12572 */ 12573 cur_len = MIN(local_sglist[i].len - local_used, 12574 remote_sglist[j].len - remote_used); 12575 rq->size = cur_len; 12576 12577 tmp_ptr = (uint8_t *)local_sglist[i].addr; 12578 tmp_ptr += local_used; 12579 12580#if 0 12581 /* Use physical addresses when talking to ISC hardware */ 12582 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 12583 /* XXX KDM use busdma */ 12584 rq->local = vtophys(tmp_ptr); 12585 } else 12586 rq->local = tmp_ptr; 12587#else 12588 KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, 12589 ("HA does not support BUS_ADDR")); 12590 rq->local = tmp_ptr; 12591#endif 12592 12593 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 12594 tmp_ptr += remote_used; 12595 rq->remote = tmp_ptr; 12596 12597 rq->callback = NULL; 12598 12599 local_used += cur_len; 12600 if (local_used >= local_sglist[i].len) { 12601 i++; 12602 local_used = 0; 12603 } 12604 12605 remote_used += cur_len; 12606 if (remote_used >= remote_sglist[j].len) { 12607 j++; 12608 remote_used = 0; 12609 } 12610 total_used += cur_len; 12611 12612 if (total_used >= io->scsiio.kern_data_len) 12613 rq->callback = callback; 12614 12615#if 0 12616 printf("%s: %s: local %#x remote %#x size %d\n", __func__, 12617 (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ", 12618 rq->local, rq->remote, rq->size); 12619#endif 12620 12621 isc_ret = ctl_dt_single(rq); 12622 if (isc_ret > CTL_HA_STATUS_SUCCESS) 12623 break; 12624 } 12625 if (isc_ret != CTL_HA_STATUS_WAIT) { 12626 rq->ret = isc_ret; 12627 callback(rq); 12628 } 12629 12630 return (0); 12631} 12632 12633static void 12634ctl_datamove_remote_read(union ctl_io *io) 12635{ 12636 int retval; 12637 int i; 12638 12639 /* 12640 * This will send an error to the other controller in the case of a 12641 * failure. 
 */
	retval = ctl_datamove_remote_sgl_setup(io);
	if (retval != 0)
		return;

	retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ,
					  ctl_datamove_remote_read_cb);
	if (retval != 0) {
		/*
		 * Make sure we free memory if there was an error..  The
		 * ctl_datamove_remote_xfer() function will send the
		 * datamove done message, or call the callback with an
		 * error if there is a problem.
		 */
		for (i = 0; i < io->scsiio.kern_sg_entries; i++)
			free(io->io_hdr.local_sglist[i].addr, M_CTL);
		free(io->io_hdr.remote_sglist, M_CTL);
		io->io_hdr.remote_sglist = NULL;
		io->io_hdr.local_sglist = NULL;
	}

	return;
}

/*
 * Process a datamove request from the other controller.  This is used for
 * XFER mode only, not SER_ONLY mode.  For writes, we DMA into local memory
 * first.  Once that is complete, the data gets DMAed into the remote
 * controller's memory.  For reads, we DMA from the remote controller's
 * memory into our memory first, and then move it out to the FETD.
 */
static void
ctl_datamove_remote(union ctl_io *io)
{

	mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED);

	if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
		ctl_failover_io(io, /*have_lock*/ 0);
		return;
	}

	/*
	 * Note that we look for an aborted I/O here, but don't do some of
	 * the other checks that ctl_datamove() normally does.
	 * We don't need to run the datamove delay code, since that should
	 * have been done if need be on the other controller.
	 */
	if (io->io_hdr.flags & CTL_FLAG_ABORT) {
		printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__,
		       io->scsiio.tag_num, io->io_hdr.nexus.initid,
		       io->io_hdr.nexus.targ_port,
		       io->io_hdr.nexus.targ_lun);
		/* 31338 appears to be a debug sentinel port status -- verify. */
		io->io_hdr.port_status = 31338;
		ctl_send_datamove_done(io, /*have_lock*/ 0);
		return;
	}

	/* Dispatch on transfer direction; unknown directions are errored. */
	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT)
		ctl_datamove_remote_write(io);
	else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN)
		ctl_datamove_remote_read(io);
	else {
		io->io_hdr.port_status = 31339;
		ctl_send_datamove_done(io, /*have_lock*/ 0);
	}
}

/*
 * Final completion processing for an I/O: inject configured errors,
 * update per-port statistics, remove the I/O from the LUN's OOA queue,
 * unblock any commands that were waiting on it, tear down an invalidated
 * LUN whose queue has drained, and notify the frontend and/or the peer
 * controller that the command is finished.
 */
static int
ctl_process_done(union ctl_io *io)
{
	struct ctl_lun *lun;
	struct ctl_softc *softc = control_softc;
	void (*fe_done)(union ctl_io *io);
	union ctl_ha_msg msg;
	uint32_t targ_port = io->io_hdr.nexus.targ_port;

	CTL_DEBUG_PRINT(("ctl_process_done\n"));

	/* I/O from the peer controller has no local frontend to notify. */
	if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0)
		fe_done = softc->ctl_ports[targ_port]->fe_done;
	else
		fe_done = NULL;

#ifdef CTL_TIME_IO
	/* Log a description of any I/O that took too long to complete. */
	if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
		char str[256];
		char path_str[64];
		struct sbuf sb;

		ctl_scsi_path_string(io, path_str, sizeof(path_str));
		sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);

		sbuf_cat(&sb, path_str);
		switch (io->io_hdr.io_type) {
		case CTL_IO_SCSI:
			ctl_scsi_command_string(&io->scsiio, NULL, &sb);
			sbuf_printf(&sb, "\n");
			sbuf_cat(&sb, path_str);
			sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
				    io->scsiio.tag_num, io->scsiio.tag_type);
			break;
		case CTL_IO_TASK:
			sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, "
				    "Tag Type: %d\n", io->taskio.task_action,
				    io->taskio.tag_num, io->taskio.tag_type);
			break;
		default:
			printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
			panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
			break;
		}
		sbuf_cat(&sb, path_str);
		sbuf_printf(&sb, "ctl_process_done: %jd seconds\n",
			    (intmax_t)time_uptime - io->io_hdr.start_time);
		sbuf_finish(&sb);
		printf("%s", sbuf_data(&sb));
	}
#endif /* CTL_TIME_IO */

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		break;
	case CTL_IO_TASK:
		/* Task management I/O completes here; no stats/OOA work. */
		if (ctl_debug & CTL_DEBUG_INFO)
			ctl_io_error_print(io, NULL);
		if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
			ctl_free_io(io);
		else
			fe_done(io);
		return (CTL_RETVAL_COMPLETE);
	default:
		panic("ctl_process_done: invalid io type %d\n",
		      io->io_hdr.io_type);
		break; /* NOTREACHED */
	}

	lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	if (lun == NULL) {
		CTL_DEBUG_PRINT(("NULL LUN for lun %d\n",
				 io->io_hdr.nexus.targ_mapped_lun));
		goto bailout;
	}

	mtx_lock(&lun->lun_lock);

	/*
	 * Check to see if we have any errors to inject here.  We only
	 * inject errors for commands that don't already have errors set.
	 */
	if ((STAILQ_FIRST(&lun->error_list) != NULL) &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) &&
	    ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0))
		ctl_inject_error(lun, io);

	/*
	 * XXX KDM how do we treat commands that aren't completed
	 * successfully?
	 *
	 * XXX KDM should we also track I/O latency?
	 */
	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS &&
	    io->io_hdr.io_type == CTL_IO_SCSI) {
#ifdef CTL_TIME_IO
		struct bintime cur_bt;
#endif
		int type;

		/* Classify the command for per-direction statistics. */
		if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		    CTL_FLAG_DATA_IN)
			type = CTL_STATS_READ;
		else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
			 CTL_FLAG_DATA_OUT)
			type = CTL_STATS_WRITE;
		else
			type = CTL_STATS_NO_IO;

		lun->stats.ports[targ_port].bytes[type] +=
		    io->scsiio.kern_total_len;
		lun->stats.ports[targ_port].operations[type]++;
#ifdef CTL_TIME_IO
		bintime_add(&lun->stats.ports[targ_port].dma_time[type],
		   &io->io_hdr.dma_bt);
		lun->stats.ports[targ_port].num_dmas[type] +=
		    io->io_hdr.num_dmas;
		getbintime(&cur_bt);
		bintime_sub(&cur_bt, &io->io_hdr.start_bt);
		bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt);
#endif
	}

	/*
	 * Remove this from the OOA queue.
	 */
	TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);
#ifdef CTL_TIME_IO
	if (TAILQ_EMPTY(&lun->ooa_queue))
		lun->last_busy = getsbinuptime();
#endif

	/*
	 * Run through the blocked queue on this LUN and see if anything
	 * has become unblocked, now that this transaction is done.
	 */
	ctl_check_blocked(lun);

	/*
	 * If the LUN has been invalidated, free it if there is nothing
	 * left on its OOA queue.
	 */
	if ((lun->flags & CTL_LUN_INVALID)
	 && TAILQ_EMPTY(&lun->ooa_queue)) {
		/* lun_lock must be dropped before taking ctl_lock. */
		mtx_unlock(&lun->lun_lock);
		mtx_lock(&softc->ctl_lock);
		ctl_free_lun(lun);
		mtx_unlock(&softc->ctl_lock);
	} else
		mtx_unlock(&lun->lun_lock);

bailout:

	/*
	 * If this command has been aborted, make sure we set the status
	 * properly.  The FETD is responsible for freeing the I/O and doing
	 * whatever it needs to do to clean up its state.
	 */
	if (io->io_hdr.flags & CTL_FLAG_ABORT)
		ctl_set_task_aborted(&io->scsiio);

	/*
	 * If enabled, print command error status.
	 */
	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS &&
	    (ctl_debug & CTL_DEBUG_INFO) != 0)
		ctl_io_error_print(io, NULL);

	/*
	 * Tell the FETD or the other shelf controller we're done with this
	 * command.  Note that only SCSI commands get to this point.  Task
	 * management commands are completed above.
	 */
	if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
	    (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) {
		memset(&msg, 0, sizeof(msg));
		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
		msg.hdr.nexus = io->io_hdr.nexus;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
		    sizeof(msg.scsi) - sizeof(msg.scsi.sense_data),
		    M_WAITOK);
	}
	if ((softc->ha_mode == CTL_HA_MODE_XFER)
	 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
		/* XFER mode: send full completion status to the originator. */
		memset(&msg, 0, sizeof(msg));
		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg.hdr.original_sc = io->io_hdr.original_sc;
		msg.hdr.nexus = io->io_hdr.nexus;
		msg.hdr.status = io->io_hdr.status;
		msg.scsi.scsi_status = io->scsiio.scsi_status;
		msg.scsi.tag_num = io->scsiio.tag_num;
		msg.scsi.tag_type = io->scsiio.tag_type;
		msg.scsi.sense_len = io->scsiio.sense_len;
		msg.scsi.sense_residual = io->scsiio.sense_residual;
		msg.scsi.residual = io->scsiio.residual;
		memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
		       io->scsiio.sense_len);
		/*
		 * We copy this whether or not this is an I/O-related
		 * command.  Otherwise, we'd have to go and check to see
		 * whether it's a read/write command, and it really isn't
		 * worth it.
12913 */ 12914 memcpy(&msg.scsi.lbalen, 12915 &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 12916 sizeof(msg.scsi.lbalen)); 12917 12918 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 12919 sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + 12920 msg.scsi.sense_len, M_WAITOK); 12921 ctl_free_io(io); 12922 } else 12923 fe_done(io); 12924 12925 return (CTL_RETVAL_COMPLETE); 12926} 12927 12928#ifdef CTL_WITH_CA 12929/* 12930 * Front end should call this if it doesn't do autosense. When the request 12931 * sense comes back in from the initiator, we'll dequeue this and send it. 12932 */ 12933int 12934ctl_queue_sense(union ctl_io *io) 12935{ 12936 struct ctl_lun *lun; 12937 struct ctl_port *port; 12938 struct ctl_softc *softc; 12939 uint32_t initidx, targ_lun; 12940 12941 softc = control_softc; 12942 12943 CTL_DEBUG_PRINT(("ctl_queue_sense\n")); 12944 12945 /* 12946 * LUN lookup will likely move to the ctl_work_thread() once we 12947 * have our new queueing infrastructure (that doesn't put things on 12948 * a per-LUN queue initially). That is so that we can handle 12949 * things like an INQUIRY to a LUN that we don't have enabled. We 12950 * can't deal with that right now. 12951 */ 12952 mtx_lock(&softc->ctl_lock); 12953 12954 /* 12955 * If we don't have a LUN for this, just toss the sense 12956 * information. 12957 */ 12958 port = ctl_io_port(&ctsio->io_hdr); 12959 targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); 12960 if ((targ_lun < CTL_MAX_LUNS) 12961 && (softc->ctl_luns[targ_lun] != NULL)) 12962 lun = softc->ctl_luns[targ_lun]; 12963 else 12964 goto bailout; 12965 12966 initidx = ctl_get_initindex(&io->io_hdr.nexus); 12967 12968 mtx_lock(&lun->lun_lock); 12969 /* 12970 * Already have CA set for this LUN...toss the sense information. 
12971 */ 12972 if (ctl_is_set(lun->have_ca, initidx)) { 12973 mtx_unlock(&lun->lun_lock); 12974 goto bailout; 12975 } 12976 12977 memcpy(&lun->pending_sense[initidx], &io->scsiio.sense_data, 12978 MIN(sizeof(lun->pending_sense[initidx]), 12979 sizeof(io->scsiio.sense_data))); 12980 ctl_set_mask(lun->have_ca, initidx); 12981 mtx_unlock(&lun->lun_lock); 12982 12983bailout: 12984 mtx_unlock(&softc->ctl_lock); 12985 12986 ctl_free_io(io); 12987 12988 return (CTL_RETVAL_COMPLETE); 12989} 12990#endif 12991 12992/* 12993 * Primary command inlet from frontend ports. All SCSI and task I/O 12994 * requests must go through this function. 12995 */ 12996int 12997ctl_queue(union ctl_io *io) 12998{ 12999 struct ctl_port *port; 13000 13001 CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0])); 13002 13003#ifdef CTL_TIME_IO 13004 io->io_hdr.start_time = time_uptime; 13005 getbintime(&io->io_hdr.start_bt); 13006#endif /* CTL_TIME_IO */ 13007 13008 /* Map FE-specific LUN ID into global one. */ 13009 port = ctl_io_port(&io->io_hdr); 13010 io->io_hdr.nexus.targ_mapped_lun = 13011 ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); 13012 13013 switch (io->io_hdr.io_type) { 13014 case CTL_IO_SCSI: 13015 case CTL_IO_TASK: 13016 if (ctl_debug & CTL_DEBUG_CDB) 13017 ctl_io_print(io); 13018 ctl_enqueue_incoming(io); 13019 break; 13020 default: 13021 printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type); 13022 return (EINVAL); 13023 } 13024 13025 return (CTL_RETVAL_COMPLETE); 13026} 13027 13028#ifdef CTL_IO_DELAY 13029static void 13030ctl_done_timer_wakeup(void *arg) 13031{ 13032 union ctl_io *io; 13033 13034 io = (union ctl_io *)arg; 13035 ctl_done(io); 13036} 13037#endif /* CTL_IO_DELAY */ 13038 13039void 13040ctl_done(union ctl_io *io) 13041{ 13042 13043 /* 13044 * Enable this to catch duplicate completion issues. 
13045 */ 13046#if 0 13047 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) { 13048 printf("%s: type %d msg %d cdb %x iptl: " 13049 "%u:%u:%u tag 0x%04x " 13050 "flag %#x status %x\n", 13051 __func__, 13052 io->io_hdr.io_type, 13053 io->io_hdr.msg_type, 13054 io->scsiio.cdb[0], 13055 io->io_hdr.nexus.initid, 13056 io->io_hdr.nexus.targ_port, 13057 io->io_hdr.nexus.targ_lun, 13058 (io->io_hdr.io_type == 13059 CTL_IO_TASK) ? 13060 io->taskio.tag_num : 13061 io->scsiio.tag_num, 13062 io->io_hdr.flags, 13063 io->io_hdr.status); 13064 } else 13065 io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE; 13066#endif 13067 13068 /* 13069 * This is an internal copy of an I/O, and should not go through 13070 * the normal done processing logic. 13071 */ 13072 if (io->io_hdr.flags & CTL_FLAG_INT_COPY) 13073 return; 13074 13075#ifdef CTL_IO_DELAY 13076 if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { 13077 struct ctl_lun *lun; 13078 13079 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13080 13081 io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; 13082 } else { 13083 struct ctl_lun *lun; 13084 13085 lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 13086 13087 if ((lun != NULL) 13088 && (lun->delay_info.done_delay > 0)) { 13089 13090 callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); 13091 io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; 13092 callout_reset(&io->io_hdr.delay_callout, 13093 lun->delay_info.done_delay * hz, 13094 ctl_done_timer_wakeup, io); 13095 if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT) 13096 lun->delay_info.done_delay = 0; 13097 return; 13098 } 13099 } 13100#endif /* CTL_IO_DELAY */ 13101 13102 ctl_enqueue_done(io); 13103} 13104 13105static void 13106ctl_work_thread(void *arg) 13107{ 13108 struct ctl_thread *thr = (struct ctl_thread *)arg; 13109 struct ctl_softc *softc = thr->ctl_softc; 13110 union ctl_io *io; 13111 int retval; 13112 13113 CTL_DEBUG_PRINT(("ctl_work_thread starting\n")); 13114 13115 for (;;) { 13116 retval = 0; 13117 13118 /* 
13119 * We handle the queues in this order: 13120 * - ISC 13121 * - done queue (to free up resources, unblock other commands) 13122 * - RtR queue 13123 * - incoming queue 13124 * 13125 * If those queues are empty, we break out of the loop and 13126 * go to sleep. 13127 */ 13128 mtx_lock(&thr->queue_lock); 13129 io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue); 13130 if (io != NULL) { 13131 STAILQ_REMOVE_HEAD(&thr->isc_queue, links); 13132 mtx_unlock(&thr->queue_lock); 13133 ctl_handle_isc(io); 13134 continue; 13135 } 13136 io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue); 13137 if (io != NULL) { 13138 STAILQ_REMOVE_HEAD(&thr->done_queue, links); 13139 /* clear any blocked commands, call fe_done */ 13140 mtx_unlock(&thr->queue_lock); 13141 retval = ctl_process_done(io); 13142 continue; 13143 } 13144 io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue); 13145 if (io != NULL) { 13146 STAILQ_REMOVE_HEAD(&thr->incoming_queue, links); 13147 mtx_unlock(&thr->queue_lock); 13148 if (io->io_hdr.io_type == CTL_IO_TASK) 13149 ctl_run_task(io); 13150 else 13151 ctl_scsiio_precheck(softc, &io->scsiio); 13152 continue; 13153 } 13154 io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue); 13155 if (io != NULL) { 13156 STAILQ_REMOVE_HEAD(&thr->rtr_queue, links); 13157 mtx_unlock(&thr->queue_lock); 13158 retval = ctl_scsiio(&io->scsiio); 13159 if (retval != CTL_RETVAL_COMPLETE) 13160 CTL_DEBUG_PRINT(("ctl_scsiio failed\n")); 13161 continue; 13162 } 13163 13164 /* Sleep until we have something to do. 
*/ 13165 mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0); 13166 } 13167} 13168 13169static void 13170ctl_lun_thread(void *arg) 13171{ 13172 struct ctl_softc *softc = (struct ctl_softc *)arg; 13173 struct ctl_be_lun *be_lun; 13174 int retval; 13175 13176 CTL_DEBUG_PRINT(("ctl_lun_thread starting\n")); 13177 13178 for (;;) { 13179 retval = 0; 13180 mtx_lock(&softc->ctl_lock); 13181 be_lun = STAILQ_FIRST(&softc->pending_lun_queue); 13182 if (be_lun != NULL) { 13183 STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links); 13184 mtx_unlock(&softc->ctl_lock); 13185 ctl_create_lun(be_lun); 13186 continue; 13187 } 13188 13189 /* Sleep until we have something to do. */ 13190 mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock, 13191 PDROP | PRIBIO, "-", 0); 13192 } 13193} 13194 13195static void 13196ctl_thresh_thread(void *arg) 13197{ 13198 struct ctl_softc *softc = (struct ctl_softc *)arg; 13199 struct ctl_lun *lun; 13200 struct ctl_be_lun *be_lun; 13201 struct scsi_da_rw_recovery_page *rwpage; 13202 struct ctl_logical_block_provisioning_page *page; 13203 const char *attr; 13204 union ctl_ha_msg msg; 13205 uint64_t thres, val; 13206 int i, e, set; 13207 13208 CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n")); 13209 13210 for (;;) { 13211 mtx_lock(&softc->ctl_lock); 13212 STAILQ_FOREACH(lun, &softc->lun_list, links) { 13213 be_lun = lun->be_lun; 13214 if ((lun->flags & CTL_LUN_DISABLED) || 13215 (lun->flags & CTL_LUN_OFFLINE) || 13216 lun->backend->lun_attr == NULL) 13217 continue; 13218 if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && 13219 softc->ha_mode == CTL_HA_MODE_XFER) 13220 continue; 13221 rwpage = &lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT]; 13222 if ((rwpage->byte8 & SMS_RWER_LBPERE) == 0) 13223 continue; 13224 e = 0; 13225 page = &lun->mode_pages.lbp_page[CTL_PAGE_CURRENT]; 13226 for (i = 0; i < CTL_NUM_LBP_THRESH; i++) { 13227 if ((page->descr[i].flags & SLBPPD_ENABLED) == 0) 13228 continue; 13229 thres = scsi_4btoul(page->descr[i].count); 13230 thres 
<<= CTL_LBP_EXPONENT; 13231 switch (page->descr[i].resource) { 13232 case 0x01: 13233 attr = "blocksavail"; 13234 break; 13235 case 0x02: 13236 attr = "blocksused"; 13237 break; 13238 case 0xf1: 13239 attr = "poolblocksavail"; 13240 break; 13241 case 0xf2: 13242 attr = "poolblocksused"; 13243 break; 13244 default: 13245 continue; 13246 } 13247 mtx_unlock(&softc->ctl_lock); // XXX 13248 val = lun->backend->lun_attr( 13249 lun->be_lun->be_lun, attr); 13250 mtx_lock(&softc->ctl_lock); 13251 if (val == UINT64_MAX) 13252 continue; 13253 if ((page->descr[i].flags & SLBPPD_ARMING_MASK) 13254 == SLBPPD_ARMING_INC) 13255 e |= (val >= thres); 13256 else 13257 e |= (val <= thres); 13258 } 13259 mtx_lock(&lun->lun_lock); 13260 if (e) { 13261 if (lun->lasttpt == 0 || 13262 time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) { 13263 lun->lasttpt = time_uptime; 13264 ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); 13265 set = 1; 13266 } else 13267 set = 0; 13268 } else { 13269 lun->lasttpt = 0; 13270 ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); 13271 set = -1; 13272 } 13273 mtx_unlock(&lun->lun_lock); 13274 if (set != 0 && 13275 lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { 13276 /* Send msg to other side. 
*/ 13277 bzero(&msg.ua, sizeof(msg.ua)); 13278 msg.hdr.msg_type = CTL_MSG_UA; 13279 msg.hdr.nexus.initid = -1; 13280 msg.hdr.nexus.targ_port = -1; 13281 msg.hdr.nexus.targ_lun = lun->lun; 13282 msg.hdr.nexus.targ_mapped_lun = lun->lun; 13283 msg.ua.ua_all = 1; 13284 msg.ua.ua_set = (set > 0); 13285 msg.ua.ua_type = CTL_UA_THIN_PROV_THRES; 13286 mtx_unlock(&softc->ctl_lock); // XXX 13287 ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, 13288 sizeof(msg.ua), M_WAITOK); 13289 mtx_lock(&softc->ctl_lock); 13290 } 13291 } 13292 mtx_unlock(&softc->ctl_lock); 13293 pause("-", CTL_LBP_PERIOD * hz); 13294 } 13295} 13296 13297static void 13298ctl_enqueue_incoming(union ctl_io *io) 13299{ 13300 struct ctl_softc *softc = control_softc; 13301 struct ctl_thread *thr; 13302 u_int idx; 13303 13304 idx = (io->io_hdr.nexus.targ_port * 127 + 13305 io->io_hdr.nexus.initid) % worker_threads; 13306 thr = &softc->threads[idx]; 13307 mtx_lock(&thr->queue_lock); 13308 STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links); 13309 mtx_unlock(&thr->queue_lock); 13310 wakeup(thr); 13311} 13312 13313static void 13314ctl_enqueue_rtr(union ctl_io *io) 13315{ 13316 struct ctl_softc *softc = control_softc; 13317 struct ctl_thread *thr; 13318 13319 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 13320 mtx_lock(&thr->queue_lock); 13321 STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links); 13322 mtx_unlock(&thr->queue_lock); 13323 wakeup(thr); 13324} 13325 13326static void 13327ctl_enqueue_done(union ctl_io *io) 13328{ 13329 struct ctl_softc *softc = control_softc; 13330 struct ctl_thread *thr; 13331 13332 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 13333 mtx_lock(&thr->queue_lock); 13334 STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links); 13335 mtx_unlock(&thr->queue_lock); 13336 wakeup(thr); 13337} 13338 13339static void 13340ctl_enqueue_isc(union ctl_io *io) 13341{ 13342 struct ctl_softc *softc = control_softc; 13343 struct ctl_thread 
*thr; 13344 13345 thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; 13346 mtx_lock(&thr->queue_lock); 13347 STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links); 13348 mtx_unlock(&thr->queue_lock); 13349 wakeup(thr); 13350} 13351 13352/* 13353 * vim: ts=8 13354 */ 13355